aarch64: Add support for vgx2 and vgx4
[binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the debug dumps guarded by this flag (see
   dump_match_qualifiers / dump_qualifier_sequence below).  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Index N holds the spelling for encoding value N.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Index N holds the spelling for encoding value N.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
/* Classification of the operand-qualifier layout of an AdvSIMD
   instruction; used to pick which operand carries the decisive
   size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* The operand index that determines the size:Q fields, indexed by
   enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that doesn't call
188 get_data_pattern each time when we need to select an operand. We can
189 either buffer the caculated the result or statically generate the data,
190 however, it is not obvious that the optimization will bring significant
191 benefit. */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199 \f
/* Instruction bit-fields, each given as { lsb, width }.
   Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 10, 8 }, /* CSSC_imm8.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7.  */
    { 16, 1 }, /* SME_Q: Q class bit, bit 16.  */
    { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16].  */
    { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3.  */
    { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7.  */
    { 23, 1 }, /* SME_i1: immediate field, bit 23.  */
    { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22].  */
    { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18].  */
    { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 20, 1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 19, 2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 11, 1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10, 1 },  /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 17, 2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 30, 1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 22, 1 },	/* S_imm10: in LDRAA and LDRAB instructions.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 10, 3 },	/* imm3_10: in add/sub extended reg instructions.  */
    { 0, 4 },	/* imm4_0: in rmif instructions.  */
    { 5, 4 },	/* imm4_5: in SME instructions.  */
    { 10, 4 },	/* imm4_10: in adddg/subg instructions.  */
    { 11, 4 },	/* imm4_11: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 10, 6 },	/* imm6_10: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_15: in rmif instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 0, 16 },	/* imm16_0: in udf instruction.  */
    { 5, 16 },	/* imm16_5: in exception instructions.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 30, 1 },	/* lse_sz: in LSE extension atomic instructions.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
};
347
348 enum aarch64_operand_class
349 aarch64_get_operand_class (enum aarch64_opnd type)
350 {
351 return aarch64_operands[type].op_class;
352 }
353
354 const char *
355 aarch64_get_operand_name (enum aarch64_opnd type)
356 {
357 return aarch64_operands[type].name;
358 }
359
360 /* Get operand description string.
361 This is usually for the diagnosis purpose. */
362 const char *
363 aarch64_get_operand_desc (enum aarch64_opnd type)
364 {
365 return aarch64_operands[type].desc;
366 }
367
/* Table of all conditional affixes.  Indexed by the 4-bit condition code
   value; each entry lists all spellings accepted for that value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
388
389 const aarch64_cond *
390 get_cond_from_value (aarch64_insn value)
391 {
392 assert (value < 16);
393 return &aarch64_conds[(unsigned int) value];
394 }
395
396 const aarch64_cond *
397 get_inverted_cond (const aarch64_cond *cond)
398 {
399 return &aarch64_conds[cond->value ^ 0x1];
400 }
401
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   N.B. the row order must match enum aarch64_modifier_kind exactly:
   aarch64_get_operand_modifier recovers the kind by pointer arithmetic
   on this table.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
427
428 enum aarch64_modifier_kind
429 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
430 {
431 return desc - aarch64_operand_modifiers;
432 }
433
434 aarch64_insn
435 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
436 {
437 return aarch64_operand_modifiers[kind].value;
438 }
439
440 enum aarch64_modifier_kind
441 aarch64_get_operand_modifier_from_value (aarch64_insn value,
442 bool extend_p)
443 {
444 if (extend_p)
445 return AARCH64_MOD_UXTB + value;
446 else
447 return AARCH64_MOD_LSL - value;
448 }
449
450 bool
451 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
452 {
453 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
454 }
455
456 static inline bool
457 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
458 {
459 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
460 }
461
/* Barrier option names, indexed by the 4-bit encoded value; the "#0x.."
   entries print the raw immediate for values that have no name.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};

/* DSB nXS barrier option names; the value is the full #imm operand,
   with CRm<3:2> holding the 2-bit encoding.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16  */
    { "nshnxs", 20 },    /*    01       20  */
    { "ishnxs", 24 },    /*    10       24  */
    { "synxs",  28 },    /*    11       28  */
};
489
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};

/* op -> op:  load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   B() packs op:level:temporal into the 5-bit PRFM operand encoding.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* Prefetch operation names, indexed by encoding; NULL marks values that
   have no named alias and print as an immediate.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
548 \f
549 /* Utilities on value constraint. */
550
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high;
}
556
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
563
/* Return 1 if the signed VALUE is representable in a two's-complement
   field of WIDTH bits, i.e. in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  int64_t limit = (uint64_t) 1 << (width - 1);
  return -limit <= value && value < limit;
}
577
/* Return 1 if the non-negative VALUE is representable in an unsigned
   field of WIDTH bits, i.e. in [0, 2^WIDTH - 1].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  int64_t limit = (uint64_t) 1 << width;
  return 0 <= value && value < limit;
}
591
592 /* Return 1 if OPERAND is SP or WSP. */
593 int
594 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
595 {
596 return ((aarch64_get_operand_class (operand->type)
597 == AARCH64_OPND_CLASS_INT_REG)
598 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
599 && operand->reg.regno == 31);
600 }
601
602 /* Return 1 if OPERAND is XZR or WZP. */
603 int
604 aarch64_zero_register_p (const aarch64_opnd_info *operand)
605 {
606 return ((aarch64_get_operand_class (operand->type)
607 == AARCH64_OPND_CLASS_INT_REG)
608 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
609 && operand->reg.regno == 31);
610 }
611
612 /* Return true if the operand *OPERAND that has the operand code
613 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
614 qualified by the qualifier TARGET. */
615
616 static inline int
617 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
618 aarch64_opnd_qualifier_t target)
619 {
620 switch (operand->qualifier)
621 {
622 case AARCH64_OPND_QLF_W:
623 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
624 return 1;
625 break;
626 case AARCH64_OPND_QLF_X:
627 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
628 return 1;
629 break;
630 case AARCH64_OPND_QLF_WSP:
631 if (target == AARCH64_OPND_QLF_W
632 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
633 return 1;
634 break;
635 case AARCH64_OPND_QLF_SP:
636 if (target == AARCH64_OPND_QLF_X
637 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
638 return 1;
639 break;
640 default:
641 break;
642 }
643
644 return 0;
645 }
646
647 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
648 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
649
650 Return NIL if more than one expected qualifiers are found. */
651
652 aarch64_opnd_qualifier_t
653 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
654 int idx,
655 const aarch64_opnd_qualifier_t known_qlf,
656 int known_idx)
657 {
658 int i, saved_i;
659
660 /* Special case.
661
662 When the known qualifier is NIL, we have to assume that there is only
663 one qualifier sequence in the *QSEQ_LIST and return the corresponding
664 qualifier directly. One scenario is that for instruction
665 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
666 which has only one possible valid qualifier sequence
667 NIL, S_D
668 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
669 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
670
671 Because the qualifier NIL has dual roles in the qualifier sequence:
672 it can mean no qualifier for the operand, or the qualifer sequence is
673 not in use (when all qualifiers in the sequence are NILs), we have to
674 handle this special case here. */
675 if (known_qlf == AARCH64_OPND_NIL)
676 {
677 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
678 return qseq_list[0][idx];
679 }
680
681 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
682 {
683 if (qseq_list[i][known_idx] == known_qlf)
684 {
685 if (saved_i != -1)
686 /* More than one sequences are found to have KNOWN_QLF at
687 KNOWN_IDX. */
688 return AARCH64_OPND_NIL;
689 saved_i = i;
690 }
691 }
692
693 return qseq_list[saved_i][idx];
694 }
695
/* Kinds of operand qualifier; selects how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* Register/vector variant, e.g. "x", "8b", "4s".  */
  OQK_VALUE_IN_RANGE,	/* Immediate constrained to [data0, data1].  */
  OQK_MISC,		/* Anything else, e.g. "lsl", "msl".  */
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT: element size, number of elements, encoding value;
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
716
717 /* Indexed by the operand qualifier enumerators. */
718 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
719 {
720 {0, 0, 0, "NIL", OQK_NIL},
721
722 /* Operand variant qualifiers.
723 First 3 fields:
724 element size, number of elements and common value for encoding. */
725
726 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
727 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
728 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
729 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
730
731 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
732 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
733 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
734 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
735 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
736 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
737 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
738
739 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
740 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
741 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
742 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
743 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
744 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
745 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
746 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
747 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
748 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
749 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
750
751 {0, 0, 0, "z", OQK_OPD_VARIANT},
752 {0, 0, 0, "m", OQK_OPD_VARIANT},
753
754 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
755 {16, 0, 0, "tag", OQK_OPD_VARIANT},
756
757 /* Qualifiers constraining the value range.
758 First 3 fields:
759 Lower bound, higher bound, unused. */
760
761 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
762 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
763 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
764 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
765 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
766 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
767 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
768
769 /* Qualifiers for miscellaneous purpose.
770 First 3 fields:
771 unused, unused and unused. */
772
773 {0, 0, 0, "lsl", 0},
774 {0, 0, 0, "msl", 0},
775
776 {0, 0, 0, "retrieving", 0},
777 };
778
779 static inline bool
780 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
781 {
782 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
783 }
784
785 static inline bool
786 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
787 {
788 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
789 }
790
791 const char*
792 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
793 {
794 return aarch64_opnd_qualifiers[qualifier].desc;
795 }
796
797 /* Given an operand qualifier, return the expected data element size
798 of a qualified operand. */
799 unsigned char
800 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
801 {
802 assert (operand_variant_qualifier_p (qualifier));
803 return aarch64_opnd_qualifiers[qualifier].data0;
804 }
805
806 unsigned char
807 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
808 {
809 assert (operand_variant_qualifier_p (qualifier));
810 return aarch64_opnd_qualifiers[qualifier].data1;
811 }
812
813 aarch64_insn
814 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
815 {
816 assert (operand_variant_qualifier_p (qualifier));
817 return aarch64_opnd_qualifiers[qualifier].data2;
818 }
819
820 static int
821 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
822 {
823 assert (qualifier_value_in_range_constraint_p (qualifier));
824 return aarch64_opnd_qualifiers[qualifier].data0;
825 }
826
827 static int
828 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
829 {
830 assert (qualifier_value_in_range_constraint_p (qualifier));
831 return aarch64_opnd_qualifiers[qualifier].data1;
832 }
833
#ifdef DEBUG_AARCH64
/* Print a formatted debug message prefixed with "#### " and followed by
   a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print AARCH64_MAX_OPND_NUM qualifier names starting at QUALIFIER as a
   comma-separated list on one debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on the operands OPND alongside the
   candidate sequence QUALIFIER; used when tracing the matching in
   aarch64_find_best_match.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
871
872 /* This function checks if the given instruction INSN is a destructive
873 instruction based on the usage of the registers. It does not recognize
874 unary destructive instructions. */
875 bool
876 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
877 {
878 int i = 0;
879 const enum aarch64_opnd *opnds = opcode->operands;
880
881 if (opnds[0] == AARCH64_OPND_NIL)
882 return false;
883
884 while (opnds[++i] != AARCH64_OPND_NIL)
885 if (opnds[i] == opnds[0])
886 return true;
887
888 return false;
889 }
890
891 /* TODO improve this, we can have an extra field at the runtime to
892 store the number of operands rather than calculating it every time. */
893
894 int
895 aarch64_num_of_operands (const aarch64_opcode *opcode)
896 {
897 int i = 0;
898 const enum aarch64_opnd *opnds = opcode->operands;
899 while (opnds[i++] != AARCH64_OPND_NIL)
900 ;
901 --i;
902 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
903 return i;
904 }
905
906 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
907 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
908
909 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
910 This is always 0 if the function succeeds.
911
912 N.B. on the entry, it is very likely that only some operands in *INST
913 have had their qualifiers been established.
914
915 If STOP_AT is not -1, the function will only try to match
916 the qualifier sequence for operands before and including the operand
917 of index STOP_AT; and on success *RET will only be filled with the first
918 (STOP_AT+1) qualifiers.
919
920 A couple examples of the matching algorithm:
921
922 X,W,NIL should match
923 X,W,NIL
924
925 NIL,NIL should match
926 X ,NIL
927
928 Apart from serving the main encoding routine, this can also be called
929 during or after the operand decoding. */
930
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Compare each operand's established qualifier against this
	 candidate sequence, counting mismatches in INVALID.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  Note that on success the loop above
	 breaks without advancing QUALIFIERS_LIST, so it still points at
	 the sequence that matched.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1033
1034 /* Operand qualifier matching and resolving.
1035
1036 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1037 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1038
1039 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1040 This is always 0 if the function succeeds.
1041
1042 if UPDATE_P, update the qualifier(s) in *INST after the matching
1043 succeeds. */
1044
1045 static int
1046 match_operands_qualifier (aarch64_inst *inst, bool update_p,
1047 int *invalid_count)
1048 {
1049 int i;
1050 aarch64_opnd_qualifier_seq_t qualifiers;
1051
1052 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1053 qualifiers, invalid_count))
1054 {
1055 DEBUG_TRACE ("matching FAIL");
1056 return 0;
1057 }
1058
1059 /* Update the qualifiers. */
1060 if (update_p)
1061 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1062 {
1063 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1064 break;
1065 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1066 "update %s with %s for operand %d",
1067 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1068 aarch64_get_qualifier_name (qualifiers[i]), i);
1069 inst->operands[i].qualifier = qualifiers[i];
1070 }
1071
1072 DEBUG_TRACE ("matching SUCCESS");
1073 return 1;
1074 }
1075
1076 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1077 register by MOVZ.
1078
1079 IS32 indicates whether value is a 32-bit immediate or not.
1080 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1081 amount will be returned in *SHIFT_AMOUNT. */
1082
1083 bool
1084 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1085 {
1086 int amount;
1087
1088 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1089
1090 if (is32)
1091 {
1092 /* Allow all zeros or all ones in top 32-bits, so that
1093 32-bit constant expressions like ~0x80000000 are
1094 permitted. */
1095 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1096 /* Immediate out of range. */
1097 return false;
1098 value &= 0xffffffff;
1099 }
1100
1101 /* first, try movz then movn */
1102 amount = -1;
1103 if ((value & ((uint64_t) 0xffff << 0)) == value)
1104 amount = 0;
1105 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1106 amount = 16;
1107 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1108 amount = 32;
1109 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1110 amount = 48;
1111
1112 if (amount == -1)
1113 {
1114 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1115 return false;
1116 }
1117
1118 if (shift_amount != NULL)
1119 *shift_amount = amount;
1120
1121 DEBUG_TRACE ("exit true with amount %d", amount);
1122
1123 return true;
1124 }
1125
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr         SIMD size  R             S
     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
     0      11110s   00000r       2       UInt(r)       UInt(s)
   where all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */

#define TOTAL_IMM_NB  5334

/* One decoded bitmask immediate paired with its standard encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid bitmask immediates.  Filled and sorted by immediate
   value in build_immediate_table so it can be binary-searched.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1154
1155 static int
1156 simd_imm_encoding_cmp(const void *i1, const void *i2)
1157 {
1158 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1159 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1160
1161 if (imm1->imm < imm2->imm)
1162 return -1;
1163 if (imm1->imm > imm2->imm)
1164 return +1;
1165 return 0;
1166 }
1167
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s

   Pack IS64, R and S into the 13-bit immediate field laid out above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imm13<5:0>.  */
  encoding |= r << 6;		/* imm13<11:6>.  */
  encoding |= is64 << 12;	/* imm13<12>.  */
  return encoding;
}
1181
/* Fill simd_immediates[] with every valid bitmask immediate together with
   its standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over every element size E = 2, 4, ..., 64.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* Enumerate all S/R pairs; S = E - 1 (all-ones) is reserved.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63).  */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r.  */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size.
	       Each case doubles the pattern width; falling through the
	       chain widens an E-bit element to the full 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    /* S | S_MASK folds the element-size marker bits of the imms
	       field (see the table above) into the encoded S value.  */
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The enumeration must produce exactly TOTAL_IMM_NB entries.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1247
1248 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1249 be accepted by logical (immediate) instructions
1250 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1251
1252 ESIZE is the number of bytes in the decoded immediate value.
1253 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1254 VALUE will be returned in *ENCODING. */
1255
1256 bool
1257 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1258 {
1259 simd_imm_encoding imm_enc;
1260 const simd_imm_encoding *imm_encoding;
1261 static bool initialized = false;
1262 uint64_t upper;
1263 int i;
1264
1265 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1266 value, esize);
1267
1268 if (!initialized)
1269 {
1270 build_immediate_table ();
1271 initialized = true;
1272 }
1273
1274 /* Allow all zeros or all ones in top bits, so that
1275 constant expressions like ~1 are permitted. */
1276 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1277 if ((value & ~upper) != value && (value | upper) != value)
1278 return false;
1279
1280 /* Replicate to a full 64-bit value. */
1281 value &= ~upper;
1282 for (i = esize * 8; i < 64; i *= 2)
1283 value |= (value << i);
1284
1285 imm_enc.imm = value;
1286 imm_encoding = (const simd_imm_encoding *)
1287 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1288 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1289 if (imm_encoding == NULL)
1290 {
1291 DEBUG_TRACE ("exit with false");
1292 return false;
1293 }
1294 if (encoding != NULL)
1295 *encoding = imm_encoding->encoding;
1296 DEBUG_TRACE ("exit with true");
1297 return true;
1298 }
1299
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh". Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int bit, ret;
  uint32_t byte;

  ret = 0;
  for (bit = 0; bit < 8; bit++)
    {
      /* Examine byte BIT; it must be all-zeros or all-ones.  */
      byte = (imm >> (bit * 8)) & 0xff;
      if (byte == 0x00)
	continue;
      if (byte != 0xff)
	return -1;
      ret |= 1 << bit;
    }
  return ret;
}
1321
1322 /* Utility inline functions for operand_general_constraint_met_p. */
1323
1324 static inline void
1325 set_error (aarch64_operand_error *mismatch_detail,
1326 enum aarch64_operand_error_kind kind, int idx,
1327 const char* error)
1328 {
1329 if (mismatch_detail == NULL)
1330 return;
1331 mismatch_detail->kind = kind;
1332 mismatch_detail->index = idx;
1333 mismatch_detail->error = error;
1334 }
1335
1336 static inline void
1337 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1338 const char* error)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1343 }
1344
1345 static inline void
1346 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1347 const char *prefix, int lower_bound, int upper_bound)
1348 {
1349 if (mismatch_detail == NULL)
1350 return;
1351 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1352 mismatch_detail->data[0].s = prefix;
1353 mismatch_detail->data[1].i = lower_bound;
1354 mismatch_detail->data[2].i = upper_bound;
1355 }
1356
1357 static inline void
1358 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1359 int idx, int lower_bound, int upper_bound,
1360 const char* error)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1365 mismatch_detail->data[0].i = lower_bound;
1366 mismatch_detail->data[1].i = upper_bound;
1367 }
1368
1369 static inline void
1370 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1371 int idx, int lower_bound, int upper_bound)
1372 {
1373 if (mismatch_detail == NULL)
1374 return;
1375 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1376 _("immediate value"));
1377 }
1378
1379 static inline void
1380 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1381 int idx, int lower_bound, int upper_bound)
1382 {
1383 if (mismatch_detail == NULL)
1384 return;
1385 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1386 _("immediate offset"));
1387 }
1388
1389 static inline void
1390 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1391 int idx, int lower_bound, int upper_bound)
1392 {
1393 if (mismatch_detail == NULL)
1394 return;
1395 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1396 _("register number"));
1397 }
1398
1399 static inline void
1400 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1401 int idx, int lower_bound, int upper_bound)
1402 {
1403 if (mismatch_detail == NULL)
1404 return;
1405 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1406 _("register element index"));
1407 }
1408
1409 static inline void
1410 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1411 int idx, int lower_bound, int upper_bound)
1412 {
1413 if (mismatch_detail == NULL)
1414 return;
1415 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1416 _("shift amount"));
1417 }
1418
1419 /* Report that the MUL modifier in operand IDX should be in the range
1420 [LOWER_BOUND, UPPER_BOUND]. */
1421 static inline void
1422 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1423 int idx, int lower_bound, int upper_bound)
1424 {
1425 if (mismatch_detail == NULL)
1426 return;
1427 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1428 _("multiplier"));
1429 }
1430
1431 static inline void
1432 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1433 int alignment)
1434 {
1435 if (mismatch_detail == NULL)
1436 return;
1437 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1438 mismatch_detail->data[0].i = alignment;
1439 }
1440
1441 static inline void
1442 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1443 int expected_num)
1444 {
1445 if (mismatch_detail == NULL)
1446 return;
1447 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1448 mismatch_detail->data[0].i = 1 << expected_num;
1449 }
1450
1451 static inline void
1452 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1453 int expected_num)
1454 {
1455 if (mismatch_detail == NULL)
1456 return;
1457 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1458 mismatch_detail->data[0].i = 1 << expected_num;
1459 }
1460
1461 static inline void
1462 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1463 int idx, int expected)
1464 {
1465 if (mismatch_detail == NULL)
1466 return;
1467 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1468 mismatch_detail->data[0].i = expected;
1469 }
1470
1471 static inline void
1472 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1473 const char* error)
1474 {
1475 if (mismatch_detail == NULL)
1476 return;
1477 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1478 }
1479
1480 /* Check that indexed register operand OPND has a register in the range
1481 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1482 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1483
1484 static bool
1485 check_reglane (const aarch64_opnd_info *opnd,
1486 aarch64_operand_error *mismatch_detail, int idx,
1487 const char *prefix, int min_regno, int max_regno,
1488 int min_index, int max_index)
1489 {
1490 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1491 {
1492 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1493 max_regno);
1494 return false;
1495 }
1496 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1497 {
1498 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1499 max_index);
1500 return false;
1501 }
1502 return true;
1503 }
1504
1505 /* Check that register list operand OPND has NUM_REGS registers and a
1506 register stride of STRIDE. */
1507
1508 static bool
1509 check_reglist (const aarch64_opnd_info *opnd,
1510 aarch64_operand_error *mismatch_detail, int idx,
1511 int num_regs, int stride)
1512 {
1513 if (opnd->reglist.num_regs != num_regs)
1514 {
1515 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1516 return false;
1517 }
1518 if (opnd->reglist.stride != stride)
1519 {
1520 set_reg_list_stride_error (mismatch_detail, idx, stride);
1521 return false;
1522 }
1523 return true;
1524 }
1525
1526 /* Check that indexed ZA operand OPND has:
1527
1528 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1529
1530 - an immediate offset in the range [0, MAX_VALUE].
1531
1532 - a vector group size of GROUP_SIZE. */
1533
1534 static bool
1535 check_za_access (const aarch64_opnd_info *opnd,
1536 aarch64_operand_error *mismatch_detail, int idx,
1537 int min_wreg, int max_value, int group_size)
1538 {
1539 if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
1540 {
1541 if (min_wreg == 12)
1542 set_other_error (mismatch_detail, idx,
1543 _("expected a selection register in the"
1544 " range w12-w15"));
1545 else
1546 abort ();
1547 return false;
1548 }
1549
1550 if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_value))
1551 {
1552 set_offset_out_of_range_error (mismatch_detail, idx, 0, max_value);
1553 return false;
1554 }
1555
1556 /* The vector group specifier is optional in assembly code. */
1557 if (opnd->indexed_za.group_size != 0
1558 && opnd->indexed_za.group_size != group_size)
1559 {
1560 set_invalid_vg_size (mismatch_detail, idx, group_size);
1561 return false;
1562 }
1563
1564 return true;
1565 }
1566
1567 /* General constraint checking based on operand code.
1568
1569 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1570 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1571
1572 This function has to be called after the qualifiers for all operands
1573 have been resolved.
1574
1575 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1576 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1577 of error message during the disassembling where error message is not
1578 wanted. We avoid the dynamic construction of strings of error messages
1579 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1580 use a combination of error code, static string and some integer data to
1581 represent an error. */
1582
1583 static int
1584 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1585 enum aarch64_opnd type,
1586 const aarch64_opcode *opcode,
1587 aarch64_operand_error *mismatch_detail)
1588 {
1589 unsigned num, modifiers, shift;
1590 unsigned char size;
1591 int64_t imm, min_value, max_value;
1592 uint64_t uvalue, mask;
1593 const aarch64_opnd_info *opnd = opnds + idx;
1594 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1595 int i;
1596
1597 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1598
1599 switch (aarch64_operands[type].op_class)
1600 {
1601 case AARCH64_OPND_CLASS_INT_REG:
1602 /* Check pair reg constraints for cas* instructions. */
1603 if (type == AARCH64_OPND_PAIRREG)
1604 {
1605 assert (idx == 1 || idx == 3);
1606 if (opnds[idx - 1].reg.regno % 2 != 0)
1607 {
1608 set_syntax_error (mismatch_detail, idx - 1,
1609 _("reg pair must start from even reg"));
1610 return 0;
1611 }
1612 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1613 {
1614 set_syntax_error (mismatch_detail, idx,
1615 _("reg pair must be contiguous"));
1616 return 0;
1617 }
1618 break;
1619 }
1620
1621 /* <Xt> may be optional in some IC and TLBI instructions. */
1622 if (type == AARCH64_OPND_Rt_SYS)
1623 {
1624 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1625 == AARCH64_OPND_CLASS_SYSTEM));
1626 if (opnds[1].present
1627 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1628 {
1629 set_other_error (mismatch_detail, idx, _("extraneous register"));
1630 return 0;
1631 }
1632 if (!opnds[1].present
1633 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1634 {
1635 set_other_error (mismatch_detail, idx, _("missing register"));
1636 return 0;
1637 }
1638 }
1639 switch (qualifier)
1640 {
1641 case AARCH64_OPND_QLF_WSP:
1642 case AARCH64_OPND_QLF_SP:
1643 if (!aarch64_stack_pointer_p (opnd))
1644 {
1645 set_other_error (mismatch_detail, idx,
1646 _("stack pointer register expected"));
1647 return 0;
1648 }
1649 break;
1650 default:
1651 break;
1652 }
1653 break;
1654
1655 case AARCH64_OPND_CLASS_SVE_REG:
1656 switch (type)
1657 {
1658 case AARCH64_OPND_SVE_Zm3_INDEX:
1659 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1660 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1661 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1662 case AARCH64_OPND_SVE_Zm4_INDEX:
1663 size = get_operand_fields_width (get_operand_from_code (type));
1664 shift = get_operand_specific_data (&aarch64_operands[type]);
1665 if (!check_reglane (opnd, mismatch_detail, idx,
1666 "z", 0, (1 << shift) - 1,
1667 0, (1u << (size - shift)) - 1))
1668 return 0;
1669 break;
1670
1671 case AARCH64_OPND_SVE_Zn_INDEX:
1672 size = aarch64_get_qualifier_esize (opnd->qualifier);
1673 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1674 0, 64 / size - 1))
1675 return 0;
1676 break;
1677
1678 case AARCH64_OPND_SME_PnT_Wm_imm:
1679 size = aarch64_get_qualifier_esize (opnd->qualifier);
1680 max_value = 16 / size - 1;
1681 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 0))
1682 return 0;
1683 break;
1684
1685 default:
1686 break;
1687 }
1688 break;
1689
1690 case AARCH64_OPND_CLASS_SVE_REGLIST:
1691 num = get_opcode_dependent_value (opcode);
1692 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1693 return 0;
1694 break;
1695
1696 case AARCH64_OPND_CLASS_ZA_ACCESS:
1697 switch (type)
1698 {
1699 case AARCH64_OPND_SME_ZA_HV_idx_src:
1700 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1701 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1702 size = aarch64_get_qualifier_esize (opnd->qualifier);
1703 max_value = 16 / size - 1;
1704 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value,
1705 get_opcode_dependent_value (opcode)))
1706 return 0;
1707 break;
1708
1709 case AARCH64_OPND_SME_ZA_array_off4:
1710 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15,
1711 get_opcode_dependent_value (opcode)))
1712 return 0;
1713 break;
1714
1715 default:
1716 abort ();
1717 }
1718 break;
1719
1720 case AARCH64_OPND_CLASS_PRED_REG:
1721 if (opnd->reg.regno >= 8
1722 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1723 {
1724 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
1725 return 0;
1726 }
1727 break;
1728
1729 case AARCH64_OPND_CLASS_COND:
1730 if (type == AARCH64_OPND_COND1
1731 && (opnds[idx].cond->value & 0xe) == 0xe)
1732 {
1733 /* Not allow AL or NV. */
1734 set_syntax_error (mismatch_detail, idx, NULL);
1735 }
1736 break;
1737
1738 case AARCH64_OPND_CLASS_ADDRESS:
1739 /* Check writeback. */
1740 switch (opcode->iclass)
1741 {
1742 case ldst_pos:
1743 case ldst_unscaled:
1744 case ldstnapair_offs:
1745 case ldstpair_off:
1746 case ldst_unpriv:
1747 if (opnd->addr.writeback == 1)
1748 {
1749 set_syntax_error (mismatch_detail, idx,
1750 _("unexpected address writeback"));
1751 return 0;
1752 }
1753 break;
1754 case ldst_imm10:
1755 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1756 {
1757 set_syntax_error (mismatch_detail, idx,
1758 _("unexpected address writeback"));
1759 return 0;
1760 }
1761 break;
1762 case ldst_imm9:
1763 case ldstpair_indexed:
1764 case asisdlsep:
1765 case asisdlsop:
1766 if (opnd->addr.writeback == 0)
1767 {
1768 set_syntax_error (mismatch_detail, idx,
1769 _("address writeback expected"));
1770 return 0;
1771 }
1772 break;
1773 default:
1774 assert (opnd->addr.writeback == 0);
1775 break;
1776 }
1777 switch (type)
1778 {
1779 case AARCH64_OPND_ADDR_SIMM7:
1780 /* Scaled signed 7 bits immediate offset. */
1781 /* Get the size of the data element that is accessed, which may be
1782 different from that of the source register size,
1783 e.g. in strb/ldrb. */
1784 size = aarch64_get_qualifier_esize (opnd->qualifier);
1785 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1786 {
1787 set_offset_out_of_range_error (mismatch_detail, idx,
1788 -64 * size, 63 * size);
1789 return 0;
1790 }
1791 if (!value_aligned_p (opnd->addr.offset.imm, size))
1792 {
1793 set_unaligned_error (mismatch_detail, idx, size);
1794 return 0;
1795 }
1796 break;
1797 case AARCH64_OPND_ADDR_OFFSET:
1798 case AARCH64_OPND_ADDR_SIMM9:
1799 /* Unscaled signed 9 bits immediate offset. */
1800 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1801 {
1802 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1803 return 0;
1804 }
1805 break;
1806
1807 case AARCH64_OPND_ADDR_SIMM9_2:
1808 /* Unscaled signed 9 bits immediate offset, which has to be negative
1809 or unaligned. */
1810 size = aarch64_get_qualifier_esize (qualifier);
1811 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1812 && !value_aligned_p (opnd->addr.offset.imm, size))
1813 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1814 return 1;
1815 set_other_error (mismatch_detail, idx,
1816 _("negative or unaligned offset expected"));
1817 return 0;
1818
1819 case AARCH64_OPND_ADDR_SIMM10:
1820 /* Scaled signed 10 bits immediate offset. */
1821 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1822 {
1823 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1824 return 0;
1825 }
1826 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1827 {
1828 set_unaligned_error (mismatch_detail, idx, 8);
1829 return 0;
1830 }
1831 break;
1832
1833 case AARCH64_OPND_ADDR_SIMM11:
1834 /* Signed 11 bits immediate offset (multiple of 16). */
1835 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1836 {
1837 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1838 return 0;
1839 }
1840
1841 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1842 {
1843 set_unaligned_error (mismatch_detail, idx, 16);
1844 return 0;
1845 }
1846 break;
1847
1848 case AARCH64_OPND_ADDR_SIMM13:
1849 /* Signed 13 bits immediate offset (multiple of 16). */
1850 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1851 {
1852 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1853 return 0;
1854 }
1855
1856 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1857 {
1858 set_unaligned_error (mismatch_detail, idx, 16);
1859 return 0;
1860 }
1861 break;
1862
1863 case AARCH64_OPND_SIMD_ADDR_POST:
1864 /* AdvSIMD load/store multiple structures, post-index. */
1865 assert (idx == 1);
1866 if (opnd->addr.offset.is_reg)
1867 {
1868 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1869 return 1;
1870 else
1871 {
1872 set_other_error (mismatch_detail, idx,
1873 _("invalid register offset"));
1874 return 0;
1875 }
1876 }
1877 else
1878 {
1879 const aarch64_opnd_info *prev = &opnds[idx-1];
1880 unsigned num_bytes; /* total number of bytes transferred. */
1881 /* The opcode dependent area stores the number of elements in
1882 each structure to be loaded/stored. */
1883 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1884 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1885 /* Special handling of loading single structure to all lane. */
1886 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1887 * aarch64_get_qualifier_esize (prev->qualifier);
1888 else
1889 num_bytes = prev->reglist.num_regs
1890 * aarch64_get_qualifier_esize (prev->qualifier)
1891 * aarch64_get_qualifier_nelem (prev->qualifier);
1892 if ((int) num_bytes != opnd->addr.offset.imm)
1893 {
1894 set_other_error (mismatch_detail, idx,
1895 _("invalid post-increment amount"));
1896 return 0;
1897 }
1898 }
1899 break;
1900
1901 case AARCH64_OPND_ADDR_REGOFF:
1902 /* Get the size of the data element that is accessed, which may be
1903 different from that of the source register size,
1904 e.g. in strb/ldrb. */
1905 size = aarch64_get_qualifier_esize (opnd->qualifier);
1906 /* It is either no shift or shift by the binary logarithm of SIZE. */
1907 if (opnd->shifter.amount != 0
1908 && opnd->shifter.amount != (int)get_logsz (size))
1909 {
1910 set_other_error (mismatch_detail, idx,
1911 _("invalid shift amount"));
1912 return 0;
1913 }
1914 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1915 operators. */
1916 switch (opnd->shifter.kind)
1917 {
1918 case AARCH64_MOD_UXTW:
1919 case AARCH64_MOD_LSL:
1920 case AARCH64_MOD_SXTW:
1921 case AARCH64_MOD_SXTX: break;
1922 default:
1923 set_other_error (mismatch_detail, idx,
1924 _("invalid extend/shift operator"));
1925 return 0;
1926 }
1927 break;
1928
1929 case AARCH64_OPND_ADDR_UIMM12:
1930 imm = opnd->addr.offset.imm;
1931 /* Get the size of the data element that is accessed, which may be
1932 different from that of the source register size,
1933 e.g. in strb/ldrb. */
1934 size = aarch64_get_qualifier_esize (qualifier);
1935 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1936 {
1937 set_offset_out_of_range_error (mismatch_detail, idx,
1938 0, 4095 * size);
1939 return 0;
1940 }
1941 if (!value_aligned_p (opnd->addr.offset.imm, size))
1942 {
1943 set_unaligned_error (mismatch_detail, idx, size);
1944 return 0;
1945 }
1946 break;
1947
1948 case AARCH64_OPND_ADDR_PCREL14:
1949 case AARCH64_OPND_ADDR_PCREL19:
1950 case AARCH64_OPND_ADDR_PCREL21:
1951 case AARCH64_OPND_ADDR_PCREL26:
1952 imm = opnd->imm.value;
1953 if (operand_need_shift_by_two (get_operand_from_code (type)))
1954 {
1955 /* The offset value in a PC-relative branch instruction is alway
1956 4-byte aligned and is encoded without the lowest 2 bits. */
1957 if (!value_aligned_p (imm, 4))
1958 {
1959 set_unaligned_error (mismatch_detail, idx, 4);
1960 return 0;
1961 }
1962 /* Right shift by 2 so that we can carry out the following check
1963 canonically. */
1964 imm >>= 2;
1965 }
1966 size = get_operand_fields_width (get_operand_from_code (type));
1967 if (!value_fit_signed_field_p (imm, size))
1968 {
1969 set_other_error (mismatch_detail, idx,
1970 _("immediate out of range"));
1971 return 0;
1972 }
1973 break;
1974
1975 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1976 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1977 {
1978 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1979 return 0;
1980 }
1981 break;
1982
1983 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1984 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1985 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1986 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1987 min_value = -8;
1988 max_value = 7;
1989 sve_imm_offset_vl:
1990 assert (!opnd->addr.offset.is_reg);
1991 assert (opnd->addr.preind);
1992 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1993 min_value *= num;
1994 max_value *= num;
1995 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1996 || (opnd->shifter.operator_present
1997 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1998 {
1999 set_other_error (mismatch_detail, idx,
2000 _("invalid addressing mode"));
2001 return 0;
2002 }
2003 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2004 {
2005 set_offset_out_of_range_error (mismatch_detail, idx,
2006 min_value, max_value);
2007 return 0;
2008 }
2009 if (!value_aligned_p (opnd->addr.offset.imm, num))
2010 {
2011 set_unaligned_error (mismatch_detail, idx, num);
2012 return 0;
2013 }
2014 break;
2015
2016 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2017 min_value = -32;
2018 max_value = 31;
2019 goto sve_imm_offset_vl;
2020
2021 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2022 min_value = -256;
2023 max_value = 255;
2024 goto sve_imm_offset_vl;
2025
2026 case AARCH64_OPND_SVE_ADDR_RI_U6:
2027 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2028 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2029 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2030 min_value = 0;
2031 max_value = 63;
2032 sve_imm_offset:
2033 assert (!opnd->addr.offset.is_reg);
2034 assert (opnd->addr.preind);
2035 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2036 min_value *= num;
2037 max_value *= num;
2038 if (opnd->shifter.operator_present
2039 || opnd->shifter.amount_present)
2040 {
2041 set_other_error (mismatch_detail, idx,
2042 _("invalid addressing mode"));
2043 return 0;
2044 }
2045 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2046 {
2047 set_offset_out_of_range_error (mismatch_detail, idx,
2048 min_value, max_value);
2049 return 0;
2050 }
2051 if (!value_aligned_p (opnd->addr.offset.imm, num))
2052 {
2053 set_unaligned_error (mismatch_detail, idx, num);
2054 return 0;
2055 }
2056 break;
2057
2058 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2059 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2060 min_value = -8;
2061 max_value = 7;
2062 goto sve_imm_offset;
2063
2064 case AARCH64_OPND_SVE_ADDR_ZX:
2065 /* Everything is already ensured by parse_operands or
2066 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2067 argument type). */
2068 assert (opnd->addr.offset.is_reg);
2069 assert (opnd->addr.preind);
2070 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2071 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2072 assert (opnd->shifter.operator_present == 0);
2073 break;
2074
2075 case AARCH64_OPND_SVE_ADDR_R:
2076 case AARCH64_OPND_SVE_ADDR_RR:
2077 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2078 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2079 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2080 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2081 case AARCH64_OPND_SVE_ADDR_RX:
2082 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2083 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2084 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2085 case AARCH64_OPND_SVE_ADDR_RZ:
2086 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2087 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2088 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2089 modifiers = 1 << AARCH64_MOD_LSL;
2090 sve_rr_operand:
2091 assert (opnd->addr.offset.is_reg);
2092 assert (opnd->addr.preind);
2093 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2094 && opnd->addr.offset.regno == 31)
2095 {
2096 set_other_error (mismatch_detail, idx,
2097 _("index register xzr is not allowed"));
2098 return 0;
2099 }
2100 if (((1 << opnd->shifter.kind) & modifiers) == 0
2101 || (opnd->shifter.amount
2102 != get_operand_specific_data (&aarch64_operands[type])))
2103 {
2104 set_other_error (mismatch_detail, idx,
2105 _("invalid addressing mode"));
2106 return 0;
2107 }
2108 break;
2109
2110 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2111 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2112 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2113 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2114 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2115 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2116 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2117 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2118 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2119 goto sve_rr_operand;
2120
2121 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2122 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2123 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2124 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2125 min_value = 0;
2126 max_value = 31;
2127 goto sve_imm_offset;
2128
2129 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2130 modifiers = 1 << AARCH64_MOD_LSL;
2131 sve_zz_operand:
2132 assert (opnd->addr.offset.is_reg);
2133 assert (opnd->addr.preind);
2134 if (((1 << opnd->shifter.kind) & modifiers) == 0
2135 || opnd->shifter.amount < 0
2136 || opnd->shifter.amount > 3)
2137 {
2138 set_other_error (mismatch_detail, idx,
2139 _("invalid addressing mode"));
2140 return 0;
2141 }
2142 break;
2143
2144 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2145 modifiers = (1 << AARCH64_MOD_SXTW);
2146 goto sve_zz_operand;
2147
2148 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2149 modifiers = 1 << AARCH64_MOD_UXTW;
2150 goto sve_zz_operand;
2151
2152 default:
2153 break;
2154 }
2155 break;
2156
2157 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2158 if (type == AARCH64_OPND_LEt)
2159 {
2160 /* Get the upper bound for the element index. */
2161 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2162 if (!value_in_range_p (opnd->reglist.index, 0, num))
2163 {
2164 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2165 return 0;
2166 }
2167 }
2168 /* The opcode dependent area stores the number of elements in
2169 each structure to be loaded/stored. */
2170 num = get_opcode_dependent_value (opcode);
2171 switch (type)
2172 {
2173 case AARCH64_OPND_LVt:
2174 assert (num >= 1 && num <= 4);
2175 /* Unless LD1/ST1, the number of registers should be equal to that
2176 of the structure elements. */
2177 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2178 return 0;
2179 break;
2180 case AARCH64_OPND_LVt_AL:
2181 case AARCH64_OPND_LEt:
2182 assert (num >= 1 && num <= 4);
2183 /* The number of registers should be equal to that of the structure
2184 elements. */
2185 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2186 return 0;
2187 break;
2188 default:
2189 break;
2190 }
2191 if (opnd->reglist.stride != 1)
2192 {
2193 set_reg_list_stride_error (mismatch_detail, idx, 1);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_CLASS_IMMEDIATE:
2199 /* Constraint check on immediate operand. */
2200 imm = opnd->imm.value;
2201 /* E.g. imm_0_31 constrains value to be 0..31. */
2202 if (qualifier_value_in_range_constraint_p (qualifier)
2203 && !value_in_range_p (imm, get_lower_bound (qualifier),
2204 get_upper_bound (qualifier)))
2205 {
2206 set_imm_out_of_range_error (mismatch_detail, idx,
2207 get_lower_bound (qualifier),
2208 get_upper_bound (qualifier));
2209 return 0;
2210 }
2211
2212 switch (type)
2213 {
2214 case AARCH64_OPND_AIMM:
2215 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2216 {
2217 set_other_error (mismatch_detail, idx,
2218 _("invalid shift operator"));
2219 return 0;
2220 }
2221 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2222 {
2223 set_other_error (mismatch_detail, idx,
2224 _("shift amount must be 0 or 12"));
2225 return 0;
2226 }
2227 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2228 {
2229 set_other_error (mismatch_detail, idx,
2230 _("immediate out of range"));
2231 return 0;
2232 }
2233 break;
2234
2235 case AARCH64_OPND_HALF:
2236 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2237 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2238 {
2239 set_other_error (mismatch_detail, idx,
2240 _("invalid shift operator"));
2241 return 0;
2242 }
2243 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2244 if (!value_aligned_p (opnd->shifter.amount, 16))
2245 {
2246 set_other_error (mismatch_detail, idx,
2247 _("shift amount must be a multiple of 16"));
2248 return 0;
2249 }
2250 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2251 {
2252 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2253 0, size * 8 - 16);
2254 return 0;
2255 }
2256 if (opnd->imm.value < 0)
2257 {
2258 set_other_error (mismatch_detail, idx,
2259 _("negative immediate value not allowed"));
2260 return 0;
2261 }
2262 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2263 {
2264 set_other_error (mismatch_detail, idx,
2265 _("immediate out of range"));
2266 return 0;
2267 }
2268 break;
2269
2270 case AARCH64_OPND_IMM_MOV:
2271 {
2272 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2273 imm = opnd->imm.value;
2274 assert (idx == 1);
2275 switch (opcode->op)
2276 {
2277 case OP_MOV_IMM_WIDEN:
2278 imm = ~imm;
2279 /* Fall through. */
2280 case OP_MOV_IMM_WIDE:
2281 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2282 {
2283 set_other_error (mismatch_detail, idx,
2284 _("immediate out of range"));
2285 return 0;
2286 }
2287 break;
2288 case OP_MOV_IMM_LOG:
2289 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2290 {
2291 set_other_error (mismatch_detail, idx,
2292 _("immediate out of range"));
2293 return 0;
2294 }
2295 break;
2296 default:
2297 assert (0);
2298 return 0;
2299 }
2300 }
2301 break;
2302
2303 case AARCH64_OPND_NZCV:
2304 case AARCH64_OPND_CCMP_IMM:
2305 case AARCH64_OPND_EXCEPTION:
2306 case AARCH64_OPND_UNDEFINED:
2307 case AARCH64_OPND_TME_UIMM16:
2308 case AARCH64_OPND_UIMM4:
2309 case AARCH64_OPND_UIMM4_ADDG:
2310 case AARCH64_OPND_UIMM7:
2311 case AARCH64_OPND_UIMM3_OP1:
2312 case AARCH64_OPND_UIMM3_OP2:
2313 case AARCH64_OPND_SVE_UIMM3:
2314 case AARCH64_OPND_SVE_UIMM7:
2315 case AARCH64_OPND_SVE_UIMM8:
2316 case AARCH64_OPND_SVE_UIMM8_53:
2317 case AARCH64_OPND_CSSC_UIMM8:
2318 size = get_operand_fields_width (get_operand_from_code (type));
2319 assert (size < 32);
2320 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2321 {
2322 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2323 (1u << size) - 1);
2324 return 0;
2325 }
2326 break;
2327
2328 case AARCH64_OPND_UIMM10:
2329 /* Scaled unsigned 10 bits immediate offset. */
2330 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2331 {
2332 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2333 return 0;
2334 }
2335
2336 if (!value_aligned_p (opnd->imm.value, 16))
2337 {
2338 set_unaligned_error (mismatch_detail, idx, 16);
2339 return 0;
2340 }
2341 break;
2342
2343 case AARCH64_OPND_SIMM5:
2344 case AARCH64_OPND_SVE_SIMM5:
2345 case AARCH64_OPND_SVE_SIMM5B:
2346 case AARCH64_OPND_SVE_SIMM6:
2347 case AARCH64_OPND_SVE_SIMM8:
2348 case AARCH64_OPND_CSSC_SIMM8:
2349 size = get_operand_fields_width (get_operand_from_code (type));
2350 assert (size < 32);
2351 if (!value_fit_signed_field_p (opnd->imm.value, size))
2352 {
2353 set_imm_out_of_range_error (mismatch_detail, idx,
2354 -(1 << (size - 1)),
2355 (1 << (size - 1)) - 1);
2356 return 0;
2357 }
2358 break;
2359
2360 case AARCH64_OPND_WIDTH:
2361 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2362 && opnds[0].type == AARCH64_OPND_Rd);
2363 size = get_upper_bound (qualifier);
2364 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2365 /* lsb+width <= reg.size */
2366 {
2367 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2368 size - opnds[idx-1].imm.value);
2369 return 0;
2370 }
2371 break;
2372
2373 case AARCH64_OPND_LIMM:
2374 case AARCH64_OPND_SVE_LIMM:
2375 {
2376 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2377 uint64_t uimm = opnd->imm.value;
2378 if (opcode->op == OP_BIC)
2379 uimm = ~uimm;
2380 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2381 {
2382 set_other_error (mismatch_detail, idx,
2383 _("immediate out of range"));
2384 return 0;
2385 }
2386 }
2387 break;
2388
2389 case AARCH64_OPND_IMM0:
2390 case AARCH64_OPND_FPIMM0:
2391 if (opnd->imm.value != 0)
2392 {
2393 set_other_error (mismatch_detail, idx,
2394 _("immediate zero expected"));
2395 return 0;
2396 }
2397 break;
2398
2399 case AARCH64_OPND_IMM_ROT1:
2400 case AARCH64_OPND_IMM_ROT2:
2401 case AARCH64_OPND_SVE_IMM_ROT2:
2402 if (opnd->imm.value != 0
2403 && opnd->imm.value != 90
2404 && opnd->imm.value != 180
2405 && opnd->imm.value != 270)
2406 {
2407 set_other_error (mismatch_detail, idx,
2408 _("rotate expected to be 0, 90, 180 or 270"));
2409 return 0;
2410 }
2411 break;
2412
2413 case AARCH64_OPND_IMM_ROT3:
2414 case AARCH64_OPND_SVE_IMM_ROT1:
2415 case AARCH64_OPND_SVE_IMM_ROT3:
2416 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2417 {
2418 set_other_error (mismatch_detail, idx,
2419 _("rotate expected to be 90 or 270"));
2420 return 0;
2421 }
2422 break;
2423
2424 case AARCH64_OPND_SHLL_IMM:
2425 assert (idx == 2);
2426 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2427 if (opnd->imm.value != size)
2428 {
2429 set_other_error (mismatch_detail, idx,
2430 _("invalid shift amount"));
2431 return 0;
2432 }
2433 break;
2434
2435 case AARCH64_OPND_IMM_VLSL:
2436 size = aarch64_get_qualifier_esize (qualifier);
2437 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2438 {
2439 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2440 size * 8 - 1);
2441 return 0;
2442 }
2443 break;
2444
2445 case AARCH64_OPND_IMM_VLSR:
2446 size = aarch64_get_qualifier_esize (qualifier);
2447 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2448 {
2449 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2450 return 0;
2451 }
2452 break;
2453
2454 case AARCH64_OPND_SIMD_IMM:
2455 case AARCH64_OPND_SIMD_IMM_SFT:
2456 /* Qualifier check. */
2457 switch (qualifier)
2458 {
2459 case AARCH64_OPND_QLF_LSL:
2460 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2461 {
2462 set_other_error (mismatch_detail, idx,
2463 _("invalid shift operator"));
2464 return 0;
2465 }
2466 break;
2467 case AARCH64_OPND_QLF_MSL:
2468 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2469 {
2470 set_other_error (mismatch_detail, idx,
2471 _("invalid shift operator"));
2472 return 0;
2473 }
2474 break;
2475 case AARCH64_OPND_QLF_NIL:
2476 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2477 {
2478 set_other_error (mismatch_detail, idx,
2479 _("shift is not permitted"));
2480 return 0;
2481 }
2482 break;
2483 default:
2484 assert (0);
2485 return 0;
2486 }
2487 /* Is the immediate valid? */
2488 assert (idx == 1);
2489 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2490 {
2491 /* uimm8 or simm8 */
2492 if (!value_in_range_p (opnd->imm.value, -128, 255))
2493 {
2494 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2495 return 0;
2496 }
2497 }
2498 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2499 {
2500 /* uimm64 is not
2501 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2502 ffffffffgggggggghhhhhhhh'. */
2503 set_other_error (mismatch_detail, idx,
2504 _("invalid value for immediate"));
2505 return 0;
2506 }
2507 /* Is the shift amount valid? */
2508 switch (opnd->shifter.kind)
2509 {
2510 case AARCH64_MOD_LSL:
2511 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2512 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2513 {
2514 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2515 (size - 1) * 8);
2516 return 0;
2517 }
2518 if (!value_aligned_p (opnd->shifter.amount, 8))
2519 {
2520 set_unaligned_error (mismatch_detail, idx, 8);
2521 return 0;
2522 }
2523 break;
2524 case AARCH64_MOD_MSL:
2525 /* Only 8 and 16 are valid shift amount. */
2526 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2527 {
2528 set_other_error (mismatch_detail, idx,
2529 _("shift amount must be 0 or 16"));
2530 return 0;
2531 }
2532 break;
2533 default:
2534 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2535 {
2536 set_other_error (mismatch_detail, idx,
2537 _("invalid shift operator"));
2538 return 0;
2539 }
2540 break;
2541 }
2542 break;
2543
2544 case AARCH64_OPND_FPIMM:
2545 case AARCH64_OPND_SIMD_FPIMM:
2546 case AARCH64_OPND_SVE_FPIMM8:
2547 if (opnd->imm.is_fp == 0)
2548 {
2549 set_other_error (mismatch_detail, idx,
2550 _("floating-point immediate expected"));
2551 return 0;
2552 }
2553 /* The value is expected to be an 8-bit floating-point constant with
2554 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2555 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2556 instruction). */
2557 if (!value_in_range_p (opnd->imm.value, 0, 255))
2558 {
2559 set_other_error (mismatch_detail, idx,
2560 _("immediate out of range"));
2561 return 0;
2562 }
2563 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2564 {
2565 set_other_error (mismatch_detail, idx,
2566 _("invalid shift operator"));
2567 return 0;
2568 }
2569 break;
2570
2571 case AARCH64_OPND_SVE_AIMM:
2572 min_value = 0;
2573 sve_aimm:
2574 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2575 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2576 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2577 uvalue = opnd->imm.value;
2578 shift = opnd->shifter.amount;
2579 if (size == 1)
2580 {
2581 if (shift != 0)
2582 {
2583 set_other_error (mismatch_detail, idx,
2584 _("no shift amount allowed for"
2585 " 8-bit constants"));
2586 return 0;
2587 }
2588 }
2589 else
2590 {
2591 if (shift != 0 && shift != 8)
2592 {
2593 set_other_error (mismatch_detail, idx,
2594 _("shift amount must be 0 or 8"));
2595 return 0;
2596 }
2597 if (shift == 0 && (uvalue & 0xff) == 0)
2598 {
2599 shift = 8;
2600 uvalue = (int64_t) uvalue / 256;
2601 }
2602 }
2603 mask >>= shift;
2604 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2605 {
2606 set_other_error (mismatch_detail, idx,
2607 _("immediate too big for element size"));
2608 return 0;
2609 }
2610 uvalue = (uvalue - min_value) & mask;
2611 if (uvalue > 0xff)
2612 {
2613 set_other_error (mismatch_detail, idx,
2614 _("invalid arithmetic immediate"));
2615 return 0;
2616 }
2617 break;
2618
2619 case AARCH64_OPND_SVE_ASIMM:
2620 min_value = -128;
2621 goto sve_aimm;
2622
2623 case AARCH64_OPND_SVE_I1_HALF_ONE:
2624 assert (opnd->imm.is_fp);
2625 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2626 {
2627 set_other_error (mismatch_detail, idx,
2628 _("floating-point value must be 0.5 or 1.0"));
2629 return 0;
2630 }
2631 break;
2632
2633 case AARCH64_OPND_SVE_I1_HALF_TWO:
2634 assert (opnd->imm.is_fp);
2635 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2636 {
2637 set_other_error (mismatch_detail, idx,
2638 _("floating-point value must be 0.5 or 2.0"));
2639 return 0;
2640 }
2641 break;
2642
2643 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2644 assert (opnd->imm.is_fp);
2645 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2646 {
2647 set_other_error (mismatch_detail, idx,
2648 _("floating-point value must be 0.0 or 1.0"));
2649 return 0;
2650 }
2651 break;
2652
2653 case AARCH64_OPND_SVE_INV_LIMM:
2654 {
2655 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2656 uint64_t uimm = ~opnd->imm.value;
2657 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2658 {
2659 set_other_error (mismatch_detail, idx,
2660 _("immediate out of range"));
2661 return 0;
2662 }
2663 }
2664 break;
2665
2666 case AARCH64_OPND_SVE_LIMM_MOV:
2667 {
2668 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2669 uint64_t uimm = opnd->imm.value;
2670 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2671 {
2672 set_other_error (mismatch_detail, idx,
2673 _("immediate out of range"));
2674 return 0;
2675 }
2676 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2677 {
2678 set_other_error (mismatch_detail, idx,
2679 _("invalid replicated MOV immediate"));
2680 return 0;
2681 }
2682 }
2683 break;
2684
2685 case AARCH64_OPND_SVE_PATTERN_SCALED:
2686 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2687 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2688 {
2689 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2690 return 0;
2691 }
2692 break;
2693
2694 case AARCH64_OPND_SVE_SHLIMM_PRED:
2695 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2696 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2697 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2698 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2699 {
2700 set_imm_out_of_range_error (mismatch_detail, idx,
2701 0, 8 * size - 1);
2702 return 0;
2703 }
2704 break;
2705
2706 case AARCH64_OPND_SVE_SHRIMM_PRED:
2707 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2708 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2709 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2710 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2711 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2712 {
2713 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2714 return 0;
2715 }
2716 break;
2717
2718 default:
2719 break;
2720 }
2721 break;
2722
2723 case AARCH64_OPND_CLASS_SYSTEM:
2724 switch (type)
2725 {
2726 case AARCH64_OPND_PSTATEFIELD:
2727 for (i = 0; aarch64_pstatefields[i].name; ++i)
2728 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2729 break;
2730 assert (aarch64_pstatefields[i].name);
2731 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2732 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2733 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2734 {
2735 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2736 return 0;
2737 }
2738 break;
2739 case AARCH64_OPND_PRFOP:
2740 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
2741 {
2742 set_other_error (mismatch_detail, idx,
2743 _("the register-index form of PRFM does"
2744 " not accept opcodes in the range 24-31"));
2745 return 0;
2746 }
2747 break;
2748 default:
2749 break;
2750 }
2751 break;
2752
2753 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2754 /* Get the upper bound for the element index. */
2755 if (opcode->op == OP_FCMLA_ELEM)
2756 /* FCMLA index range depends on the vector size of other operands
2757 and is halfed because complex numbers take two elements. */
2758 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2759 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2760 else
2761 num = 16;
2762 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2763 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2764
2765 /* Index out-of-range. */
2766 if (!value_in_range_p (opnd->reglane.index, 0, num))
2767 {
2768 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2769 return 0;
2770 }
2771 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2772 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2773 number is encoded in "size:M:Rm":
2774 size <Vm>
2775 00 RESERVED
2776 01 0:Rm
2777 10 M:Rm
2778 11 RESERVED */
2779 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2780 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2781 {
2782 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2783 return 0;
2784 }
2785 break;
2786
2787 case AARCH64_OPND_CLASS_MODIFIED_REG:
2788 assert (idx == 1 || idx == 2);
2789 switch (type)
2790 {
2791 case AARCH64_OPND_Rm_EXT:
2792 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2793 && opnd->shifter.kind != AARCH64_MOD_LSL)
2794 {
2795 set_other_error (mismatch_detail, idx,
2796 _("extend operator expected"));
2797 return 0;
2798 }
2799 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2800 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2801 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2802 case. */
2803 if (!aarch64_stack_pointer_p (opnds + 0)
2804 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2805 {
2806 if (!opnd->shifter.operator_present)
2807 {
2808 set_other_error (mismatch_detail, idx,
2809 _("missing extend operator"));
2810 return 0;
2811 }
2812 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2813 {
2814 set_other_error (mismatch_detail, idx,
2815 _("'LSL' operator not allowed"));
2816 return 0;
2817 }
2818 }
2819 assert (opnd->shifter.operator_present /* Default to LSL. */
2820 || opnd->shifter.kind == AARCH64_MOD_LSL);
2821 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2822 {
2823 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2824 return 0;
2825 }
2826 /* In the 64-bit form, the final register operand is written as Wm
2827 for all but the (possibly omitted) UXTX/LSL and SXTX
2828 operators.
2829 N.B. GAS allows X register to be used with any operator as a
2830 programming convenience. */
2831 if (qualifier == AARCH64_OPND_QLF_X
2832 && opnd->shifter.kind != AARCH64_MOD_LSL
2833 && opnd->shifter.kind != AARCH64_MOD_UXTX
2834 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2835 {
2836 set_other_error (mismatch_detail, idx, _("W register expected"));
2837 return 0;
2838 }
2839 break;
2840
2841 case AARCH64_OPND_Rm_SFT:
2842 /* ROR is not available to the shifted register operand in
2843 arithmetic instructions. */
2844 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2845 {
2846 set_other_error (mismatch_detail, idx,
2847 _("shift operator expected"));
2848 return 0;
2849 }
2850 if (opnd->shifter.kind == AARCH64_MOD_ROR
2851 && opcode->iclass != log_shift)
2852 {
2853 set_other_error (mismatch_detail, idx,
2854 _("'ROR' operator not allowed"));
2855 return 0;
2856 }
2857 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2858 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2859 {
2860 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2861 return 0;
2862 }
2863 break;
2864
2865 default:
2866 break;
2867 }
2868 break;
2869
2870 default:
2871 break;
2872 }
2873
2874 return 1;
2875 }
2876
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   error kind when it is notified that an instruction does not pass the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* TIED_OPERAND > 0 means operand 0 and operand I are tied together in
     some opcode-specific way; check that constraint first, before the
     qualifier matching below, so that a tying error is reported in
     preference to a (likely also failing) qualifier-variant error.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
	{
	/* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.
	*/
	case sme_ldr:
	case sme_str:
	  assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
	  assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
	  if (inst->operands[0].indexed_za.index.imm
	      != inst->operands[1].addr.offset.imm)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
		  mismatch_detail->index = i;
		}
	      return 0;
	    }
	  break;

	default:
	  /* Check for cases where a source register needs to be the same as the
	     destination register.  Do this before matching qualifiers since if
	     an instruction has both invalid tying and invalid qualifiers,
	     the error about qualifiers would suggest several alternative
	     instructions that also have invalid tying.  */
	  if (inst->operands[0].reg.regno
	      != inst->operands[i].reg.regno)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		  mismatch_detail->index = i;
		  mismatch_detail->error = NULL;
		}
	      return 0;
	    }
	  break;
	}
    }

  /* Match operands' qualifier.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by operand_general_constraint_met_p,
     which has to be called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
2994
2995 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2996 Also updates the TYPE of each INST->OPERANDS with the corresponding
2997 value of OPCODE->OPERANDS.
2998
2999 Note that some operand qualifiers may need to be manually cleared by
3000 the caller before it further calls the aarch64_opcode_encode; by
3001 doing this, it helps the qualifier matching facilities work
3002 properly. */
3003
3004 const aarch64_opcode*
3005 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3006 {
3007 int i;
3008 const aarch64_opcode *old = inst->opcode;
3009
3010 inst->opcode = opcode;
3011
3012 /* Update the operand types. */
3013 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3014 {
3015 inst->operands[i].type = opcode->operands[i];
3016 if (opcode->operands[i] == AARCH64_OPND_NIL)
3017 break;
3018 }
3019
3020 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3021
3022 return old;
3023 }
3024
3025 int
3026 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3027 {
3028 int i;
3029 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3030 if (operands[i] == operand)
3031 return i;
3032 else if (operands[i] == AARCH64_OPND_NIL)
3033 break;
3034 return -1;
3035 }
3036 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7),	\
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* General-purpose register names.  The first index selects how register
   number 31 is printed (0: as the stack pointer, 1: as the zero register);
   the second index selects 32-bit (0) or 64-bit (1) names:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
3055
/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Index [0] gives "z<N>.s" names and
   index [1] gives "z<N>.d" names, for N in 0..31.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3067
3068 /* Return the integer register name.
3069 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3070
3071 static inline const char *
3072 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3073 {
3074 const int has_zr = sp_reg_p ? 0 : 1;
3075 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3076 return int_reg[has_zr][is_64][regno];
3077 }
3078
3079 /* Like get_int_reg_name, but IS_64 is always 1. */
3080
3081 static inline const char *
3082 get_64bit_int_reg_name (int regno, int sp_reg_p)
3083 {
3084 const int has_zr = sp_reg_p ? 0 : 1;
3085 return int_reg[has_zr][1][regno];
3086 }
3087
3088 /* Get the name of the integer offset register in OPND, using the shift type
3089 to decide whether it's a word or doubleword. */
3090
3091 static inline const char *
3092 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3093 {
3094 switch (opnd->shifter.kind)
3095 {
3096 case AARCH64_MOD_UXTW:
3097 case AARCH64_MOD_SXTW:
3098 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3099
3100 case AARCH64_MOD_LSL:
3101 case AARCH64_MOD_SXTX:
3102 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3103
3104 default:
3105 abort ();
3106 }
3107 }
3108
3109 /* Get the name of the SVE vector offset register in OPND, using the operand
3110 qualifier to decide whether the suffix should be .S or .D. */
3111
3112 static inline const char *
3113 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3114 {
3115 assert (qualifier == AARCH64_OPND_QLF_S_S
3116 || qualifier == AARCH64_OPND_QLF_S_D);
3117 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3118 }
3119
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Bit-level view of a 64-bit IEEE double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Bit-level view of a 32-bit IEEE single.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision first (see
   expand_fp_imm), so this too views 32 bits as a float.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
3139
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Decompose imm8 into the fields used by the expansion.  */
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  const uint32_t low7 = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t bit6 = (imm8 >> 6) & 0x01;	/* imm8<6> */
  const uint32_t not6 = bit6 ^ 1;		/* NOT (imm8<6>) */
  const uint32_t repl4 = bit6 ? 0xf : 0;	/* Replicate (imm8<6>, 4) */
  uint64_t result;

  if (size == 8)
    {
      /* Build the high 32 bits of the double, then shift into place:
	 sign, NOT (imm8<6>), Replicate (imm8<6>, 7), imm8<6>:imm8<5:0>.  */
      result = ((uint64_t) sign << 31)
	       | ((uint64_t) not6 << 30)
	       | ((uint64_t) repl4 << 26)
	       | ((uint64_t) bit6 << 25)
	       | ((uint64_t) bit6 << 24)
	       | ((uint64_t) bit6 << 23)
	       | ((uint64_t) low7 << 16);
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single precision; half-precision immediates are widened to
	 single precision here.  */
      result = ((uint64_t) sign << 31)
	       | ((uint64_t) not6 << 30)
	       | ((uint64_t) repl4 << 26)
	       | ((uint64_t) low7 << 19);
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
3183
3184 /* Return a string based on FMT with the register style applied. */
3185
3186 static const char *
3187 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3188 {
3189 const char *txt;
3190 va_list ap;
3191
3192 va_start (ap, fmt);
3193 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3194 va_end (ap);
3195
3196 return txt;
3197 }
3198
3199 /* Return a string based on FMT with the immediate style applied. */
3200
3201 static const char *
3202 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3203 {
3204 const char *txt;
3205 va_list ap;
3206
3207 va_start (ap, fmt);
3208 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3209 va_end (ap);
3210
3211 return txt;
3212 }
3213
3214 /* Return a string based on FMT with the sub-mnemonic style applied. */
3215
3216 static const char *
3217 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3218 {
3219 const char *txt;
3220 va_list ap;
3221
3222 va_start (ap, fmt);
3223 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3224 va_end (ap);
3225
3226 return txt;
3227 }
3228
3229 /* Return a string based on FMT with the address style applied. */
3230
3231 static const char *
3232 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3233 {
3234 const char *txt;
3235 va_list ap;
3236
3237 va_start (ap, fmt);
3238 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3239 va_end (ap);
3240
3241 return txt;
3242 }
3243
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap modulo 32, hence the & 0x1f.  */
  const int last_reg = (first_reg + (num_regs - 1) * stride) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  /* An LEt operand must always carry an element index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there is
     more than one register in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Otherwise list each register explicitly, again wrapping
	 register numbers modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & 0x1f;
      const int reg2 = (first_reg + stride * 2) & 0x1f;
      const int reg3 = (first_reg + stride * 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3314
3315 /* Print the register+immediate address in OPND to BUF, which has SIZE
3316 characters. BASE is the name of the base register. */
3317
3318 static void
3319 print_immediate_offset_address (char *buf, size_t size,
3320 const aarch64_opnd_info *opnd,
3321 const char *base,
3322 struct aarch64_styler *styler)
3323 {
3324 if (opnd->addr.writeback)
3325 {
3326 if (opnd->addr.preind)
3327 {
3328 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3329 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3330 else
3331 snprintf (buf, size, "[%s, %s]!",
3332 style_reg (styler, base),
3333 style_imm (styler, "#%d", opnd->addr.offset.imm));
3334 }
3335 else
3336 snprintf (buf, size, "[%s], %s",
3337 style_reg (styler, base),
3338 style_imm (styler, "#%d", opnd->addr.offset.imm));
3339 }
3340 else
3341 {
3342 if (opnd->shifter.operator_present)
3343 {
3344 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3345 snprintf (buf, size, "[%s, %s, %s]",
3346 style_reg (styler, base),
3347 style_imm (styler, "#%d", opnd->addr.offset.imm),
3348 style_sub_mnem (styler, "mul vl"));
3349 }
3350 else if (opnd->addr.offset.imm)
3351 snprintf (buf, size, "[%s, %s]",
3352 style_reg (styler, base),
3353 style_imm (styler, "#%d", opnd->addr.offset.imm));
3354 else
3355 snprintf (buf, size, "[%s]", style_reg (styler, base));
3356 }
3357 }
3358
3359 /* Produce the string representation of the register offset address operand
3360 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3361 the names of the base and offset registers. */
3362 static void
3363 print_register_offset_address (char *buf, size_t size,
3364 const aarch64_opnd_info *opnd,
3365 const char *base, const char *offset,
3366 struct aarch64_styler *styler)
3367 {
3368 char tb[32]; /* Temporary buffer. */
3369 bool print_extend_p = true;
3370 bool print_amount_p = true;
3371 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3372
3373 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3374 || !opnd->shifter.amount_present))
3375 {
3376 /* Not print the shift/extend amount when the amount is zero and
3377 when it is not the special case of 8-bit load/store instruction. */
3378 print_amount_p = false;
3379 /* Likewise, no need to print the shift operator LSL in such a
3380 situation. */
3381 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3382 print_extend_p = false;
3383 }
3384
3385 /* Prepare for the extend/shift. */
3386 if (print_extend_p)
3387 {
3388 if (print_amount_p)
3389 snprintf (tb, sizeof (tb), ", %s %s",
3390 style_sub_mnem (styler, shift_name),
3391 style_imm (styler, "#%" PRIi64,
3392 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3393 (opnd->shifter.amount % 100)));
3394 else
3395 snprintf (tb, sizeof (tb), ", %s",
3396 style_sub_mnem (styler, shift_name));
3397 }
3398 else
3399 tb[0] = '\0';
3400
3401 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3402 style_reg (styler, offset), tb);
3403 }
3404
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Candidate tile names, widest coverage first, so that the greedy
     matching below yields the shortest possible list.  */
  static const char *const tile_name[] = { "za", "za0.h", "za1.h", "za0.s",
					   "za1.s", "za2.s", "za3.s", "za0.d",
					   "za1.d", "za2.d", "za3.d", "za4.d",
					   "za5.d", "za6.d", "za7.d", " " };
  /* The imm8 bits covered by the corresponding entry above.  */
  static const int tile_bits[] = { 0xff, 0x55, 0xaa, 0x11,
				   0x22, 0x44, 0x88, 0x01,
				   0x02, 0x04, 0x08, 0x10,
				   0x20, 0x40, 0x80, 0x00 };
  const int num_tiles = sizeof (tile_name) / sizeof (tile_name[0]);
  int idx;
  int len = snprintf (buf, size, "{");

  for (idx = 0; idx < num_tiles; idx++)
    {
      if ((mask & tile_bits[idx]) == tile_bits[idx])
	{
	  /* This tile is wholly covered: emit its name and clear its bits
	     so smaller tiles are not printed redundantly.  */
	  mask &= ~tile_bits[idx];
	  if (len > 1)
	    len += snprintf (buf + len, size - len, ", ");
	  len += snprintf (buf + len, size - len, "%s",
			   style_reg (styler, tile_name[idx]));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + len, size - len, "}");
}
3445
3446 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3447 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3448 PC, PCREL_P and ADDRESS are used to pass in and return information about
3449 the PC-relative address calculation, where the PC value is passed in
3450 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3451 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3452 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3453
3454 The function serves both the disassembler and the assembler diagnostics
3455 issuer, which is the reason why it lives in this file. */
3456
3457 void
3458 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3459 const aarch64_opcode *opcode,
3460 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3461 bfd_vma *address, char** notes,
3462 char *comment, size_t comment_size,
3463 aarch64_feature_set features,
3464 struct aarch64_styler *styler)
3465 {
3466 unsigned int i, num_conds;
3467 const char *name = NULL;
3468 const aarch64_opnd_info *opnd = opnds + idx;
3469 enum aarch64_modifier_kind kind;
3470 uint64_t addr, enum_value;
3471
3472 if (comment != NULL)
3473 {
3474 assert (comment_size > 0);
3475 comment[0] = '\0';
3476 }
3477 else
3478 assert (comment_size == 0);
3479
3480 buf[0] = '\0';
3481 if (pcrel_p)
3482 *pcrel_p = 0;
3483
3484 switch (opnd->type)
3485 {
3486 case AARCH64_OPND_Rd:
3487 case AARCH64_OPND_Rn:
3488 case AARCH64_OPND_Rm:
3489 case AARCH64_OPND_Rt:
3490 case AARCH64_OPND_Rt2:
3491 case AARCH64_OPND_Rs:
3492 case AARCH64_OPND_Ra:
3493 case AARCH64_OPND_Rt_LS64:
3494 case AARCH64_OPND_Rt_SYS:
3495 case AARCH64_OPND_PAIRREG:
3496 case AARCH64_OPND_SVE_Rm:
3497 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3498 the <ic_op>, therefore we use opnd->present to override the
3499 generic optional-ness information. */
3500 if (opnd->type == AARCH64_OPND_Rt_SYS)
3501 {
3502 if (!opnd->present)
3503 break;
3504 }
3505 /* Omit the operand, e.g. RET. */
3506 else if (optional_operand_p (opcode, idx)
3507 && (opnd->reg.regno
3508 == get_optional_operand_default_value (opcode)))
3509 break;
3510 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3511 || opnd->qualifier == AARCH64_OPND_QLF_X);
3512 snprintf (buf, size, "%s",
3513 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3514 opnd->qualifier, 0)));
3515 break;
3516
3517 case AARCH64_OPND_Rd_SP:
3518 case AARCH64_OPND_Rn_SP:
3519 case AARCH64_OPND_Rt_SP:
3520 case AARCH64_OPND_SVE_Rn_SP:
3521 case AARCH64_OPND_Rm_SP:
3522 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3523 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3524 || opnd->qualifier == AARCH64_OPND_QLF_X
3525 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3526 snprintf (buf, size, "%s",
3527 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3528 opnd->qualifier, 1)));
3529 break;
3530
3531 case AARCH64_OPND_Rm_EXT:
3532 kind = opnd->shifter.kind;
3533 assert (idx == 1 || idx == 2);
3534 if ((aarch64_stack_pointer_p (opnds)
3535 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3536 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3537 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3538 && kind == AARCH64_MOD_UXTW)
3539 || (opnd->qualifier == AARCH64_OPND_QLF_X
3540 && kind == AARCH64_MOD_UXTX)))
3541 {
3542 /* 'LSL' is the preferred form in this case. */
3543 kind = AARCH64_MOD_LSL;
3544 if (opnd->shifter.amount == 0)
3545 {
3546 /* Shifter omitted. */
3547 snprintf (buf, size, "%s",
3548 style_reg (styler,
3549 get_int_reg_name (opnd->reg.regno,
3550 opnd->qualifier, 0)));
3551 break;
3552 }
3553 }
3554 if (opnd->shifter.amount)
3555 snprintf (buf, size, "%s, %s %s",
3556 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3557 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3558 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3559 else
3560 snprintf (buf, size, "%s, %s",
3561 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3562 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3563 break;
3564
3565 case AARCH64_OPND_Rm_SFT:
3566 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3567 || opnd->qualifier == AARCH64_OPND_QLF_X);
3568 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3569 snprintf (buf, size, "%s",
3570 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3571 opnd->qualifier, 0)));
3572 else
3573 snprintf (buf, size, "%s, %s %s",
3574 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3575 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3576 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3577 break;
3578
3579 case AARCH64_OPND_Fd:
3580 case AARCH64_OPND_Fn:
3581 case AARCH64_OPND_Fm:
3582 case AARCH64_OPND_Fa:
3583 case AARCH64_OPND_Ft:
3584 case AARCH64_OPND_Ft2:
3585 case AARCH64_OPND_Sd:
3586 case AARCH64_OPND_Sn:
3587 case AARCH64_OPND_Sm:
3588 case AARCH64_OPND_SVE_VZn:
3589 case AARCH64_OPND_SVE_Vd:
3590 case AARCH64_OPND_SVE_Vm:
3591 case AARCH64_OPND_SVE_Vn:
3592 snprintf (buf, size, "%s",
3593 style_reg (styler, "%s%d",
3594 aarch64_get_qualifier_name (opnd->qualifier),
3595 opnd->reg.regno));
3596 break;
3597
3598 case AARCH64_OPND_Va:
3599 case AARCH64_OPND_Vd:
3600 case AARCH64_OPND_Vn:
3601 case AARCH64_OPND_Vm:
3602 snprintf (buf, size, "%s",
3603 style_reg (styler, "v%d.%s", opnd->reg.regno,
3604 aarch64_get_qualifier_name (opnd->qualifier)));
3605 break;
3606
3607 case AARCH64_OPND_Ed:
3608 case AARCH64_OPND_En:
3609 case AARCH64_OPND_Em:
3610 case AARCH64_OPND_Em16:
3611 case AARCH64_OPND_SM3_IMM2:
3612 snprintf (buf, size, "%s[%s]",
3613 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3614 aarch64_get_qualifier_name (opnd->qualifier)),
3615 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3616 break;
3617
3618 case AARCH64_OPND_VdD1:
3619 case AARCH64_OPND_VnD1:
3620 snprintf (buf, size, "%s[%s]",
3621 style_reg (styler, "v%d.d", opnd->reg.regno),
3622 style_imm (styler, "1"));
3623 break;
3624
3625 case AARCH64_OPND_LVn:
3626 case AARCH64_OPND_LVt:
3627 case AARCH64_OPND_LVt_AL:
3628 case AARCH64_OPND_LEt:
3629 print_register_list (buf, size, opnd, "v", styler);
3630 break;
3631
3632 case AARCH64_OPND_SVE_Pd:
3633 case AARCH64_OPND_SVE_Pg3:
3634 case AARCH64_OPND_SVE_Pg4_5:
3635 case AARCH64_OPND_SVE_Pg4_10:
3636 case AARCH64_OPND_SVE_Pg4_16:
3637 case AARCH64_OPND_SVE_Pm:
3638 case AARCH64_OPND_SVE_Pn:
3639 case AARCH64_OPND_SVE_Pt:
3640 case AARCH64_OPND_SME_Pm:
3641 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3642 snprintf (buf, size, "%s",
3643 style_reg (styler, "p%d", opnd->reg.regno));
3644 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3645 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3646 snprintf (buf, size, "%s",
3647 style_reg (styler, "p%d/%s", opnd->reg.regno,
3648 aarch64_get_qualifier_name (opnd->qualifier)));
3649 else
3650 snprintf (buf, size, "%s",
3651 style_reg (styler, "p%d.%s", opnd->reg.regno,
3652 aarch64_get_qualifier_name (opnd->qualifier)));
3653 break;
3654
3655 case AARCH64_OPND_SVE_Za_5:
3656 case AARCH64_OPND_SVE_Za_16:
3657 case AARCH64_OPND_SVE_Zd:
3658 case AARCH64_OPND_SVE_Zm_5:
3659 case AARCH64_OPND_SVE_Zm_16:
3660 case AARCH64_OPND_SVE_Zn:
3661 case AARCH64_OPND_SVE_Zt:
3662 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3663 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3664 else
3665 snprintf (buf, size, "%s",
3666 style_reg (styler, "z%d.%s", opnd->reg.regno,
3667 aarch64_get_qualifier_name (opnd->qualifier)));
3668 break;
3669
3670 case AARCH64_OPND_SVE_ZnxN:
3671 case AARCH64_OPND_SVE_ZtxN:
3672 print_register_list (buf, size, opnd, "z", styler);
3673 break;
3674
3675 case AARCH64_OPND_SVE_Zm3_INDEX:
3676 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3677 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3678 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3679 case AARCH64_OPND_SVE_Zm4_INDEX:
3680 case AARCH64_OPND_SVE_Zn_INDEX:
3681 snprintf (buf, size, "%s[%s]",
3682 style_reg (styler, "z%d.%s", opnd->reglane.regno,
3683 aarch64_get_qualifier_name (opnd->qualifier)),
3684 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3685 break;
3686
3687 case AARCH64_OPND_SME_ZAda_2b:
3688 case AARCH64_OPND_SME_ZAda_3b:
3689 snprintf (buf, size, "%s",
3690 style_reg (styler, "za%d.%s", opnd->reg.regno,
3691 aarch64_get_qualifier_name (opnd->qualifier)));
3692 break;
3693
3694 case AARCH64_OPND_SME_ZA_HV_idx_src:
3695 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3696 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3697 snprintf (buf, size, "%s%s[%s, %s%s%s]%s",
3698 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3699 style_reg (styler, "za%d%c.%s",
3700 opnd->indexed_za.regno,
3701 opnd->indexed_za.v == 1 ? 'v' : 'h',
3702 aarch64_get_qualifier_name (opnd->qualifier)),
3703 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3704 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
3705 opnd->indexed_za.group_size ? ", " : "",
3706 opnd->indexed_za.group_size == 2
3707 ? style_sub_mnem (styler, "vgx2")
3708 : opnd->indexed_za.group_size == 4
3709 ? style_sub_mnem (styler, "vgx4") : "",
3710 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3711 break;
3712
3713 case AARCH64_OPND_SME_list_of_64bit_tiles:
3714 print_sme_za_list (buf, size, opnd->reg.regno, styler);
3715 break;
3716
3717 case AARCH64_OPND_SME_ZA_array_off4:
3718 snprintf (buf, size, "%s[%s, %s%s%s]",
3719 style_reg (styler, "za"),
3720 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3721 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
3722 opnd->indexed_za.group_size ? ", " : "",
3723 opnd->indexed_za.group_size == 2
3724 ? style_sub_mnem (styler, "vgx2")
3725 : opnd->indexed_za.group_size == 4
3726 ? style_sub_mnem (styler, "vgx4") : "");
3727 break;
3728
3729 case AARCH64_OPND_SME_SM_ZA:
3730 snprintf (buf, size, "%s",
3731 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
3732 break;
3733
3734 case AARCH64_OPND_SME_PnT_Wm_imm:
3735 snprintf (buf, size, "%s[%s, %s]",
3736 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
3737 aarch64_get_qualifier_name (opnd->qualifier)),
3738 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3739 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
3740 break;
3741
3742 case AARCH64_OPND_CRn:
3743 case AARCH64_OPND_CRm:
3744 snprintf (buf, size, "%s",
3745 style_reg (styler, "C%" PRIi64, opnd->imm.value));
3746 break;
3747
3748 case AARCH64_OPND_IDX:
3749 case AARCH64_OPND_MASK:
3750 case AARCH64_OPND_IMM:
3751 case AARCH64_OPND_IMM_2:
3752 case AARCH64_OPND_WIDTH:
3753 case AARCH64_OPND_UIMM3_OP1:
3754 case AARCH64_OPND_UIMM3_OP2:
3755 case AARCH64_OPND_BIT_NUM:
3756 case AARCH64_OPND_IMM_VLSL:
3757 case AARCH64_OPND_IMM_VLSR:
3758 case AARCH64_OPND_SHLL_IMM:
3759 case AARCH64_OPND_IMM0:
3760 case AARCH64_OPND_IMMR:
3761 case AARCH64_OPND_IMMS:
3762 case AARCH64_OPND_UNDEFINED:
3763 case AARCH64_OPND_FBITS:
3764 case AARCH64_OPND_TME_UIMM16:
3765 case AARCH64_OPND_SIMM5:
3766 case AARCH64_OPND_SVE_SHLIMM_PRED:
3767 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3768 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3769 case AARCH64_OPND_SVE_SHRIMM_PRED:
3770 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3771 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3772 case AARCH64_OPND_SVE_SIMM5:
3773 case AARCH64_OPND_SVE_SIMM5B:
3774 case AARCH64_OPND_SVE_SIMM6:
3775 case AARCH64_OPND_SVE_SIMM8:
3776 case AARCH64_OPND_SVE_UIMM3:
3777 case AARCH64_OPND_SVE_UIMM7:
3778 case AARCH64_OPND_SVE_UIMM8:
3779 case AARCH64_OPND_SVE_UIMM8_53:
3780 case AARCH64_OPND_IMM_ROT1:
3781 case AARCH64_OPND_IMM_ROT2:
3782 case AARCH64_OPND_IMM_ROT3:
3783 case AARCH64_OPND_SVE_IMM_ROT1:
3784 case AARCH64_OPND_SVE_IMM_ROT2:
3785 case AARCH64_OPND_SVE_IMM_ROT3:
3786 case AARCH64_OPND_CSSC_SIMM8:
3787 case AARCH64_OPND_CSSC_UIMM8:
3788 snprintf (buf, size, "%s",
3789 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3790 break;
3791
3792 case AARCH64_OPND_SVE_I1_HALF_ONE:
3793 case AARCH64_OPND_SVE_I1_HALF_TWO:
3794 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3795 {
3796 single_conv_t c;
3797 c.i = opnd->imm.value;
3798 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
3799 break;
3800 }
3801
3802 case AARCH64_OPND_SVE_PATTERN:
3803 if (optional_operand_p (opcode, idx)
3804 && opnd->imm.value == get_optional_operand_default_value (opcode))
3805 break;
3806 enum_value = opnd->imm.value;
3807 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3808 if (aarch64_sve_pattern_array[enum_value])
3809 snprintf (buf, size, "%s",
3810 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
3811 else
3812 snprintf (buf, size, "%s",
3813 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3814 break;
3815
3816 case AARCH64_OPND_SVE_PATTERN_SCALED:
3817 if (optional_operand_p (opcode, idx)
3818 && !opnd->shifter.operator_present
3819 && opnd->imm.value == get_optional_operand_default_value (opcode))
3820 break;
3821 enum_value = opnd->imm.value;
3822 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3823 if (aarch64_sve_pattern_array[opnd->imm.value])
3824 snprintf (buf, size, "%s",
3825 style_reg (styler,
3826 aarch64_sve_pattern_array[opnd->imm.value]));
3827 else
3828 snprintf (buf, size, "%s",
3829 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3830 if (opnd->shifter.operator_present)
3831 {
3832 size_t len = strlen (buf);
3833 const char *shift_name
3834 = aarch64_operand_modifiers[opnd->shifter.kind].name;
3835 snprintf (buf + len, size - len, ", %s %s",
3836 style_sub_mnem (styler, shift_name),
3837 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3838 }
3839 break;
3840
3841 case AARCH64_OPND_SVE_PRFOP:
3842 enum_value = opnd->imm.value;
3843 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3844 if (aarch64_sve_prfop_array[enum_value])
3845 snprintf (buf, size, "%s",
3846 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
3847 else
3848 snprintf (buf, size, "%s",
3849 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3850 break;
3851
3852 case AARCH64_OPND_IMM_MOV:
3853 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3854 {
3855 case 4: /* e.g. MOV Wd, #<imm32>. */
3856 {
3857 int imm32 = opnd->imm.value;
3858 snprintf (buf, size, "%s",
3859 style_imm (styler, "#0x%-20x", imm32));
3860 snprintf (comment, comment_size, "#%d", imm32);
3861 }
3862 break;
3863 case 8: /* e.g. MOV Xd, #<imm64>. */
3864 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
3865 opnd->imm.value));
3866 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3867 break;
3868 default:
3869 snprintf (buf, size, "<invalid>");
3870 break;
3871 }
3872 break;
3873
3874 case AARCH64_OPND_FPIMM0:
3875 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
3876 break;
3877
3878 case AARCH64_OPND_LIMM:
3879 case AARCH64_OPND_AIMM:
3880 case AARCH64_OPND_HALF:
3881 case AARCH64_OPND_SVE_INV_LIMM:
3882 case AARCH64_OPND_SVE_LIMM:
3883 case AARCH64_OPND_SVE_LIMM_MOV:
3884 if (opnd->shifter.amount)
3885 snprintf (buf, size, "%s, %s %s",
3886 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3887 style_sub_mnem (styler, "lsl"),
3888 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3889 else
3890 snprintf (buf, size, "%s",
3891 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3892 break;
3893
3894 case AARCH64_OPND_SIMD_IMM:
3895 case AARCH64_OPND_SIMD_IMM_SFT:
3896 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3897 || opnd->shifter.kind == AARCH64_MOD_NONE)
3898 snprintf (buf, size, "%s",
3899 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3900 else
3901 snprintf (buf, size, "%s, %s %s",
3902 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3903 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3904 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3905 break;
3906
3907 case AARCH64_OPND_SVE_AIMM:
3908 case AARCH64_OPND_SVE_ASIMM:
3909 if (opnd->shifter.amount)
3910 snprintf (buf, size, "%s, %s %s",
3911 style_imm (styler, "#%" PRIi64, opnd->imm.value),
3912 style_sub_mnem (styler, "lsl"),
3913 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3914 else
3915 snprintf (buf, size, "%s",
3916 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3917 break;
3918
3919 case AARCH64_OPND_FPIMM:
3920 case AARCH64_OPND_SIMD_FPIMM:
3921 case AARCH64_OPND_SVE_FPIMM8:
3922 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3923 {
3924 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3925 {
3926 half_conv_t c;
3927 c.i = expand_fp_imm (2, opnd->imm.value);
3928 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3929 }
3930 break;
3931 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3932 {
3933 single_conv_t c;
3934 c.i = expand_fp_imm (4, opnd->imm.value);
3935 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3936 }
3937 break;
3938 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3939 {
3940 double_conv_t c;
3941 c.i = expand_fp_imm (8, opnd->imm.value);
3942 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
3943 }
3944 break;
3945 default:
3946 snprintf (buf, size, "<invalid>");
3947 break;
3948 }
3949 break;
3950
3951 case AARCH64_OPND_CCMP_IMM:
3952 case AARCH64_OPND_NZCV:
3953 case AARCH64_OPND_EXCEPTION:
3954 case AARCH64_OPND_UIMM4:
3955 case AARCH64_OPND_UIMM4_ADDG:
3956 case AARCH64_OPND_UIMM7:
3957 case AARCH64_OPND_UIMM10:
3958 if (optional_operand_p (opcode, idx)
3959 && (opnd->imm.value ==
3960 (int64_t) get_optional_operand_default_value (opcode)))
3961 /* Omit the operand, e.g. DCPS1. */
3962 break;
3963 snprintf (buf, size, "%s",
3964 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
3965 break;
3966
3967 case AARCH64_OPND_COND:
3968 case AARCH64_OPND_COND1:
3969 snprintf (buf, size, "%s",
3970 style_sub_mnem (styler, opnd->cond->names[0]));
3971 num_conds = ARRAY_SIZE (opnd->cond->names);
3972 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3973 {
3974 size_t len = comment != NULL ? strlen (comment) : 0;
3975 if (i == 1)
3976 snprintf (comment + len, comment_size - len, "%s = %s",
3977 opnd->cond->names[0], opnd->cond->names[i]);
3978 else
3979 snprintf (comment + len, comment_size - len, ", %s",
3980 opnd->cond->names[i]);
3981 }
3982 break;
3983
3984 case AARCH64_OPND_ADDR_ADRP:
3985 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3986 + opnd->imm.value;
3987 if (pcrel_p)
3988 *pcrel_p = 1;
3989 if (address)
3990 *address = addr;
3991 /* This is not necessary during the disassembling, as print_address_func
3992 in the disassemble_info will take care of the printing. But some
3993 other callers may be still interested in getting the string in *STR,
3994 so here we do snprintf regardless. */
3995 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
3996 break;
3997
3998 case AARCH64_OPND_ADDR_PCREL14:
3999 case AARCH64_OPND_ADDR_PCREL19:
4000 case AARCH64_OPND_ADDR_PCREL21:
4001 case AARCH64_OPND_ADDR_PCREL26:
4002 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4003 if (pcrel_p)
4004 *pcrel_p = 1;
4005 if (address)
4006 *address = addr;
4007 /* This is not necessary during the disassembling, as print_address_func
4008 in the disassemble_info will take care of the printing. But some
4009 other callers may be still interested in getting the string in *STR,
4010 so here we do snprintf regardless. */
4011 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4012 break;
4013
4014 case AARCH64_OPND_ADDR_SIMPLE:
4015 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4016 case AARCH64_OPND_SIMD_ADDR_POST:
4017 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4018 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4019 {
4020 if (opnd->addr.offset.is_reg)
4021 snprintf (buf, size, "[%s], %s",
4022 style_reg (styler, name),
4023 style_reg (styler, "x%d", opnd->addr.offset.regno));
4024 else
4025 snprintf (buf, size, "[%s], %s",
4026 style_reg (styler, name),
4027 style_imm (styler, "#%d", opnd->addr.offset.imm));
4028 }
4029 else
4030 snprintf (buf, size, "[%s]", style_reg (styler, name));
4031 break;
4032
4033 case AARCH64_OPND_ADDR_REGOFF:
4034 case AARCH64_OPND_SVE_ADDR_R:
4035 case AARCH64_OPND_SVE_ADDR_RR:
4036 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4037 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4038 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4039 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4040 case AARCH64_OPND_SVE_ADDR_RX:
4041 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4042 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4043 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4044 print_register_offset_address
4045 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4046 get_offset_int_reg_name (opnd), styler);
4047 break;
4048
4049 case AARCH64_OPND_SVE_ADDR_ZX:
4050 print_register_offset_address
4051 (buf, size, opnd,
4052 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4053 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4054 break;
4055
4056 case AARCH64_OPND_SVE_ADDR_RZ:
4057 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4058 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4059 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4060 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4061 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4062 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4063 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4064 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4065 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4066 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4067 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4068 print_register_offset_address
4069 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4070 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4071 styler);
4072 break;
4073
4074 case AARCH64_OPND_ADDR_SIMM7:
4075 case AARCH64_OPND_ADDR_SIMM9:
4076 case AARCH64_OPND_ADDR_SIMM9_2:
4077 case AARCH64_OPND_ADDR_SIMM10:
4078 case AARCH64_OPND_ADDR_SIMM11:
4079 case AARCH64_OPND_ADDR_SIMM13:
4080 case AARCH64_OPND_ADDR_OFFSET:
4081 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4082 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4083 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4084 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4085 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4086 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4087 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4088 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4089 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4090 case AARCH64_OPND_SVE_ADDR_RI_U6:
4091 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4092 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4093 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4094 print_immediate_offset_address
4095 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4096 styler);
4097 break;
4098
4099 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4100 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4101 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4102 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4103 print_immediate_offset_address
4104 (buf, size, opnd,
4105 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4106 styler);
4107 break;
4108
4109 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4110 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4111 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4112 print_register_offset_address
4113 (buf, size, opnd,
4114 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4115 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4116 styler);
4117 break;
4118
4119 case AARCH64_OPND_ADDR_UIMM12:
4120 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4121 if (opnd->addr.offset.imm)
4122 snprintf (buf, size, "[%s, %s]",
4123 style_reg (styler, name),
4124 style_imm (styler, "#%d", opnd->addr.offset.imm));
4125 else
4126 snprintf (buf, size, "[%s]", style_reg (styler, name));
4127 break;
4128
4129 case AARCH64_OPND_SYSREG:
4130 for (i = 0; aarch64_sys_regs[i].name; ++i)
4131 {
4132 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4133
4134 bool exact_match
4135 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4136 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4137 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
4138
4139	  /* Try to find an exact match, but if that fails, return the first
4140	     partial match that was found.  */
4141 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4142 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4143 && (name == NULL || exact_match))
4144 {
4145 name = aarch64_sys_regs[i].name;
4146 if (exact_match)
4147 {
4148 if (notes)
4149 *notes = NULL;
4150 break;
4151 }
4152
4153	      /* If we didn't match exactly, that means the presence of a flag
4154	         indicates what we didn't want for this instruction.  e.g. If
4155	         F_REG_READ is there, that means we were looking for a write
4156	         register.  See aarch64_ext_sysreg.  */
4157 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4158 *notes = _("reading from a write-only register");
4159 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4160 *notes = _("writing to a read-only register");
4161 }
4162 }
4163
4164 if (name)
4165 snprintf (buf, size, "%s", style_reg (styler, name));
4166 else
4167 {
4168 /* Implementation defined system register. */
4169 unsigned int value = opnd->sysreg.value;
4170 snprintf (buf, size, "%s",
4171 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4172 (value >> 14) & 0x3, (value >> 11) & 0x7,
4173 (value >> 7) & 0xf, (value >> 3) & 0xf,
4174 value & 0x7));
4175 }
4176 break;
4177
4178 case AARCH64_OPND_PSTATEFIELD:
4179 for (i = 0; aarch64_pstatefields[i].name; ++i)
4180 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4181 {
4182 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4183 SVCRZA and SVCRSMZA. */
4184 uint32_t flags = aarch64_pstatefields[i].flags;
4185 if (flags & F_REG_IN_CRM
4186 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4187 != PSTATE_DECODE_CRM (flags)))
4188 continue;
4189 break;
4190 }
4191 assert (aarch64_pstatefields[i].name);
4192 snprintf (buf, size, "%s",
4193 style_reg (styler, aarch64_pstatefields[i].name));
4194 break;
4195
4196 case AARCH64_OPND_SYSREG_AT:
4197 case AARCH64_OPND_SYSREG_DC:
4198 case AARCH64_OPND_SYSREG_IC:
4199 case AARCH64_OPND_SYSREG_TLBI:
4200 case AARCH64_OPND_SYSREG_SR:
4201 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4202 break;
4203
4204 case AARCH64_OPND_BARRIER:
4205 case AARCH64_OPND_BARRIER_DSB_NXS:
4206 {
4207 if (opnd->barrier->name[0] == '#')
4208 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4209 else
4210 snprintf (buf, size, "%s",
4211 style_sub_mnem (styler, opnd->barrier->name));
4212 }
4213 break;
4214
4215 case AARCH64_OPND_BARRIER_ISB:
4216 /* Operand can be omitted, e.g. in DCPS1. */
4217 if (! optional_operand_p (opcode, idx)
4218 || (opnd->barrier->value
4219 != get_optional_operand_default_value (opcode)))
4220 snprintf (buf, size, "%s",
4221 style_imm (styler, "#0x%x", opnd->barrier->value));
4222 break;
4223
4224 case AARCH64_OPND_PRFOP:
4225 if (opnd->prfop->name != NULL)
4226 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4227 else
4228 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4229 opnd->prfop->value));
4230 break;
4231
4232 case AARCH64_OPND_BARRIER_PSB:
4233 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4234 break;
4235
4236 case AARCH64_OPND_BTI_TARGET:
4237 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4238 snprintf (buf, size, "%s",
4239 style_sub_mnem (styler, opnd->hint_option->name));
4240 break;
4241
4242 case AARCH64_OPND_MOPS_ADDR_Rd:
4243 case AARCH64_OPND_MOPS_ADDR_Rs:
4244 snprintf (buf, size, "[%s]!",
4245 style_reg (styler,
4246 get_int_reg_name (opnd->reg.regno,
4247 AARCH64_OPND_QLF_X, 0)));
4248 break;
4249
4250 case AARCH64_OPND_MOPS_WB_Rn:
4251 snprintf (buf, size, "%s!",
4252 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4253 AARCH64_OPND_QLF_X, 0)));
4254 break;
4255
4256 default:
4257 snprintf (buf, size, "<invalid>");
4258 break;
4259 }
4260 }
4261 \f
/* Pack a system-register encoding (op0, op1, CRn, CRm, op2) into a single
   integer.  After the final >> 5, the fields occupy bits op0[15:14],
   op1[13:11], CRn[10:7], CRm[6:3] and op2[2:0] — the same layout that the
   implementation-defined sysreg printer decodes with (value >> 14) & 0x3,
   (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf and
   value & 0x7.  */
4262 #define CPENC(op0,op1,crn,crm,op2) \
4263   ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
4264 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
/* Special-purpose register accessors always use op0 = 3, CRn = 4.  */
4265 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4266 /* for 3.9.10 System Instructions */
/* System instructions (SYS/SYSL operands) always use op0 = 1.  */
4267 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4268
/* Symbolic names for the 4-bit CRn/CRm field values used in the CPENC
   invocations throughout the aarch64_sys_regs table below.  They keep the
   table entries visually close to the architecture manual's Cn notation.  */
4269 #define C0  0
4270 #define C1  1
4271 #define C2  2
4272 #define C3  3
4273 #define C4  4
4274 #define C5  5
4275 #define C6  6
4276 #define C7  7
4277 #define C8  8
4278 #define C9  9
4279 #define C10 10
4280 #define C11 11
4281 #define C12 12
4282 #define C13 13
4283 #define C14 14
4284 #define C15 15
4285
/* Build one aarch64_sys_reg table initializer from a register NAME, its
   CPENC ENCODING, its FLAGS (F_REG_READ, F_REG_WRITE, F_DEPRECATED, ...)
   and the architecture FEATURES it requires.  */
4286 #define SYSREG(name, encoding, flags, features) \
4287   { name, encoding, flags, features }
4288
/* A core register: available on every AArch64 CPU, no feature bits.  */
4289 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4290
/* A register gated on a single architecture extension; F_ARCHEXT marks
   the entry as feature-dependent.  */
4291 #define SR_FEAT(n,e,f,feat) \
4292   SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4293
/* A register gated on two architecture extensions at once (the feature
   mask is the OR of both bits).  */
4294 #define SR_FEAT2(n,e,f,fe1,fe2) \
4295   SYSREG ((n), (e), (f) | F_ARCHEXT, \
4296 	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
4297
4298 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4299 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4300
/* One-feature shorthands, one per architecture level or named extension.  */
4301 #define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
4302 #define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
4303 #define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
4304 #define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
4305 #define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
4306 #define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
4307 #define SR_V8_6(n,e,f)	  SR_FEAT (n,e,f,V8_6)
4308 #define SR_V8_7(n,e,f)	  SR_FEAT (n,e,f,V8_7)
4309 #define SR_V8_8(n,e,f)	  SR_FEAT (n,e,f,V8_8)
4310 /* Has no separate libopcodes feature flag, but separated out for clarity.  */
4311 #define SR_GIC(n,e,f)	  SR_CORE (n,e,f)
4312 /* Has no separate libopcodes feature flag, but separated out for clarity.  */
4313 #define SR_AMU(n,e,f)	  SR_FEAT (n,e,f,V8_4)
4314 #define SR_LOR(n,e,f)	  SR_FEAT (n,e,f,LOR)
4315 #define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
4316 #define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
4317 #define SR_RNG(n,e,f)	  SR_FEAT (n,e,f,RNG)
4318 #define SR_SME(n,e,f)	  SR_FEAT (n,e,f,SME)
4319 #define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
4320 #define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
4321 #define SR_ID_PFR2(n,e,f)  SR_FEAT (n,e,f,ID_PFR2)
4322 #define SR_PROFILE(n,e,f)  SR_FEAT (n,e,f,PROFILE)
4323 #define SR_MEMTAG(n,e,f)   SR_FEAT (n,e,f,MEMTAG)
4324 #define SR_SCXTNUM(n,e,f)  SR_FEAT (n,e,f,SCXTNUM)
4325
/* Expand F (X, n) for every n in 1..15, producing a comma-terminated list
   of table entries.  F is expected to generate one register definition per
   invocation.  */
4326 #define SR_EXPAND_ELx(f,x) \
4327   f (x, 1),	\
4328   f (x, 2),	\
4329   f (x, 3),	\
4330   f (x, 4),	\
4331   f (x, 5),	\
4332   f (x, 6),	\
4333   f (x, 7),	\
4334   f (x, 8),	\
4335   f (x, 9),	\
4336   f (x, 10),	\
4337   f (x, 11),	\
4338   f (x, 12),	\
4339   f (x, 13),	\
4340   f (x, 14),	\
4341   f (x, 15),
4342
/* Run the 1..15 expansion twice, once with 1 and once with 2 as the first
   argument (e.g. for the EL1 and EL2 variants of a banked register).  */
4343 #define SR_EXPAND_EL12(f) \
4344   SR_EXPAND_ELx (f,1)	\
4345   SR_EXPAND_ELx (f,2)
4346
4347 /* TODO: there is one more issue that needs to be resolved:
4348       1. handle cpu-implementation-defined system registers.
4349
4350 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4351 respectively. If neither of these are set then the register is read-write. */
4352 const aarch64_sys_reg aarch64_sys_regs [] =
4353 {
4354 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4355 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4356 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4357 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4358 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4359 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4360 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4361 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4362 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4363 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4364 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4365 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4366 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4367 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4368 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4369 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4370 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4371 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4372 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4373 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4374 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4375 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4376 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4377 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4378 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4379 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4380 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4381 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4382 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4383 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4384 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4385 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4386 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4387 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4388 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4389 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4390 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4391 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4392 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4393 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4394 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4395 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4396 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4397 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4398 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4399 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4400 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4401 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4402 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4403 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4404 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4405 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4406 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4407 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4408 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4409 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4410 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4411 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4412 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4413 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4414 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4415 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4416 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4417 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4418 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4419 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4420 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4421 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4422 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4423 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4424 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4425 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4426 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4427 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4428 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4429 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4430 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4431 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4432 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4433 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4434 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4435 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4436 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4437 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4438 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4439 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4440 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4441 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4442 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4443 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4444 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4445 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4446 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4447 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4448 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4449 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4450 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4451 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4452 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4453 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4454 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4455 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4456 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4457 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4458 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4459 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4460 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4461 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4462 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4463 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4464 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4465 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4466 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4467 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4468 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4469 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4470 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4471 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4472 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4473 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4474 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4475 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4476 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4477 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4478 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4479 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4480 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4481 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4482 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4483 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4484 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4485 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4486 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4487 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4488 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4489 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4490 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4491 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4492 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4493 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4494 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4495 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4496 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4497 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4498 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4499 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4500 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4501 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4502 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4503 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4504 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4505 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4506 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4507 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4508 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4509 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4510 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4511 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4512 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4513 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4514 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4515 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4516 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4517 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4518 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4519 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4520 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4521 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4522 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4523 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4524 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4525 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4526 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4527 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4528 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4529 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4530 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4531 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4532 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4533 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4534 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4535 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4536 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4537 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4538 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4539 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4540 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4541 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4542 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4543 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4544 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4545 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4546 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4547 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4548 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4549 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4550 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4551 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4552 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4553 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4554 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4555 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4556 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4557 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4558 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4559 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4560 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4561 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4562 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4563 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4564 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4565 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4566 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4567 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4568 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4569 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4570 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4571 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4572 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4573 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4574 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4575 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4576 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4577 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4578 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4579 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4580 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4581 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4582 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4583 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4584 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4585 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4586 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4587 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4588 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4589 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4590 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4591 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4592 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4593 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4594 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4595 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4596 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4597 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4598 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4599 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4600 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4601 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4602 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4603 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4604 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4605 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4606 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4607 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4608 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4609 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4610 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4611 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4612 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4613 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4614 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4615 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4616 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4617 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4618 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4619 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4620 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4621 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4622 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4623 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4624 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4625 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4626 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4627 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4628 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4629 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4630 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4631 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4632 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4633 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4634 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4635 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4636 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4637 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4638 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4639 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4640 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4641 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4642 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4643 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4644 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4645 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4646 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4647 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4648 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4649 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4650 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4651 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4652 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4653 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4654 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4655 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4656 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4657 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4658 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4659 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4660 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4661 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4662 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4663 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4664 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4665 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4666 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4667 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4668 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4669 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4670 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4671 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4672 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4673 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4674 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4675 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4676 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4677 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4678 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4679 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4680 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4681 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4682 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4683 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4684 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4685 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4686 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4687 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4688 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4689 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4690 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4691 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4692 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4693 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4694 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4695 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4696 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4697 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4698 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4699 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4700 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4701 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4702 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4703 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4704 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4705 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4706 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4707 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4708 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4709 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4710 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4711 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4712 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4713 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4714 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4715 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4716 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4717 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4718 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4719 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4720 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4721 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4722 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4723 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4724 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4725 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4726 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4727 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4728 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4729 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4730 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4731 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4732 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4733 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4734 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4735 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4736 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4737 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4738 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4739 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4740 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4741 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4742 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4743 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4744 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4745 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4746 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4747 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4748 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4749 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4750 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4751 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4752 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4753 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4754 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4755 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4756 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4757
4758 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4759 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4760 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4761 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4762 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4763 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4764 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4765 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4766 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4767 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4768 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4769 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4770 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4771 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4772 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4773
4774 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4775 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4776 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4777 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4778 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4779 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4780 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4781 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4782 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4783 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4784 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4785 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4786 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4787 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4788 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4789 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4790
4791 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4792 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4793 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4794 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4795
/* Build the encoding for the Armv8-R protection-region registers
   PRBAR<n>_EL<x> / PRLAR<n>_EL<x>.  The EL number x selects op1 via
   (x-1) << 2 (so EL1 -> 0, EL2 -> 4); the region number n is split with
   its upper bits folded into CRm (8 | (n >> 1)) and its low bit placed
   in bit 2 of op2; LAR distinguishes the limit (1) from the base (0)
   register variant.  */
#define ENC_BARLAR(x,n,lar) \
  CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)

/* Table entry for protection-region base register PRBAR<n>_EL<x>.  */
#define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
/* Table entry for protection-region limit register PRLAR<n>_EL<x>.  */
#define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4801
4802 SR_EXPAND_EL12 (PRBARn_ELx)
4803 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4804 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4805 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4806 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4807 SR_EXPAND_EL12 (PRLARn_ELx)
4808 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4809 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4810 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4811
4812 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4813 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4814 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4815 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4816 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4817 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4818 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4819
4820 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4821 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4822 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4823 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4824 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4825 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4826 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4827 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4828 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4829 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4830 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4831 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4832 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4833 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4834 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4835 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4836 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4837 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4838 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4839 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4840 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4841 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4842 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4843 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4844 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4845 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4846 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4847 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4848 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4849 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4850 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4851 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4852 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4853 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4854 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4855 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4856 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4857 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4858 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4859 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4860 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4861 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4862 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4863 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4864 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4865 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4866 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4867 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4868 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4869 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4870 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4871 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4872 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4873 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4874 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4875 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4876 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4877 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4878 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4879 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4880 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4881 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4882 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4883 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4884 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4885 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4886 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4887 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4888 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4889 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4890 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4891 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4892 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4893 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4894 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4895 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4896 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4897 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4898 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4899 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4900 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4901 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4902 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4903 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4904 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4905 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4906 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4907 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4908 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4909 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4910 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4911 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4912 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4913 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4914 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4915 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4916 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4917 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4918 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4919 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4920 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4921 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4922 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4923 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4924 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4925 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4926 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4927 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4928 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4929 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4930 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4931 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4932 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4933 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4934 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4935 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4936 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4937 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4938 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4939 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4940 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4941 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4942 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4943 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4944 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4945 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4946 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4947 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4948 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4949 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4950 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4951 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4952 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4953 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4954 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4955 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4956 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4957 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4958 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4959 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4960 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4961 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4962 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4963 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4964 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4965 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4966 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4967 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4968 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4969 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4970 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4971 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4972 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4973 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4974 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4975 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4976 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4977 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4978 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4979 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4980 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4981 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4982 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4983 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4984 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4985 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4986 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4987 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4988 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4989 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4990 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4991 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4992 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4993 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4994 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4995 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4996 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4997 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4998 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4999 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
5000 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
5001 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
5002 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
5003 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
5004 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
5005 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
5006 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
5007 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
5008 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
5009 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
5010 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
5011 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
5012 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
5013 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
5014 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
5015 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
5016 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
5017 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
5018 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
5019 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
5020 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
5021 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
5022 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
5023 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
5024 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
5025 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
5026 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
5027 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
5028 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
5029 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
5030 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
5031 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
5032 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
5033 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
5034 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
5035
5036 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
5037 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
5038 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
5039 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
5040 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
5041 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
5042 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
5043 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
5044 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
5045 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
5046 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
5047 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
5048
5049 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
5050 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
5051 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
5052 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
5053 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
5054
5055 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
5056 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
5057 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
5058 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
5059 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
5060
5061 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
5062 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
5063 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
5064 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
5065 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
5066 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
5067 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
5068 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
5069 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
5070 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
5071 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
5072 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
5073 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
5074 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
5075 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
5076 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
5077 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
5078 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
5079 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
5080 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
5081 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
5082 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
5083 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
5084 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
5085 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
5086 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
5087 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
5088 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
5089 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
5090 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
5091 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
5092 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
5093 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
5094 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
5095 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
5096 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
5097 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
5098 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
5099 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
5100 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
5101 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
5102 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
5103 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
5104 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
5105 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
5106 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
5107 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
5108 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
5109 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
5110 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
5111 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
5112 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
5113 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
5114 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
5115 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
5116 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
5117 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
5118 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
5119 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
5120 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
5121 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
5122 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
5123 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
5124 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
5125 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
5126 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
5127 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
5128 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
5129 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
5130 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
5131 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
5132 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
5133 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
5134 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
5135 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
5136 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
5137 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
5138 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
5139 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
5140 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
5141 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
5142 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
5143 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
5144 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
5145 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
5146 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
5147 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
5148 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
5149 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
5150 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
5151 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
5152 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
5153 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
5154 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
5155 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
5156 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
5157 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
5158 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
5159 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
5160 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
5161 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
5162 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
5163 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
5164 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
5165 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
5166
5167 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
5168
5169 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
5170 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
5171 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
5172
5173 SR_CORE ("mecidr_el2", CPENC (3,4,C10,C8,7), F_REG_READ),
5174 SR_CORE ("mecid_p0_el2", CPENC (3,4,C10,C8,0), 0),
5175 SR_CORE ("mecid_a0_el2", CPENC (3,4,C10,C8,1), 0),
5176 SR_CORE ("mecid_p1_el2", CPENC (3,4,C10,C8,2), 0),
5177 SR_CORE ("mecid_a1_el2", CPENC (3,4,C10,C8,3), 0),
5178 SR_CORE ("vmecid_p_el2", CPENC (3,4,C10,C9,0), 0),
5179 SR_CORE ("vmecid_a_el2", CPENC (3,4,C10,C9,1), 0),
5180 SR_CORE ("mecid_rl_a_el3",CPENC (3,6,C10,C10,1), 0),
5181
5182 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
5183 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
5184 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
5185 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
5186 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
5187 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
5188 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
5189 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
5190 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
5191 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
5192 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
5193
5194 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
5195 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
5196 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
5197 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
5198 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
5199 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
5200 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
5201 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
5202 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
5203 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
5204 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
5205 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
5206 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
5207 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
5208 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
5209 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
5210 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
5211 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
5212 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
5213 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
5214 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
5215 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
5216 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
5217 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
5218 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
5219 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
5220 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
5221 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
5222 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
5223 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
5224 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
5225 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
5226 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
5227 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
5228 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
5229 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
5230 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
5231 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
5232 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
5233 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
5234 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
5235 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
5236 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
5237 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
5238 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
5239 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
5240 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
5241 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
5242
5243 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
5244 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
5245 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
5246 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
5247 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
5248 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
5249 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
5250 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
5251 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
5252 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
5253 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
5254 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
5255 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
5256 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
5257 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
5258 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
5259 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
5260 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
5261 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
5262 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
5263 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
5264 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
5265 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
5266 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
5267 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
5268 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
5269 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
5270 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
5271 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
5272 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
5273 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
5274 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
5275 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
5276 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
5277 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
5278 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
5279 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
5280 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
5281 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
5282 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
5283 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
5284 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
5285 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
5286 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
5287 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
5288 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
5289 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
5290 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
5291 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
5292 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
5293 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
5294 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
5295 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
5296 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
5297 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
5298
5299 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
5300 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
5301 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
5302 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
5303 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
5304 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
5305 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
5306 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
5307 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
5308 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
5309 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
5310 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
5311 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
5312 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
5313 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
5314 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
5315 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5316 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5317 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5318 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5319 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5320 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5321 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5322 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5323 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5324 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5325 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5326 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5327 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5328 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5329 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5330 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5331 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5332 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5333 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5334 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5335 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5336 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5337 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5338 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5339 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5340 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5341
5342 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5343 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5344
5345 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5346 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5347
5348 { 0, CPENC (0,0,0,0,0), 0, 0 }
5349 };
5350
5351 bool
5352 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5353 {
5354 return (reg_flags & F_DEPRECATED) != 0;
5355 }
5356
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2. CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  /* F_REG_MAX_VALUE bounds the immediate accepted for the field; the SVCR
     aliases additionally fix CRm bits via PSTATE_ENCODE_CRM_AND_IMM.  */
  SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
  SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
  SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
  SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
  SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
  SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
  SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
  SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
  SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
	  | F_REG_MAX_VALUE (1)),
  SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
  { 0, CPENC (0,0,0,0,0), 0, 0 },
};
5383
5384 bool
5385 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5386 const aarch64_sys_reg *reg)
5387 {
5388 if (!(reg->flags & F_ARCHEXT))
5389 return true;
5390
5391 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5392 }
5393
/* Operations accepted by the IC (instruction-cache maintenance)
   instruction.  F_HASXT marks operations that take an Xt address
   operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
5401
/* Operations accepted by the DC (data-cache maintenance) instruction.
   F_HASXT marks operations that take an Xt address operand; F_ARCHEXT
   marks operations gated on an architecture extension, checked by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",	    CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",	    CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",	    CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",	    CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",	    CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",	    CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",	    CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",	    CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",	    CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",	    CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",	    CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",	    CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",	    CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",	    CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",	    CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",	    CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",	    CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",	    CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",	    CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0,	    CPENS(0,0,0,0), 0 }
};
5436
/* Operations accepted by the AT (address-translation) instruction.
   All take an Xt address operand; the *rp/*wp entries are gated on an
   architecture extension (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
5455
/* Operations accepted by the TLBI (TLB-invalidate) instruction.
   F_HASXT marks operations that take an Xt operand; F_ARCHEXT marks
   operations gated on an architecture extension, checked by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable (*os) variants.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range (r*) variants.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos",    CPENS (6, C8, C1, 4), 0},
    { "paall",      CPENS (6, C8, C7, 4), 0},

    { 0,       CPENS(0,0,0,0), 0 }
};
5546
/* Operations accepted by the speculation-restriction instructions
   (CFP/DVP/CPP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
5557
5558 bool
5559 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5560 {
5561 return (sys_ins_reg->flags & F_HASXT) != 0;
5562 }
5563
/* Return true if the system operation REG_NAME with encoding REG_VALUE,
   flags REG_FLAGS and required feature set REG_FEATURES is available on
   a CPU with feature set FEATURES.  Operations without F_ARCHEXT are
   always available; otherwise the operation is accepted if REG_FEATURES
   matches, or via the per-encoding fallbacks below for extensions that
   predate the REG_FEATURES mechanism.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  Reject any operation whose name ends in
     "_el3".  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operations not tied to an architecture extension are always OK.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  Encodings are the *os and r* entries of
     aarch64_sys_regs_tlbi.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr.  */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return true;

  return false;
}
5681
/* The C0..C15 register-number helpers were only needed for the system
   register/operation tables above; drop them here.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5701
5702 static enum err_type
5703 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5704 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5705 bool encoding ATTRIBUTE_UNUSED,
5706 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5707 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5708 {
5709 int t = BITS (insn, 4, 0);
5710 int n = BITS (insn, 9, 5);
5711 int t2 = BITS (insn, 14, 10);
5712
5713 if (BIT (insn, 23))
5714 {
5715 /* Write back enabled. */
5716 if ((t == n || t2 == n) && n != 31)
5717 return ERR_UND;
5718 }
5719
5720 if (BIT (insn, 22))
5721 {
5722 /* Load */
5723 if (t == t2)
5724 return ERR_UND;
5725 }
5726
5727 return ERR_OK;
5728 }
5729
/* Verifier for vector by element 3 operands functions where the
   conditions `if sz:L == 11 then UNDEFINED` holds.  */

static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  const aarch64_insn undef_pattern = 0x3;
  aarch64_insn value;

  assert (inst->opcode);
  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
  /* When assembling the encoded word is in inst->value; when
     disassembling it is the raw instruction word INSN.  */
  value = encoding ? inst->value : insn;
  assert (value);

  /* sz:L == 0b11 is the reserved combination: UNDEFINED.  */
  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
    return ERR_UND;

  return ERR_OK;
}
5752
5753 /* Check an instruction that takes three register operands and that
5754 requires the register numbers to be distinct from one another. */
5755
5756 static enum err_type
5757 verify_three_different_regs (const struct aarch64_inst *inst,
5758 const aarch64_insn insn ATTRIBUTE_UNUSED,
5759 bfd_vma pc ATTRIBUTE_UNUSED,
5760 bool encoding ATTRIBUTE_UNUSED,
5761 aarch64_operand_error *mismatch_detail
5762 ATTRIBUTE_UNUSED,
5763 aarch64_instr_sequence *insn_sequence
5764 ATTRIBUTE_UNUSED)
5765 {
5766 int rd, rs, rn;
5767
5768 rd = inst->operands[0].reg.regno;
5769 rs = inst->operands[1].reg.regno;
5770 rn = inst->operands[2].reg.regno;
5771 if (rd == rs || rd == rn || rs == rn)
5772 {
5773 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5774 mismatch_detail->error
5775 = _("the three register operands must be distinct from one another");
5776 mismatch_detail->index = -1;
5777 return ERR_UND;
5778 }
5779
5780 return ERR_OK;
5781 }
5782
/* Add INST to the end of INSN_SEQUENCE.  No bounds check is performed
   here: the buffer was sized by init_insn_sequence and callers only
   append while num_added_insns < num_allocated_insns.  */

static void
add_insn_to_sequence (const struct aarch64_inst *inst,
		      aarch64_instr_sequence *insn_sequence)
{
  insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
}
5791
5792 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5793 If INST is NULL the given insn_sequence is cleared and the sequence is left
5794 uninitialized. */
5795
5796 void
5797 init_insn_sequence (const struct aarch64_inst *inst,
5798 aarch64_instr_sequence *insn_sequence)
5799 {
5800 int num_req_entries = 0;
5801
5802 if (insn_sequence->instr)
5803 {
5804 XDELETE (insn_sequence->instr);
5805 insn_sequence->instr = NULL;
5806 }
5807
5808 /* Handle all the cases here. May need to think of something smarter than
5809 a giant if/else chain if this grows. At that time, a lookup table may be
5810 best. */
5811 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5812 num_req_entries = 1;
5813 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5814 num_req_entries = 2;
5815
5816 insn_sequence->num_added_insns = 0;
5817 insn_sequence->num_allocated_insns = num_req_entries;
5818
5819 if (num_req_entries != 0)
5820 {
5821 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5822 add_insn_to_sequence (inst, insn_sequence);
5823 }
5824 }
5825
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The P/M/E variants are adjacent entries in the opcode table, so
     "the expected next step" is literally the next table entry
     (opcode - 1 / opcode[1] below rely on that layout).  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-opening step must directly follow its predecessor.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5902
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast exit: nothing to verify and no sequence in flight.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while another is still open is itself a
	 (non-fatal) violation.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Scan the operands: count uses of the movprfx destination
	     register, track the widest element size seen, and remember
	     the governing predicate operand (if any).  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
6193
6194
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Mask of everything above the low ESIZE bytes; shifting twice by
     esize * 4 avoids an undefined 64-bit shift when esize == 8.  */
  uint64_t high_bits = ((uint64_t) -1 << (esize * 4)) << (esize * 4);
  bool upper_clear = (uvalue & ~high_bits) == uvalue;
  bool upper_set = (uvalue | high_bits) == uvalue;
  int64_t folded;

  /* The bits above the immediate must be all-zero or all-one for a
     bitmask (DUPM) encoding to be a candidate at all.  */
  if (!upper_clear && !upper_set)
    return false;

  /* Fold the value down to the narrowest element width at which it is
     a pure repetition; anything that folds to one byte is DUP-able.  */
  folded = (int64_t) uvalue;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      folded = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  folded = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP also accepts a signed byte shifted left by 8 bits.  */
  if ((folded & 0xff) == 0)
    folded /= 256;
  return !(folded >= -128 && folded < 128);
}
6221
6222 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
6223 supports the instruction described by INST. */
6224
6225 bool
6226 aarch64_cpu_supports_inst_p (uint64_t cpu_variant, aarch64_inst *inst)
6227 {
6228 if (!inst->opcode->avariant
6229 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
6230 return false;
6231
6232 return true;
6233 }
6234
6235 /* Include the opcode description table as well as the operand description
6236 table. */
6237 #define VERIFIER(x) verify_##x
6238 #include "aarch64-tbl.h"