aarch64: [SME] Add new SME system registers
[binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Runtime switch for the extra debug dumping in this file (see the
   debug_dump checks in aarch64_find_best_match); off by default.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indexed by the encoded 5-bit immediate.  A null
   entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indexed by the encoded 4-bit immediate.  A null
   entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
/* Classification of an opcode's qualifier sequence, used to decide which
   operand carries the size:Q information (see
   significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
127
/* For each data pattern, the index of the operand whose qualifier
   supplies the size:Q encoding.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0, /* DP_UNKNOWN, by default using operand 0.  */
  0, /* DP_VECTOR_3SAME */
  1, /* DP_VECTOR_LONG */
  2, /* DP_VECTOR_WIDE */
  1, /* DP_VECTOR_ACROSS_LANES */
};
136
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.

   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.  The tests below are
   order-sensitive: 3SAME is checked before LONG/WIDE so that equal-size
   sequences are not misclassified.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]))
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2])
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]))
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1])
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that doesn't call
188 get_data_pattern each time when we need to select an operand. We can
189 either buffer the caculated the result or statically generate the data,
190 however, it is not obvious that the optimization will bring significant
191 benefit. */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199 \f
/* Instruction bit-fields: each entry is { least significant bit, width }.
+   Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
  { 0, 0 },	/* NIL. */
  { 0, 4 },	/* cond2: condition in truly conditional-executed inst. */
  { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field. */
  { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
  { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate. */
  { 5, 19 },	/* imm19: e.g. in CBZ. */
  { 5, 19 },	/* immhi: e.g. in ADRP. */
  { 29, 2 },	/* immlo: e.g. in ADRP. */
  { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions. */
  { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst. */
  { 29, 1 },	/* op: in AdvSIMD modified immediate instructions. */
  { 30, 1 },	/* Q: in most AdvSIMD instructions. */
  { 0, 5 },	/* Rt: in load/store instructions. */
  { 0, 5 },	/* Rd: in many integer instructions. */
  { 5, 5 },	/* Rn: in many integer instructions. */
  { 10, 5 },	/* Rt2: in load/store pair instructions. */
  { 10, 5 },	/* Ra: in fp instructions. */
  { 5, 3 },	/* op2: in the system instructions. */
  { 8, 4 },	/* CRm: in the system instructions. */
  { 12, 4 },	/* CRn: in the system instructions. */
  { 16, 3 },	/* op1: in the system instructions. */
  { 19, 2 },	/* op0: in the system instructions. */
  { 10, 3 },	/* imm3: in add/sub extended reg instructions. */
  { 12, 4 },	/* cond: condition flags as a source operand. */
  { 12, 4 },	/* opcode: in advsimd load/store instructions. */
  { 12, 4 },	/* cmode: in advsimd modified immediate instructions. */
  { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element. */
  { 13, 2 },	/* len: in advsimd tbl/tbx instructions. */
  { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst. */
  { 16, 5 },	/* Rs: in load/store exclusive instructions. */
  { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst. */
  { 12, 1 },	/* S: in load/store reg offset instructions. */
  { 21, 2 },	/* hw: in move wide constant instructions. */
  { 22, 2 },	/* opc: in load/store reg offset instructions. */
  { 23, 1 },	/* opc1: in load/store reg offset instructions. */
  { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions. */
  { 22, 2 },	/* type: floating point type field in fp data inst. */
  { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst. */
  { 10, 6 },	/* imm6: in add/sub reg shifted instructions. */
  { 15, 6 },	/* imm6_2: in rmif instructions. */
  { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions. */
  { 0, 4 },	/* imm4_2: in rmif instructions. */
  { 10, 4 },	/* imm4_3: in adddg/subg instructions. */
  { 5, 4 },	/* imm4_5: in SME instructions. */
  { 16, 5 },	/* imm5: in conditional compare (immediate) instructions. */
  { 15, 7 },	/* imm7: in load/store pair pre/post index instructions. */
  { 13, 8 },	/* imm8: in floating-point scalar move immediate inst. */
  { 12, 9 },	/* imm9: in load/store pre/post index instructions. */
  { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst. */
  { 5, 14 },	/* imm14: in test bit and branch instructions. */
  { 5, 16 },	/* imm16: in exception instructions. */
  { 0, 16 },	/* imm16_2: in udf instruction. */
  { 0, 26 },	/* imm26: in unconditional branch instructions. */
  { 10, 6 },	/* imms: in bitfield and logical immediate instructions. */
  { 16, 6 },	/* immr: in bitfield and logical immediate instructions. */
  { 16, 3 },	/* immb: in advsimd shift by immediate instructions. */
  { 19, 4 },	/* immh: in advsimd shift by immediate instructions. */
  { 22, 1 },	/* S: in LDRAA and LDRAB instructions. */
  { 22, 1 },	/* N: in logical (immediate) instructions. */
  { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index. */
  { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index. */
  { 31, 1 },	/* sf: in integer data processing instructions. */
  { 30, 1 },	/* lse_size: in LSE extension atomic instructions. */
  { 11, 1 },	/* H: in advsimd scalar x indexed element instructions. */
  { 21, 1 },	/* L: in advsimd scalar x indexed element instructions. */
  { 20, 1 },	/* M: in advsimd scalar x indexed element instructions. */
  { 31, 1 },	/* b5: in the test bit and branch instructions. */
  { 19, 5 },	/* b40: in the test bit and branch instructions. */
  { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst. */
  { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4. */
  { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14. */
  { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16. */
  { 17, 1 },	/* SVE_N: SVE equivalent of N. */
  { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0]. */
  { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10]. */
  { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5]. */
  { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10]. */
  { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16]. */
  { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16]. */
  { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5]. */
  { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0]. */
  { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm. */
  { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn. */
  { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
  { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
  { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
  { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5]. */
  { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16]. */
  { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0]. */
  { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5]. */
  { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16]. */
  { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5]. */
  { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0]. */
  { 5, 1 },	/* SVE_i1: single-bit immediate. */
  { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate. */
  { 11, 1 },	/* SVE_i3l: low bit of 3-bit immediate. */
  { 19, 2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
  { 20, 1 },	/* SVE_i2h: high bit of 2bit immediate, bits. */
  { 16, 3 },	/* SVE_imm3: 3-bit immediate field. */
  { 16, 4 },	/* SVE_imm4: 4-bit immediate field. */
  { 5, 5 },	/* SVE_imm5: 5-bit immediate field. */
  { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field. */
  { 16, 6 },	/* SVE_imm6: 6-bit immediate field. */
  { 14, 7 },	/* SVE_imm7: 7-bit immediate field. */
  { 5, 8 },	/* SVE_imm8: 8-bit immediate field. */
  { 5, 9 },	/* SVE_imm9: 9-bit immediate field. */
  { 11, 6 },	/* SVE_immr: SVE equivalent of immr. */
  { 5, 6 },	/* SVE_imms: SVE equivalent of imms. */
  { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR. */
  { 5, 5 },	/* SVE_pattern: vector pattern enumeration. */
  { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
  { 16, 1 },	/* SVE_rot1: 1-bit rotation amount. */
  { 10, 2 },	/* SVE_rot2: 2-bit rotation amount. */
  { 10, 1 },	/* SVE_rot3: 1-bit rotation amount at bit 10. */
  { 22, 1 },	/* SVE_sz: 1-bit element size select. */
  { 17, 2 },	/* SVE_size: 2-bit element size, bits [18,17]. */
  { 30, 1 },	/* SVE_sz2: 1-bit element size select. */
  { 16, 4 },	/* SVE_tsz: triangular size select. */
  { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22]. */
  { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8]. */
  { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19]. */
  { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14). */
  { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22). */
  { 0, 2 },	/* SME_ZAda_2b: SME tile ZA0-ZA3, bits [1,0]. */
  { 0, 3 },	/* SME_ZAda_3b: SME tile ZA0-ZA7, bits [2,0]. */
  { 22, 2 },	/* SME_size_10: size<1>, size<0> class field, [23:22]. */
  { 16, 1 },	/* SME_Q: Q class bit, bit 16. */
  { 15, 1 },	/* SME_V: (horizontal / vertical tiles), bit 15. */
  { 13, 2 },	/* SME_Rv: vector select register W12-W15, bits [14:13]. */
  { 13, 3 },	/* SME_Pm: second source scalable predicate register P0-P7. */
  { 0, 8 },	/* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
  { 11, 2 },	/* rotate1: FCMLA immediate rotate. */
  { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate. */
  { 12, 1 },	/* rotate3: FCADD immediate rotate. */
  { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate. */
  { 22, 1 },	/* sz: 1-bit element size select. */
  { 10, 2 },	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
};
342
343 enum aarch64_operand_class
344 aarch64_get_operand_class (enum aarch64_opnd type)
345 {
346 return aarch64_operands[type].op_class;
347 }
348
349 const char *
350 aarch64_get_operand_name (enum aarch64_opnd type)
351 {
352 return aarch64_operands[type].name;
353 }
354
355 /* Get operand description string.
356 This is usually for the diagnosis purpose. */
357 const char *
358 aarch64_get_operand_desc (enum aarch64_opnd type)
359 {
360 return aarch64_operands[type].desc;
361 }
362
/* Table of all conditional affixes.  Indexed by the 4-bit condition
   code; each entry lists the accepted spellings and the encoded
   value (equal to the index).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
383
384 const aarch64_cond *
385 get_cond_from_value (aarch64_insn value)
386 {
387 assert (value < 16);
388 return &aarch64_conds[(unsigned int) value];
389 }
390
391 const aarch64_cond *
392 get_inverted_cond (const aarch64_cond *cond)
393 {
394 return &aarch64_conds[cond->value ^ 0x1];
395 }
396
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind (see aarch64_get_operand_modifier, which
   recovers the kind from an entry's address).

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   Terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    /* Shift operators (ROR..LSL, cf. aarch64_shift_operator_p).  */
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    /* Extend operators (UXTB..SXTX, cf. aarch64_extend_operator_p).  */
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
422
423 enum aarch64_modifier_kind
424 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
425 {
426 return desc - aarch64_operand_modifiers;
427 }
428
429 aarch64_insn
430 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
431 {
432 return aarch64_operand_modifiers[kind].value;
433 }
434
435 enum aarch64_modifier_kind
436 aarch64_get_operand_modifier_from_value (aarch64_insn value,
437 bool extend_p)
438 {
439 if (extend_p)
440 return AARCH64_MOD_UXTB + value;
441 else
442 return AARCH64_MOD_LSL - value;
443 }
444
445 bool
446 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
447 {
448 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
449 }
450
451 static inline bool
452 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
453 {
454 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
455 }
456
/* Table of barrier option operands, indexed by the encoded 4-bit value
   (equal to each entry's value field); encodings without a symbolic
   name are spelled as "#0xNN".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
476
/* Table of the DSB nXS barrier option operands.  The value column holds
   the full immediate; the 2-bit CRm<3:2> encoding is the value >> 2
   (cf. the CRm_dsb_nxs field).  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
484
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.
     The name is a single space so the operand prints as nothing
     (HINT_OPD_F_NOPRINT).  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
502
/* Table of PRFM prefetch operation operands, indexed by the 5-bit prfop
   encoding; a NULL name marks a reserved encoding.  The B macro packs
   the three sub-fields:

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
543 \f
544 /* Utilities on value constraint. */
545
/* Return 1 if LOW <= VALUE <= HIGH, 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low || value > high)
    return 0;
  return 1;
}
551
/* Return 1 if VALUE is a multiple of ALIGN, 0 otherwise.
   ALIGN must be non-zero.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
558
/* Return 1 if the signed VALUE is representable in a two's-complement
   field of WIDTH bits, 0 otherwise.  WIDTH must be in [1, 31]
   (instruction immediate fields are at most 26 bits wide here).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  int64_t lim = (uint64_t) 1 << (width - 1);
  return (value >= -lim && value < lim) ? 1 : 0;
}
572
/* Return 1 if VALUE is representable as an unsigned integer in a field
   of WIDTH bits, 0 otherwise.  WIDTH must be in [1, 31].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  int64_t lim = (uint64_t) 1 << width;
  return (value >= 0 && value < lim) ? 1 : 0;
}
586
587 /* Return 1 if OPERAND is SP or WSP. */
588 int
589 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
590 {
591 return ((aarch64_get_operand_class (operand->type)
592 == AARCH64_OPND_CLASS_INT_REG)
593 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
594 && operand->reg.regno == 31);
595 }
596
597 /* Return 1 if OPERAND is XZR or WZP. */
598 int
599 aarch64_zero_register_p (const aarch64_opnd_info *operand)
600 {
601 return ((aarch64_get_operand_class (operand->type)
602 == AARCH64_OPND_CLASS_INT_REG)
603 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
604 && operand->reg.regno == 31);
605 }
606
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   Only the W<->WSP and X<->SP pairs can cross-qualify, and only when
   the operand is register 31 in a context that permits it.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    /* A W/X register 31 in a stack-pointer context also accepts the
       WSP/SP qualifier.  */
    case AARCH64_OPND_QLF_W:
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    /* Conversely, WSP/SP also accepts W/X when the operand code can
       encode the stack pointer at all.  */
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
641
642 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
643 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
644
645 Return NIL if more than one expected qualifiers are found. */
646
647 aarch64_opnd_qualifier_t
648 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
649 int idx,
650 const aarch64_opnd_qualifier_t known_qlf,
651 int known_idx)
652 {
653 int i, saved_i;
654
655 /* Special case.
656
657 When the known qualifier is NIL, we have to assume that there is only
658 one qualifier sequence in the *QSEQ_LIST and return the corresponding
659 qualifier directly. One scenario is that for instruction
660 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
661 which has only one possible valid qualifier sequence
662 NIL, S_D
663 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
664 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
665
666 Because the qualifier NIL has dual roles in the qualifier sequence:
667 it can mean no qualifier for the operand, or the qualifer sequence is
668 not in use (when all qualifiers in the sequence are NILs), we have to
669 handle this special case here. */
670 if (known_qlf == AARCH64_OPND_NIL)
671 {
672 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
673 return qseq_list[0][idx];
674 }
675
676 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
677 {
678 if (qseq_list[i][known_idx] == known_qlf)
679 {
680 if (saved_i != -1)
681 /* More than one sequences are found to have KNOWN_QLF at
682 KNOWN_IDX. */
683 return AARCH64_OPND_NIL;
684 saved_i = i;
685 }
686 }
687
688 return qseq_list[saved_i][idx];
689 }
690
/* The kind of an operand qualifier, determining how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register size / vector arrangement variant.  */
  OQK_VALUE_IN_RANGE,	/* Immediate constrained to a value range.  */
  OQK_MISC,		/* Anything else (e.g. shift operators).  */
};
698
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     common encoding value; for OQK_VALUE_IN_RANGE they are lower bound,
     upper bound and unused (see the accessors below).  */
  int data0;
  int data1;
  int data2;
  /* Printable name of the qualifier.  */
  const char *desc;
  /* Kind, deciding how data0..data2 are interpreted.  */
  enum operand_qualifier_kind kind;
};
711
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  /* NOTE(review): these entries use kind 0 (== OQK_NIL) rather than
     OQK_MISC.  The predicates in this file only test OQK_OPD_VARIANT and
     OQK_VALUE_IN_RANGE, so this looks benign — confirm before changing.  */
  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
773
774 static inline bool
775 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
776 {
777 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
778 }
779
780 static inline bool
781 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
782 {
783 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
784 }
785
786 const char*
787 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
788 {
789 return aarch64_opnd_qualifiers[qualifier].desc;
790 }
791
792 /* Given an operand qualifier, return the expected data element size
793 of a qualified operand. */
794 unsigned char
795 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
796 {
797 assert (operand_variant_qualifier_p (qualifier));
798 return aarch64_opnd_qualifiers[qualifier].data0;
799 }
800
801 unsigned char
802 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
803 {
804 assert (operand_variant_qualifier_p (qualifier));
805 return aarch64_opnd_qualifiers[qualifier].data1;
806 }
807
808 aarch64_insn
809 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
810 {
811 assert (operand_variant_qualifier_p (qualifier));
812 return aarch64_opnd_qualifiers[qualifier].data2;
813 }
814
815 static int
816 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
817 {
818 assert (qualifier_value_in_range_constraint_p (qualifier));
819 return aarch64_opnd_qualifiers[qualifier].data0;
820 }
821
822 static int
823 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
824 {
825 assert (qualifier_value_in_range_constraint_p (qualifier));
826 return aarch64_opnd_qualifiers[qualifier].data1;
827 }
828
829 #ifdef DEBUG_AARCH64
/* Print a printf-style trace line to stdout, prefixed with "#### " and
   terminated with a newline.  Only compiled when DEBUG_AARCH64.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  va_end (args);
  putchar ('\n');
}
840
841 static inline void
842 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
843 {
844 int i;
845 printf ("#### \t");
846 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
847 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
848 printf ("\n");
849 }
850
851 static void
852 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
853 const aarch64_opnd_qualifier_t *qualifier)
854 {
855 int i;
856 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
857
858 aarch64_verbose ("dump_match_qualifiers:");
859 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
860 curr[i] = opnd[i].qualifier;
861 dump_qualifier_sequence (curr);
862 aarch64_verbose ("against");
863 dump_qualifier_sequence (qualifier);
864 }
865 #endif /* DEBUG_AARCH64 */
866
867 /* This function checks if the given instruction INSN is a destructive
868 instruction based on the usage of the registers. It does not recognize
869 unary destructive instructions. */
870 bool
871 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
872 {
873 int i = 0;
874 const enum aarch64_opnd *opnds = opcode->operands;
875
876 if (opnds[0] == AARCH64_OPND_NIL)
877 return false;
878
879 while (opnds[++i] != AARCH64_OPND_NIL)
880 if (opnds[i] == opnds[0])
881 return true;
882
883 return false;
884 }
885
886 /* TODO improve this, we can have an extra field at the runtime to
887 store the number of operands rather than calculating it every time. */
888
889 int
890 aarch64_num_of_operands (const aarch64_opcode *opcode)
891 {
892 int i = 0;
893 const enum aarch64_opnd *opnds = opcode->operands;
894 while (opnds[i++] != AARCH64_OPND_NIL)
895 ;
896 --i;
897 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
898 return i;
899 }
900
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple of examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  /* An empty sequence is only a successful match when it is the
	     very first candidate (the opcode takes no qualifiers).  */
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence that matched (the loop above breaks before the
	 increment).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched prefix; pad the remainder with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1025
1026 /* Operand qualifier matching and resolving.
1027
1028 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1029 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1030
1031 if UPDATE_P, update the qualifier(s) in *INST after the matching
1032 succeeds. */
1033
1034 static int
1035 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1036 {
1037 int i, nops;
1038 aarch64_opnd_qualifier_seq_t qualifiers;
1039
1040 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1041 qualifiers))
1042 {
1043 DEBUG_TRACE ("matching FAIL");
1044 return 0;
1045 }
1046
1047 if (inst->opcode->flags & F_STRICT)
1048 {
1049 /* Require an exact qualifier match, even for NIL qualifiers. */
1050 nops = aarch64_num_of_operands (inst->opcode);
1051 for (i = 0; i < nops; ++i)
1052 if (inst->operands[i].qualifier != qualifiers[i])
1053 return false;
1054 }
1055
1056 /* Update the qualifiers. */
1057 if (update_p)
1058 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1059 {
1060 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1061 break;
1062 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1063 "update %s with %s for operand %d",
1064 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1065 aarch64_get_qualifier_name (qualifiers[i]), i);
1066 inst->operands[i].qualifier = qualifiers[i];
1067 }
1068
1069 DEBUG_TRACE ("matching SUCCESS");
1070 return 1;
1071 }
1072
1073 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1074 register by MOVZ.
1075
1076 IS32 indicates whether value is a 32-bit immediate or not.
1077 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1078 amount will be returned in *SHIFT_AMOUNT. */
1079
1080 bool
1081 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1082 {
1083 int amount;
1084
1085 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1086
1087 if (is32)
1088 {
1089 /* Allow all zeros or all ones in top 32-bits, so that
1090 32-bit constant expressions like ~0x80000000 are
1091 permitted. */
1092 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1093 /* Immediate out of range. */
1094 return false;
1095 value &= 0xffffffff;
1096 }
1097
1098 /* first, try movz then movn */
1099 amount = -1;
1100 if ((value & ((uint64_t) 0xffff << 0)) == value)
1101 amount = 0;
1102 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1103 amount = 16;
1104 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1105 amount = 32;
1106 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1107 amount = 48;
1108
1109 if (amount == -1)
1110 {
1111 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1112 return false;
1113 }
1114
1115 if (shift_amount != NULL)
1116 *shift_amount = amount;
1117
1118 DEBUG_TRACE ("exit true with amount %d", amount);
1119
1120 return true;
1121 }
1122
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr         SIMD size  R             S
     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
     0      11110s   00000r       2       UInt(r)       UInt(s)
   where all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */

/* Number of valid (value, encoding) pairs; see the sum above.  */
#define TOTAL_IMM_NB 5334

typedef struct
{
  /* Immediate value, replicated to the full 64 bits.  */
  uint64_t imm;
  /* Its 13-bit standard encoding, as produced by
     encode_immediate_bitfield.  */
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lookup table of every valid logical immediate; filled lazily by
   build_immediate_table and kept sorted by IMM for bsearch.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1151
1152 static int
1153 simd_imm_encoding_cmp(const void *i1, const void *i2)
1154 {
1155 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1156 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1157
1158 if (imm1->imm < imm2->imm)
1159 return -1;
1160 if (imm1->imm > imm2->imm)
1161 return +1;
1162 return 0;
1163 }
1164
/* Compose the 13-bit standard encoding of a logical immediate from its
   fields:
   imm13<12> imm13<5:0> imm13<11:6> SIMD size  R      S
   1         ssssss     rrrrrr      64         rrrrrr ssssss
   0         0sssss     0rrrrr      32         rrrrr  sssss
   0         10ssss     00rrrr      16         rrrr   ssss
   0         110sss     000rrr      8          rrr    sss
   0         1110ss     0000rr      4          rr     ss
   0         11110s     00000r      2          r      s
   IS64 goes in bit 12, R in bits 11:6 and S in bits 5:0.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1178
/* Fill simd_immediates[] with every valid logical-immediate value and its
   standard encoding, then sort the table by value so that
   aarch64_logical_immediate_p can use bsearch on it.  */

static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size marker" bits of the imms
	     field (see the encoding table above):
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S == E - 1 (all ones) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* The fall-throughs are intentional: each case doubles the
	       pattern until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value for the bsearch in
     aarch64_logical_immediate_p.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1244
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the lookup table lazily, on first use.
     NOTE(review): the flag is not guarded by any lock; this assumes
     single-threaded use -- confirm before calling concurrently.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* UPPER masks the bits above the ESIZE-byte immediate.  Shifting twice
     by ESIZE * 4 rather than once by ESIZE * 8 avoids an undefined
     shift by 64 when ESIZE == 8 (UPPER is then simply 0).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* The table stores replicated 64-bit values, so a sorted lookup
     suffices.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1296
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int pos;

  /* Walk the bytes from most to least significant, shifting each
     collapsed bit into place; bail out as soon as a byte is neither
     all-zeros nor all-ones.  */
  for (pos = 7; pos >= 0; pos--)
    {
      uint32_t b = (uint32_t) (imm >> (8 * pos)) & 0xff;

      result <<= 1;
      if (b == 0xff)
	result |= 1;
      else if (b != 0x00)
	return -1;
    }
  return result;
}
1318
/* Utility inline functions for operand_general_constraint_met_p.  */

/* Record an operand error of kind KIND for operand IDX with static
   message ERROR in *MISMATCH_DETAIL.  A NULL MISMATCH_DETAIL means the
   caller does not want error details (e.g. during disassembly) and makes
   this a no-op.  */
static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}
1332
1333 static inline void
1334 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1335 const char* error)
1336 {
1337 if (mismatch_detail == NULL)
1338 return;
1339 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1340 }
1341
/* Record that operand IDX is outside the valid range
   [LOWER_BOUND, UPPER_BOUND]; ERROR names the out-of-range entity.
   The NULL check must stay here (not just inside set_error) because the
   data[] fields are written after the set_error call.  */
static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0] = lower_bound;
  mismatch_detail->data[1] = upper_bound;
}
1353
/* Report that the immediate value of operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
			    int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate value"));
}
1363
/* Report that the immediate offset of operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
			       int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate offset"));
}
1373
/* Report that the register number of operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
			      int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register number"));
}
1383
/* Report that the element index of operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
				 int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register element index"));
}
1393
/* Report that the shift amount of operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("shift amount"));
}
1403
/* Report that the MUL modifier in operand IDX should be in the range
   [LOWER_BOUND, UPPER_BOUND].  The early return also skips the _()
   lookup when no error detail is requested.  */
static inline void
set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("multiplier"));
}
1415
/* Record that operand IDX must be a multiple of ALIGNMENT.  The NULL
   check must stay here because data[0] is written after the set_error
   call.  */
static inline void
set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
		     int alignment)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
  mismatch_detail->data[0] = alignment;
}
1425
/* Record that the register list of operand IDX should contain
   EXPECTED_NUM registers.  The NULL check must stay here because data[0]
   is written after the set_error call.  */
static inline void
set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
		    int expected_num)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
  mismatch_detail->data[0] = expected_num;
}
1435
1436 static inline void
1437 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1438 const char* error)
1439 {
1440 if (mismatch_detail == NULL)
1441 return;
1442 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1443 }
1444
1445 /* General constraint checking based on operand code.
1446
1447 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1448 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1449
1450 This function has to be called after the qualifiers for all operands
1451 have been resolved.
1452
1453 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1454 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1455 of error message during the disassembling where error message is not
1456 wanted. We avoid the dynamic construction of strings of error messages
1457 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1458 use a combination of error code, static string and some integer data to
1459 represent an error. */
1460
1461 static int
1462 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1463 enum aarch64_opnd type,
1464 const aarch64_opcode *opcode,
1465 aarch64_operand_error *mismatch_detail)
1466 {
1467 unsigned num, modifiers, shift;
1468 unsigned char size;
1469 int64_t imm, min_value, max_value;
1470 uint64_t uvalue, mask;
1471 const aarch64_opnd_info *opnd = opnds + idx;
1472 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1473
1474 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1475
1476 switch (aarch64_operands[type].op_class)
1477 {
1478 case AARCH64_OPND_CLASS_INT_REG:
1479 /* Check pair reg constraints for cas* instructions. */
1480 if (type == AARCH64_OPND_PAIRREG)
1481 {
1482 assert (idx == 1 || idx == 3);
1483 if (opnds[idx - 1].reg.regno % 2 != 0)
1484 {
1485 set_syntax_error (mismatch_detail, idx - 1,
1486 _("reg pair must start from even reg"));
1487 return 0;
1488 }
1489 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1490 {
1491 set_syntax_error (mismatch_detail, idx,
1492 _("reg pair must be contiguous"));
1493 return 0;
1494 }
1495 break;
1496 }
1497
1498 /* <Xt> may be optional in some IC and TLBI instructions. */
1499 if (type == AARCH64_OPND_Rt_SYS)
1500 {
1501 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1502 == AARCH64_OPND_CLASS_SYSTEM));
1503 if (opnds[1].present
1504 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1505 {
1506 set_other_error (mismatch_detail, idx, _("extraneous register"));
1507 return 0;
1508 }
1509 if (!opnds[1].present
1510 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1511 {
1512 set_other_error (mismatch_detail, idx, _("missing register"));
1513 return 0;
1514 }
1515 }
1516 switch (qualifier)
1517 {
1518 case AARCH64_OPND_QLF_WSP:
1519 case AARCH64_OPND_QLF_SP:
1520 if (!aarch64_stack_pointer_p (opnd))
1521 {
1522 set_other_error (mismatch_detail, idx,
1523 _("stack pointer register expected"));
1524 return 0;
1525 }
1526 break;
1527 default:
1528 break;
1529 }
1530 break;
1531
1532 case AARCH64_OPND_CLASS_SVE_REG:
1533 switch (type)
1534 {
1535 case AARCH64_OPND_SVE_Zm3_INDEX:
1536 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1537 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1538 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1539 case AARCH64_OPND_SVE_Zm4_INDEX:
1540 size = get_operand_fields_width (get_operand_from_code (type));
1541 shift = get_operand_specific_data (&aarch64_operands[type]);
1542 mask = (1 << shift) - 1;
1543 if (opnd->reg.regno > mask)
1544 {
1545 assert (mask == 7 || mask == 15);
1546 set_other_error (mismatch_detail, idx,
1547 mask == 15
1548 ? _("z0-z15 expected")
1549 : _("z0-z7 expected"));
1550 return 0;
1551 }
1552 mask = (1u << (size - shift)) - 1;
1553 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1554 {
1555 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1556 return 0;
1557 }
1558 break;
1559
1560 case AARCH64_OPND_SVE_Zn_INDEX:
1561 size = aarch64_get_qualifier_esize (opnd->qualifier);
1562 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1563 {
1564 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1565 0, 64 / size - 1);
1566 return 0;
1567 }
1568 break;
1569
1570 case AARCH64_OPND_SVE_ZnxN:
1571 case AARCH64_OPND_SVE_ZtxN:
1572 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1573 {
1574 set_other_error (mismatch_detail, idx,
1575 _("invalid register list"));
1576 return 0;
1577 }
1578 break;
1579
1580 default:
1581 break;
1582 }
1583 break;
1584
1585 case AARCH64_OPND_CLASS_PRED_REG:
1586 if (opnd->reg.regno >= 8
1587 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1588 {
1589 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1590 return 0;
1591 }
1592 break;
1593
1594 case AARCH64_OPND_CLASS_COND:
1595 if (type == AARCH64_OPND_COND1
1596 && (opnds[idx].cond->value & 0xe) == 0xe)
1597 {
1598 /* Not allow AL or NV. */
1599 set_syntax_error (mismatch_detail, idx, NULL);
1600 }
1601 break;
1602
1603 case AARCH64_OPND_CLASS_ADDRESS:
1604 /* Check writeback. */
1605 switch (opcode->iclass)
1606 {
1607 case ldst_pos:
1608 case ldst_unscaled:
1609 case ldstnapair_offs:
1610 case ldstpair_off:
1611 case ldst_unpriv:
1612 if (opnd->addr.writeback == 1)
1613 {
1614 set_syntax_error (mismatch_detail, idx,
1615 _("unexpected address writeback"));
1616 return 0;
1617 }
1618 break;
1619 case ldst_imm10:
1620 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1621 {
1622 set_syntax_error (mismatch_detail, idx,
1623 _("unexpected address writeback"));
1624 return 0;
1625 }
1626 break;
1627 case ldst_imm9:
1628 case ldstpair_indexed:
1629 case asisdlsep:
1630 case asisdlsop:
1631 if (opnd->addr.writeback == 0)
1632 {
1633 set_syntax_error (mismatch_detail, idx,
1634 _("address writeback expected"));
1635 return 0;
1636 }
1637 break;
1638 default:
1639 assert (opnd->addr.writeback == 0);
1640 break;
1641 }
1642 switch (type)
1643 {
1644 case AARCH64_OPND_ADDR_SIMM7:
1645 /* Scaled signed 7 bits immediate offset. */
1646 /* Get the size of the data element that is accessed, which may be
1647 different from that of the source register size,
1648 e.g. in strb/ldrb. */
1649 size = aarch64_get_qualifier_esize (opnd->qualifier);
1650 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1651 {
1652 set_offset_out_of_range_error (mismatch_detail, idx,
1653 -64 * size, 63 * size);
1654 return 0;
1655 }
1656 if (!value_aligned_p (opnd->addr.offset.imm, size))
1657 {
1658 set_unaligned_error (mismatch_detail, idx, size);
1659 return 0;
1660 }
1661 break;
1662 case AARCH64_OPND_ADDR_OFFSET:
1663 case AARCH64_OPND_ADDR_SIMM9:
1664 /* Unscaled signed 9 bits immediate offset. */
1665 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1666 {
1667 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1668 return 0;
1669 }
1670 break;
1671
1672 case AARCH64_OPND_ADDR_SIMM9_2:
1673 /* Unscaled signed 9 bits immediate offset, which has to be negative
1674 or unaligned. */
1675 size = aarch64_get_qualifier_esize (qualifier);
1676 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1677 && !value_aligned_p (opnd->addr.offset.imm, size))
1678 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1679 return 1;
1680 set_other_error (mismatch_detail, idx,
1681 _("negative or unaligned offset expected"));
1682 return 0;
1683
1684 case AARCH64_OPND_ADDR_SIMM10:
1685 /* Scaled signed 10 bits immediate offset. */
1686 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1687 {
1688 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1689 return 0;
1690 }
1691 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1692 {
1693 set_unaligned_error (mismatch_detail, idx, 8);
1694 return 0;
1695 }
1696 break;
1697
1698 case AARCH64_OPND_ADDR_SIMM11:
1699 /* Signed 11 bits immediate offset (multiple of 16). */
1700 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1701 {
1702 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1703 return 0;
1704 }
1705
1706 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1707 {
1708 set_unaligned_error (mismatch_detail, idx, 16);
1709 return 0;
1710 }
1711 break;
1712
1713 case AARCH64_OPND_ADDR_SIMM13:
1714 /* Signed 13 bits immediate offset (multiple of 16). */
1715 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1716 {
1717 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1718 return 0;
1719 }
1720
1721 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1722 {
1723 set_unaligned_error (mismatch_detail, idx, 16);
1724 return 0;
1725 }
1726 break;
1727
1728 case AARCH64_OPND_SIMD_ADDR_POST:
1729 /* AdvSIMD load/store multiple structures, post-index. */
1730 assert (idx == 1);
1731 if (opnd->addr.offset.is_reg)
1732 {
1733 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1734 return 1;
1735 else
1736 {
1737 set_other_error (mismatch_detail, idx,
1738 _("invalid register offset"));
1739 return 0;
1740 }
1741 }
1742 else
1743 {
1744 const aarch64_opnd_info *prev = &opnds[idx-1];
1745 unsigned num_bytes; /* total number of bytes transferred. */
1746 /* The opcode dependent area stores the number of elements in
1747 each structure to be loaded/stored. */
1748 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1749 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1750 /* Special handling of loading single structure to all lane. */
1751 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1752 * aarch64_get_qualifier_esize (prev->qualifier);
1753 else
1754 num_bytes = prev->reglist.num_regs
1755 * aarch64_get_qualifier_esize (prev->qualifier)
1756 * aarch64_get_qualifier_nelem (prev->qualifier);
1757 if ((int) num_bytes != opnd->addr.offset.imm)
1758 {
1759 set_other_error (mismatch_detail, idx,
1760 _("invalid post-increment amount"));
1761 return 0;
1762 }
1763 }
1764 break;
1765
1766 case AARCH64_OPND_ADDR_REGOFF:
1767 /* Get the size of the data element that is accessed, which may be
1768 different from that of the source register size,
1769 e.g. in strb/ldrb. */
1770 size = aarch64_get_qualifier_esize (opnd->qualifier);
1771 /* It is either no shift or shift by the binary logarithm of SIZE. */
1772 if (opnd->shifter.amount != 0
1773 && opnd->shifter.amount != (int)get_logsz (size))
1774 {
1775 set_other_error (mismatch_detail, idx,
1776 _("invalid shift amount"));
1777 return 0;
1778 }
1779 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1780 operators. */
1781 switch (opnd->shifter.kind)
1782 {
1783 case AARCH64_MOD_UXTW:
1784 case AARCH64_MOD_LSL:
1785 case AARCH64_MOD_SXTW:
1786 case AARCH64_MOD_SXTX: break;
1787 default:
1788 set_other_error (mismatch_detail, idx,
1789 _("invalid extend/shift operator"));
1790 return 0;
1791 }
1792 break;
1793
1794 case AARCH64_OPND_ADDR_UIMM12:
1795 imm = opnd->addr.offset.imm;
1796 /* Get the size of the data element that is accessed, which may be
1797 different from that of the source register size,
1798 e.g. in strb/ldrb. */
1799 size = aarch64_get_qualifier_esize (qualifier);
1800 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1801 {
1802 set_offset_out_of_range_error (mismatch_detail, idx,
1803 0, 4095 * size);
1804 return 0;
1805 }
1806 if (!value_aligned_p (opnd->addr.offset.imm, size))
1807 {
1808 set_unaligned_error (mismatch_detail, idx, size);
1809 return 0;
1810 }
1811 break;
1812
1813 case AARCH64_OPND_ADDR_PCREL14:
1814 case AARCH64_OPND_ADDR_PCREL19:
1815 case AARCH64_OPND_ADDR_PCREL21:
1816 case AARCH64_OPND_ADDR_PCREL26:
1817 imm = opnd->imm.value;
1818 if (operand_need_shift_by_two (get_operand_from_code (type)))
1819 {
	      /* The offset value in a PC-relative branch instruction is always
		 4-byte aligned and is encoded without the lowest 2 bits.  */
1822 if (!value_aligned_p (imm, 4))
1823 {
1824 set_unaligned_error (mismatch_detail, idx, 4);
1825 return 0;
1826 }
1827 /* Right shift by 2 so that we can carry out the following check
1828 canonically. */
1829 imm >>= 2;
1830 }
1831 size = get_operand_fields_width (get_operand_from_code (type));
1832 if (!value_fit_signed_field_p (imm, size))
1833 {
1834 set_other_error (mismatch_detail, idx,
1835 _("immediate out of range"));
1836 return 0;
1837 }
1838 break;
1839
1840 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1841 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1842 {
1843 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1844 return 0;
1845 }
1846 break;
1847
1848 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1849 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1850 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1851 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1852 min_value = -8;
1853 max_value = 7;
1854 sve_imm_offset_vl:
1855 assert (!opnd->addr.offset.is_reg);
1856 assert (opnd->addr.preind);
1857 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1858 min_value *= num;
1859 max_value *= num;
1860 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1861 || (opnd->shifter.operator_present
1862 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1863 {
1864 set_other_error (mismatch_detail, idx,
1865 _("invalid addressing mode"));
1866 return 0;
1867 }
1868 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1869 {
1870 set_offset_out_of_range_error (mismatch_detail, idx,
1871 min_value, max_value);
1872 return 0;
1873 }
1874 if (!value_aligned_p (opnd->addr.offset.imm, num))
1875 {
1876 set_unaligned_error (mismatch_detail, idx, num);
1877 return 0;
1878 }
1879 break;
1880
1881 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1882 min_value = -32;
1883 max_value = 31;
1884 goto sve_imm_offset_vl;
1885
1886 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1887 min_value = -256;
1888 max_value = 255;
1889 goto sve_imm_offset_vl;
1890
1891 case AARCH64_OPND_SVE_ADDR_RI_U6:
1892 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1893 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1894 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1895 min_value = 0;
1896 max_value = 63;
1897 sve_imm_offset:
1898 assert (!opnd->addr.offset.is_reg);
1899 assert (opnd->addr.preind);
1900 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1901 min_value *= num;
1902 max_value *= num;
1903 if (opnd->shifter.operator_present
1904 || opnd->shifter.amount_present)
1905 {
1906 set_other_error (mismatch_detail, idx,
1907 _("invalid addressing mode"));
1908 return 0;
1909 }
1910 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1911 {
1912 set_offset_out_of_range_error (mismatch_detail, idx,
1913 min_value, max_value);
1914 return 0;
1915 }
1916 if (!value_aligned_p (opnd->addr.offset.imm, num))
1917 {
1918 set_unaligned_error (mismatch_detail, idx, num);
1919 return 0;
1920 }
1921 break;
1922
1923 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1924 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1925 min_value = -8;
1926 max_value = 7;
1927 goto sve_imm_offset;
1928
1929 case AARCH64_OPND_SVE_ADDR_ZX:
1930 /* Everything is already ensured by parse_operands or
1931 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1932 argument type). */
1933 assert (opnd->addr.offset.is_reg);
1934 assert (opnd->addr.preind);
1935 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1936 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1937 assert (opnd->shifter.operator_present == 0);
1938 break;
1939
1940 case AARCH64_OPND_SVE_ADDR_R:
1941 case AARCH64_OPND_SVE_ADDR_RR:
1942 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1943 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1944 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1945 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1946 case AARCH64_OPND_SVE_ADDR_RX:
1947 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1948 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1949 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1950 case AARCH64_OPND_SVE_ADDR_RZ:
1951 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1952 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1953 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1954 modifiers = 1 << AARCH64_MOD_LSL;
1955 sve_rr_operand:
1956 assert (opnd->addr.offset.is_reg);
1957 assert (opnd->addr.preind);
1958 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1959 && opnd->addr.offset.regno == 31)
1960 {
1961 set_other_error (mismatch_detail, idx,
1962 _("index register xzr is not allowed"));
1963 return 0;
1964 }
1965 if (((1 << opnd->shifter.kind) & modifiers) == 0
1966 || (opnd->shifter.amount
1967 != get_operand_specific_data (&aarch64_operands[type])))
1968 {
1969 set_other_error (mismatch_detail, idx,
1970 _("invalid addressing mode"));
1971 return 0;
1972 }
1973 break;
1974
1975 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1976 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1977 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1978 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1979 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1980 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1981 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1982 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1983 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1984 goto sve_rr_operand;
1985
1986 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1987 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1988 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1989 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1990 min_value = 0;
1991 max_value = 31;
1992 goto sve_imm_offset;
1993
1994 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1995 modifiers = 1 << AARCH64_MOD_LSL;
1996 sve_zz_operand:
1997 assert (opnd->addr.offset.is_reg);
1998 assert (opnd->addr.preind);
1999 if (((1 << opnd->shifter.kind) & modifiers) == 0
2000 || opnd->shifter.amount < 0
2001 || opnd->shifter.amount > 3)
2002 {
2003 set_other_error (mismatch_detail, idx,
2004 _("invalid addressing mode"));
2005 return 0;
2006 }
2007 break;
2008
2009 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2010 modifiers = (1 << AARCH64_MOD_SXTW);
2011 goto sve_zz_operand;
2012
2013 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2014 modifiers = 1 << AARCH64_MOD_UXTW;
2015 goto sve_zz_operand;
2016
2017 default:
2018 break;
2019 }
2020 break;
2021
2022 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2023 if (type == AARCH64_OPND_LEt)
2024 {
2025 /* Get the upper bound for the element index. */
2026 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2027 if (!value_in_range_p (opnd->reglist.index, 0, num))
2028 {
2029 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2030 return 0;
2031 }
2032 }
2033 /* The opcode dependent area stores the number of elements in
2034 each structure to be loaded/stored. */
2035 num = get_opcode_dependent_value (opcode);
2036 switch (type)
2037 {
2038 case AARCH64_OPND_LVt:
2039 assert (num >= 1 && num <= 4);
2040 /* Unless LD1/ST1, the number of registers should be equal to that
2041 of the structure elements. */
2042 if (num != 1 && opnd->reglist.num_regs != num)
2043 {
2044 set_reg_list_error (mismatch_detail, idx, num);
2045 return 0;
2046 }
2047 break;
2048 case AARCH64_OPND_LVt_AL:
2049 case AARCH64_OPND_LEt:
2050 assert (num >= 1 && num <= 4);
2051 /* The number of registers should be equal to that of the structure
2052 elements. */
2053 if (opnd->reglist.num_regs != num)
2054 {
2055 set_reg_list_error (mismatch_detail, idx, num);
2056 return 0;
2057 }
2058 break;
2059 default:
2060 break;
2061 }
2062 break;
2063
2064 case AARCH64_OPND_CLASS_IMMEDIATE:
2065 /* Constraint check on immediate operand. */
2066 imm = opnd->imm.value;
2067 /* E.g. imm_0_31 constrains value to be 0..31. */
2068 if (qualifier_value_in_range_constraint_p (qualifier)
2069 && !value_in_range_p (imm, get_lower_bound (qualifier),
2070 get_upper_bound (qualifier)))
2071 {
2072 set_imm_out_of_range_error (mismatch_detail, idx,
2073 get_lower_bound (qualifier),
2074 get_upper_bound (qualifier));
2075 return 0;
2076 }
2077
2078 switch (type)
2079 {
2080 case AARCH64_OPND_AIMM:
2081 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2082 {
2083 set_other_error (mismatch_detail, idx,
2084 _("invalid shift operator"));
2085 return 0;
2086 }
2087 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2088 {
2089 set_other_error (mismatch_detail, idx,
2090 _("shift amount must be 0 or 12"));
2091 return 0;
2092 }
2093 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2094 {
2095 set_other_error (mismatch_detail, idx,
2096 _("immediate out of range"));
2097 return 0;
2098 }
2099 break;
2100
2101 case AARCH64_OPND_HALF:
2102 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2103 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2104 {
2105 set_other_error (mismatch_detail, idx,
2106 _("invalid shift operator"));
2107 return 0;
2108 }
2109 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2110 if (!value_aligned_p (opnd->shifter.amount, 16))
2111 {
2112 set_other_error (mismatch_detail, idx,
2113 _("shift amount must be a multiple of 16"));
2114 return 0;
2115 }
2116 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2117 {
2118 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2119 0, size * 8 - 16);
2120 return 0;
2121 }
2122 if (opnd->imm.value < 0)
2123 {
2124 set_other_error (mismatch_detail, idx,
2125 _("negative immediate value not allowed"));
2126 return 0;
2127 }
2128 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2129 {
2130 set_other_error (mismatch_detail, idx,
2131 _("immediate out of range"));
2132 return 0;
2133 }
2134 break;
2135
2136 case AARCH64_OPND_IMM_MOV:
2137 {
2138 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2139 imm = opnd->imm.value;
2140 assert (idx == 1);
2141 switch (opcode->op)
2142 {
2143 case OP_MOV_IMM_WIDEN:
2144 imm = ~imm;
2145 /* Fall through. */
2146 case OP_MOV_IMM_WIDE:
2147 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2148 {
2149 set_other_error (mismatch_detail, idx,
2150 _("immediate out of range"));
2151 return 0;
2152 }
2153 break;
2154 case OP_MOV_IMM_LOG:
2155 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2156 {
2157 set_other_error (mismatch_detail, idx,
2158 _("immediate out of range"));
2159 return 0;
2160 }
2161 break;
2162 default:
2163 assert (0);
2164 return 0;
2165 }
2166 }
2167 break;
2168
2169 case AARCH64_OPND_NZCV:
2170 case AARCH64_OPND_CCMP_IMM:
2171 case AARCH64_OPND_EXCEPTION:
2172 case AARCH64_OPND_UNDEFINED:
2173 case AARCH64_OPND_TME_UIMM16:
2174 case AARCH64_OPND_UIMM4:
2175 case AARCH64_OPND_UIMM4_ADDG:
2176 case AARCH64_OPND_UIMM7:
2177 case AARCH64_OPND_UIMM3_OP1:
2178 case AARCH64_OPND_UIMM3_OP2:
2179 case AARCH64_OPND_SVE_UIMM3:
2180 case AARCH64_OPND_SVE_UIMM7:
2181 case AARCH64_OPND_SVE_UIMM8:
2182 case AARCH64_OPND_SVE_UIMM8_53:
2183 size = get_operand_fields_width (get_operand_from_code (type));
2184 assert (size < 32);
2185 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2186 {
2187 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2188 (1u << size) - 1);
2189 return 0;
2190 }
2191 break;
2192
2193 case AARCH64_OPND_UIMM10:
2194 /* Scaled unsigned 10 bits immediate offset. */
2195 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2196 {
2197 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2198 return 0;
2199 }
2200
2201 if (!value_aligned_p (opnd->imm.value, 16))
2202 {
2203 set_unaligned_error (mismatch_detail, idx, 16);
2204 return 0;
2205 }
2206 break;
2207
2208 case AARCH64_OPND_SIMM5:
2209 case AARCH64_OPND_SVE_SIMM5:
2210 case AARCH64_OPND_SVE_SIMM5B:
2211 case AARCH64_OPND_SVE_SIMM6:
2212 case AARCH64_OPND_SVE_SIMM8:
2213 size = get_operand_fields_width (get_operand_from_code (type));
2214 assert (size < 32);
2215 if (!value_fit_signed_field_p (opnd->imm.value, size))
2216 {
2217 set_imm_out_of_range_error (mismatch_detail, idx,
2218 -(1 << (size - 1)),
2219 (1 << (size - 1)) - 1);
2220 return 0;
2221 }
2222 break;
2223
2224 case AARCH64_OPND_WIDTH:
2225 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2226 && opnds[0].type == AARCH64_OPND_Rd);
2227 size = get_upper_bound (qualifier);
2228 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2229 /* lsb+width <= reg.size */
2230 {
2231 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2232 size - opnds[idx-1].imm.value);
2233 return 0;
2234 }
2235 break;
2236
2237 case AARCH64_OPND_LIMM:
2238 case AARCH64_OPND_SVE_LIMM:
2239 {
2240 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2241 uint64_t uimm = opnd->imm.value;
2242 if (opcode->op == OP_BIC)
2243 uimm = ~uimm;
2244 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2245 {
2246 set_other_error (mismatch_detail, idx,
2247 _("immediate out of range"));
2248 return 0;
2249 }
2250 }
2251 break;
2252
2253 case AARCH64_OPND_IMM0:
2254 case AARCH64_OPND_FPIMM0:
2255 if (opnd->imm.value != 0)
2256 {
2257 set_other_error (mismatch_detail, idx,
2258 _("immediate zero expected"));
2259 return 0;
2260 }
2261 break;
2262
2263 case AARCH64_OPND_IMM_ROT1:
2264 case AARCH64_OPND_IMM_ROT2:
2265 case AARCH64_OPND_SVE_IMM_ROT2:
2266 if (opnd->imm.value != 0
2267 && opnd->imm.value != 90
2268 && opnd->imm.value != 180
2269 && opnd->imm.value != 270)
2270 {
2271 set_other_error (mismatch_detail, idx,
2272 _("rotate expected to be 0, 90, 180 or 270"));
2273 return 0;
2274 }
2275 break;
2276
2277 case AARCH64_OPND_IMM_ROT3:
2278 case AARCH64_OPND_SVE_IMM_ROT1:
2279 case AARCH64_OPND_SVE_IMM_ROT3:
2280 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2281 {
2282 set_other_error (mismatch_detail, idx,
2283 _("rotate expected to be 90 or 270"));
2284 return 0;
2285 }
2286 break;
2287
2288 case AARCH64_OPND_SHLL_IMM:
2289 assert (idx == 2);
2290 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2291 if (opnd->imm.value != size)
2292 {
2293 set_other_error (mismatch_detail, idx,
2294 _("invalid shift amount"));
2295 return 0;
2296 }
2297 break;
2298
2299 case AARCH64_OPND_IMM_VLSL:
2300 size = aarch64_get_qualifier_esize (qualifier);
2301 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2302 {
2303 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2304 size * 8 - 1);
2305 return 0;
2306 }
2307 break;
2308
2309 case AARCH64_OPND_IMM_VLSR:
2310 size = aarch64_get_qualifier_esize (qualifier);
2311 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2312 {
2313 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2314 return 0;
2315 }
2316 break;
2317
2318 case AARCH64_OPND_SIMD_IMM:
2319 case AARCH64_OPND_SIMD_IMM_SFT:
2320 /* Qualifier check. */
2321 switch (qualifier)
2322 {
2323 case AARCH64_OPND_QLF_LSL:
2324 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2325 {
2326 set_other_error (mismatch_detail, idx,
2327 _("invalid shift operator"));
2328 return 0;
2329 }
2330 break;
2331 case AARCH64_OPND_QLF_MSL:
2332 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2333 {
2334 set_other_error (mismatch_detail, idx,
2335 _("invalid shift operator"));
2336 return 0;
2337 }
2338 break;
2339 case AARCH64_OPND_QLF_NIL:
2340 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2341 {
2342 set_other_error (mismatch_detail, idx,
2343 _("shift is not permitted"));
2344 return 0;
2345 }
2346 break;
2347 default:
2348 assert (0);
2349 return 0;
2350 }
2351 /* Is the immediate valid? */
2352 assert (idx == 1);
2353 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2354 {
2355 /* uimm8 or simm8 */
2356 if (!value_in_range_p (opnd->imm.value, -128, 255))
2357 {
2358 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2359 return 0;
2360 }
2361 }
2362 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2363 {
2364 /* uimm64 is not
2365 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2366 ffffffffgggggggghhhhhhhh'. */
2367 set_other_error (mismatch_detail, idx,
2368 _("invalid value for immediate"));
2369 return 0;
2370 }
2371 /* Is the shift amount valid? */
2372 switch (opnd->shifter.kind)
2373 {
2374 case AARCH64_MOD_LSL:
2375 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2376 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2377 {
2378 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2379 (size - 1) * 8);
2380 return 0;
2381 }
2382 if (!value_aligned_p (opnd->shifter.amount, 8))
2383 {
2384 set_unaligned_error (mismatch_detail, idx, 8);
2385 return 0;
2386 }
2387 break;
2388 case AARCH64_MOD_MSL:
2389 /* Only 8 and 16 are valid shift amount. */
2390 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2391 {
2392 set_other_error (mismatch_detail, idx,
2393 _("shift amount must be 0 or 16"));
2394 return 0;
2395 }
2396 break;
2397 default:
2398 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2399 {
2400 set_other_error (mismatch_detail, idx,
2401 _("invalid shift operator"));
2402 return 0;
2403 }
2404 break;
2405 }
2406 break;
2407
2408 case AARCH64_OPND_FPIMM:
2409 case AARCH64_OPND_SIMD_FPIMM:
2410 case AARCH64_OPND_SVE_FPIMM8:
2411 if (opnd->imm.is_fp == 0)
2412 {
2413 set_other_error (mismatch_detail, idx,
2414 _("floating-point immediate expected"));
2415 return 0;
2416 }
2417 /* The value is expected to be an 8-bit floating-point constant with
2418 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2419 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2420 instruction). */
2421 if (!value_in_range_p (opnd->imm.value, 0, 255))
2422 {
2423 set_other_error (mismatch_detail, idx,
2424 _("immediate out of range"));
2425 return 0;
2426 }
2427 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2428 {
2429 set_other_error (mismatch_detail, idx,
2430 _("invalid shift operator"));
2431 return 0;
2432 }
2433 break;
2434
2435 case AARCH64_OPND_SVE_AIMM:
2436 min_value = 0;
2437 sve_aimm:
2438 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2439 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2440 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2441 uvalue = opnd->imm.value;
2442 shift = opnd->shifter.amount;
2443 if (size == 1)
2444 {
2445 if (shift != 0)
2446 {
2447 set_other_error (mismatch_detail, idx,
2448 _("no shift amount allowed for"
2449 " 8-bit constants"));
2450 return 0;
2451 }
2452 }
2453 else
2454 {
2455 if (shift != 0 && shift != 8)
2456 {
2457 set_other_error (mismatch_detail, idx,
2458 _("shift amount must be 0 or 8"));
2459 return 0;
2460 }
2461 if (shift == 0 && (uvalue & 0xff) == 0)
2462 {
2463 shift = 8;
2464 uvalue = (int64_t) uvalue / 256;
2465 }
2466 }
2467 mask >>= shift;
2468 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2469 {
2470 set_other_error (mismatch_detail, idx,
2471 _("immediate too big for element size"));
2472 return 0;
2473 }
2474 uvalue = (uvalue - min_value) & mask;
2475 if (uvalue > 0xff)
2476 {
2477 set_other_error (mismatch_detail, idx,
2478 _("invalid arithmetic immediate"));
2479 return 0;
2480 }
2481 break;
2482
2483 case AARCH64_OPND_SVE_ASIMM:
2484 min_value = -128;
2485 goto sve_aimm;
2486
2487 case AARCH64_OPND_SVE_I1_HALF_ONE:
2488 assert (opnd->imm.is_fp);
2489 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2490 {
2491 set_other_error (mismatch_detail, idx,
2492 _("floating-point value must be 0.5 or 1.0"));
2493 return 0;
2494 }
2495 break;
2496
2497 case AARCH64_OPND_SVE_I1_HALF_TWO:
2498 assert (opnd->imm.is_fp);
2499 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2500 {
2501 set_other_error (mismatch_detail, idx,
2502 _("floating-point value must be 0.5 or 2.0"));
2503 return 0;
2504 }
2505 break;
2506
2507 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2508 assert (opnd->imm.is_fp);
2509 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2510 {
2511 set_other_error (mismatch_detail, idx,
2512 _("floating-point value must be 0.0 or 1.0"));
2513 return 0;
2514 }
2515 break;
2516
2517 case AARCH64_OPND_SVE_INV_LIMM:
2518 {
2519 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2520 uint64_t uimm = ~opnd->imm.value;
2521 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2522 {
2523 set_other_error (mismatch_detail, idx,
2524 _("immediate out of range"));
2525 return 0;
2526 }
2527 }
2528 break;
2529
2530 case AARCH64_OPND_SVE_LIMM_MOV:
2531 {
2532 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2533 uint64_t uimm = opnd->imm.value;
2534 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2535 {
2536 set_other_error (mismatch_detail, idx,
2537 _("immediate out of range"));
2538 return 0;
2539 }
2540 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2541 {
2542 set_other_error (mismatch_detail, idx,
2543 _("invalid replicated MOV immediate"));
2544 return 0;
2545 }
2546 }
2547 break;
2548
2549 case AARCH64_OPND_SVE_PATTERN_SCALED:
2550 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2551 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2552 {
2553 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2554 return 0;
2555 }
2556 break;
2557
2558 case AARCH64_OPND_SVE_SHLIMM_PRED:
2559 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2560 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2561 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2562 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2563 {
2564 set_imm_out_of_range_error (mismatch_detail, idx,
2565 0, 8 * size - 1);
2566 return 0;
2567 }
2568 break;
2569
2570 case AARCH64_OPND_SVE_SHRIMM_PRED:
2571 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2572 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2573 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2574 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2575 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2576 {
2577 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2578 return 0;
2579 }
2580 break;
2581
2582 default:
2583 break;
2584 }
2585 break;
2586
2587 case AARCH64_OPND_CLASS_SYSTEM:
2588 switch (type)
2589 {
2590 case AARCH64_OPND_PSTATEFIELD:
2591 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2592 /* MSR UAO, #uimm4
2593 MSR PAN, #uimm4
2594 MSR SSBS,#uimm4
2595 MSR SVCRSM, #uimm4
2596 MSR SVCRZA, #uimm4
2597 MSR SVCRSMZA, #uimm4
2598 The immediate must be #0 or #1. */
2599 if ((opnd->pstatefield == 0x03 /* UAO. */
2600 || opnd->pstatefield == 0x04 /* PAN. */
2601 || opnd->pstatefield == 0x19 /* SSBS. */
2602 || opnd->pstatefield == 0x1a /* DIT. */
2603 || opnd->pstatefield == 0x1b) /* SVCRSM, SVCRZA or SVCRSMZA. */
2604 && opnds[1].imm.value > 1)
2605 {
2606 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2607 return 0;
2608 }
2609 /* MSR SPSel, #uimm4
2610 Uses uimm4 as a control value to select the stack pointer: if
2611 bit 0 is set it selects the current exception level's stack
2612 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2613 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2614 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2615 {
2616 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2617 return 0;
2618 }
2619 break;
2620 default:
2621 break;
2622 }
2623 break;
2624
2625 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2626 /* Get the upper bound for the element index. */
2627 if (opcode->op == OP_FCMLA_ELEM)
2628 /* FCMLA index range depends on the vector size of other operands
2629 and is halfed because complex numbers take two elements. */
2630 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2631 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2632 else
2633 num = 16;
2634 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2635 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2636
2637 /* Index out-of-range. */
2638 if (!value_in_range_p (opnd->reglane.index, 0, num))
2639 {
2640 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2641 return 0;
2642 }
2643 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2644 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2645 number is encoded in "size:M:Rm":
2646 size <Vm>
2647 00 RESERVED
2648 01 0:Rm
2649 10 M:Rm
2650 11 RESERVED */
2651 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2652 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2653 {
2654 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2655 return 0;
2656 }
2657 break;
2658
2659 case AARCH64_OPND_CLASS_MODIFIED_REG:
2660 assert (idx == 1 || idx == 2);
2661 switch (type)
2662 {
2663 case AARCH64_OPND_Rm_EXT:
2664 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2665 && opnd->shifter.kind != AARCH64_MOD_LSL)
2666 {
2667 set_other_error (mismatch_detail, idx,
2668 _("extend operator expected"));
2669 return 0;
2670 }
2671 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2672 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2673 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2674 case. */
2675 if (!aarch64_stack_pointer_p (opnds + 0)
2676 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2677 {
2678 if (!opnd->shifter.operator_present)
2679 {
2680 set_other_error (mismatch_detail, idx,
2681 _("missing extend operator"));
2682 return 0;
2683 }
2684 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2685 {
2686 set_other_error (mismatch_detail, idx,
2687 _("'LSL' operator not allowed"));
2688 return 0;
2689 }
2690 }
2691 assert (opnd->shifter.operator_present /* Default to LSL. */
2692 || opnd->shifter.kind == AARCH64_MOD_LSL);
2693 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2694 {
2695 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2696 return 0;
2697 }
2698 /* In the 64-bit form, the final register operand is written as Wm
2699 for all but the (possibly omitted) UXTX/LSL and SXTX
2700 operators.
2701 N.B. GAS allows X register to be used with any operator as a
2702 programming convenience. */
2703 if (qualifier == AARCH64_OPND_QLF_X
2704 && opnd->shifter.kind != AARCH64_MOD_LSL
2705 && opnd->shifter.kind != AARCH64_MOD_UXTX
2706 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2707 {
2708 set_other_error (mismatch_detail, idx, _("W register expected"));
2709 return 0;
2710 }
2711 break;
2712
2713 case AARCH64_OPND_Rm_SFT:
2714 /* ROR is not available to the shifted register operand in
2715 arithmetic instructions. */
2716 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2717 {
2718 set_other_error (mismatch_detail, idx,
2719 _("shift operator expected"));
2720 return 0;
2721 }
2722 if (opnd->shifter.kind == AARCH64_MOD_ROR
2723 && opcode->iclass != log_shift)
2724 {
2725 set_other_error (mismatch_detail, idx,
2726 _("'ROR' operator not allowed"));
2727 return 0;
2728 }
2729 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2730 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2731 {
2732 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2733 return 0;
2734 }
2735 break;
2736
2737 default:
2738 break;
2739 }
2740 break;
2741
2742 default:
2743 break;
2744 }
2745
2746 return 1;
2747 }
2748
2749 /* Main entrypoint for the operand constraint checking.
2750
2751 Return 1 if operands of *INST meet the constraint applied by the operand
2752 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2753 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2754 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2755 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2756 error kind when it is notified that an instruction does not pass the check).
2757
2758 Un-determined operand qualifiers may get established during the process. */
2759
2760 int
2761 aarch64_match_operands_constraint (aarch64_inst *inst,
2762 aarch64_operand_error *mismatch_detail)
2763 {
2764 int i;
2765
2766 DEBUG_TRACE ("enter");
2767
2768 i = inst->opcode->tied_operand;
2769
2770 if (i > 0)
2771 {
2772 /* Check for tied_operands with specific opcode iclass. */
2773 switch (inst->opcode->iclass)
2774 {
2775 /* For SME LDR and STR instructions #imm must have the same numerical
2776 value for both operands.
2777 */
2778 case sme_ldr:
2779 case sme_str:
2780 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2781 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2782 if (inst->operands[0].za_tile_vector.index.imm
2783 != inst->operands[1].addr.offset.imm)
2784 {
2785 if (mismatch_detail)
2786 {
2787 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2788 mismatch_detail->index = i;
2789 }
2790 return 0;
2791 }
2792 break;
2793
2794 default:
2795 /* Check for cases where a source register needs to be the same as the
2796 destination register. Do this before matching qualifiers since if
2797 an instruction has both invalid tying and invalid qualifiers,
2798 the error about qualifiers would suggest several alternative
2799 instructions that also have invalid tying. */
2800 if (inst->operands[0].reg.regno
2801 != inst->operands[i].reg.regno)
2802 {
2803 if (mismatch_detail)
2804 {
2805 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2806 mismatch_detail->index = i;
2807 mismatch_detail->error = NULL;
2808 }
2809 return 0;
2810 }
2811 break;
2812 }
2813 }
2814
2815 /* Match operands' qualifier.
2816 *INST has already had qualifier establish for some, if not all, of
2817 its operands; we need to find out whether these established
2818 qualifiers match one of the qualifier sequence in
2819 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2820 with the corresponding qualifier in such a sequence.
2821 Only basic operand constraint checking is done here; the more thorough
2822 constraint checking will carried out by operand_general_constraint_met_p,
2823 which has be to called after this in order to get all of the operands'
2824 qualifiers established. */
2825 if (match_operands_qualifier (inst, true /* update_p */) == 0)
2826 {
2827 DEBUG_TRACE ("FAIL on operand qualifier matching");
2828 if (mismatch_detail)
2829 {
2830 /* Return an error type to indicate that it is the qualifier
2831 matching failure; we don't care about which operand as there
2832 are enough information in the opcode table to reproduce it. */
2833 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2834 mismatch_detail->index = -1;
2835 mismatch_detail->error = NULL;
2836 }
2837 return 0;
2838 }
2839
2840 /* Match operands' constraint. */
2841 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2842 {
2843 enum aarch64_opnd type = inst->opcode->operands[i];
2844 if (type == AARCH64_OPND_NIL)
2845 break;
2846 if (inst->operands[i].skip)
2847 {
2848 DEBUG_TRACE ("skip the incomplete operand %d", i);
2849 continue;
2850 }
2851 if (operand_general_constraint_met_p (inst->operands, i, type,
2852 inst->opcode, mismatch_detail) == 0)
2853 {
2854 DEBUG_TRACE ("FAIL on operand %d", i);
2855 return 0;
2856 }
2857 }
2858
2859 DEBUG_TRACE ("PASS");
2860
2861 return 1;
2862 }
2863
2864 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2865 Also updates the TYPE of each INST->OPERANDS with the corresponding
2866 value of OPCODE->OPERANDS.
2867
2868 Note that some operand qualifiers may need to be manually cleared by
2869 the caller before it further calls the aarch64_opcode_encode; by
2870 doing this, it helps the qualifier matching facilities work
2871 properly. */
2872
2873 const aarch64_opcode*
2874 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2875 {
2876 int i;
2877 const aarch64_opcode *old = inst->opcode;
2878
2879 inst->opcode = opcode;
2880
2881 /* Update the operand types. */
2882 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2883 {
2884 inst->operands[i].type = opcode->operands[i];
2885 if (opcode->operands[i] == AARCH64_OPND_NIL)
2886 break;
2887 }
2888
2889 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2890
2891 return old;
2892 }
2893
2894 int
2895 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2896 {
2897 int i;
2898 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2899 if (operands[i] == operand)
2900 return i;
2901 else if (operands[i] == AARCH64_OPND_NIL)
2902 break;
2903 return -1;
2904 }
2905 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General-purpose register names, indexed as [has_zr][is_64][regno]
   (see get_int_reg_name):
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes; i.e. indexed as [is_d][regno]
   (see get_addr_sve_reg_name).  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2936
2937 /* Return the integer register name.
2938 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2939
2940 static inline const char *
2941 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2942 {
2943 const int has_zr = sp_reg_p ? 0 : 1;
2944 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2945 return int_reg[has_zr][is_64][regno];
2946 }
2947
2948 /* Like get_int_reg_name, but IS_64 is always 1. */
2949
2950 static inline const char *
2951 get_64bit_int_reg_name (int regno, int sp_reg_p)
2952 {
2953 const int has_zr = sp_reg_p ? 0 : 1;
2954 return int_reg[has_zr][1][regno];
2955 }
2956
2957 /* Get the name of the integer offset register in OPND, using the shift type
2958 to decide whether it's a word or doubleword. */
2959
2960 static inline const char *
2961 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2962 {
2963 switch (opnd->shifter.kind)
2964 {
2965 case AARCH64_MOD_UXTW:
2966 case AARCH64_MOD_SXTW:
2967 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2968
2969 case AARCH64_MOD_LSL:
2970 case AARCH64_MOD_SXTX:
2971 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2972
2973 default:
2974 abort ();
2975 }
2976 }
2977
2978 /* Get the name of the SVE vector offset register in OPND, using the operand
2979 qualifier to decide whether the suffix should be .S or .D. */
2980
2981 static inline const char *
2982 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2983 {
2984 assert (qualifier == AARCH64_OPND_QLF_S_S
2985 || qualifier == AARCH64_OPND_QLF_S_D);
2986 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2987 }
2988
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this deliberately mirrors single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
3008
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Decompose IMM8 into the fields the expansion replicates.  */
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  const uint32_t low7 = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t bit6 = low7 >> 6;		/* imm8<6> */
  const uint32_t bit6_x4 = bit6 ? 0xf : 0;	/* Replicate(imm8<6>,4) */
  uint64_t result;

  if (size == 8)
    {
      /* Build the top 32 bits, then shift into place; the low half of a
	 double-precision expansion is always zero.  */
      result = ((uint64_t) sign << (63 - 32))		/* imm8<7> */
	| ((uint64_t) (bit6 ^ 1) << (62 - 32))		/* NOT(imm8<6>) */
	| ((uint64_t) bit6_x4 << (58 - 32))
	| ((uint64_t) bit6 << (57 - 32))
	| ((uint64_t) bit6 << (56 - 32))
	| ((uint64_t) bit6 << (55 - 32))		/* Replicate(imm8<6>,7) */
	| ((uint64_t) low7 << (48 - 32));		/* imm8<6>:imm8<5:0> */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      result = ((uint64_t) sign << 31)	/* imm8<7> */
	| ((bit6 ^ 1) << 30)		/* NOT(imm8<6>) */
	| (bit6_x4 << 26)		/* Replicate(imm8<6>,4) */
	| (low7 << 19);			/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
3052
3053 /* Produce the string representation of the register list operand *OPND
3054 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3055 the register name that comes before the register number, such as "v". */
3056 static void
3057 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3058 const char *prefix)
3059 {
3060 const int num_regs = opnd->reglist.num_regs;
3061 const int first_reg = opnd->reglist.first_regno;
3062 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3063 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3064 char tb[8]; /* Temporary buffer. */
3065
3066 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3067 assert (num_regs >= 1 && num_regs <= 4);
3068
3069 /* Prepare the index if any. */
3070 if (opnd->reglist.has_index)
3071 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3072 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3073 else
3074 tb[0] = '\0';
3075
3076 /* The hyphenated form is preferred for disassembly if there are
3077 more than two registers in the list, and the register numbers
3078 are monotonically increasing in increments of one. */
3079 if (num_regs > 2 && last_reg > first_reg)
3080 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3081 prefix, last_reg, qlf_name, tb);
3082 else
3083 {
3084 const int reg0 = first_reg;
3085 const int reg1 = (first_reg + 1) & 0x1f;
3086 const int reg2 = (first_reg + 2) & 0x1f;
3087 const int reg3 = (first_reg + 3) & 0x1f;
3088
3089 switch (num_regs)
3090 {
3091 case 1:
3092 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3093 break;
3094 case 2:
3095 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3096 prefix, reg1, qlf_name, tb);
3097 break;
3098 case 3:
3099 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3100 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3101 prefix, reg2, qlf_name, tb);
3102 break;
3103 case 4:
3104 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3105 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3106 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3107 break;
3108 }
3109 }
3110 }
3111
3112 /* Print the register+immediate address in OPND to BUF, which has SIZE
3113 characters. BASE is the name of the base register. */
3114
3115 static void
3116 print_immediate_offset_address (char *buf, size_t size,
3117 const aarch64_opnd_info *opnd,
3118 const char *base)
3119 {
3120 if (opnd->addr.writeback)
3121 {
3122 if (opnd->addr.preind)
3123 {
3124 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3125 snprintf (buf, size, "[%s]!", base);
3126 else
3127 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3128 }
3129 else
3130 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3131 }
3132 else
3133 {
3134 if (opnd->shifter.operator_present)
3135 {
3136 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3137 snprintf (buf, size, "[%s, #%d, mul vl]",
3138 base, opnd->addr.offset.imm);
3139 }
3140 else if (opnd->addr.offset.imm)
3141 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3142 else
3143 snprintf (buf, size, "[%s]", base);
3144 }
3145 }
3146
3147 /* Produce the string representation of the register offset address operand
3148 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3149 the names of the base and offset registers. */
3150 static void
3151 print_register_offset_address (char *buf, size_t size,
3152 const aarch64_opnd_info *opnd,
3153 const char *base, const char *offset)
3154 {
3155 char tb[16]; /* Temporary buffer. */
3156 bool print_extend_p = true;
3157 bool print_amount_p = true;
3158 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3159
3160 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3161 || !opnd->shifter.amount_present))
3162 {
3163 /* Not print the shift/extend amount when the amount is zero and
3164 when it is not the special case of 8-bit load/store instruction. */
3165 print_amount_p = false;
3166 /* Likewise, no need to print the shift operator LSL in such a
3167 situation. */
3168 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3169 print_extend_p = false;
3170 }
3171
3172 /* Prepare for the extend/shift. */
3173 if (print_extend_p)
3174 {
3175 if (print_amount_p)
3176 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3177 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3178 (opnd->shifter.amount % 100));
3179 else
3180 snprintf (tb, sizeof (tb), ", %s", shift_name);
3181 }
3182 else
3183 tb[0] = '\0';
3184
3185 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3186 }
3187
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of
   tile names that represents the encoded immediate mask.

   For example:
     * An all-ones immediate is disassembled as {za}.
     * An all-zeros immediate is disassembled as an empty list {}.  */
static void
print_sme_za_list (char *buf, size_t size, int mask)
{
  /* Tile names ordered from widest coverage to single .d tiles, so the
     greedy scan below naturally yields the shortest list.  */
  static const char *const names[] = { "za", "za0.h", "za1.h", "za0.s",
				       "za1.s", "za2.s", "za3.s", "za0.d",
				       "za1.d", "za2.d", "za3.d", "za4.d",
				       "za5.d", "za6.d", "za7.d", " " };
  static const int bits[] = { 0xff, 0x55, 0xaa, 0x11,
			      0x22, 0x44, 0x88, 0x01,
			      0x02, 0x04, 0x08, 0x10,
			      0x20, 0x40, 0x80, 0x00 };
  const size_t table_len = sizeof (names) / sizeof (names[0]);
  size_t i;
  int len = snprintf (buf, size, "{");

  /* Greedily subtract each tile whose bits are wholly covered by the
     remaining mask, stopping as soon as the mask is exhausted.  */
  for (i = 0; i < table_len && mask != 0; i++)
    if ((mask & bits[i]) == bits[i])
      {
	mask &= ~bits[i];
	len += snprintf (buf + len, size - len, "%s%s",
			 len > 1 ? ", " : "", names[i]);
      }
  snprintf (buf + len, size - len, "}");
}
3227
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   NOTES, when non-NULL, may receive a diagnostic string for system-register
   operands that only partially match (see the AARCH64_OPND_SYSREG case).
   FEATURES selects which system registers are considered available.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address, char** notes,
		       aarch64_feature_set features)
{
  unsigned int i, num_conds;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;

  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    /* General-purpose (integer) registers without SP.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
    case AARCH64_OPND_SVE_Rm:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS)
	{
	  if (!opnd->present)
	    break;
	}
      /* Omit the operand, e.g. RET.  */
      else if (optional_operand_p (opcode, idx)
	       && (opnd->reg.regno
		   == get_optional_operand_default_value (opcode)))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    /* General-purpose registers where 31 means SP, not ZR.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_SVE_Rn_SP:
    case AARCH64_OPND_Rm_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    /* Register with an extend modifier, e.g. "x1, uxtw #2".  */
    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    /* Register with a shift modifier, e.g. "x1, lsl #3"; a plain LSL #0
       is omitted entirely.  */
    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* Scalar FP/SIMD registers: the qualifier supplies the size prefix
       (b/h/s/d/q), e.g. "d3".  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_SVE_VZn:
    case AARCH64_OPND_SVE_Vd:
    case AARCH64_OPND_SVE_Vm:
    case AARCH64_OPND_SVE_Vn:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    /* Vector registers with arrangement, e.g. "v5.4s".  */
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* Vector element, e.g. "v5.s[2]".  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    /* Fixed element one of a D-sized vector, e.g. "v5.d[1]".  */
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    /* Vector register lists, e.g. "{v0.16b-v3.16b}".  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");
      break;

    /* SVE/SME predicate registers; the qualifier selects between plain
       "p3", predicated "p3/z" or "p3/m", and sized "p3.b" forms.  */
    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
    case AARCH64_OPND_SME_Pm:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      else
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* SVE vector registers, with an optional element size suffix.  */
    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno)	;
      else
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* SVE vector register lists.  */
    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");
      break;

    /* Indexed SVE vector registers, e.g. "z5.s[2]".  */
    case AARCH64_OPND_SVE_Zm3_INDEX:
    case AARCH64_OPND_SVE_Zm3_22_INDEX:
    case AARCH64_OPND_SVE_Zm3_11_INDEX:
    case AARCH64_OPND_SVE_Zm4_11_INDEX:
    case AARCH64_OPND_SVE_Zm4_INDEX:
    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    /* SME ZA tiles, e.g. "za3.s".  */
    case AARCH64_OPND_SME_ZAda_2b:
    case AARCH64_OPND_SME_ZAda_3b:
      snprintf (buf, size, "za%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* SME ZA tile horizontal/vertical slices, e.g. "za1h.s[w14, 2]";
       the load/store form is additionally wrapped in braces.  */
    case AARCH64_OPND_SME_ZA_HV_idx_src:
    case AARCH64_OPND_SME_ZA_HV_idx_dest:
    case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
      snprintf (buf, size, "%sza%d%c.%s[w%d, %d]%s",
		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
		opnd->za_tile_vector.regno,
		opnd->za_tile_vector.v == 1 ? 'v' : 'h',
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->za_tile_vector.index.regno,
		opnd->za_tile_vector.index.imm,
		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
      break;

    case AARCH64_OPND_SME_list_of_64bit_tiles:
      print_sme_za_list (buf, size, opnd->reg.regno);
      break;

    /* SME ZA array vector, e.g. "za[w12, 3]".  */
    case AARCH64_OPND_SME_ZA_array:
      snprintf (buf, size, "za[w%d, %d]",
		opnd->za_tile_vector.index.regno,
		opnd->za_tile_vector.index.imm);
      break;

    /* SMSTART/SMSTOP qualifier; regno here encodes the letter 's'
       (streaming mode) rather than a register number.  */
    case AARCH64_OPND_SME_SM_ZA:
      snprintf (buf, size, "%s", opnd->reg.regno == 's' ? "sm" : "za");
      break;

    /* System-instruction Cn/Cm fields, e.g. "C7".  */
    case AARCH64_OPND_CRn:
    case AARCH64_OPND_CRm:
      snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
      break;

    /* Plain signed immediates, printed in decimal.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_MASK:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_UNDEFINED:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_TME_UIMM16:
    case AARCH64_OPND_SIMM5:
    case AARCH64_OPND_SVE_SHLIMM_PRED:
    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
    case AARCH64_OPND_SVE_SHRIMM_PRED:
    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
    case AARCH64_OPND_SVE_SIMM5:
    case AARCH64_OPND_SVE_SIMM5B:
    case AARCH64_OPND_SVE_SIMM6:
    case AARCH64_OPND_SVE_SIMM8:
    case AARCH64_OPND_SVE_UIMM3:
    case AARCH64_OPND_SVE_UIMM7:
    case AARCH64_OPND_SVE_UIMM8:
    case AARCH64_OPND_SVE_UIMM8_53:
    case AARCH64_OPND_IMM_ROT1:
    case AARCH64_OPND_IMM_ROT2:
    case AARCH64_OPND_IMM_ROT3:
    case AARCH64_OPND_SVE_IMM_ROT1:
    case AARCH64_OPND_SVE_IMM_ROT2:
    case AARCH64_OPND_SVE_IMM_ROT3:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    /* One-bit FP immediates: the stored value is a raw single-precision
       bit pattern, reinterpreted through the union for printing.  */
    case AARCH64_OPND_SVE_I1_HALF_ONE:
    case AARCH64_OPND_SVE_I1_HALF_TWO:
    case AARCH64_OPND_SVE_I1_ZERO_ONE:
      {
	single_conv_t c;
	c.i = opnd->imm.value;
	snprintf (buf, size, "#%.1f", c.f);
	break;
      }

    /* SVE predicate patterns, printed by name when one exists.  */
    case AARCH64_OPND_SVE_PATTERN:
      if (optional_operand_p (opcode, idx)
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    /* As above but with an optional trailing ", mul #n" multiplier.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      if (optional_operand_p (opcode, idx)
	  && !opnd->shifter.operator_present
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[opnd->imm.value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      if (opnd->shifter.operator_present)
	{
	  size_t len = strlen (buf);
	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
		    aarch64_operand_modifiers[opnd->shifter.kind].name,
		    opnd->shifter.amount);
	}
      break;

    /* SVE prefetch operations, printed by name when one exists.  */
    case AARCH64_OPND_SVE_PRFOP:
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
      if (aarch64_sve_prfop_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    /* MOV alias immediate, printed in hex with a decimal comment.  */
    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	    {
	      int imm32 = opnd->imm.value;
	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	    }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    /* Logical/arithmetic immediates with an optional LSL shift.  */
    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
    case AARCH64_OPND_SVE_INV_LIMM:
    case AARCH64_OPND_SVE_LIMM:
    case AARCH64_OPND_SVE_LIMM_MOV:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount)	;
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    /* SIMD immediates with an optional shift modifier (LSL or MSL).  */
    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* SVE arithmetic immediates, decimal with an optional LSL shift.  */
    case AARCH64_OPND_SVE_AIMM:
    case AARCH64_OPND_SVE_ASIMM:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    /* 8-bit FP immediates, expanded to the element size of operand 0 and
       printed in full precision.  */
    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
    case AARCH64_OPND_SVE_FPIMM8:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	    {
	      half_conv_t c;
	      c.i = expand_fp_imm (2, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	    {
	      single_conv_t c;
	      c.i = expand_fp_imm (4, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
	    {
	      double_conv_t c;
	      c.i = expand_fp_imm (8, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.d);
	    }
	  break;
	default: assert (0);
	}
      break;

    /* Unsigned immediates printed in hex; optional ones that equal the
       default are omitted.  */
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM4_ADDG:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_UIMM10:
      if (optional_operand_p (opcode, idx)
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    /* Condition codes; alternative spellings are listed in a comment.  */
    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      num_conds = ARRAY_SIZE (opnd->cond->names);
      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
	{
	  size_t len = strlen (buf);
	  if (i == 1)
	    snprintf (buf + len, size - len, "  // %s = %s",
		      opnd->cond->names[0], opnd->cond->names[i]);
	  else
	    snprintf (buf + len, size - len, ", %s",
		      opnd->cond->names[i]);
	}
      break;

    /* ADRP target: page-aligned PC plus the immediate.  */
    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    /* PC-relative branch/literal targets.  */
    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    /* Simple base-register addressing, optionally post-indexed.  */
    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    /* Base register + integer offset register.  */
    case AARCH64_OPND_ADDR_REGOFF:
    case AARCH64_OPND_SVE_ADDR_R:
    case AARCH64_OPND_SVE_ADDR_RR:
    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
    case AARCH64_OPND_SVE_ADDR_RR_LSL4:
    case AARCH64_OPND_SVE_ADDR_RX:
    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));
      break;

    /* SVE vector base + scalar offset register.  */
    case AARCH64_OPND_SVE_ADDR_ZX:
      print_register_offset_address
	(buf, size, opnd,
	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
      break;

    /* Scalar base + SVE vector offset register.  */
    case AARCH64_OPND_SVE_ADDR_RZ:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
      break;

    /* Scalar base + immediate offset.  */
    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
    case AARCH64_OPND_ADDR_OFFSET:
    case AARCH64_OPND_SME_ADDR_RI_U4xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x16:
    case AARCH64_OPND_SVE_ADDR_RI_S4x32:
    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
    case AARCH64_OPND_SVE_ADDR_RI_U6:
    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
      break;

    /* SVE vector base + immediate offset.  */
    case AARCH64_OPND_SVE_ADDR_ZI_U5:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
      print_immediate_offset_address
	(buf, size, opnd,
	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
      break;

    /* SVE vector base + SVE vector offset.  */
    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
      print_register_offset_address
	(buf, size, opnd,
	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
      break;

    /* Unsigned scaled 12-bit offset; a zero offset is omitted.  */
    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    /* Named system registers: prefer an exact match on value, access
       direction (read/write flags) and architecture features; fall back
       to the first partial match, recording a diagnostic in *NOTES.  */
    case AARCH64_OPND_SYSREG:
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	{
	  const aarch64_sys_reg *sr = aarch64_sys_regs + i;

	  bool exact_match
	    = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
	       || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
	      && AARCH64_CPU_HAS_FEATURE (features, sr->features);

	  /* Try and find an exact match, but if that fails, return the first
	     partial match that was found.  */
	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
	      && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
	      && (name == NULL || exact_match))
	    {
	      name = aarch64_sys_regs[i].name;
	      if (exact_match)
		{
		  if (notes)
		    *notes = NULL;
		  break;
		}

	      /* If we didn't match exactly, that means the presence of a flag
		 indicates what we didn't want for this instruction.  e.g. If
		 F_REG_READ is there, that means we were looking for a write
		 register.  See aarch64_ext_sysreg.
		 NOTE(review): NOTES is dereferenced unguarded here, unlike
		 the exact-match path above — presumably callers that can hit
		 a partial match always pass a non-NULL NOTES; confirm.  */
	      if (aarch64_sys_regs[i].flags & F_REG_WRITE)
		*notes = _("reading from a write-only register");
	      else if (aarch64_sys_regs[i].flags & F_REG_READ)
		*notes = _("writing to a read-only register");
	    }
	}

      if (name)
	snprintf (buf, size, "%s", name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg.value;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    /* PSTATE fields, matched by value and (for SME SVCR fields) by the
       CRm bits folded into the flags.  */
    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  {
	    /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
	       SVCRZA and SVCRSMZA.  */
	    uint32_t flags = aarch64_pstatefields[i].flags;
	    if (flags & F_REG_IN_CRM
		&& (PSTATE_DECODE_CRM (opnd->sysreg.flags)
		    != PSTATE_DECODE_CRM (flags)))
	      continue;
	    break;
	  }
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    /* System instruction operands (AT/DC/IC/TLBI/SR), printed by name.  */
    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
    case AARCH64_OPND_SYSREG_SR:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
    case AARCH64_OPND_BARRIER_DSB_NXS:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    /* Prefetch operations: named forms print the name, others the raw
       5-bit value.  */
    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "csync");
      break;

    /* BTI targets; hint options flagged NOPRINT produce no text.  */
    case AARCH64_OPND_BTI_TARGET:
      if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
	snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
3928 \f
/* Pack a system-register encoding (op0, op1, CRn, CRm, op2) into the
   16-bit form used throughout the aarch64_sys_regs table.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Named CRn/CRm field values, for readability of the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* One aarch64_sys_reg table entry.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

/* A register available on all cores (no feature gate).  */
#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

/* A register gated on a single architecture extension.  */
#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

/* A register gated on a combination of two extensions.  */
#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_RNG(n,e,f)	 SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)

#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_SME(n,e,f)	  SR_FEAT (n,e,f,SME)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)

/* Expand macro F once for each exception level 1..15 of group X.  */
#define SR_EXPAND_ELx(f,x) \
  f (x, 1),	\
  f (x, 2),	\
  f (x, 3),	\
  f (x, 4),	\
  f (x, 5),	\
  f (x, 6),	\
  f (x, 7),	\
  f (x, 8),	\
  f (x, 9),	\
  f (x, 10),	\
  f (x, 11),	\
  f (x, 12),	\
  f (x, 13),	\
  f (x, 14),	\
  f (x, 15),

/* Expand macro F for both the EL1 and EL2 groups.  */
#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
4006
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
4012 const aarch64_sys_reg aarch64_sys_regs [] =
4013 {
4014 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4015 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4016 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4017 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4018 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4019 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4020 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4021 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4022 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4023 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4024 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4025 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4026 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4027 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4028 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4029 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4030 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4031 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4032 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4033 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4034 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4035 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4036 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4037 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4038 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4039 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4040 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4041 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4042 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4043 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4044 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4045 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4046 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4047 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4048 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4049 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4050 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4051 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4052 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4053 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4054 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4055 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4056 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4057 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4058 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4059 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4060 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4061 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4062 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4063 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4064 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4065 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4066 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4067 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4068 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4069 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4070 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4071 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4072 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4073 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4074 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4075 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4076 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4077 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4078 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4079 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4080 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4081 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4082 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4083 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4084 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4085 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4086 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4087 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4088 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4089 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4090 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4091 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4092 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4093 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4094 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4095 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4096 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4097 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4098 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4099 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4100 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4101 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4102 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4103 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4104 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4105 SR_SVE ("zidr_el1", CPENC (3,0,C0,C0,7), 0),
4106 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4107 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4108 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4109 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4110 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4111 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4112 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4113 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4114 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4115 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4116 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4117 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4118 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4119 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4120 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4121 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4122 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4123 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4124 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4125 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4126 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4127 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4128 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4129 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4130 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4131 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4132 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4133 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4134 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4135 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4136 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4137 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4138 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4139 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4140 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4141 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4142 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4143 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4144 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4145 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4146 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4147 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4148 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4149 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4150 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4151 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4152 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4153 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4154 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4155 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4156 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4157 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4158 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4159 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4160 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4161 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4162 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4163 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4164 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4165 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4166 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4167 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4168 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4169 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4170 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4171 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4172 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4173 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4174 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4175 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4176 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4177 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4178 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4179 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4180 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4181 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4182 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4183 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4184 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4185 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4186 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4187 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4188 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4189 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4190 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4191 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4192 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4193 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4194 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4195 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4196 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4197 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4198 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4199 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4200 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4201 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4202 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4203 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4204 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4205 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4206 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4207 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4208 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4209 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4210 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4211 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4212 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4213 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4214 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4215 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4216 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4217 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4218 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4219 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4220 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4221 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4222 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4223 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4224 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4225 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4226 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4227 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4228 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4229 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4230 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4231 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4232 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4233 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4234 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4235 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4236 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4237 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4238 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4239 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4240 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4241 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4242 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4243 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4244 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4245 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4246 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4247 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4248 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4249 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4250 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4251 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4252 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4253 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4254 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4255 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4256 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4257 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4258 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4259 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4260 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4261 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4262 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4263 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4264 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4265 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4266 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4267 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4268 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4269 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4270 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4271 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4272 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4273 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4274 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4275 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4276 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4277 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4278 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4279 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4280 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4281 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4282 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4283 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4284 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4285 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4286 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4287 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4288 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4289 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4290 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4291 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4292 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4293 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4294 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4295 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4296 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4297 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4298 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4299 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4300 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4301 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4302 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4303 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4304 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4305 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4306 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4307 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4308 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4309 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4310 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4311 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4312 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4313 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4314 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4315 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4316 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4317 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4318 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4319 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4320 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4321 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4322 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4323 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4324 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4325 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4326 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4327 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4328 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4329 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4330 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4331 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4332 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), 0),
4333 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4334 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4335 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4336 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4337 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4338 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4339 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4340 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4341 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4342 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4343 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4344 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4345 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4346 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4347 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4348 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4349 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4350 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4351 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4352 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4353 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4354 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4355 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4356 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4357 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4358 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4359 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4360 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4361 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4362 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4363 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4364 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4365 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4366 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4367 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4368 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4369 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4370 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4371 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4372 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4373 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4374 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4375 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4376 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4377 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4378 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4379 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4380 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4381 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4382 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4383 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4384 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4385 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4386 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4387 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4388 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4389 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4390 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4391 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4392 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4393 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4394 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4395 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4396 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4397 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4398 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4399 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4400 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4401 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4402 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4403 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4404 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4405 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4406 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4407 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4408 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4409 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4410 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4411 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4412 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4413
4414 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4415 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4416 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4417 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4418 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4419 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4420 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4421 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4422 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4423 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4424 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4425
4426 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4427 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4428 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4429 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4430 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4431 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4432 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4433 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4434 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4435 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4436 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4437 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4438 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4439 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4440 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4441 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4442
4443 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4444 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4445 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4446 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4447
4448 #define ENC_BARLAR(x,n,lar) \
4449 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4450
4451 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4452 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4453
4454 SR_EXPAND_EL12 (PRBARn_ELx)
4455 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4456 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4457 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4458 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4459 SR_EXPAND_EL12 (PRLARn_ELx)
4460 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4461 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4462 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4463
4464 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4465 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4466 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4467 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4468 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4469 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4470 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4471
4472 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4473 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4474 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4475 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4476 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4477
4478 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4479 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4480 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4481 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4482 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4483 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4484 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4485 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4486 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4487 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4488 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4489 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4490 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4491 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4492 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4493 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4494 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4495 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4496 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4497 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4498 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4499 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4500 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4501 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4502 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4503 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4504 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4505 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4506 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4507 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4508 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4509 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4510 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4511 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4512 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4513 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4514 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4515 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4516 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4517 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4518 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4519 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4520 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4521 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4522 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4523 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4524 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4525 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4526 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4527 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4528 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4529 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4530 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4531 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4532 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4533 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4534 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4535 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4536 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4537 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4538 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4539 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4540 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4541 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4542 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4543 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4544 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4545 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4546 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4547 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4548 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4549 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4550 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4551 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4552 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4553 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4554 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4555 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4556 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4557 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4558 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4559 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4560 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4561 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4562 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4563 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4564 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4565 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4566 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4567 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4568 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4569 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4570 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4571 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4572 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4573 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4574 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4575 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4576 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4577 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4578 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4579 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4580 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4581 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4582 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4583 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4584 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4585 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4586 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4587 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4588 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4589 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4590 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4591 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4592 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4593 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4594 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4595 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4596 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4597 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4598 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4599 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4600 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4601 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4602 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4603 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4604 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4605 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4606 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4607 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4608 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4609 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4610 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4611 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4612 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4613 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4614 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4615 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4616 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4617 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4618 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4619 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4620 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4621 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4622 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4623 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4624 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4625 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4626 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4627 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4628 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4629 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4630 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4631 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4632 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4633 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4634 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4635 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4636 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4637 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4638 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4639 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4640 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4641 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4642 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4643 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4644 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4645 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4646 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4647 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4648 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4649 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4650 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4651 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4652 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4653 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4654 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4655 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4656 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4657 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4658 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4659 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4660 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4661 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4662 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4663 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4664 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4665 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4666 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4667 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4668 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4669 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4670 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4671 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4672 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4673 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4674 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4675 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4676 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4677 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4678 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4679 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4680 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4681 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4682 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4683 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4684 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4685 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4686 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4687 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4688 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4689 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4690 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4691 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4692 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4693
4694 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4695 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4696 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4697 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4698 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4699 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4700 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4701 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4702 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4703 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4704 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4705 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4706
4707 SR_CORE ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4708 SR_CORE ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4709 SR_CORE ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4710 SR_CORE ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4711 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4712 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4713 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4714 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4715 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4716
4717 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4718 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4719 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4720 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4721 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4722 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4723 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4724 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4725 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4726 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4727 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4728 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4729 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4730 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4731 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4732 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4733 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4734 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4735 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4736 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4737 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4738 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4739 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4740 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4741 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4742 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4743 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4744 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4745 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4746 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4747 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4748 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4749 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4750 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4751 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4752 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4753 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4754 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4755 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4756 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4757 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4758 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4759 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4760 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4761 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4762 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4763 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4764 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4765 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4766 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4767 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4768 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4769 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4770 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4771 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4772 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4773 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4774 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4775 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4776 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4777 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4778 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4779 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4780 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4781 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4782 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4783 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4784 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4785 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4786 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4787 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4788 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4789 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4790 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4791 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4792 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4793 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4794 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4795 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4796 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4797 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4798 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4799 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4800 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4801 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4802 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4803 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4804 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4805 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4806 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4807 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4808 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4809 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4810 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4811 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
4812 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
4813 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
4814 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
4815 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
4816 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
4817 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
4818 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
4819 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
4820 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
4821 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
4822
4823 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
4824
4825 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), F_REG_READ),
4826 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
4827 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
4828
4829 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
4830 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
4831 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
4832 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
4833 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
4834 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
4835 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
4836 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
4837 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
4838 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
4839 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
4840
4841 { 0, CPENC (0,0,0,0,0), 0, 0 }
4842 };
4843
4844 bool
4845 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4846 {
4847 return (reg_flags & F_DEPRECATED) != 0;
4848 }
4849
/* The CPENC values below are fairly misleading: the fields here are not in
   CPENC form, they are in op2:op1 form.  The fields are encoded by
   ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them, only the first value will be set and
   the rest are masked out to 0.  As an example, for op2 = 3 and op1 = 2,
   CPENC would produce a value of 0b110000000001000000 (0x30040) while what
   you want is 0b011010 (0x1a).  */
/* Table of PSTATE field names and their encodings, in the op2:op1 form
   described in the comment above (encoded by ins_pstatefield, not by
   CPENC).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel", 0x05, 0),
  SR_CORE ("daifset", 0x1e, 0),
  SR_CORE ("daifclr", 0x1f, 0),
  SR_PAN ("pan", 0x04, 0),
  SR_V8_2 ("uao", 0x03, 0),
  SR_SSBS ("ssbs", 0x19, 0),
  SR_V8_4 ("dit", 0x1a, 0),
  SR_MEMTAG ("tco", 0x1c, 0),
  /* The three SME SVCR aliases share encoding 0x1b; the CRm and immediate
     operand bits are carried in the flags word via
     PSTATE_ENCODE_CRM_AND_IMM.  */
  SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)),
  SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)),
  SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)),
  /* Sentinel terminating the table.  */
  { 0, CPENC (0,0,0,0,0), 0, 0 },
};
4872
4873 bool
4874 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4875 const aarch64_sys_reg *reg)
4876 {
4877 if (!(reg->flags & F_ARCHEXT))
4878 return true;
4879
4880 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4881 }
4882
/* Operand table for the IC (instruction cache maintenance) system
   instruction.  Entries flagged F_HASXT take a general-purpose register
   operand (see aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    /* Sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
4890
/* Operand table for the DC (data cache maintenance) system instruction.
   F_HASXT entries take a register operand; F_ARCHEXT entries are gated on
   an architecture extension, enforced by aarch64_sys_ins_reg_supported_p
   (e.g. the *g* tagged-memory variants require MEMTAG, cvap requires
   Armv8.2, cvadp requires CVADP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT },
    { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
    { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
    { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
    { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT },
    { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
    { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
    /* Sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
4925
/* Operand table for the AT (address translation) system instruction.
   All entries take a register operand (F_HASXT); s1e1rp/s1e1wp carry
   F_ARCHEXT and are gated on Armv8.2 in aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    /* Sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
4944
/* Operand table for the TLBI (TLB invalidate) system instruction.
   F_HASXT entries take a register operand; F_ARCHEXT entries are gated
   on an architecture extension, enforced by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable (*os) variants, gated on AARCH64_FEATURE_V8_4 in
       aarch64_sys_ins_reg_supported_p.  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range (r*) variants, also gated on AARCH64_FEATURE_V8_4 in
       aarch64_sys_ins_reg_supported_p.  */
    { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    /* Physical-address invalidation operations.  */
    { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos", CPENS (6, C8, C1, 4), 0},
    { "paall", CPENS (6, C8, C7, 4), 0},

    /* Sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
5035
/* Operand table for the speculation-restriction (CFP/DVP/CPP)
   instructions; availability is gated on AARCH64_FEATURE_PREDRES in
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions. */
    { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    /* Sentinel terminating the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
5046
5047 bool
5048 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5049 {
5050 return (sys_ins_reg->flags & F_HASXT) != 0;
5051 }
5052
/* Return TRUE if the system instruction operand named REG_NAME with
   encoding REG_VALUE, flags REG_FLAGS and required features REG_FEATURES
   is available under the CPU feature set FEATURES.

   NOTE(review): "extern" on a function definition is redundant in C;
   kept as-is to match the existing declaration style.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3: reject any operand whose name ends in "_el3". */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operands without F_ARCHEXT are part of the base architecture and are
     always available.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  /* If the operand records its own feature requirements, honour them.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  The encodings below match the *os and
     range (r*) entries of aarch64_sys_regs_tlbi.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr.  */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return true;

  /* None of the extension-gated encodings matched.  */
  return false;
}
5170
/* The register-number macros C0..C15, used in the CPENC/CPENS encodings
   above, are no longer needed; undefine them to keep them local to the
   tables.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract single bit BT of INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5190
5191 static enum err_type
5192 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5193 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5194 bool encoding ATTRIBUTE_UNUSED,
5195 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5196 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5197 {
5198 int t = BITS (insn, 4, 0);
5199 int n = BITS (insn, 9, 5);
5200 int t2 = BITS (insn, 14, 10);
5201
5202 if (BIT (insn, 23))
5203 {
5204 /* Write back enabled. */
5205 if ((t == n || t2 == n) && n != 31)
5206 return ERR_UND;
5207 }
5208
5209 if (BIT (insn, 22))
5210 {
5211 /* Load */
5212 if (t == t2)
5213 return ERR_UND;
5214 }
5215
5216 return ERR_OK;
5217 }
5218
5219 /* Verifier for vector by element 3 operands functions where the
5220 conditions `if sz:L == 11 then UNDEFINED` holds. */
5221
5222 static enum err_type
5223 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5224 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5225 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5226 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5227 {
5228 const aarch64_insn undef_pattern = 0x3;
5229 aarch64_insn value;
5230
5231 assert (inst->opcode);
5232 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5233 value = encoding ? inst->value : insn;
5234 assert (value);
5235
5236 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5237 return ERR_UND;
5238
5239 return ERR_OK;
5240 }
5241
/* Initialize an instruction sequence insn_sequence with the instruction INST.
   If INST is NULL the given insn_sequence is cleared and the sequence is left
   uninitialized. */

void
init_insn_sequence (const struct aarch64_inst *inst,
		    aarch64_instr_sequence *insn_sequence)
{
  int num_req_entries = 0;
  insn_sequence->next_insn = 0;
  /* NOTE(review): num_req_entries is still 0 here, so num_insns is always
     reset to 0 even when the C_SCAN_MOVPRFX check below raises
     num_req_entries to 1 — confirm this ordering is intentional.  */
  insn_sequence->num_insns = num_req_entries;
  /* Release any instruction held over from a previous sequence.  */
  if (insn_sequence->instr)
    XDELETE (insn_sequence->instr);
  insn_sequence->instr = NULL;

  /* Record a copy of INST as the instruction that opened this sequence.  */
  if (inst)
    {
      insn_sequence->instr = XNEW (aarch64_inst);
      memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
    }

  /* Handle all the cases here.  May need to think of something smarter than
     a giant if/else chain if this grows.  At that time, a lookup table may be
     best.  */
  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
    num_req_entries = 1;

  /* Release any stale per-sequence instruction storage.  */
  if (insn_sequence->current_insns)
    XDELETEVEC (insn_sequence->current_insns);
  insn_sequence->current_insns = NULL;

  if (num_req_entries != 0)
    {
      size_t size = num_req_entries * sizeof (aarch64_inst);
      /* NOTE(review): allocated as an array of aarch64_inst but stored as
	 aarch64_inst ** — presumably used as pointer slots by the code that
	 fills current_insns; verify against its users.  */
      insn_sequence->current_insns
        = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
      memset (insn_sequence->current_insns, 0, size);
    }
}
5281
5282
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned; if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
   and will contain the PC of the current instruction w.r.t. the section.

   If ENCODING and PC=0 then you are at the start of a section.  The
   constraints are verified against the given state insn_sequence, which is
   updated as it transitions through the verification. */
5294
5295 enum err_type
5296 verify_constraints (const struct aarch64_inst *inst,
5297 const aarch64_insn insn ATTRIBUTE_UNUSED,
5298 bfd_vma pc,
5299 bool encoding,
5300 aarch64_operand_error *mismatch_detail,
5301 aarch64_instr_sequence *insn_sequence)
5302 {
5303 assert (inst);
5304 assert (inst->opcode);
5305
5306 const struct aarch64_opcode *opcode = inst->opcode;
5307 if (!opcode->constraints && !insn_sequence->instr)
5308 return ERR_OK;
5309
5310 assert (insn_sequence);
5311
5312 enum err_type res = ERR_OK;
5313
5314 /* This instruction puts a constraint on the insn_sequence. */
5315 if (opcode->flags & F_SCAN)
5316 {
5317 if (insn_sequence->instr)
5318 {
5319 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5320 mismatch_detail->error = _("instruction opens new dependency "
5321 "sequence without ending previous one");
5322 mismatch_detail->index = -1;
5323 mismatch_detail->non_fatal = true;
5324 res = ERR_VFI;
5325 }
5326
5327 init_insn_sequence (inst, insn_sequence);
5328 return res;
5329 }
5330
5331 /* Verify constraints on an existing sequence. */
5332 if (insn_sequence->instr)
5333 {
5334 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5335 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5336 closed a previous one that we should have. */
5337 if (!encoding && pc == 0)
5338 {
5339 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5340 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5341 mismatch_detail->index = -1;
5342 mismatch_detail->non_fatal = true;
5343 res = ERR_VFI;
5344 /* Reset the sequence. */
5345 init_insn_sequence (NULL, insn_sequence);
5346 return res;
5347 }
5348
5349 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5350 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5351 {
5352 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5353 instruction for better error messages. */
5354 if (!opcode->avariant
5355 || !(*opcode->avariant &
5356 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5357 {
5358 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5359 mismatch_detail->error = _("SVE instruction expected after "
5360 "`movprfx'");
5361 mismatch_detail->index = -1;
5362 mismatch_detail->non_fatal = true;
5363 res = ERR_VFI;
5364 goto done;
5365 }
5366
5367 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5368 instruction that is allowed to be used with a MOVPRFX. */
5369 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5370 {
5371 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5372 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5373 "expected");
5374 mismatch_detail->index = -1;
5375 mismatch_detail->non_fatal = true;
5376 res = ERR_VFI;
5377 goto done;
5378 }
5379
5380 /* Next check for usage of the predicate register. */
5381 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5382 aarch64_opnd_info blk_pred, inst_pred;
5383 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5384 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5385 bool predicated = false;
5386 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5387
5388 /* Determine if the movprfx instruction used is predicated or not. */
5389 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5390 {
5391 predicated = true;
5392 blk_pred = insn_sequence->instr->operands[1];
5393 }
5394
5395 unsigned char max_elem_size = 0;
5396 unsigned char current_elem_size;
5397 int num_op_used = 0, last_op_usage = 0;
5398 int i, inst_pred_idx = -1;
5399 int num_ops = aarch64_num_of_operands (opcode);
5400 for (i = 0; i < num_ops; i++)
5401 {
5402 aarch64_opnd_info inst_op = inst->operands[i];
5403 switch (inst_op.type)
5404 {
5405 case AARCH64_OPND_SVE_Zd:
5406 case AARCH64_OPND_SVE_Zm_5:
5407 case AARCH64_OPND_SVE_Zm_16:
5408 case AARCH64_OPND_SVE_Zn:
5409 case AARCH64_OPND_SVE_Zt:
5410 case AARCH64_OPND_SVE_Vm:
5411 case AARCH64_OPND_SVE_Vn:
5412 case AARCH64_OPND_Va:
5413 case AARCH64_OPND_Vn:
5414 case AARCH64_OPND_Vm:
5415 case AARCH64_OPND_Sn:
5416 case AARCH64_OPND_Sm:
5417 if (inst_op.reg.regno == blk_dest.reg.regno)
5418 {
5419 num_op_used++;
5420 last_op_usage = i;
5421 }
5422 current_elem_size
5423 = aarch64_get_qualifier_esize (inst_op.qualifier);
5424 if (current_elem_size > max_elem_size)
5425 max_elem_size = current_elem_size;
5426 break;
5427 case AARCH64_OPND_SVE_Pd:
5428 case AARCH64_OPND_SVE_Pg3:
5429 case AARCH64_OPND_SVE_Pg4_5:
5430 case AARCH64_OPND_SVE_Pg4_10:
5431 case AARCH64_OPND_SVE_Pg4_16:
5432 case AARCH64_OPND_SVE_Pm:
5433 case AARCH64_OPND_SVE_Pn:
5434 case AARCH64_OPND_SVE_Pt:
5435 case AARCH64_OPND_SME_Pm:
5436 inst_pred = inst_op;
5437 inst_pred_idx = i;
5438 break;
5439 default:
5440 break;
5441 }
5442 }
5443
5444 assert (max_elem_size != 0);
5445 aarch64_opnd_info inst_dest = inst->operands[0];
5446 /* Determine the size that should be used to compare against the
5447 movprfx size. */
5448 current_elem_size
5449 = opcode->constraints & C_MAX_ELEM
5450 ? max_elem_size
5451 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5452
5453 /* If movprfx is predicated do some extra checks. */
5454 if (predicated)
5455 {
5456 /* The instruction must be predicated. */
5457 if (inst_pred_idx < 0)
5458 {
5459 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5460 mismatch_detail->error = _("predicated instruction expected "
5461 "after `movprfx'");
5462 mismatch_detail->index = -1;
5463 mismatch_detail->non_fatal = true;
5464 res = ERR_VFI;
5465 goto done;
5466 }
5467
5468 /* The instruction must have a merging predicate. */
5469 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5470 {
5471 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5472 mismatch_detail->error = _("merging predicate expected due "
5473 "to preceding `movprfx'");
5474 mismatch_detail->index = inst_pred_idx;
5475 mismatch_detail->non_fatal = true;
5476 res = ERR_VFI;
5477 goto done;
5478 }
5479
5480 /* The same register must be used in instruction. */
5481 if (blk_pred.reg.regno != inst_pred.reg.regno)
5482 {
5483 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5484 mismatch_detail->error = _("predicate register differs "
5485 "from that in preceding "
5486 "`movprfx'");
5487 mismatch_detail->index = inst_pred_idx;
5488 mismatch_detail->non_fatal = true;
5489 res = ERR_VFI;
5490 goto done;
5491 }
5492 }
5493
5494 /* Destructive operations by definition must allow one usage of the
5495 same register. */
5496 int allowed_usage
5497 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5498
5499 /* Operand is not used at all. */
5500 if (num_op_used == 0)
5501 {
5502 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5503 mismatch_detail->error = _("output register of preceding "
5504 "`movprfx' not used in current "
5505 "instruction");
5506 mismatch_detail->index = 0;
5507 mismatch_detail->non_fatal = true;
5508 res = ERR_VFI;
5509 goto done;
5510 }
5511
5512 /* We now know it's used, now determine exactly where it's used. */
5513 if (blk_dest.reg.regno != inst_dest.reg.regno)
5514 {
5515 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5516 mismatch_detail->error = _("output register of preceding "
5517 "`movprfx' expected as output");
5518 mismatch_detail->index = 0;
5519 mismatch_detail->non_fatal = true;
5520 res = ERR_VFI;
5521 goto done;
5522 }
5523
5524 /* Operand used more than allowed for the specific opcode type. */
5525 if (num_op_used > allowed_usage)
5526 {
5527 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5528 mismatch_detail->error = _("output register of preceding "
5529 "`movprfx' used as input");
5530 mismatch_detail->index = last_op_usage;
5531 mismatch_detail->non_fatal = true;
5532 res = ERR_VFI;
5533 goto done;
5534 }
5535
5536 /* Now the only thing left is the qualifiers checks. The register
5537 must have the same maximum element size. */
5538 if (inst_dest.qualifier
5539 && blk_dest.qualifier
5540 && current_elem_size
5541 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5542 {
5543 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5544 mismatch_detail->error = _("register size not compatible with "
5545 "previous `movprfx'");
5546 mismatch_detail->index = 0;
5547 mismatch_detail->non_fatal = true;
5548 res = ERR_VFI;
5549 goto done;
5550 }
5551 }
5552
5553 done:
5554 /* Add the new instruction to the sequence. */
5555 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5556 inst, sizeof (aarch64_inst));
5557
5558 /* Check if sequence is now full. */
5559 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5560 {
5561 /* Sequence is full, but we don't have anything special to do for now,
5562 so clear and reset it. */
5563 init_insn_sequence (NULL, insn_sequence);
5564 }
5565 }
5566
5567 return res;
5568 }
5569
5570
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Mask of everything above the low ESIZE bytes.  The double shift keeps
     the shift count below 64 even when ESIZE is 8.  */
  uint64_t high = ((uint64_t) -1 << (esize * 4)) << (esize * 4);
  int64_t folded = (int64_t) uvalue;

  /* The upper bits must be all-zero or all-one for the immediate to be
     representable at all.  */
  if ((uvalue & ~high) != uvalue && (uvalue | high) != uvalue)
    return false;

  /* Fold the value down to the narrowest replicated element width that
     still reproduces it.  */
  bool fits32 = esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32);
  if (fits32)
    {
      folded = (int32_t) uvalue;
      bool fits16 = esize <= 2
		    || (uint16_t) uvalue == (uint16_t) (uvalue >> 16);
      if (fits16)
	{
	  folded = (int16_t) uvalue;
	  /* Anything that folds down to a single byte is within reach of
	     DUP, so DUPM is not needed.  */
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP takes an 8-bit signed immediate, optionally shifted left by 8;
     strip a trailing zero byte before the range test.  */
  if ((folded & 0xff) == 0)
    folded /= 256;
  return folded < -128 || folded >= 128;
}
5597
5598 /* Include the opcode description table as well as the operand description
5599 table. */
5600 #define VERIFIER(x) verify_##x
5601 #include "aarch64-tbl.h"