aarch64: Regularise FLD_* suffixes
[binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
27 /* Utilities. */
28
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
33 N.B. the fields are required to be in such an order that the least significant
34 field for VALUE comes first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
38
static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  /* The first variadic argument is the number of fields that follow;
     at most 5 fields are supported (e.g. op0:op1:CRn:CRm:op2).  */
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      /* Each subsequent argument names a field; fields are passed least
	 significant first, so after inserting the low bits of VALUE we
	 shift them out and move on to the next (more significant) field.  */
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert nothing. */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  aarch64_insn *code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The operand occupies no bits in the encoding; success by definition.  */
  return true;
}
91
92 /* Insert register number. */
93 bool
94 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98 {
99 insert_field (self->fields[0], code, info->reg.regno, 0);
100 return true;
101 }
102
103 /* Insert register number, index and/or other data for SIMD register element
104 operand, e.g. the last source operand in
105 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS counts how many low bits of imm5/imm4 the element size
	 occupies: S_B -> 0, S_H -> 1, S_S -> 2, S_D -> 3.  This relies
	 on the AARCH64_OPND_QLF_S_* enumerators being consecutive.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4_11, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D
	     i.e. a single set bit marks the element size and the index
	     occupies the bits above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  return false;
	}
    }
  return true;
}
193
194 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
195 bool
196 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
197 aarch64_insn *code,
198 const aarch64_inst *inst ATTRIBUTE_UNUSED,
199 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
200 {
201 /* R */
202 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
203 /* len */
204 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
205 return true;
206 }
207
208 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
209 in AdvSIMD load/store instructions. */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode: the constants below are the "opcode" column of the AdvSIMD
     load/store multiple structures encoding table, selected by the number
     of elements per structure (NUM) and the register-list length.  */
  switch (num)
    {
    case 1:
      /* LD1/ST1 with 1, 2, 3 or 4 registers.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: return false;
	}
      break;
    case 2:
      /* Two elements per structure; the 4-register form uses a distinct
	 opcode from the 2-register form.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);

  return true;
}
251
252 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
253 single structure to all lanes instructions. */
254 bool
255 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
256 const aarch64_opnd_info *info, aarch64_insn *code,
257 const aarch64_inst *inst,
258 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
259 {
260 aarch64_insn value;
261 /* The opcode dependent area stores the number of elements in
262 each structure to be loaded/stored. */
263 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
264
265 /* Rt */
266 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
267 /* S */
268 value = (aarch64_insn) 0;
269 if (is_ld1r && info->reglist.num_regs == 2)
270 /* OP_LD1R does not have alternating variant, but have "two consecutive"
271 instead. */
272 value = (aarch64_insn) 1;
273 insert_field (FLD_S, code, value, 0);
274
275 return true;
276 }
277
278 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
279 operand e.g. Vt in AdvSIMD load/store single element instructions. */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The element index is packed
     into the Q:S:size bits; the smaller the element, the more of those
     bits the index needs.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for the D variant.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> is a 2-bit sub-field starting at bit 1 of the
     asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
326
327 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
328 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
329 or SSHR <V><d>, <V><n>, #<shift>. */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* VAL is the standard encoding of the vector-arrangement qualifier;
     its low bit distinguishes the 128-bit (Q=1) from the 64-bit form.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* Right shifts are encoded as (element size * 2) - shift:
       immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* Left shifts are encoded as element size + shift:
       immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
382
383 /* Insert fields for e.g. the immediate operands in
384 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
385 bool
386 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
387 aarch64_insn *code,
388 const aarch64_inst *inst ATTRIBUTE_UNUSED,
389 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
390 {
391 int64_t imm;
392
393 imm = info->imm.value;
394 if (operand_need_shift_by_two (self))
395 imm >>= 2;
396 if (operand_need_shift_by_four (self))
397 imm >>= 4;
398 insert_all_fields (self, code, imm);
399 return true;
400 }
401
402 /* Insert immediate and its shift amount for e.g. the last operand in
403 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
404 bool
405 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
406 aarch64_insn *code, const aarch64_inst *inst,
407 aarch64_operand_error *errors)
408 {
409 /* imm16 */
410 aarch64_ins_imm (self, info, code, inst, errors);
411 /* hw */
412 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
413 return true;
414 }
415
416 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
417 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
418 bool
419 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
420 const aarch64_opnd_info *info,
421 aarch64_insn *code,
422 const aarch64_inst *inst ATTRIBUTE_UNUSED,
423 aarch64_operand_error *errors
424 ATTRIBUTE_UNUSED)
425 {
426 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
427 uint64_t imm = info->imm.value;
428 enum aarch64_modifier_kind kind = info->shifter.kind;
429 int amount = info->shifter.amount;
430 aarch64_field field = {0, 0};
431
432 /* a:b:c:d:e:f:g:h */
433 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
434 {
435 /* Either MOVI <Dd>, #<imm>
436 or MOVI <Vd>.2D, #<imm>.
437 <imm> is a 64-bit immediate
438 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
439 encoded in "a:b:c:d:e:f:g:h". */
440 imm = aarch64_shrink_expanded_imm8 (imm);
441 assert ((int)imm >= 0);
442 }
443 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
444
445 if (kind == AARCH64_MOD_NONE)
446 return true;
447
448 /* shift amount partially in cmode */
449 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
450 if (kind == AARCH64_MOD_LSL)
451 {
452 /* AARCH64_MOD_LSL: shift zeros. */
453 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
454 assert (esize == 4 || esize == 2 || esize == 1);
455 /* For 8-bit move immediate, the optional LSL #0 does not require
456 encoding. */
457 if (esize == 1)
458 return true;
459 amount >>= 3;
460 if (esize == 4)
461 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
462 else
463 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
464 }
465 else
466 {
467 /* AARCH64_MOD_MSL: shift ones. */
468 amount >>= 4;
469 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
470 }
471 insert_field_2 (&field, code, amount, 0);
472
473 return true;
474 }
475
476 /* Insert fields for an 8-bit floating-point immediate. */
477 bool
478 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
479 aarch64_insn *code,
480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
482 {
483 insert_all_fields (self, code, info->imm.value);
484 return true;
485 }
486
487 /* Insert 1-bit rotation immediate (#90 or #270). */
488 bool
489 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
490 const aarch64_opnd_info *info,
491 aarch64_insn *code, const aarch64_inst *inst,
492 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
493 {
494 uint64_t rot = (info->imm.value - 90) / 180;
495 assert (rot < 2U);
496 insert_field (self->fields[0], code, rot, inst->opcode->mask);
497 return true;
498 }
499
500 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
501 bool
502 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
506 {
507 uint64_t rot = info->imm.value / 90;
508 assert (rot < 4U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return true;
511 }
512
513 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
514 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
515 bool
516 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
517 aarch64_insn *code,
518 const aarch64_inst *inst ATTRIBUTE_UNUSED,
519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 {
521 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
522 return true;
523 }
524
525 /* Insert arithmetic immediate for e.g. the last operand in
526 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
527 bool
528 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
529 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
530 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
531 {
532 /* shift */
533 aarch64_insn value = info->shifter.amount ? 1 : 0;
534 insert_field (self->fields[0], code, value, 0);
535 /* imm12 (unsigned) */
536 insert_field (self->fields[1], code, info->imm.value, 0);
537 return true;
538 }
539
540 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
541 the operand should be inverted before encoding. */
542 static bool
543 aarch64_ins_limm_1 (const aarch64_operand *self,
544 const aarch64_opnd_info *info, aarch64_insn *code,
545 const aarch64_inst *inst, bool invert_p,
546 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
547 {
548 bool res;
549 aarch64_insn value;
550 uint64_t imm = info->imm.value;
551 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
552
553 if (invert_p)
554 imm = ~imm;
555 /* The constraint check should guarantee that this will work. */
556 res = aarch64_logical_immediate_p (imm, esize, &value);
557 if (res)
558 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
559 self->fields[0]);
560 return res;
561 }
562
563 /* Insert logical/bitmask immediate for e.g. the last operand in
564 ORR <Wd|WSP>, <Wn>, #<imm>. */
565 bool
566 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
567 aarch64_insn *code, const aarch64_inst *inst,
568 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
569 {
570 return aarch64_ins_limm_1 (self, info, code, inst,
571 inst->opcode->op == OP_BIC, errors);
572 }
573
574 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
575 bool
576 aarch64_ins_inv_limm (const aarch64_operand *self,
577 const aarch64_opnd_info *info, aarch64_insn *code,
578 const aarch64_inst *inst,
579 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
580 {
581 return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
582 }
583
584 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
585 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: these classes use a dedicated 2-bit size encoding for
	 the FP/SIMD register width.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: return false;
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size: other load/store classes take the qualifier's
	 standard value split across the size and opc<1> fields.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return true;
}
621
622 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
623 bool
624 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
627 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
628 {
629 /* Rn */
630 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
631 return true;
632 }
633
634 /* Encode the address operand for e.g.
635 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option: LSL has no modifier value of its own, so reuse UXTX's
     encoding (they encode identically for a 64-bit offset register).  */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}
667
668 /* Encode the address operand for e.g.
669 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
670 bool
671 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
672 const aarch64_opnd_info *info, aarch64_insn *code,
673 const aarch64_inst *inst ATTRIBUTE_UNUSED,
674 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
675 {
676 /* Rn */
677 insert_field (self->fields[0], code, info->addr.base_regno, 0);
678
679 /* simm9 */
680 int imm = info->addr.offset.imm;
681 insert_field (self->fields[1], code, imm, 0);
682
683 /* writeback */
684 if (info->addr.writeback)
685 {
686 assert (info->addr.preind == 1 && info->addr.postind == 0);
687 insert_field (self->fields[2], code, 1, 0);
688 }
689 return true;
690 }
691
692 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions: stored as the byte
       offset divided by the access size.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index: fields[1] distinguishes pre- from post-indexing;
     only set for pre-index (post-index leaves it clear).  INST is only
     consulted inside the asserts, hence its ATTRIBUTE_UNUSED.  */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}
725
726 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
727 bool
728 aarch64_ins_addr_simm10 (const aarch64_operand *self,
729 const aarch64_opnd_info *info,
730 aarch64_insn *code,
731 const aarch64_inst *inst ATTRIBUTE_UNUSED,
732 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
733 {
734 int imm;
735
736 /* Rn */
737 insert_field (self->fields[0], code, info->addr.base_regno, 0);
738 /* simm10 */
739 imm = info->addr.offset.imm >> 3;
740 insert_field (self->fields[1], code, imm >> 9, 0);
741 insert_field (self->fields[2], code, imm, 0);
742 /* writeback */
743 if (info->addr.writeback)
744 {
745 assert (info->addr.preind == 1 && info->addr.postind == 0);
746 insert_field (self->fields[3], code, 1, 0);
747 }
748 return true;
749 }
750
751 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
752 bool
753 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
754 const aarch64_opnd_info *info,
755 aarch64_insn *code,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED,
757 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
758 {
759 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
760
761 /* Rn */
762 insert_field (self->fields[0], code, info->addr.base_regno, 0);
763 /* uimm12 */
764 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
765 return true;
766 }
767
768 /* Encode the address operand for e.g.
769 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
770 bool
771 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
772 const aarch64_opnd_info *info, aarch64_insn *code,
773 const aarch64_inst *inst ATTRIBUTE_UNUSED,
774 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
775 {
776 /* Rn */
777 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
778 /* Rm | #<amount> */
779 if (info->addr.offset.is_reg)
780 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
781 else
782 insert_field (FLD_Rm, code, 0x1f, 0);
783 return true;
784 }
785
786 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
787 bool
788 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
789 const aarch64_opnd_info *info, aarch64_insn *code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
792 {
793 /* cond */
794 insert_field (FLD_cond, code, info->cond->value, 0);
795 return true;
796 }
797
798 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
799 bool
800 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
801 const aarch64_opnd_info *info, aarch64_insn *code,
802 const aarch64_inst *inst,
803 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
804 {
805 /* If a system instruction check if we have any restrictions on which
806 registers it can use. */
807 if (inst->opcode->iclass == ic_system)
808 {
809 uint64_t opcode_flags
810 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
811 uint32_t sysreg_flags
812 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
813
814 /* Check to see if it's read-only, else check if it's write only.
815 if it's both or unspecified don't care. */
816 if (opcode_flags == F_SYS_READ
817 && sysreg_flags
818 && sysreg_flags != F_REG_READ)
819 {
820 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
821 detail->error = _("specified register cannot be read from");
822 detail->index = info->idx;
823 detail->non_fatal = true;
824 }
825 else if (opcode_flags == F_SYS_WRITE
826 && sysreg_flags
827 && sysreg_flags != F_REG_WRITE)
828 {
829 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
830 detail->error = _("specified register cannot be written to");
831 detail->index = info->idx;
832 detail->non_fatal = true;
833 }
834 }
835 /* op0:op1:CRn:CRm:op2 */
836 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
837 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
838 return true;
839 }
840
841 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
842 bool
843 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 const aarch64_opnd_info *info, aarch64_insn *code,
845 const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
847 {
848 /* op1:op2 */
849 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
850 FLD_op2, FLD_op1);
851
852 /* Extra CRm mask. */
853 if (info->sysreg.flags | F_REG_IN_CRM)
854 insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
855 return true;
856 }
857
858 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
859 bool
860 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
861 const aarch64_opnd_info *info, aarch64_insn *code,
862 const aarch64_inst *inst ATTRIBUTE_UNUSED,
863 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
864 {
865 /* op1:CRn:CRm:op2 */
866 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
867 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
868 return true;
869 }
870
871 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
872
873 bool
874 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 const aarch64_opnd_info *info, aarch64_insn *code,
876 const aarch64_inst *inst ATTRIBUTE_UNUSED,
877 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
878 {
879 /* CRm */
880 insert_field (FLD_CRm, code, info->barrier->value, 0);
881 return true;
882 }
883
884 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
885
886 bool
887 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
888 const aarch64_opnd_info *info, aarch64_insn *code,
889 const aarch64_inst *inst ATTRIBUTE_UNUSED,
890 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
891 {
892 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
893 encoded in CRm<3:2>. */
894 aarch64_insn value = (info->barrier->value >> 2) - 4;
895 insert_field (FLD_CRm_dsb_nxs, code, value, 0);
896 return true;
897 }
898
899 /* Encode the prefetch operation option operand for e.g.
900 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
901
902 bool
903 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
904 const aarch64_opnd_info *info, aarch64_insn *code,
905 const aarch64_inst *inst ATTRIBUTE_UNUSED,
906 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
907 {
908 /* prfop in Rt */
909 insert_field (FLD_Rt, code, info->prfop->value, 0);
910 return true;
911 }
912
913 /* Encode the hint number for instructions that alias HINT but take an
914 operand. */
915
916 bool
917 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
918 const aarch64_opnd_info *info, aarch64_insn *code,
919 const aarch64_inst *inst ATTRIBUTE_UNUSED,
920 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
921 {
922 /* CRm:op2. */
923 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
924 return true;
925 }
926
927 /* Encode the extended register operand for e.g.
928 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
929 bool
930 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
931 const aarch64_opnd_info *info, aarch64_insn *code,
932 const aarch64_inst *inst ATTRIBUTE_UNUSED,
933 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
934 {
935 enum aarch64_modifier_kind kind;
936
937 /* Rm */
938 insert_field (FLD_Rm, code, info->reg.regno, 0);
939 /* option */
940 kind = info->shifter.kind;
941 if (kind == AARCH64_MOD_LSL)
942 kind = info->qualifier == AARCH64_OPND_QLF_W
943 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
944 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
945 /* imm3 */
946 insert_field (FLD_imm3, code, info->shifter.amount, 0);
947
948 return true;
949 }
950
951 /* Encode the shifted register operand for e.g.
952 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
953 bool
954 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
955 const aarch64_opnd_info *info, aarch64_insn *code,
956 const aarch64_inst *inst ATTRIBUTE_UNUSED,
957 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
958 {
959 /* Rm */
960 insert_field (FLD_Rm, code, info->reg.regno, 0);
961 /* shift */
962 insert_field (FLD_shift, code,
963 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
964 /* imm6 */
965 insert_field (FLD_imm6_10, code, info->shifter.amount, 0);
966
967 return true;
968 }
969
970 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
971 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
972 SELF's operand-dependent value. fields[0] specifies the field that
973 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
974 bool
975 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
976 const aarch64_opnd_info *info,
977 aarch64_insn *code,
978 const aarch64_inst *inst ATTRIBUTE_UNUSED,
979 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
980 {
981 int factor = 1 + get_operand_specific_data (self);
982 insert_field (self->fields[0], code, info->addr.base_regno, 0);
983 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
984 return true;
985 }
986
987 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
988 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
989 SELF's operand-dependent value. fields[0] specifies the field that
990 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
991 bool
992 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
993 const aarch64_opnd_info *info,
994 aarch64_insn *code,
995 const aarch64_inst *inst ATTRIBUTE_UNUSED,
996 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
997 {
998 int factor = 1 + get_operand_specific_data (self);
999 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1000 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1001 return true;
1002 }
1003
1004 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1005 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1006 SELF's operand-dependent value. fields[0] specifies the field that
1007 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1008 and imm3 fields, with imm3 being the less-significant part. */
1009 bool
1010 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1011 const aarch64_opnd_info *info,
1012 aarch64_insn *code,
1013 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1014 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1015 {
1016 int factor = 1 + get_operand_specific_data (self);
1017 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1018 insert_fields (code, info->addr.offset.imm / factor, 0,
1019 2, FLD_imm3, FLD_SVE_imm6);
1020 return true;
1021 }
1022
1023 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1024 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1025 value. fields[0] specifies the base register field. */
1026 bool
1027 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1028 const aarch64_opnd_info *info, aarch64_insn *code,
1029 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1030 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1031 {
1032 int factor = 1 << get_operand_specific_data (self);
1033 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1034 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1035 return true;
1036 }
1037
1038 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1039 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1040 value. fields[0] specifies the base register field. */
1041 bool
1042 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1043 const aarch64_opnd_info *info, aarch64_insn *code,
1044 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1045 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1046 {
1047 int factor = 1 << get_operand_specific_data (self);
1048 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1049 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1050 return true;
1051 }
1052
1053 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1054 is SELF's operand-dependent value. fields[0] specifies the base
1055 register field and fields[1] specifies the offset register field. */
1056 bool
1057 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1058 const aarch64_opnd_info *info, aarch64_insn *code,
1059 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1060 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1061 {
1062 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1063 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1064 return true;
1065 }
1066
1067 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1068 <shift> is SELF's operand-dependent value. fields[0] specifies the
1069 base register field, fields[1] specifies the offset register field and
1070 fields[2] is a single-bit field that selects SXTW over UXTW. */
1071 bool
1072 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1073 const aarch64_opnd_info *info, aarch64_insn *code,
1074 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1075 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1076 {
1077 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1078 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1079 if (info->shifter.kind == AARCH64_MOD_UXTW)
1080 insert_field (self->fields[2], code, 0, 0);
1081 else
1082 insert_field (self->fields[2], code, 1, 0);
1083 return true;
1084 }
1085
1086 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1087 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1088 fields[0] specifies the base register field. */
1089 bool
1090 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1091 const aarch64_opnd_info *info, aarch64_insn *code,
1092 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1093 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1094 {
1095 int factor = 1 << get_operand_specific_data (self);
1096 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1097 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1098 return true;
1099 }
1100
1101 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1102 where <modifier> is fixed by the instruction and where <msz> is a
1103 2-bit unsigned number. fields[0] specifies the base register field
1104 and fields[1] specifies the offset register field. */
1105 static bool
1106 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1107 const aarch64_opnd_info *info, aarch64_insn *code,
1108 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1109 {
1110 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1111 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1112 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1113 return true;
1114 }
1115
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* Thin wrapper: all three ZZ addressing forms (LSL/SXTW/UXTW) share
     the same field layout, so defer to the common helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1127
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* Thin wrapper: all three ZZ addressing forms (LSL/SXTW/UXTW) share
     the same field layout, so defer to the common helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1140
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* Thin wrapper: all three ZZ addressing forms (LSL/SXTW/UXTW) share
     the same field layout, so defer to the common helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1153
/* Encode an SVE ADD/SUB immediate.  The encoded value is sh:imm8,
   where the 256 bit below is the "shift left by 8" flag.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicit LSL #8: encode the low byte with the shift bit set.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* No explicit shift, but the value is a non-zero multiple of 256:
       encode value/256 with the shift bit set.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted 8-bit immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
1169
/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* Thin wrapper: the CPY/DUP immediate uses the same sh:imm8
     encoding as the ADD/SUB immediate.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1179
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* (index * 2 + 1) * esize places a set bit whose position identifies
     the element size, with the index in the bits above it (the
     triangular encoding); insert_fields splits the result into imm5
     (low part) and SVE_tszh (high part).  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
1196
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* Thin wrapper: the operand uses the standard logical-immediate
     (bitmask) encoding.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1206
1207 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1208 and where MM occupies the most-significant part. The operand-dependent
1209 value specifies the number of bits in Zn. */
1210 bool
1211 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1212 const aarch64_opnd_info *info, aarch64_insn *code,
1213 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1215 {
1216 unsigned int reg_bits = get_operand_specific_data (self);
1217 assert (info->reglane.regno < (1U << reg_bits));
1218 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1219 insert_all_fields (self, code, val);
1220 return true;
1221 }
1222
1223 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1224 to use for Zn. */
1225 bool
1226 aarch64_ins_sve_reglist (const aarch64_operand *self,
1227 const aarch64_opnd_info *info, aarch64_insn *code,
1228 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1229 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1230 {
1231 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1232 return true;
1233 }
1234
1235 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1236 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1237 field. */
1238 bool
1239 aarch64_ins_sve_scale (const aarch64_operand *self,
1240 const aarch64_opnd_info *info, aarch64_insn *code,
1241 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1242 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1243 {
1244 insert_all_fields (self, code, info->imm.value);
1245 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1246 return true;
1247 }
1248
/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The element size is taken from the qualifier of the operand that
     precedes this one, so this cannot be the first operand.  */
  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* The immediate is encoded biased by 8 * esize.  */
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
1265
/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The operand that determines the element size lies OPND_BACKSHIFT
     places before this one.  */
  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int)opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* The immediate is encoded as 16 * esize - amount.  */
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
1283
1284 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1285 The fields array specifies which field to use. */
1286 bool
1287 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1288 const aarch64_opnd_info *info,
1289 aarch64_insn *code,
1290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1292 {
1293 if (info->imm.value == 0x3f000000)
1294 insert_field (self->fields[0], code, 0, 0);
1295 else
1296 insert_field (self->fields[0], code, 1, 0);
1297 return true;
1298 }
1299
1300 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1301 The fields array specifies which field to use. */
1302 bool
1303 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1304 const aarch64_opnd_info *info,
1305 aarch64_insn *code,
1306 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1307 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1308 {
1309 if (info->imm.value == 0x3f000000)
1310 insert_field (self->fields[0], code, 0, 0);
1311 else
1312 insert_field (self->fields[0], code, 1, 0);
1313 return true;
1314 }
1315
1316 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1317 The fields array specifies which field to use. */
1318 bool
1319 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1320 const aarch64_opnd_info *info,
1321 aarch64_insn *code,
1322 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1323 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1324 {
1325 if (info->imm.value == 0)
1326 insert_field (self->fields[0], code, 0, 0);
1327 else
1328 insert_field (self->fields[0], code, 1, 0);
1329 return true;
1330 }
1331
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  /* The index base register is encoded biased by 12 (i.e. relative to
     W12).  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  /* Larger element sizes leave fewer immediate bits, so the tile number
     is packed into the top bits of the combined tile/immediate field;
     for Q the field holds only the tile number.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      /* Unsupported element qualifier.  */
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
1386
1387 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1388 separated by commas, encoded in the "imm8" field.
1389
1390 For programmer convenience an assembler must also accept the names of
1391 32-bit, 16-bit and 8-bit element tiles which are converted into the
1392 corresponding set of 64-bit element tiles.
1393 */
1394 bool
1395 aarch64_ins_sme_za_list (const aarch64_operand *self,
1396 const aarch64_opnd_info *info,
1397 aarch64_insn *code,
1398 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1399 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1400 {
1401 int fld_mask = info->imm.value;
1402 insert_field (self->fields[0], code, fld_mask, 0);
1403 return true;
1404 }
1405
1406 bool
1407 aarch64_ins_sme_za_array (const aarch64_operand *self,
1408 const aarch64_opnd_info *info,
1409 aarch64_insn *code,
1410 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1411 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1412 {
1413 int regno = info->indexed_za.index.regno - 12;
1414 int imm = info->indexed_za.index.imm;
1415 insert_field (self->fields[0], code, regno, 0);
1416 insert_field (self->fields[1], code, imm, 0);
1417 return true;
1418 }
1419
1420 bool
1421 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
1422 const aarch64_opnd_info *info,
1423 aarch64_insn *code,
1424 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1425 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1426 {
1427 int regno = info->addr.base_regno;
1428 int imm = info->addr.offset.imm;
1429 insert_field (self->fields[0], code, regno, 0);
1430 insert_field (self->fields[1], code, imm, 0);
1431 return true;
1432 }
1433
1434 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1435 bool
1436 aarch64_ins_sme_sm_za (const aarch64_operand *self,
1437 const aarch64_opnd_info *info,
1438 aarch64_insn *code,
1439 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1440 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1441 {
1442 aarch64_insn fld_crm;
1443 /* Set CRm[3:1] bits. */
1444 if (info->reg.regno == 's')
1445 fld_crm = 0x02 ; /* SVCRSM. */
1446 else if (info->reg.regno == 'z')
1447 fld_crm = 0x04; /* SVCRZA. */
1448 else
1449 return false;
1450
1451 insert_field (self->fields[0], code, fld_crm, 0);
1452 return true;
1453 }
1454
1455 /* Encode source scalable predicate register (Pn), name of the index base
1456 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1457 range 0 to one less than the number of vector elements in a 128-bit vector
1458 register, encoded in "i1:tszh:tszl".
1459 */
1460 bool
1461 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
1462 const aarch64_opnd_info *info,
1463 aarch64_insn *code,
1464 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1465 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1466 {
1467 int fld_pn = info->indexed_za.regno;
1468 int fld_rm = info->indexed_za.index.regno - 12;
1469 int imm = info->indexed_za.index.imm;
1470 int fld_i1, fld_tszh, fld_tshl;
1471
1472 insert_field (self->fields[0], code, fld_rm, 0);
1473 insert_field (self->fields[1], code, fld_pn, 0);
1474
1475 /* Optional element index, defaulting to 0, in the range 0 to one less than
1476 the number of vector elements in a 128-bit vector register, encoded in
1477 "i1:tszh:tszl".
1478
1479 i1 tszh tszl <T>
1480 0 0 000 RESERVED
1481 x x xx1 B
1482 x x x10 H
1483 x x 100 S
1484 x 1 000 D
1485 */
1486 switch (info->qualifier)
1487 {
1488 case AARCH64_OPND_QLF_S_B:
1489 /* <imm> is 4 bit value. */
1490 fld_i1 = (imm >> 3) & 0x1;
1491 fld_tszh = (imm >> 2) & 0x1;
1492 fld_tshl = ((imm << 1) | 0x1) & 0x7;
1493 break;
1494 case AARCH64_OPND_QLF_S_H:
1495 /* <imm> is 3 bit value. */
1496 fld_i1 = (imm >> 2) & 0x1;
1497 fld_tszh = (imm >> 1) & 0x1;
1498 fld_tshl = ((imm << 2) | 0x2) & 0x7;
1499 break;
1500 case AARCH64_OPND_QLF_S_S:
1501 /* <imm> is 2 bit value. */
1502 fld_i1 = (imm >> 1) & 0x1;
1503 fld_tszh = imm & 0x1;
1504 fld_tshl = 0x4;
1505 break;
1506 case AARCH64_OPND_QLF_S_D:
1507 /* <imm> is 1 bit value. */
1508 fld_i1 = imm & 0x1;
1509 fld_tszh = 0x1;
1510 fld_tshl = 0x0;
1511 break;
1512 default:
1513 return false;
1514 }
1515
1516 insert_field (self->fields[2], code, fld_i1, 0);
1517 insert_field (self->fields[3], code, fld_tszh, 0);
1518 insert_field (self->fields[4], code, fld_tshl, 0);
1519 return true;
1520 }
1521
1522 /* Insert X0-X30. Register 31 is unallocated. */
1523 bool
1524 aarch64_ins_x0_to_x30 (const aarch64_operand *self,
1525 const aarch64_opnd_info *info,
1526 aarch64_insn *code,
1527 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1528 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1529 {
1530 assert (info->reg.regno <= 30);
1531 insert_field (self->fields[0], code, info->reg.regno, 0);
1532 return true;
1533 }
1534
1535 /* Miscellaneous encoding functions. */
1536
/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  /* Pick the operand whose arrangement (<Ta>) determines size[0]:
     the source for FCVTN, the destination for FCVTL.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  /* 4S encodes as 0, 2D as 1.  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  /* Insert into size[0] only, leaving the rest of 'size' untouched.  */
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1568
1569 /* Encode size[0], i.e. bit 22, for
1570 e.g. FCVTXN <Vb><d>, <Va><n>. */
1571
1572 static void
1573 encode_asisd_fcvtxn (aarch64_inst *inst)
1574 {
1575 aarch64_insn val = 1;
1576 aarch64_field field = {0, 0};
1577 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1578 gen_sub_field (FLD_size, 0, 1, &field);
1579 insert_field_2 (&field, &inst->value, val, 0);
1580 }
1581
/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  /* Hard-coded location of the 'opc' field.  */
  const aarch64_field field = {15, 2};

  /* opc dstsize -- the destination qualifier selects the value.  */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}
1601
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  /* Try each candidate qualifier sequence in turn; the first sequence
     that matches every operand's qualifier is the variant in use.  */
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  /* The caller guaranteed that a matching variant exists.  */
  abort ();
}
1622
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Mostly alias forms whose real encoding repeats one register
   number in several fields; the register is read back out of the
   partially-encoded instruction and replicated.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1689
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The qualifier's standard value packs size and Q together; Q is the
     low bit, size the two bits above it.  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size -- the AdvSIMD load/store iclasses keep it in the vldst_size
     field instead of the generic size field.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1717
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* sf: 64-bit (X/SP) vs 32-bit (W) general-register width.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics use their own size bit but derive it the same way.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* FP 'type' field: S, D or H precision.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: return;
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* Scalar size field for AdvSIMD scalar instructions.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement (<T>) encoded in Q and a one-hot imm5 prefix.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      /* Set the single bit that identifies the element size.  */
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be an integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc<0> is the inverse of the qualifier's standard value.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1843
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
    case sme_psel:
      /* The variant is encoded as part of the immediate.  */
      break;

    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sme_misc:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      /* The variant maps directly onto the size field.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* MOD 3 For `OP_SVE_Vv_HSD`.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      /* Variants are biased by 1 in the size field.  */
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* One-hot encoding of the variant, split across tszl and sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* Variant 0 encodes as size 1, variant 1 as size 3.  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
1935
1936 /* Converters converting an alias opcode instruction to its real form. */
1937
1938 /* ROR <Wd>, <Ws>, #<shift>
1939 is equivalent to:
1940 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1941 static void
1942 convert_ror_to_extr (aarch64_inst *inst)
1943 {
1944 copy_operand_info (inst, 3, 2);
1945 copy_operand_info (inst, 2, 1);
1946 }
1947
1948 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1949 is equivalent to:
1950 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1951 static void
1952 convert_xtl_to_shll (aarch64_inst *inst)
1953 {
1954 inst->operands[2].qualifier = inst->operands[1].qualifier;
1955 inst->operands[2].imm.value = 0;
1956 }
1957
1958 /* Convert
1959 LSR <Xd>, <Xn>, #<shift>
1960 to
1961 UBFM <Xd>, <Xn>, #<shift>, #63. */
1962 static void
1963 convert_sr_to_bfm (aarch64_inst *inst)
1964 {
1965 inst->operands[3].imm.value =
1966 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1967 }
1968
1969 /* Convert MOV to ORR. */
1970 static void
1971 convert_mov_to_orr (aarch64_inst *inst)
1972 {
1973 /* MOV <Vd>.<T>, <Vn>.<T>
1974 is equivalent to:
1975 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1976 copy_operand_info (inst, 2, 1);
1977 }
1978
1979 /* When <imms> >= <immr>, the instruction written:
1980 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1981 is equivalent to:
1982 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1983
1984 static void
1985 convert_bfx_to_bfm (aarch64_inst *inst)
1986 {
1987 int64_t lsb, width;
1988
1989 /* Convert the operand. */
1990 lsb = inst->operands[2].imm.value;
1991 width = inst->operands[3].imm.value;
1992 inst->operands[2].imm.value = lsb;
1993 inst->operands[3].imm.value = lsb + width - 1;
1994 }
1995
1996 /* When <imms> < <immr>, the instruction written:
1997 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1998 is equivalent to:
1999 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2000
2001 static void
2002 convert_bfi_to_bfm (aarch64_inst *inst)
2003 {
2004 int64_t lsb, width;
2005
2006 /* Convert the operand. */
2007 lsb = inst->operands[2].imm.value;
2008 width = inst->operands[3].imm.value;
2009 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2010 {
2011 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2012 inst->operands[3].imm.value = width - 1;
2013 }
2014 else
2015 {
2016 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2017 inst->operands[3].imm.value = width - 1;
2018 }
2019 }
2020
2021 /* The instruction written:
2022 BFC <Xd>, #<lsb>, #<width>
2023 is equivalent to:
2024 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2025
2026 static void
2027 convert_bfc_to_bfm (aarch64_inst *inst)
2028 {
2029 int64_t lsb, width;
2030
2031 /* Insert XZR. */
2032 copy_operand_info (inst, 3, 2);
2033 copy_operand_info (inst, 2, 1);
2034 copy_operand_info (inst, 1, 0);
2035 inst->operands[1].reg.regno = 0x1f;
2036
2037 /* Convert the immediate operand. */
2038 lsb = inst->operands[2].imm.value;
2039 width = inst->operands[3].imm.value;
2040 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2041 {
2042 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2043 inst->operands[3].imm.value = width - 1;
2044 }
2045 else
2046 {
2047 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2048 inst->operands[3].imm.value = width - 1;
2049 }
2050 }
2051
2052 /* The instruction written:
2053 LSL <Xd>, <Xn>, #<shift>
2054 is equivalent to:
2055 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2056
2057 static void
2058 convert_lsl_to_ubfm (aarch64_inst *inst)
2059 {
2060 int64_t shift = inst->operands[2].imm.value;
2061
2062 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2063 {
2064 inst->operands[2].imm.value = (32 - shift) & 0x1f;
2065 inst->operands[3].imm.value = 31 - shift;
2066 }
2067 else
2068 {
2069 inst->operands[2].imm.value = (64 - shift) & 0x3f;
2070 inst->operands[3].imm.value = 63 - shift;
2071 }
2072 }
2073
2074 /* CINC <Wd>, <Wn>, <cond>
2075 is equivalent to:
2076 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2077
2078 static void
2079 convert_to_csel (aarch64_inst *inst)
2080 {
2081 copy_operand_info (inst, 3, 2);
2082 copy_operand_info (inst, 2, 1);
2083 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2084 }
2085
2086 /* CSET <Wd>, <cond>
2087 is equivalent to:
2088 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2089
2090 static void
2091 convert_cset_to_csinc (aarch64_inst *inst)
2092 {
2093 copy_operand_info (inst, 3, 1);
2094 copy_operand_info (inst, 2, 0);
2095 copy_operand_info (inst, 1, 0);
2096 inst->operands[1].reg.regno = 0x1f;
2097 inst->operands[2].reg.regno = 0x1f;
2098 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2099 }
2100
2101 /* MOV <Wd>, #<imm>
2102 is equivalent to:
2103 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2104
2105 static void
2106 convert_mov_to_movewide (aarch64_inst *inst)
2107 {
2108 int is32;
2109 uint32_t shift_amount;
2110 uint64_t value = ~(uint64_t)0;
2111
2112 switch (inst->opcode->op)
2113 {
2114 case OP_MOV_IMM_WIDE:
2115 value = inst->operands[1].imm.value;
2116 break;
2117 case OP_MOV_IMM_WIDEN:
2118 value = ~inst->operands[1].imm.value;
2119 break;
2120 default:
2121 return;
2122 }
2123 inst->operands[1].type = AARCH64_OPND_HALF;
2124 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2125 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
2126 /* The constraint check should have guaranteed this wouldn't happen. */
2127 return;
2128 value >>= shift_amount;
2129 value &= 0xffff;
2130 inst->operands[1].imm.value = value;
2131 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
2132 inst->operands[1].shifter.amount = shift_amount;
2133 }
2134
2135 /* MOV <Wd>, #<imm>
2136 is equivalent to:
2137 ORR <Wd>, WZR, #<imm>. */
2138
2139 static void
2140 convert_mov_to_movebitmask (aarch64_inst *inst)
2141 {
2142 copy_operand_info (inst, 2, 1);
2143 inst->operands[1].reg.regno = 0x1f;
2144 inst->operands[1].skip = 0;
2145 }
2146
2147 /* Some alias opcodes are assembled by being converted to their real-form. */
2148
2149 static void
2150 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2151 {
2152 const aarch64_opcode *alias = inst->opcode;
2153
2154 if ((alias->flags & F_CONV) == 0)
2155 goto convert_to_real_return;
2156
2157 switch (alias->op)
2158 {
2159 case OP_ASR_IMM:
2160 case OP_LSR_IMM:
2161 convert_sr_to_bfm (inst);
2162 break;
2163 case OP_LSL_IMM:
2164 convert_lsl_to_ubfm (inst);
2165 break;
2166 case OP_CINC:
2167 case OP_CINV:
2168 case OP_CNEG:
2169 convert_to_csel (inst);
2170 break;
2171 case OP_CSET:
2172 case OP_CSETM:
2173 convert_cset_to_csinc (inst);
2174 break;
2175 case OP_UBFX:
2176 case OP_BFXIL:
2177 case OP_SBFX:
2178 convert_bfx_to_bfm (inst);
2179 break;
2180 case OP_SBFIZ:
2181 case OP_BFI:
2182 case OP_UBFIZ:
2183 convert_bfi_to_bfm (inst);
2184 break;
2185 case OP_BFC:
2186 convert_bfc_to_bfm (inst);
2187 break;
2188 case OP_MOV_V:
2189 convert_mov_to_orr (inst);
2190 break;
2191 case OP_MOV_IMM_WIDE:
2192 case OP_MOV_IMM_WIDEN:
2193 convert_mov_to_movewide (inst);
2194 break;
2195 case OP_MOV_IMM_LOG:
2196 convert_mov_to_movebitmask (inst);
2197 break;
2198 case OP_ROR_IMM:
2199 convert_ror_to_extr (inst);
2200 break;
2201 case OP_SXTL:
2202 case OP_SXTL2:
2203 case OP_UXTL:
2204 case OP_UXTL2:
2205 convert_xtl_to_shll (inst);
2206 break;
2207 default:
2208 break;
2209 }
2210
2211 convert_to_real_return:
2212 aarch64_replace_opcode (inst, real);
2213 }
2214
2215 /* Encode *INST_ORI of the opcode code OPCODE.
2216 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2217 matched operand qualifier sequence in *QLF_SEQ. */
2218
2219 bool
2220 aarch64_opcode_encode (const aarch64_opcode *opcode,
2221 const aarch64_inst *inst_ori, aarch64_insn *code,
2222 aarch64_opnd_qualifier_t *qlf_seq,
2223 aarch64_operand_error *mismatch_detail,
2224 aarch64_instr_sequence* insn_sequence)
2225 {
2226 int i;
2227 const aarch64_opcode *aliased;
2228 aarch64_inst copy, *inst;
2229
2230 DEBUG_TRACE ("enter with %s", opcode->name);
2231
2232 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2233 copy = *inst_ori;
2234 inst = &copy;
2235
2236 assert (inst->opcode == NULL || inst->opcode == opcode);
2237 if (inst->opcode == NULL)
2238 inst->opcode = opcode;
2239
2240 /* Constrain the operands.
2241 After passing this, the encoding is guaranteed to succeed. */
2242 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2243 {
2244 DEBUG_TRACE ("FAIL since operand constraint not met");
2245 return 0;
2246 }
2247
2248 /* Get the base value.
2249 Note: this has to be before the aliasing handling below in order to
2250 get the base value from the alias opcode before we move on to the
2251 aliased opcode for encoding. */
2252 inst->value = opcode->opcode;
2253
2254 /* No need to do anything else if the opcode does not have any operand. */
2255 if (aarch64_num_of_operands (opcode) == 0)
2256 goto encoding_exit;
2257
2258 /* Assign operand indexes and check types. Also put the matched
2259 operand qualifiers in *QLF_SEQ to return. */
2260 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2261 {
2262 assert (opcode->operands[i] == inst->operands[i].type);
2263 inst->operands[i].idx = i;
2264 if (qlf_seq != NULL)
2265 *qlf_seq = inst->operands[i].qualifier;
2266 }
2267
2268 aliased = aarch64_find_real_opcode (opcode);
2269 /* If the opcode is an alias and it does not ask for direct encoding by
2270 itself, the instruction will be transformed to the form of real opcode
2271 and the encoding will be carried out using the rules for the aliased
2272 opcode. */
2273 if (aliased != NULL && (opcode->flags & F_CONV))
2274 {
2275 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2276 aliased->name, opcode->name);
2277 /* Convert the operands to the form of the real opcode. */
2278 convert_to_real (inst, aliased);
2279 opcode = aliased;
2280 }
2281
2282 aarch64_opnd_info *info = inst->operands;
2283
2284 /* Call the inserter of each operand. */
2285 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2286 {
2287 const aarch64_operand *opnd;
2288 enum aarch64_opnd type = opcode->operands[i];
2289 if (type == AARCH64_OPND_NIL)
2290 break;
2291 if (info->skip)
2292 {
2293 DEBUG_TRACE ("skip the incomplete operand %d", i);
2294 continue;
2295 }
2296 opnd = &aarch64_operands[type];
2297 if (operand_has_inserter (opnd)
2298 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2299 mismatch_detail))
2300 return false;
2301 }
2302
2303 /* Call opcode encoders indicated by flags. */
2304 if (opcode_has_special_coder (opcode))
2305 do_special_encoding (inst);
2306
2307 /* Possibly use the instruction class to encode the chosen qualifier
2308 variant. */
2309 aarch64_encode_variant_using_iclass (inst);
2310
2311 /* Run a verifier if the instruction has one set. */
2312 if (opcode->verifier)
2313 {
2314 enum err_type result = opcode->verifier (inst, *code, 0, true,
2315 mismatch_detail, insn_sequence);
2316 switch (result)
2317 {
2318 case ERR_UND:
2319 case ERR_UNP:
2320 case ERR_NYI:
2321 return false;
2322 default:
2323 break;
2324 }
2325 }
2326
2327 /* Always run constrain verifiers, this is needed because constrains need to
2328 maintain a global state. Regardless if the instruction has the flag set
2329 or not. */
2330 enum err_type result = verify_constraints (inst, *code, 0, true,
2331 mismatch_detail, insn_sequence);
2332 switch (result)
2333 {
2334 case ERR_UND:
2335 case ERR_UNP:
2336 case ERR_NYI:
2337 return false;
2338 default:
2339 break;
2340 }
2341
2342
2343 encoding_exit:
2344 DEBUG_TRACE ("exit with %s", opcode->name);
2345
2346 *code = inst->value;
2347
2348 return true;
2349 }