1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35      (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54 #include <limits.h>
55
56 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
57
58 /* Some systems define MIN in, e.g., param.h. */
59 #undef MIN
60 #define MIN(a,b) ((a) < (b) ? (a) : (b))
61
62 #define NUM_SLOTS 4
63 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
64 #define CURR_SLOT md.slot[md.curr_slot]
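/* Illustration: with md.curr_slot == 0, CURR_SLOT is md.slot[0] and
   PREV_SLOT wraps around to md.slot[3] ((0 + 4 - 1) % 4 == 3).  */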
65
66 #define O_pseudo_fixup (O_max + 1)
67
68 enum special_section
69 {
70 /* IA-64 ABI section pseudo-ops. */
71 SPECIAL_SECTION_BSS = 0,
72 SPECIAL_SECTION_SBSS,
73 SPECIAL_SECTION_SDATA,
74 SPECIAL_SECTION_RODATA,
75 SPECIAL_SECTION_COMMENT,
76 SPECIAL_SECTION_UNWIND,
77 SPECIAL_SECTION_UNWIND_INFO,
78 /* HPUX specific section pseudo-ops. */
79 SPECIAL_SECTION_INIT_ARRAY,
80 SPECIAL_SECTION_FINI_ARRAY,
81 };
82
83 enum reloc_func
84 {
85 FUNC_DTP_MODULE,
86 FUNC_DTP_RELATIVE,
87 FUNC_FPTR_RELATIVE,
88 FUNC_GP_RELATIVE,
89 FUNC_LT_RELATIVE,
90 FUNC_LT_RELATIVE_X,
91 FUNC_PC_RELATIVE,
92 FUNC_PLT_RELATIVE,
93 FUNC_SEC_RELATIVE,
94 FUNC_SEG_RELATIVE,
95 FUNC_TP_RELATIVE,
96 FUNC_LTV_RELATIVE,
97 FUNC_LT_FPTR_RELATIVE,
98 FUNC_LT_DTP_MODULE,
99 FUNC_LT_DTP_RELATIVE,
100 FUNC_LT_TP_RELATIVE,
101 FUNC_IPLT_RELOC,
102 #ifdef TE_VMS
103 FUNC_SLOTCOUNT_RELOC,
104 #endif
105 };
106
107 enum reg_symbol
108 {
109 REG_GR = 0,
110 REG_FR = (REG_GR + 128),
111 REG_AR = (REG_FR + 128),
112 REG_CR = (REG_AR + 128),
113 REG_DAHR = (REG_CR + 128),
114 REG_P = (REG_DAHR + 8),
115 REG_BR = (REG_P + 64),
116 REG_IP = (REG_BR + 8),
117 REG_CFM,
118 REG_PR,
119 REG_PR_ROT,
120 REG_PSR,
121 REG_PSR_L,
122 REG_PSR_UM,
123 /* The following are pseudo-registers for use by gas only. */
124 IND_CPUID,
125 IND_DBR,
126 IND_DTR,
127 IND_ITR,
128 IND_IBR,
129 IND_MSR,
130 IND_PKR,
131 IND_PMC,
132 IND_PMD,
133 IND_DAHR,
134 IND_RR,
135 /* The following pseudo-registers are used for unwind directives only: */
136 REG_PSP,
137 REG_PRIUNAT,
138 REG_NUM
139 };
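/* Under this flat numbering, e.g., f5 is REG_FR + 5 (133), ar.pfs is
   REG_AR + 64 (320), and p6 is REG_P + 6 (526).  */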
140
141 enum dynreg_type
142 {
143 DYNREG_GR = 0, /* dynamic general purpose register */
144 DYNREG_FR, /* dynamic floating point register */
145 DYNREG_PR, /* dynamic predicate register */
146 DYNREG_NUM_TYPES
147 };
148
149 enum operand_match_result
150 {
151 OPERAND_MATCH,
152 OPERAND_OUT_OF_RANGE,
153 OPERAND_MISMATCH
154 };
155
156 /* On the ia64, we can't know the address of a text label until the
157 instructions are packed into a bundle. To handle this, we keep
158 track of the list of labels that appear in front of each
159 instruction. */
160 struct label_fix
161 {
162 struct label_fix *next;
163 struct symbol *sym;
164 bool dw2_mark_labels;
165 };
166
167 #ifdef TE_VMS
168 /* An internally used relocation. */
169 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
170 #endif
171
172 /* This is the endianness of the current section. */
173 extern int target_big_endian;
174
175 /* This is the default endianness. */
176 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
177
178 void (*ia64_number_to_chars) (char *, valueT, int);
179
180 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
181 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
182
183 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
184
185 static htab_t alias_hash;
186 static htab_t alias_name_hash;
187 static htab_t secalias_hash;
188 static htab_t secalias_name_hash;
189
190 /* List of chars besides those in app.c:symbol_chars that can start an
191 operand. Used to prevent the scrubber eating vital white-space. */
192 const char ia64_symbol_chars[] = "@?";
193
194 /* Characters which always start a comment. */
195 const char comment_chars[] = "";
196
197 /* Characters which start a comment at the beginning of a line. */
198 const char line_comment_chars[] = "#";
199
200 /* Characters which may be used to separate multiple commands on a
201 single line. */
202 const char line_separator_chars[] = ";{}";
203
204 /* Characters which are used to indicate an exponent in a floating
205 point number. */
206 const char EXP_CHARS[] = "eE";
207
208 /* Characters which mean that a number is a floating point constant,
209 as in 0d1.0. */
210 const char FLT_CHARS[] = "rRsSfFdDxXpP";
211
212 /* ia64-specific option processing: */
213
214 const char *md_shortopts = "m:N:x::";
215
216 struct option md_longopts[] =
217 {
218 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
219 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
220 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
221 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
222 };
223
224 size_t md_longopts_size = sizeof (md_longopts);
225
226 static struct
227 {
228 htab_t pseudo_hash; /* pseudo opcode hash table */
229 htab_t reg_hash; /* register name hash table */
230 htab_t dynreg_hash; /* dynamic register hash table */
231 htab_t const_hash; /* constant hash table */
232 htab_t entry_hash; /* code entry hint hash table */
233
234     /* If X_op != O_absent, the register name for the instruction's
235        qualifying predicate.  If NULL, p0 is assumed for instructions
236        that are predicable.  */
237 expressionS qp;
238
239 /* Optimize for which CPU. */
240 enum
241 {
242 itanium1,
243 itanium2
244 } tune;
245
246 /* What to do when hint.b is used. */
247 enum
248 {
249 hint_b_error,
250 hint_b_warning,
251 hint_b_ok
252 } hint_b;
253
254 unsigned int
255 manual_bundling : 1,
256 debug_dv: 1,
257 detect_dv: 1,
258 explicit_mode : 1, /* which mode we're in */
259 default_explicit_mode : 1, /* which mode is the default */
260 mode_explicitly_set : 1, /* was the current mode explicitly set? */
261 auto_align : 1,
262 keep_pending_output : 1;
263
264 /* What to do when something is wrong with unwind directives. */
265 enum
266 {
267 unwind_check_warning,
268 unwind_check_error
269 } unwind_check;
270
271 /* Each bundle consists of up to three instructions. We keep
272        track of the four most recent instructions so we can correctly set
273 the end_of_insn_group for the last instruction in a bundle. */
274 int curr_slot;
275 int num_slots_in_use;
276 struct slot
277 {
278 unsigned int
279 end_of_insn_group : 1,
280 manual_bundling_on : 1,
281 manual_bundling_off : 1,
282 loc_directive_seen : 1;
283 signed char user_template; /* user-selected template, if any */
284 unsigned char qp_regno; /* qualifying predicate */
285 /* This duplicates a good fraction of "struct fix" but we
286 can't use a "struct fix" instead since we can't call
287 fix_new_exp() until we know the address of the instruction. */
288 int num_fixups;
289 struct insn_fix
290 {
291 bfd_reloc_code_real_type code;
292 enum ia64_opnd opnd; /* type of operand in need of fix */
293 unsigned int is_pcrel : 1; /* is operand pc-relative? */
294 expressionS expr; /* the value to be inserted */
295 }
296 fixup[2]; /* at most two fixups per insn */
297 struct ia64_opcode *idesc;
298 struct label_fix *label_fixups;
299 struct label_fix *tag_fixups;
300 struct unw_rec_list *unwind_record; /* Unwind directive. */
301 expressionS opnd[6];
302 const char *src_file;
303 unsigned int src_line;
304 struct dwarf2_line_info debug_line;
305 }
306 slot[NUM_SLOTS];
307
308 segT last_text_seg;
309
310 struct dynreg
311 {
312 struct dynreg *next; /* next dynamic register */
313 const char *name;
314 unsigned short base; /* the base register number */
315 unsigned short num_regs; /* # of registers in this set */
316 }
317 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
318
319 flagword flags; /* ELF-header flags */
320
321 struct mem_offset {
322 unsigned hint:1; /* is this hint currently valid? */
323 bfd_vma offset; /* mem.offset offset */
324 bfd_vma base; /* mem.offset base */
325 } mem_offset;
326
327 int path; /* number of alt. entry points seen */
328 const char **entry_labels; /* labels of all alternate paths in
329 the current DV-checking block. */
330 int maxpaths; /* size currently allocated for
331 entry_labels */
332
333 int pointer_size; /* size in bytes of a pointer */
334 int pointer_size_shift; /* shift size of a pointer for alignment */
335
336 symbolS *indregsym[IND_RR - IND_CPUID + 1];
337 }
338 md;
339
340 /* These are not const, because they are modified to MMI for non-itanium1
341 targets below. */
342 /* MFI bundle of nops. */
343 static unsigned char le_nop[16] =
344 {
345 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
346 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
347 };
348 /* MFI bundle of nops with stop-bit. */
349 static unsigned char le_nop_stop[16] =
350 {
351 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
352 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
353 };
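/* The two bundles differ only in their first byte (0x0c vs 0x0d): the low
   five bits of a bundle hold the template field, and template 0x0d is MFI
   with a trailing stop bit.  */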
354
355 /* application registers: */
356
357 #define AR_K0 0
358 #define AR_K7 7
359 #define AR_RSC 16
360 #define AR_BSP 17
361 #define AR_BSPSTORE 18
362 #define AR_RNAT 19
363 #define AR_FCR 21
364 #define AR_EFLAG 24
365 #define AR_CSD 25
366 #define AR_SSD 26
367 #define AR_CFLG 27
368 #define AR_FSR 28
369 #define AR_FIR 29
370 #define AR_FDR 30
371 #define AR_CCV 32
372 #define AR_UNAT 36
373 #define AR_FPSR 40
374 #define AR_ITC 44
375 #define AR_RUC 45
376 #define AR_PFS 64
377 #define AR_LC 65
378 #define AR_EC 66
379
380 static const struct
381 {
382 const char *name;
383 unsigned int regnum;
384 }
385 ar[] =
386 {
387 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
388 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
389 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
390 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
391 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
392 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
393 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
394 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
395 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
396 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
397 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
398 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
399 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
400 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
401 };
402
403 /* control registers: */
404
405 #define CR_DCR 0
406 #define CR_ITM 1
407 #define CR_IVA 2
408 #define CR_PTA 8
409 #define CR_GPTA 9
410 #define CR_IPSR 16
411 #define CR_ISR 17
412 #define CR_IIP 19
413 #define CR_IFA 20
414 #define CR_ITIR 21
415 #define CR_IIPA 22
416 #define CR_IFS 23
417 #define CR_IIM 24
418 #define CR_IHA 25
419 #define CR_IIB0 26
420 #define CR_IIB1 27
421 #define CR_LID 64
422 #define CR_IVR 65
423 #define CR_TPR 66
424 #define CR_EOI 67
425 #define CR_IRR0 68
426 #define CR_IRR3 71
427 #define CR_ITV 72
428 #define CR_PMV 73
429 #define CR_CMCV 74
430 #define CR_LRR0 80
431 #define CR_LRR1 81
432
433 static const struct
434 {
435 const char *name;
436 unsigned int regnum;
437 }
438 cr[] =
439 {
440 {"cr.dcr", CR_DCR},
441 {"cr.itm", CR_ITM},
442 {"cr.iva", CR_IVA},
443 {"cr.pta", CR_PTA},
444 {"cr.gpta", CR_GPTA},
445 {"cr.ipsr", CR_IPSR},
446 {"cr.isr", CR_ISR},
447 {"cr.iip", CR_IIP},
448 {"cr.ifa", CR_IFA},
449 {"cr.itir", CR_ITIR},
450 {"cr.iipa", CR_IIPA},
451 {"cr.ifs", CR_IFS},
452 {"cr.iim", CR_IIM},
453 {"cr.iha", CR_IHA},
454 {"cr.iib0", CR_IIB0},
455 {"cr.iib1", CR_IIB1},
456 {"cr.lid", CR_LID},
457 {"cr.ivr", CR_IVR},
458 {"cr.tpr", CR_TPR},
459 {"cr.eoi", CR_EOI},
460 {"cr.irr0", CR_IRR0},
461 {"cr.irr1", CR_IRR0 + 1},
462 {"cr.irr2", CR_IRR0 + 2},
463 {"cr.irr3", CR_IRR3},
464 {"cr.itv", CR_ITV},
465 {"cr.pmv", CR_PMV},
466 {"cr.cmcv", CR_CMCV},
467 {"cr.lrr0", CR_LRR0},
468 {"cr.lrr1", CR_LRR1}
469 };
470
471 #define PSR_MFL 4
472 #define PSR_IC 13
473 #define PSR_DFL 18
474 #define PSR_CPL 32
475
476 static const struct const_desc
477 {
478 const char *name;
479 valueT value;
480 }
481 const_bits[] =
482 {
483 /* PSR constant masks: */
484
485 /* 0: reserved */
486 {"psr.be", ((valueT) 1) << 1},
487 {"psr.up", ((valueT) 1) << 2},
488 {"psr.ac", ((valueT) 1) << 3},
489 {"psr.mfl", ((valueT) 1) << 4},
490 {"psr.mfh", ((valueT) 1) << 5},
491 /* 6-12: reserved */
492 {"psr.ic", ((valueT) 1) << 13},
493 {"psr.i", ((valueT) 1) << 14},
494 {"psr.pk", ((valueT) 1) << 15},
495 /* 16: reserved */
496 {"psr.dt", ((valueT) 1) << 17},
497 {"psr.dfl", ((valueT) 1) << 18},
498 {"psr.dfh", ((valueT) 1) << 19},
499 {"psr.sp", ((valueT) 1) << 20},
500 {"psr.pp", ((valueT) 1) << 21},
501 {"psr.di", ((valueT) 1) << 22},
502 {"psr.si", ((valueT) 1) << 23},
503 {"psr.db", ((valueT) 1) << 24},
504 {"psr.lp", ((valueT) 1) << 25},
505 {"psr.tb", ((valueT) 1) << 26},
506 {"psr.rt", ((valueT) 1) << 27},
507 /* 28-31: reserved */
508 /* 32-33: cpl (current privilege level) */
509 {"psr.is", ((valueT) 1) << 34},
510 {"psr.mc", ((valueT) 1) << 35},
511 {"psr.it", ((valueT) 1) << 36},
512 {"psr.id", ((valueT) 1) << 37},
513 {"psr.da", ((valueT) 1) << 38},
514 {"psr.dd", ((valueT) 1) << 39},
515 {"psr.ss", ((valueT) 1) << 40},
516 /* 41-42: ri (restart instruction) */
517 {"psr.ed", ((valueT) 1) << 43},
518 {"psr.bn", ((valueT) 1) << 44},
519 };
520
521 /* indirect register-sets/memory: */
522
523 static const struct
524 {
525 const char *name;
526 unsigned int regnum;
527 }
528 indirect_reg[] =
529 {
530 { "CPUID", IND_CPUID },
531 { "cpuid", IND_CPUID },
532 { "dbr", IND_DBR },
533 { "dtr", IND_DTR },
534 { "itr", IND_ITR },
535 { "ibr", IND_IBR },
536 { "msr", IND_MSR },
537 { "pkr", IND_PKR },
538 { "pmc", IND_PMC },
539 { "pmd", IND_PMD },
540 { "dahr", IND_DAHR },
541 { "rr", IND_RR },
542 };
543
544 /* Pseudo functions used to indicate relocation types (these functions
545     start with an at sign (@)).  */
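/* Note: the PSEUDO_FUNC_RELOC entries below are indexed by enum reloc_func
   above; e.g., pseudo_func[FUNC_GP_RELATIVE] is "gprel", and the NULL
   placeholders keep the FUNC_LT_* indices aligned.  */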
546 static struct
547 {
548 const char *name;
549 enum pseudo_type
550 {
551 PSEUDO_FUNC_NONE,
552 PSEUDO_FUNC_RELOC,
553 PSEUDO_FUNC_CONST,
554 PSEUDO_FUNC_REG,
555 PSEUDO_FUNC_FLOAT
556 }
557 type;
558 union
559 {
560 unsigned long ival;
561 symbolS *sym;
562 }
563 u;
564 }
565 pseudo_func[] =
566 {
567 /* reloc pseudo functions (these must come first!): */
568 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
569 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
570 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
571 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
572 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
573 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
574 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
577 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
579 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
580 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
581 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
582 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
584 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
585 #ifdef TE_VMS
586 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
587 #endif
588
589 /* mbtype4 constants: */
590 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
591 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
592 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
593 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
594 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
595
596 /* fclass constants: */
597 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
598 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
599 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
600 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
601 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
602 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
603 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
604 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
605 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
606
607 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
608
609 /* hint constants: */
610 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
611 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
612
613 /* tf constants: */
614 { "clz", PSEUDO_FUNC_CONST, { 32 } },
615 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
616 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
617
618 /* unwind-related constants: */
619 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
620 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
621 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
622 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
623 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
624 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
625 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
626
627 /* unwind-related registers: */
628 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
629 };
630
631 /* 41-bit nop opcodes (one per unit): */
632 static const bfd_vma nop[IA64_NUM_UNITS] =
633 {
634 0x0000000000LL, /* NIL => break 0 */
635 0x0008000000LL, /* I-unit nop */
636 0x0008000000LL, /* M-unit nop */
637 0x4000000000LL, /* B-unit nop */
638 0x0008000000LL, /* F-unit nop */
639 0x0000000000LL, /* L-"unit" nop immediate */
640 0x0008000000LL, /* X-unit nop */
641 };
642
643 /* Can't be `const' as it's passed to input routines (which have the
644     habit of setting temporary sentinels).  */
645 static char special_section_name[][20] =
646 {
647 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
648 {".IA_64.unwind"}, {".IA_64.unwind_info"},
649 {".init_array"}, {".fini_array"}
650 };
651
652 /* The best template for a particular sequence of up to three
653 instructions: */
654 #define N IA64_NUM_TYPES
655 static unsigned char best_template[N][N][N];
656 #undef N
657
658 /* Resource dependencies currently in effect */
659 static struct rsrc {
660 int depind; /* dependency index */
661 const struct ia64_dependency *dependency; /* actual dependency */
662 unsigned specific:1, /* is this a specific bit/regno? */
663 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
664 int index; /* specific regno/bit within dependency */
665 int note; /* optional qualifying note (0 if none) */
666 #define STATE_NONE 0
667 #define STATE_STOP 1
668 #define STATE_SRLZ 2
669 int insn_srlz; /* current insn serialization state */
670 int data_srlz; /* current data serialization state */
671 int qp_regno; /* qualifying predicate for this usage */
672 const char *file; /* what file marked this dependency */
673 unsigned int line; /* what line marked this dependency */
674 struct mem_offset mem_offset; /* optional memory offset hint */
675 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
676 int path; /* corresponding code entry index */
677 } *regdeps = NULL;
678 static int regdepslen = 0;
679 static int regdepstotlen = 0;
680 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
681 static const char *dv_sem[] = { "none", "implied", "impliedf",
682 "data", "instr", "specific", "stop", "other" };
683 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
684
685 /* Current state of PR mutexation */
686 static struct qpmutex {
687 valueT prmask;
688 int path;
689 } *qp_mutexes = NULL; /* QP mutex bitmasks */
690 static int qp_mutexeslen = 0;
691 static int qp_mutexestotlen = 0;
692 static valueT qp_safe_across_calls = 0;
693
694 /* Current state of PR implications */
695 static struct qp_imply {
696 unsigned p1:6;
697 unsigned p2:6;
698 unsigned p2_branched:1;
699 int path;
700 } *qp_implies = NULL;
701 static int qp_implieslen = 0;
702 static int qp_impliestotlen = 0;
703
704 /* Keep track of static GR values so that indirect register usage can
705 sometimes be tracked. */
706 static struct gr {
707 unsigned known:1;
708 int path;
709 valueT value;
710 } gr_values[128] = {
711 {
712 1,
713 #ifdef INT_MAX
714 INT_MAX,
715 #else
716 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
717 #endif
718 0
719 }
720 };
721
722 /* Remember the alignment frag. */
723 static fragS *align_frag;
724
725 /* These are the routines required to output the various types of
726 unwind records. */
727
728 /* A slot_number is a frag address plus the slot index (0-2). We use the
729 frag address here so that if there is a section switch in the middle of
730 a function, then instructions emitted to a different section are not
731 counted. Since there may be more than one frag for a function, this
732 means we also need to keep track of which frag this address belongs to
733 so we can compute inter-frag distances. This also nicely solves the
734 problem with nops emitted for align directives, which can't easily be
735 counted, but can easily be derived from frag sizes. */
736
737 typedef struct unw_rec_list {
738 unwind_record r;
739 unsigned long slot_number;
740 fragS *slot_frag;
741 struct unw_rec_list *next;
742 } unw_rec_list;
743
744 #define SLOT_NUM_NOT_SET (unsigned)-1
745
746 /* Linked list of saved prologue counts. A very poor
747 implementation of a map from label numbers to prologue counts. */
748 typedef struct label_prologue_count
749 {
750 struct label_prologue_count *next;
751 unsigned long label_number;
752 unsigned int prologue_count;
753 } label_prologue_count;
754
755 typedef struct proc_pending
756 {
757 symbolS *sym;
758 struct proc_pending *next;
759 } proc_pending;
760
761 static struct
762 {
763 /* Maintain a list of unwind entries for the current function. */
764 unw_rec_list *list;
765 unw_rec_list *tail;
766
767 /* Any unwind entries that should be attached to the current slot
768 that an insn is being constructed for. */
769 unw_rec_list *current_entry;
770
771 /* These are used to create the unwind table entry for this function. */
772 proc_pending proc_pending;
773 symbolS *info; /* pointer to unwind info */
774 symbolS *personality_routine;
775 segT saved_text_seg;
776 subsegT saved_text_subseg;
777 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
778
779 /* TRUE if processing unwind directives in a prologue region. */
780 unsigned int prologue : 1;
781 unsigned int prologue_mask : 4;
782 unsigned int prologue_gr : 7;
783 unsigned int body : 1;
784 unsigned int insn : 1;
785 unsigned int prologue_count; /* number of .prologues seen so far */
786 /* Prologue counts at previous .label_state directives. */
787 struct label_prologue_count * saved_prologue_counts;
788
789 /* List of split up .save-s. */
790 unw_p_record *pending_saves;
791 } unwind;
792
793 /* The input value is a negated offset from psp, and specifies an address
794 psp - offset. The encoded value is psp + 16 - (4 * offset). Thus we
795 must add 16 and divide by 4 to get the encoded value. */
796
797 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
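/* For example, OFFSET == 16 encodes as (16 + 16) / 4 == 8, which the
   unwinder decodes back to psp + 16 - 4 * 8 == psp - 16.  */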
798
799 typedef void (*vbyte_func) (int, char *, char *);
800
801 /* Forward declarations: */
802 static void dot_alias (int);
803 static int parse_operand_and_eval (expressionS *, int);
804 static void emit_one_bundle (void);
805 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
806 bfd_reloc_code_real_type);
807 static void insn_group_break (int, int, int);
808 static void add_qp_mutex (valueT);
809 static void add_qp_imply (int, int);
810 static void clear_qp_mutex (valueT);
811 static void clear_qp_implies (valueT, valueT);
812 static void print_dependency (const char *, int);
813 static void instruction_serialization (void);
814 static void data_serialization (void);
815 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
816 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
817 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
818 static void free_saved_prologue_counts (void);
819
820 /* Determine if application register REGNUM resides only in the integer
821 unit (as opposed to the memory unit). */
822 static int
823 ar_is_only_in_integer_unit (int reg)
824 {
825 reg -= REG_AR;
826 return reg >= 64 && reg <= 111;
827 }
828
829 /* Determine if application register REGNUM resides only in the memory
830 unit (as opposed to the integer unit). */
831 static int
832 ar_is_only_in_memory_unit (int reg)
833 {
834 reg -= REG_AR;
835 return reg >= 0 && reg <= 47;
836 }
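/* For example, ar.lc (register 65) and ar.ec (66) are integer-unit only,
   while ar.rsc (16) and ar.unat (36) are memory-unit only.  */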
837
838 /* Switch to section NAME and create section if necessary. It's
839 rather ugly that we have to manipulate input_line_pointer but I
840 don't see any other way to accomplish the same thing without
841 changing obj-elf.c (which may be the Right Thing, in the end). */
842 static void
843 set_section (char *name)
844 {
845 char *saved_input_line_pointer;
846
847 saved_input_line_pointer = input_line_pointer;
848 input_line_pointer = name;
849 obj_elf_section (0);
850 input_line_pointer = saved_input_line_pointer;
851 }
852
853 /* Map 's' to SHF_IA_64_SHORT. */
854
855 bfd_vma
856 ia64_elf_section_letter (int letter, const char **ptr_msg)
857 {
858 if (letter == 's')
859 return SHF_IA_64_SHORT;
860 else if (letter == 'o')
861 return SHF_LINK_ORDER;
862 #ifdef TE_VMS
863 else if (letter == 'O')
864 return SHF_IA_64_VMS_OVERLAID;
865 else if (letter == 'g')
866 return SHF_IA_64_VMS_GLOBAL;
867 #endif
868
869 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
870 return -1;
871 }
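/* For example, a directive such as '.section .sdata1, "aws", @progbits'
   reaches this function with 's', requesting SHF_IA_64_SHORT (and thus
   SEC_SMALL_DATA below).  */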
872
873 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
874
875 flagword
876 ia64_elf_section_flags (flagword flags,
877 bfd_vma attr,
878 int type ATTRIBUTE_UNUSED)
879 {
880 if (attr & SHF_IA_64_SHORT)
881 flags |= SEC_SMALL_DATA;
882 return flags;
883 }
884
885 int
886 ia64_elf_section_type (const char *str, size_t len)
887 {
888 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
889
890 if (STREQ (ELF_STRING_ia64_unwind_info))
891 return SHT_PROGBITS;
892
893 if (STREQ (ELF_STRING_ia64_unwind_info_once))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind))
897 return SHT_IA_64_UNWIND;
898
899 if (STREQ (ELF_STRING_ia64_unwind_once))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ ("unwind"))
903 return SHT_IA_64_UNWIND;
904
905 return -1;
906 #undef STREQ
907 }
908
909 static unsigned int
910 set_regstack (unsigned int ins,
911 unsigned int locs,
912 unsigned int outs,
913 unsigned int rots)
914 {
915 /* Size of frame. */
916 unsigned int sof;
917
918 sof = ins + locs + outs;
919 if (sof > 96)
920 {
921 as_bad (_("Size of frame exceeds maximum of 96 registers"));
922 return 0;
923 }
924 if (rots > sof)
925 {
926 as_warn (_("Size of rotating registers exceeds frame size"));
927 return 0;
928 }
929 md.in.base = REG_GR + 32;
930 md.loc.base = md.in.base + ins;
931 md.out.base = md.loc.base + locs;
932
933 md.in.num_regs = ins;
934 md.loc.num_regs = locs;
935 md.out.num_regs = outs;
936 md.rot.num_regs = rots;
937 return sof;
938 }
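/* A worked example: set_regstack (2, 3, 1, 0) maps the two inputs to
   r32-r33, the three locals to r34-r36 and the single output to r37, and
   returns a frame size (sof) of 6.  */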
939
940 void
941 ia64_flush_insns (void)
942 {
943 struct label_fix *lfix;
944 segT saved_seg;
945 subsegT saved_subseg;
946 unw_rec_list *ptr;
947 bool mark;
948
949 if (!md.last_text_seg)
950 return;
951
952 saved_seg = now_seg;
953 saved_subseg = now_subseg;
954
955 subseg_set (md.last_text_seg, 0);
956
957 while (md.num_slots_in_use > 0)
958 emit_one_bundle (); /* force out queued instructions */
959
960 /* In case there are labels following the last instruction, resolve
961 those now. */
962 mark = false;
963 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
964 {
965 symbol_set_value_now (lfix->sym);
966 mark |= lfix->dw2_mark_labels;
967 }
968 if (mark)
969 {
970 dwarf2_where (&CURR_SLOT.debug_line);
971 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
972 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
973 dwarf2_consume_line_info ();
974 }
975 CURR_SLOT.label_fixups = 0;
976
977 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
978 symbol_set_value_now (lfix->sym);
979 CURR_SLOT.tag_fixups = 0;
980
981 /* In case there are unwind directives following the last instruction,
982 resolve those now. We only handle prologue, body, and endp directives
983 here. Give an error for others. */
984 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
985 {
986 switch (ptr->r.type)
987 {
988 case prologue:
989 case prologue_gr:
990 case body:
991 case endp:
992 ptr->slot_number = (unsigned long) frag_more (0);
993 ptr->slot_frag = frag_now;
994 break;
995
996 /* Allow any record which doesn't have a "t" field (i.e.,
997 doesn't relate to a particular instruction). */
998 case unwabi:
999 case br_gr:
1000 case copy_state:
1001 case fr_mem:
1002 case frgr_mem:
1003 case gr_gr:
1004 case gr_mem:
1005 case label_state:
1006 case rp_br:
1007 case spill_base:
1008 case spill_mask:
1009 /* nothing */
1010 break;
1011
1012 default:
1013 as_bad (_("Unwind directive not followed by an instruction."));
1014 break;
1015 }
1016 }
1017 unwind.current_entry = NULL;
1018
1019 subseg_set (saved_seg, saved_subseg);
1020
1021 if (md.qp.X_op == O_register)
1022 as_bad (_("qualifying predicate not followed by instruction"));
1023 }
1024
1025 void
1026 ia64_cons_align (int nbytes)
1027 {
1028 if (md.auto_align)
1029 {
1030 int log;
1031 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1032 log++;
1033
1034 do_align (log, NULL, 0, 0);
1035 }
1036 }
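/* E.g., emitting an 8-byte constant calls this with nbytes == 8; the loop
   computes log == 3 and the data is aligned to an 8-byte boundary.  */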
1037
1038 #ifdef TE_VMS
1039
1040 /* .vms_common section, symbol, size, alignment */
1041
1042 static void
1043 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1044 {
1045 const char *sec_name;
1046 char *sym_name;
1047 char c;
1048 offsetT size;
1049 offsetT cur_size;
1050 offsetT temp;
1051 symbolS *symbolP;
1052 segT current_seg = now_seg;
1053 subsegT current_subseg = now_subseg;
1054 offsetT log_align;
1055
1056 /* Section name. */
1057 sec_name = obj_elf_section_name ();
1058 if (sec_name == NULL)
1059 return;
1060
1061 /* Symbol name. */
1062 SKIP_WHITESPACE ();
1063 if (*input_line_pointer == ',')
1064 {
1065 input_line_pointer++;
1066 SKIP_WHITESPACE ();
1067 }
1068 else
1069 {
1070 as_bad (_("expected ',' after section name"));
1071 ignore_rest_of_line ();
1072 return;
1073 }
1074
1075 c = get_symbol_name (&sym_name);
1076
1077 if (input_line_pointer == sym_name)
1078 {
1079 (void) restore_line_pointer (c);
1080 as_bad (_("expected symbol name"));
1081 ignore_rest_of_line ();
1082 return;
1083 }
1084
1085 symbolP = symbol_find_or_make (sym_name);
1086 (void) restore_line_pointer (c);
1087
1088 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1089 && !S_IS_COMMON (symbolP))
1090 {
1091 as_bad (_("Ignoring attempt to re-define symbol"));
1092 ignore_rest_of_line ();
1093 return;
1094 }
1095
1096 /* Symbol size. */
1097 SKIP_WHITESPACE ();
1098 if (*input_line_pointer == ',')
1099 {
1100 input_line_pointer++;
1101 SKIP_WHITESPACE ();
1102 }
1103 else
1104 {
1105 as_bad (_("expected ',' after symbol name"));
1106 ignore_rest_of_line ();
1107 return;
1108 }
1109
1110 temp = get_absolute_expression ();
1111 size = temp;
1112 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1113 if (temp != size)
1114 {
1115 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1116 ignore_rest_of_line ();
1117 return;
1118 }
1119
1120 /* Alignment. */
1121 SKIP_WHITESPACE ();
1122 if (*input_line_pointer == ',')
1123 {
1124 input_line_pointer++;
1125 SKIP_WHITESPACE ();
1126 }
1127 else
1128 {
1129 as_bad (_("expected ',' after symbol size"));
1130 ignore_rest_of_line ();
1131 return;
1132 }
1133
1134 log_align = get_absolute_expression ();
1135
1136 demand_empty_rest_of_line ();
1137
1138 obj_elf_change_section
1139 (sec_name, SHT_NOBITS,
1140 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1141 0, NULL, 1, 0);
1142
1143 S_SET_VALUE (symbolP, 0);
1144 S_SET_SIZE (symbolP, size);
1145 S_SET_EXTERNAL (symbolP);
1146 S_SET_SEGMENT (symbolP, now_seg);
1147
1148 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1149
1150 record_alignment (now_seg, log_align);
1151
1152 cur_size = bfd_section_size (now_seg);
1153 if ((int) size > cur_size)
1154 {
1155 char *pfrag
1156 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1157 (valueT)size - (valueT)cur_size, NULL);
1158 *pfrag = 0;
1159 bfd_set_section_size (now_seg, size);
1160 }
1161
1162 /* Switch back to current segment. */
1163 subseg_set (current_seg, current_subseg);
1164
1165 #ifdef md_elf_section_change_hook
1166 md_elf_section_change_hook ();
1167 #endif
1168 }
1169
1170 #endif /* TE_VMS */
1171
1172 /* Output COUNT bytes to a memory location. */
1173 static char *vbyte_mem_ptr = NULL;
1174
1175 static void
1176 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1177 {
1178 int x;
1179 if (vbyte_mem_ptr == NULL)
1180 abort ();
1181
1182 if (count == 0)
1183 return;
1184 for (x = 0; x < count; x++)
1185 *(vbyte_mem_ptr++) = ptr[x];
1186 }
1187
1188 /* Count the number of bytes required for records. */
1189 static int vbyte_count = 0;
1190 static void
1191 count_output (int count,
1192 char *ptr ATTRIBUTE_UNUSED,
1193 char *comment ATTRIBUTE_UNUSED)
1194 {
1195 vbyte_count += count;
1196 }
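/* Both of the above are used as vbyte_func callbacks over the unwind record
   list: a first pass with count_output sizes the unwind info, and a second
   pass with output_vbyte_mem (after pointing vbyte_mem_ptr at the allocated
   space) emits the bytes.  */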
1197
1198 static void
1199 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1200 {
1201 int r = 0;
1202 char byte;
1203 if (rlen > 0x1f)
1204 {
1205 output_R3_format (f, rtype, rlen);
1206 return;
1207 }
1208
1209 if (rtype == body)
1210 r = 1;
1211 else if (rtype != prologue)
1212 as_bad (_("record type is not valid"));
1213
1214 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1215 (*f) (1, &byte, NULL);
1216 }
1217
1218 static void
1219 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1220 {
1221 char bytes[20];
1222 int count = 2;
1223 mask = (mask & 0x0f);
1224 grsave = (grsave & 0x7f);
1225
1226 bytes[0] = (UNW_R2 | (mask >> 1));
1227 bytes[1] = (((mask & 0x01) << 7) | grsave);
1228 count += output_leb128 (bytes + 2, rlen, 0);
1229 (*f) (count, bytes, NULL);
1230 }
1231
1232 static void
1233 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1234 {
1235 int r = 0, count;
1236 char bytes[20];
1237 if (rlen <= 0x1f)
1238 {
1239 output_R1_format (f, rtype, rlen);
1240 return;
1241 }
1242
1243 if (rtype == body)
1244 r = 1;
1245 else if (rtype != prologue)
1246 as_bad (_("record type is not valid"));
1247 bytes[0] = (UNW_R3 | r);
1248 count = output_leb128 (bytes + 1, rlen, 0);
1249 (*f) (count + 1, bytes, NULL);
1250 }
1251
1252 static void
1253 output_P1_format (vbyte_func f, int brmask)
1254 {
1255 char byte;
1256 byte = UNW_P1 | (brmask & 0x1f);
1257 (*f) (1, &byte, NULL);
1258 }
1259
1260 static void
1261 output_P2_format (vbyte_func f, int brmask, int gr)
1262 {
1263 char bytes[2];
1264 brmask = (brmask & 0x1f);
1265 bytes[0] = UNW_P2 | (brmask >> 1);
1266 bytes[1] = (((brmask & 1) << 7) | gr);
1267 (*f) (2, bytes, NULL);
1268 }
1269
1270 static void
1271 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1272 {
1273 char bytes[2];
1274 int r = 0;
1275 reg = (reg & 0x7f);
1276 switch (rtype)
1277 {
1278 case psp_gr:
1279 r = 0;
1280 break;
1281 case rp_gr:
1282 r = 1;
1283 break;
1284 case pfs_gr:
1285 r = 2;
1286 break;
1287 case preds_gr:
1288 r = 3;
1289 break;
1290 case unat_gr:
1291 r = 4;
1292 break;
1293 case lc_gr:
1294 r = 5;
1295 break;
1296 case rp_br:
1297 r = 6;
1298 break;
1299 case rnat_gr:
1300 r = 7;
1301 break;
1302 case bsp_gr:
1303 r = 8;
1304 break;
1305 case bspstore_gr:
1306 r = 9;
1307 break;
1308 case fpsr_gr:
1309 r = 10;
1310 break;
1311 case priunat_gr:
1312 r = 11;
1313 break;
1314 default:
1315 as_bad (_("Invalid record type for P3 format."));
1316 }
1317 bytes[0] = (UNW_P3 | (r >> 1));
1318 bytes[1] = (((r & 1) << 7) | reg);
1319 (*f) (2, bytes, NULL);
1320 }
1321
1322 static void
1323 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1324 {
1325 imask[0] = UNW_P4;
1326 (*f) (imask_size, (char *) imask, NULL);
1327 }
1328
1329 static void
1330 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1331 {
1332 char bytes[4];
1333 grmask = (grmask & 0x0f);
1334
1335 bytes[0] = UNW_P5;
1336 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1337 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1338 bytes[3] = (frmask & 0x000000ff);
1339 (*f) (4, bytes, NULL);
1340 }
1341
1342 static void
1343 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1344 {
1345 char byte;
1346 int r = 0;
1347
1348 if (rtype == gr_mem)
1349 r = 1;
1350 else if (rtype != fr_mem)
1351 as_bad (_("Invalid record type for format P6"));
1352 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1353 (*f) (1, &byte, NULL);
1354 }
1355
1356 static void
1357 output_P7_format (vbyte_func f,
1358 unw_record_type rtype,
1359 unsigned long w1,
1360 unsigned long w2)
1361 {
1362 char bytes[20];
1363 int count = 1;
1364 int r = 0;
1365 count += output_leb128 (bytes + 1, w1, 0);
1366 switch (rtype)
1367 {
1368 case mem_stack_f:
1369 r = 0;
1370 count += output_leb128 (bytes + count, w2 >> 4, 0);
1371 break;
1372 case mem_stack_v:
1373 r = 1;
1374 break;
1375 case spill_base:
1376 r = 2;
1377 break;
1378 case psp_sprel:
1379 r = 3;
1380 break;
1381 case rp_when:
1382 r = 4;
1383 break;
1384 case rp_psprel:
1385 r = 5;
1386 break;
1387 case pfs_when:
1388 r = 6;
1389 break;
1390 case pfs_psprel:
1391 r = 7;
1392 break;
1393 case preds_when:
1394 r = 8;
1395 break;
1396 case preds_psprel:
1397 r = 9;
1398 break;
1399 case lc_when:
1400 r = 10;
1401 break;
1402 case lc_psprel:
1403 r = 11;
1404 break;
1405 case unat_when:
1406 r = 12;
1407 break;
1408 case unat_psprel:
1409 r = 13;
1410 break;
1411 case fpsr_when:
1412 r = 14;
1413 break;
1414 case fpsr_psprel:
1415 r = 15;
1416 break;
1417 default:
1418 break;
1419 }
1420 bytes[0] = (UNW_P7 | r);
1421 (*f) (count, bytes, NULL);
1422 }
1423
1424 static void
1425 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1426 {
1427 char bytes[20];
1428 int r = 0;
1429 int count = 2;
1430 bytes[0] = UNW_P8;
1431 switch (rtype)
1432 {
1433 case rp_sprel:
1434 r = 1;
1435 break;
1436 case pfs_sprel:
1437 r = 2;
1438 break;
1439 case preds_sprel:
1440 r = 3;
1441 break;
1442 case lc_sprel:
1443 r = 4;
1444 break;
1445 case unat_sprel:
1446 r = 5;
1447 break;
1448 case fpsr_sprel:
1449 r = 6;
1450 break;
1451 case bsp_when:
1452 r = 7;
1453 break;
1454 case bsp_psprel:
1455 r = 8;
1456 break;
1457 case bsp_sprel:
1458 r = 9;
1459 break;
1460 case bspstore_when:
1461 r = 10;
1462 break;
1463 case bspstore_psprel:
1464 r = 11;
1465 break;
1466 case bspstore_sprel:
1467 r = 12;
1468 break;
1469 case rnat_when:
1470 r = 13;
1471 break;
1472 case rnat_psprel:
1473 r = 14;
1474 break;
1475 case rnat_sprel:
1476 r = 15;
1477 break;
1478 case priunat_when_gr:
1479 r = 16;
1480 break;
1481 case priunat_psprel:
1482 r = 17;
1483 break;
1484 case priunat_sprel:
1485 r = 18;
1486 break;
1487 case priunat_when_mem:
1488 r = 19;
1489 break;
1490 default:
1491 break;
1492 }
1493 bytes[1] = r;
1494 count += output_leb128 (bytes + 2, t, 0);
1495 (*f) (count, bytes, NULL);
1496 }
1497
1498 static void
1499 output_P9_format (vbyte_func f, int grmask, int gr)
1500 {
1501 char bytes[3];
1502 bytes[0] = UNW_P9;
1503 bytes[1] = (grmask & 0x0f);
1504 bytes[2] = (gr & 0x7f);
1505 (*f) (3, bytes, NULL);
1506 }
1507
1508 static void
1509 output_P10_format (vbyte_func f, int abi, int context)
1510 {
1511 char bytes[3];
1512 bytes[0] = UNW_P10;
1513 bytes[1] = (abi & 0xff);
1514 bytes[2] = (context & 0xff);
1515 (*f) (3, bytes, NULL);
1516 }
1517
1518 static void
1519 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1520 {
1521 char byte;
1522 int r = 0;
1523 if (label > 0x1f)
1524 {
1525 output_B4_format (f, rtype, label);
1526 return;
1527 }
1528 if (rtype == copy_state)
1529 r = 1;
1530 else if (rtype != label_state)
1531 as_bad (_("Invalid record type for format B1"));
1532
1533 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1534 (*f) (1, &byte, NULL);
1535 }
1536
1537 static void
1538 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1539 {
1540 char bytes[20];
1541 int count = 1;
1542 if (ecount > 0x1f)
1543 {
1544 output_B3_format (f, ecount, t);
1545 return;
1546 }
1547 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1548 count += output_leb128 (bytes + 1, t, 0);
1549 (*f) (count, bytes, NULL);
1550 }
1551
1552 static void
1553 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1554 {
1555 char bytes[20];
1556 int count = 1;
1557 if (ecount <= 0x1f)
1558 {
1559 output_B2_format (f, ecount, t);
1560 return;
1561 }
1562 bytes[0] = UNW_B3;
1563 count += output_leb128 (bytes + 1, t, 0);
1564 count += output_leb128 (bytes + count, ecount, 0);
1565 (*f) (count, bytes, NULL);
1566 }
1567
1568 static void
1569 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1570 {
1571 char bytes[20];
1572 int r = 0;
1573 int count = 1;
1574 if (label <= 0x1f)
1575 {
1576 output_B1_format (f, rtype, label);
1577 return;
1578 }
1579
1580 if (rtype == copy_state)
1581 r = 1;
1582 else if (rtype != label_state)
1583     as_bad (_("Invalid record type for format B4"));
1584
1585 bytes[0] = (UNW_B4 | (r << 3));
1586 count += output_leb128 (bytes + 1, label, 0);
1587 (*f) (count, bytes, NULL);
1588 }
1589
1590 static char
1591 format_ab_reg (int ab, int reg)
1592 {
1593 int ret;
1594 ab = (ab & 3);
1595 reg = (reg & 0x1f);
1596 ret = (ab << 5) | reg;
1597 return ret;
1598 }
1599
1600 static void
1601 output_X1_format (vbyte_func f,
1602 unw_record_type rtype,
1603 int ab,
1604 int reg,
1605 unsigned long t,
1606 unsigned long w1)
1607 {
1608 char bytes[20];
1609 int r = 0;
1610 int count = 2;
1611 bytes[0] = UNW_X1;
1612
1613 if (rtype == spill_sprel)
1614 r = 1;
1615 else if (rtype != spill_psprel)
1616 as_bad (_("Invalid record type for format X1"));
1617 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1618 count += output_leb128 (bytes + 2, t, 0);
1619 count += output_leb128 (bytes + count, w1, 0);
1620 (*f) (count, bytes, NULL);
1621 }
1622
1623 static void
1624 output_X2_format (vbyte_func f,
1625 int ab,
1626 int reg,
1627 int x,
1628 int y,
1629 int treg,
1630 unsigned long t)
1631 {
1632 char bytes[20];
1633 int count = 3;
1634 bytes[0] = UNW_X2;
1635 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1636 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1637 count += output_leb128 (bytes + 3, t, 0);
1638 (*f) (count, bytes, NULL);
1639 }
1640
1641 static void
1642 output_X3_format (vbyte_func f,
1643 unw_record_type rtype,
1644 int qp,
1645 int ab,
1646 int reg,
1647 unsigned long t,
1648 unsigned long w1)
1649 {
1650 char bytes[20];
1651 int r = 0;
1652 int count = 3;
1653 bytes[0] = UNW_X3;
1654
1655 if (rtype == spill_sprel_p)
1656 r = 1;
1657 else if (rtype != spill_psprel_p)
1658 as_bad (_("Invalid record type for format X3"));
1659 bytes[1] = ((r << 7) | (qp & 0x3f));
1660 bytes[2] = format_ab_reg (ab, reg);
1661 count += output_leb128 (bytes + 3, t, 0);
1662 count += output_leb128 (bytes + count, w1, 0);
1663 (*f) (count, bytes, NULL);
1664 }
1665
1666 static void
1667 output_X4_format (vbyte_func f,
1668 int qp,
1669 int ab,
1670 int reg,
1671 int x,
1672 int y,
1673 int treg,
1674 unsigned long t)
1675 {
1676 char bytes[20];
1677 int count = 4;
1678 bytes[0] = UNW_X4;
1679 bytes[1] = (qp & 0x3f);
1680 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1681 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1682 count += output_leb128 (bytes + 4, t, 0);
1683 (*f) (count, bytes, NULL);
1684 }
1685
1686 /* This function checks whether there are any outstanding .save-s and
1687 discards them if so. */
1688
1689 static void
1690 check_pending_save (void)
1691 {
1692 if (unwind.pending_saves)
1693 {
1694 unw_rec_list *cur, *prev;
1695
1696 as_warn (_("Previous .save incomplete"));
1697 for (cur = unwind.list, prev = NULL; cur; )
1698 if (&cur->r.record.p == unwind.pending_saves)
1699 {
1700 if (prev)
1701 prev->next = cur->next;
1702 else
1703 unwind.list = cur->next;
1704 if (cur == unwind.tail)
1705 unwind.tail = prev;
1706 if (cur == unwind.current_entry)
1707 unwind.current_entry = cur->next;
1708 /* Don't free the first discarded record, it's being used as
1709 terminator for (currently) br_gr and gr_gr processing, and
1710 also prevents leaving a dangling pointer to it in its
1711 predecessor. */
1712 cur->r.record.p.grmask = 0;
1713 cur->r.record.p.brmask = 0;
1714 cur->r.record.p.frmask = 0;
1715 prev = cur->r.record.p.next;
1716 cur->r.record.p.next = NULL;
1717 cur = prev;
1718 break;
1719 }
1720 else
1721 {
1722 prev = cur;
1723 cur = cur->next;
1724 }
1725 while (cur)
1726 {
1727 prev = cur;
1728 cur = cur->r.record.p.next;
1729 free (prev);
1730 }
1731 unwind.pending_saves = NULL;
1732 }
1733 }
1734
1735 /* This function allocates a record list structure, and initializes fields. */
1736
1737 static unw_rec_list *
1738 alloc_record (unw_record_type t)
1739 {
1740 unw_rec_list *ptr;
1741 ptr = XNEW (unw_rec_list);
1742 memset (ptr, 0, sizeof (*ptr));
1743 ptr->slot_number = SLOT_NUM_NOT_SET;
1744 ptr->r.type = t;
1745 return ptr;
1746 }
1747
1748 /* Dummy unwind record used for calculating the length of the last prologue or
1749 body region. */
1750
1751 static unw_rec_list *
1752 output_endp (void)
1753 {
1754 unw_rec_list *ptr = alloc_record (endp);
1755 return ptr;
1756 }
1757
1758 static unw_rec_list *
1759 output_prologue (void)
1760 {
1761 unw_rec_list *ptr = alloc_record (prologue);
1762 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1763 return ptr;
1764 }
1765
1766 static unw_rec_list *
1767 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1768 {
1769 unw_rec_list *ptr = alloc_record (prologue_gr);
1770 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1771 ptr->r.record.r.grmask = saved_mask;
1772 ptr->r.record.r.grsave = reg;
1773 return ptr;
1774 }
1775
1776 static unw_rec_list *
1777 output_body (void)
1778 {
1779 unw_rec_list *ptr = alloc_record (body);
1780 return ptr;
1781 }
1782
1783 static unw_rec_list *
1784 output_mem_stack_f (unsigned int size)
1785 {
1786 unw_rec_list *ptr = alloc_record (mem_stack_f);
1787 ptr->r.record.p.size = size;
1788 return ptr;
1789 }
1790
1791 static unw_rec_list *
1792 output_mem_stack_v (void)
1793 {
1794 unw_rec_list *ptr = alloc_record (mem_stack_v);
1795 return ptr;
1796 }
1797
1798 static unw_rec_list *
1799 output_psp_gr (unsigned int gr)
1800 {
1801 unw_rec_list *ptr = alloc_record (psp_gr);
1802 ptr->r.record.p.r.gr = gr;
1803 return ptr;
1804 }
1805
1806 static unw_rec_list *
1807 output_psp_sprel (unsigned int offset)
1808 {
1809 unw_rec_list *ptr = alloc_record (psp_sprel);
1810 ptr->r.record.p.off.sp = offset / 4;
1811 return ptr;
1812 }
1813
1814 static unw_rec_list *
1815 output_rp_when (void)
1816 {
1817 unw_rec_list *ptr = alloc_record (rp_when);
1818 return ptr;
1819 }
1820
1821 static unw_rec_list *
1822 output_rp_gr (unsigned int gr)
1823 {
1824 unw_rec_list *ptr = alloc_record (rp_gr);
1825 ptr->r.record.p.r.gr = gr;
1826 return ptr;
1827 }
1828
1829 static unw_rec_list *
1830 output_rp_br (unsigned int br)
1831 {
1832 unw_rec_list *ptr = alloc_record (rp_br);
1833 ptr->r.record.p.r.br = br;
1834 return ptr;
1835 }
1836
1837 static unw_rec_list *
1838 output_rp_psprel (unsigned int offset)
1839 {
1840 unw_rec_list *ptr = alloc_record (rp_psprel);
1841 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1842 return ptr;
1843 }
1844
1845 static unw_rec_list *
1846 output_rp_sprel (unsigned int offset)
1847 {
1848 unw_rec_list *ptr = alloc_record (rp_sprel);
1849 ptr->r.record.p.off.sp = offset / 4;
1850 return ptr;
1851 }
1852
1853 static unw_rec_list *
1854 output_pfs_when (void)
1855 {
1856 unw_rec_list *ptr = alloc_record (pfs_when);
1857 return ptr;
1858 }
1859
1860 static unw_rec_list *
1861 output_pfs_gr (unsigned int gr)
1862 {
1863 unw_rec_list *ptr = alloc_record (pfs_gr);
1864 ptr->r.record.p.r.gr = gr;
1865 return ptr;
1866 }
1867
1868 static unw_rec_list *
1869 output_pfs_psprel (unsigned int offset)
1870 {
1871 unw_rec_list *ptr = alloc_record (pfs_psprel);
1872 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1873 return ptr;
1874 }
1875
1876 static unw_rec_list *
1877 output_pfs_sprel (unsigned int offset)
1878 {
1879 unw_rec_list *ptr = alloc_record (pfs_sprel);
1880 ptr->r.record.p.off.sp = offset / 4;
1881 return ptr;
1882 }
1883
1884 static unw_rec_list *
1885 output_preds_when (void)
1886 {
1887 unw_rec_list *ptr = alloc_record (preds_when);
1888 return ptr;
1889 }
1890
1891 static unw_rec_list *
1892 output_preds_gr (unsigned int gr)
1893 {
1894 unw_rec_list *ptr = alloc_record (preds_gr);
1895 ptr->r.record.p.r.gr = gr;
1896 return ptr;
1897 }
1898
1899 static unw_rec_list *
1900 output_preds_psprel (unsigned int offset)
1901 {
1902 unw_rec_list *ptr = alloc_record (preds_psprel);
1903 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1904 return ptr;
1905 }
1906
1907 static unw_rec_list *
1908 output_preds_sprel (unsigned int offset)
1909 {
1910 unw_rec_list *ptr = alloc_record (preds_sprel);
1911 ptr->r.record.p.off.sp = offset / 4;
1912 return ptr;
1913 }
1914
1915 static unw_rec_list *
1916 output_fr_mem (unsigned int mask)
1917 {
1918 unw_rec_list *ptr = alloc_record (fr_mem);
1919 unw_rec_list *cur = ptr;
1920
1921 ptr->r.record.p.frmask = mask;
1922 unwind.pending_saves = &ptr->r.record.p;
1923 for (;;)
1924 {
1925 unw_rec_list *prev = cur;
1926
1927 /* Clear least significant set bit. */
1928 mask &= ~(mask & (~mask + 1));
1929 if (!mask)
1930 return ptr;
1931 cur = alloc_record (fr_mem);
1932 cur->r.record.p.frmask = mask;
1933 /* Retain only least significant bit. */
1934 prev->r.record.p.frmask ^= mask;
1935 prev->r.record.p.next = cur;
1936 }
1937 }
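/* Illustration: output_fr_mem (0x5) returns a head record with frmask 0x1
   whose record.p.next points at a second fr_mem record with frmask 0x4;
   unwind.pending_saves is left pointing at the head record.  */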
1938
1939 static unw_rec_list *
1940 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1941 {
1942 unw_rec_list *ptr = alloc_record (frgr_mem);
1943 unw_rec_list *cur = ptr;
1944
1945 unwind.pending_saves = &cur->r.record.p;
1946 cur->r.record.p.frmask = fr_mask;
1947 while (fr_mask)
1948 {
1949 unw_rec_list *prev = cur;
1950
1951 /* Clear least significant set bit. */
1952 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1953 if (!gr_mask && !fr_mask)
1954 return ptr;
1955 cur = alloc_record (frgr_mem);
1956 cur->r.record.p.frmask = fr_mask;
1957 /* Retain only least significant bit. */
1958 prev->r.record.p.frmask ^= fr_mask;
1959 prev->r.record.p.next = cur;
1960 }
1961 cur->r.record.p.grmask = gr_mask;
1962 for (;;)
1963 {
1964 unw_rec_list *prev = cur;
1965
1966 /* Clear least significant set bit. */
1967 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1968 if (!gr_mask)
1969 return ptr;
1970 cur = alloc_record (frgr_mem);
1971 cur->r.record.p.grmask = gr_mask;
1972 /* Retain only least significant bit. */
1973 prev->r.record.p.grmask ^= gr_mask;
1974 prev->r.record.p.next = cur;
1975 }
1976 }
1977
1978 static unw_rec_list *
1979 output_gr_gr (unsigned int mask, unsigned int reg)
1980 {
1981 unw_rec_list *ptr = alloc_record (gr_gr);
1982 unw_rec_list *cur = ptr;
1983
1984 ptr->r.record.p.grmask = mask;
1985 ptr->r.record.p.r.gr = reg;
1986 unwind.pending_saves = &ptr->r.record.p;
1987 for (;;)
1988 {
1989 unw_rec_list *prev = cur;
1990
1991 /* Clear least significant set bit. */
1992 mask &= ~(mask & (~mask + 1));
1993 if (!mask)
1994 return ptr;
1995 cur = alloc_record (gr_gr);
1996 cur->r.record.p.grmask = mask;
1997 /* Indicate this record shouldn't be output. */
1998 cur->r.record.p.r.gr = REG_NUM;
1999 /* Retain only least significant bit. */
2000 prev->r.record.p.grmask ^= mask;
2001 prev->r.record.p.next = cur;
2002 }
2003 }
2004
2005 static unw_rec_list *
2006 output_gr_mem (unsigned int mask)
2007 {
2008 unw_rec_list *ptr = alloc_record (gr_mem);
2009 unw_rec_list *cur = ptr;
2010
2011 ptr->r.record.p.grmask = mask;
2012 unwind.pending_saves = &ptr->r.record.p;
2013 for (;;)
2014 {
2015 unw_rec_list *prev = cur;
2016
2017 /* Clear least significant set bit. */
2018 mask &= ~(mask & (~mask + 1));
2019 if (!mask)
2020 return ptr;
2021 cur = alloc_record (gr_mem);
2022 cur->r.record.p.grmask = mask;
2023 /* Retain only least significant bit. */
2024 prev->r.record.p.grmask ^= mask;
2025 prev->r.record.p.next = cur;
2026 }
2027 }
2028
2029 static unw_rec_list *
2030 output_br_mem (unsigned int mask)
2031 {
2032 unw_rec_list *ptr = alloc_record (br_mem);
2033 unw_rec_list *cur = ptr;
2034
2035 ptr->r.record.p.brmask = mask;
2036 unwind.pending_saves = &ptr->r.record.p;
2037 for (;;)
2038 {
2039 unw_rec_list *prev = cur;
2040
2041 /* Clear least significant set bit. */
2042 mask &= ~(mask & (~mask + 1));
2043 if (!mask)
2044 return ptr;
2045 cur = alloc_record (br_mem);
2046 cur->r.record.p.brmask = mask;
2047 /* Retain only least significant bit. */
2048 prev->r.record.p.brmask ^= mask;
2049 prev->r.record.p.next = cur;
2050 }
2051 }
2052
2053 static unw_rec_list *
2054 output_br_gr (unsigned int mask, unsigned int reg)
2055 {
2056 unw_rec_list *ptr = alloc_record (br_gr);
2057 unw_rec_list *cur = ptr;
2058
2059 ptr->r.record.p.brmask = mask;
2060 ptr->r.record.p.r.gr = reg;
2061 unwind.pending_saves = &ptr->r.record.p;
2062 for (;;)
2063 {
2064 unw_rec_list *prev = cur;
2065
2066 /* Clear least significant set bit. */
2067 mask &= ~(mask & (~mask + 1));
2068 if (!mask)
2069 return ptr;
2070 cur = alloc_record (br_gr);
2071 cur->r.record.p.brmask = mask;
2072 /* Indicate this record shouldn't be output. */
2073 cur->r.record.p.r.gr = REG_NUM;
2074 /* Retain only least significant bit. */
2075 prev->r.record.p.brmask ^= mask;
2076 prev->r.record.p.next = cur;
2077 }
2078 }
2079
2080 static unw_rec_list *
2081 output_spill_base (unsigned int offset)
2082 {
2083 unw_rec_list *ptr = alloc_record (spill_base);
2084 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2085 return ptr;
2086 }
2087
2088 static unw_rec_list *
2089 output_unat_when (void)
2090 {
2091 unw_rec_list *ptr = alloc_record (unat_when);
2092 return ptr;
2093 }
2094
2095 static unw_rec_list *
2096 output_unat_gr (unsigned int gr)
2097 {
2098 unw_rec_list *ptr = alloc_record (unat_gr);
2099 ptr->r.record.p.r.gr = gr;
2100 return ptr;
2101 }
2102
2103 static unw_rec_list *
2104 output_unat_psprel (unsigned int offset)
2105 {
2106 unw_rec_list *ptr = alloc_record (unat_psprel);
2107 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2108 return ptr;
2109 }
2110
2111 static unw_rec_list *
2112 output_unat_sprel (unsigned int offset)
2113 {
2114 unw_rec_list *ptr = alloc_record (unat_sprel);
2115 ptr->r.record.p.off.sp = offset / 4;
2116 return ptr;
2117 }
2118
2119 static unw_rec_list *
2120 output_lc_when (void)
2121 {
2122 unw_rec_list *ptr = alloc_record (lc_when);
2123 return ptr;
2124 }
2125
2126 static unw_rec_list *
2127 output_lc_gr (unsigned int gr)
2128 {
2129 unw_rec_list *ptr = alloc_record (lc_gr);
2130 ptr->r.record.p.r.gr = gr;
2131 return ptr;
2132 }
2133
2134 static unw_rec_list *
2135 output_lc_psprel (unsigned int offset)
2136 {
2137 unw_rec_list *ptr = alloc_record (lc_psprel);
2138 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2139 return ptr;
2140 }
2141
2142 static unw_rec_list *
2143 output_lc_sprel (unsigned int offset)
2144 {
2145 unw_rec_list *ptr = alloc_record (lc_sprel);
2146 ptr->r.record.p.off.sp = offset / 4;
2147 return ptr;
2148 }
2149
2150 static unw_rec_list *
2151 output_fpsr_when (void)
2152 {
2153 unw_rec_list *ptr = alloc_record (fpsr_when);
2154 return ptr;
2155 }
2156
2157 static unw_rec_list *
2158 output_fpsr_gr (unsigned int gr)
2159 {
2160 unw_rec_list *ptr = alloc_record (fpsr_gr);
2161 ptr->r.record.p.r.gr = gr;
2162 return ptr;
2163 }
2164
2165 static unw_rec_list *
2166 output_fpsr_psprel (unsigned int offset)
2167 {
2168 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2169 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2170 return ptr;
2171 }
2172
2173 static unw_rec_list *
2174 output_fpsr_sprel (unsigned int offset)
2175 {
2176 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2177 ptr->r.record.p.off.sp = offset / 4;
2178 return ptr;
2179 }
2180
2181 static unw_rec_list *
2182 output_priunat_when_gr (void)
2183 {
2184 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2185 return ptr;
2186 }
2187
2188 static unw_rec_list *
2189 output_priunat_when_mem (void)
2190 {
2191 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2192 return ptr;
2193 }
2194
2195 static unw_rec_list *
2196 output_priunat_gr (unsigned int gr)
2197 {
2198 unw_rec_list *ptr = alloc_record (priunat_gr);
2199 ptr->r.record.p.r.gr = gr;
2200 return ptr;
2201 }
2202
2203 static unw_rec_list *
2204 output_priunat_psprel (unsigned int offset)
2205 {
2206 unw_rec_list *ptr = alloc_record (priunat_psprel);
2207 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2208 return ptr;
2209 }
2210
2211 static unw_rec_list *
2212 output_priunat_sprel (unsigned int offset)
2213 {
2214 unw_rec_list *ptr = alloc_record (priunat_sprel);
2215 ptr->r.record.p.off.sp = offset / 4;
2216 return ptr;
2217 }
2218
2219 static unw_rec_list *
2220 output_bsp_when (void)
2221 {
2222 unw_rec_list *ptr = alloc_record (bsp_when);
2223 return ptr;
2224 }
2225
2226 static unw_rec_list *
2227 output_bsp_gr (unsigned int gr)
2228 {
2229 unw_rec_list *ptr = alloc_record (bsp_gr);
2230 ptr->r.record.p.r.gr = gr;
2231 return ptr;
2232 }
2233
2234 static unw_rec_list *
2235 output_bsp_psprel (unsigned int offset)
2236 {
2237 unw_rec_list *ptr = alloc_record (bsp_psprel);
2238 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2239 return ptr;
2240 }
2241
2242 static unw_rec_list *
2243 output_bsp_sprel (unsigned int offset)
2244 {
2245 unw_rec_list *ptr = alloc_record (bsp_sprel);
2246 ptr->r.record.p.off.sp = offset / 4;
2247 return ptr;
2248 }
2249
2250 static unw_rec_list *
2251 output_bspstore_when (void)
2252 {
2253 unw_rec_list *ptr = alloc_record (bspstore_when);
2254 return ptr;
2255 }
2256
2257 static unw_rec_list *
2258 output_bspstore_gr (unsigned int gr)
2259 {
2260 unw_rec_list *ptr = alloc_record (bspstore_gr);
2261 ptr->r.record.p.r.gr = gr;
2262 return ptr;
2263 }
2264
2265 static unw_rec_list *
2266 output_bspstore_psprel (unsigned int offset)
2267 {
2268 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2269 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2270 return ptr;
2271 }
2272
2273 static unw_rec_list *
2274 output_bspstore_sprel (unsigned int offset)
2275 {
2276 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2277 ptr->r.record.p.off.sp = offset / 4;
2278 return ptr;
2279 }
2280
2281 static unw_rec_list *
2282 output_rnat_when (void)
2283 {
2284 unw_rec_list *ptr = alloc_record (rnat_when);
2285 return ptr;
2286 }
2287
2288 static unw_rec_list *
2289 output_rnat_gr (unsigned int gr)
2290 {
2291 unw_rec_list *ptr = alloc_record (rnat_gr);
2292 ptr->r.record.p.r.gr = gr;
2293 return ptr;
2294 }
2295
2296 static unw_rec_list *
2297 output_rnat_psprel (unsigned int offset)
2298 {
2299 unw_rec_list *ptr = alloc_record (rnat_psprel);
2300 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2301 return ptr;
2302 }
2303
2304 static unw_rec_list *
2305 output_rnat_sprel (unsigned int offset)
2306 {
2307 unw_rec_list *ptr = alloc_record (rnat_sprel);
2308 ptr->r.record.p.off.sp = offset / 4;
2309 return ptr;
2310 }
2311
2312 static unw_rec_list *
2313 output_unwabi (unsigned long abi, unsigned long context)
2314 {
2315 unw_rec_list *ptr = alloc_record (unwabi);
2316 ptr->r.record.p.abi = abi;
2317 ptr->r.record.p.context = context;
2318 return ptr;
2319 }
2320
2321 static unw_rec_list *
2322 output_epilogue (unsigned long ecount)
2323 {
2324 unw_rec_list *ptr = alloc_record (epilogue);
2325 ptr->r.record.b.ecount = ecount;
2326 return ptr;
2327 }
2328
2329 static unw_rec_list *
2330 output_label_state (unsigned long label)
2331 {
2332 unw_rec_list *ptr = alloc_record (label_state);
2333 ptr->r.record.b.label = label;
2334 return ptr;
2335 }
2336
2337 static unw_rec_list *
2338 output_copy_state (unsigned long label)
2339 {
2340 unw_rec_list *ptr = alloc_record (copy_state);
2341 ptr->r.record.b.label = label;
2342 return ptr;
2343 }
2344
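/* Builders for the .spill*/.restorereg family of records.  AB/REG use
   the preserved-register encoding produced by convert_expr_to_ab_reg
   below; a non-zero PREDICATE selects the predicated record variant.  */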
2345 static unw_rec_list *
2346 output_spill_psprel (unsigned int ab,
2347 unsigned int reg,
2348 unsigned int offset,
2349 unsigned int predicate)
2350 {
2351 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2352 ptr->r.record.x.ab = ab;
2353 ptr->r.record.x.reg = reg;
2354 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2355 ptr->r.record.x.qp = predicate;
2356 return ptr;
2357 }
2358
2359 static unw_rec_list *
2360 output_spill_sprel (unsigned int ab,
2361 unsigned int reg,
2362 unsigned int offset,
2363 unsigned int predicate)
2364 {
2365 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2366 ptr->r.record.x.ab = ab;
2367 ptr->r.record.x.reg = reg;
2368 ptr->r.record.x.where.spoff = offset / 4;
2369 ptr->r.record.x.qp = predicate;
2370 return ptr;
2371 }
2372
2373 static unw_rec_list *
2374 output_spill_reg (unsigned int ab,
2375 unsigned int reg,
2376 unsigned int targ_reg,
2377 unsigned int xy,
2378 unsigned int predicate)
2379 {
2380 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2381 ptr->r.record.x.ab = ab;
2382 ptr->r.record.x.reg = reg;
2383 ptr->r.record.x.where.reg = targ_reg;
2384 ptr->r.record.x.xy = xy;
2385 ptr->r.record.x.qp = predicate;
2386 return ptr;
2387 }
2388
2389 /* Given a unw_rec_list, output the record in its correct format by
2390    calling the specified function.  */
2391
2392 static void
2393 process_one_record (unw_rec_list *ptr, vbyte_func f)
2394 {
2395 unsigned int fr_mask, gr_mask;
2396
2397 switch (ptr->r.type)
2398 {
2399 /* This is a dummy record that takes up no space in the output. */
2400 case endp:
2401 break;
2402
2403 case gr_mem:
2404 case fr_mem:
2405 case br_mem:
2406 case frgr_mem:
2407 /* These are taken care of by prologue/prologue_gr. */
2408 break;
2409
2410 case prologue_gr:
2411 case prologue:
2412 if (ptr->r.type == prologue_gr)
2413 output_R2_format (f, ptr->r.record.r.grmask,
2414 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2415 else
2416 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2417
2418 /* Output descriptor(s) for union of register spills (if any). */
2419 gr_mask = ptr->r.record.r.mask.gr_mem;
2420 fr_mask = ptr->r.record.r.mask.fr_mem;
2421 if (fr_mask)
2422 {
2423 if ((fr_mask & ~0xfUL) == 0)
2424 output_P6_format (f, fr_mem, fr_mask);
2425 else
2426 {
2427 output_P5_format (f, gr_mask, fr_mask);
2428 gr_mask = 0;
2429 }
2430 }
2431 if (gr_mask)
2432 output_P6_format (f, gr_mem, gr_mask);
2433 if (ptr->r.record.r.mask.br_mem)
2434 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2435
2436 /* output imask descriptor if necessary: */
2437 if (ptr->r.record.r.mask.i)
2438 output_P4_format (f, ptr->r.record.r.mask.i,
2439 ptr->r.record.r.imask_size);
2440 break;
2441
2442 case body:
2443 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2444 break;
2445 case mem_stack_f:
2446 case mem_stack_v:
2447 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2448 ptr->r.record.p.size);
2449 break;
2450 case psp_gr:
2451 case rp_gr:
2452 case pfs_gr:
2453 case preds_gr:
2454 case unat_gr:
2455 case lc_gr:
2456 case fpsr_gr:
2457 case priunat_gr:
2458 case bsp_gr:
2459 case bspstore_gr:
2460 case rnat_gr:
2461 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2462 break;
2463 case rp_br:
2464 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2465 break;
2466 case psp_sprel:
2467 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2468 break;
2469 case rp_when:
2470 case pfs_when:
2471 case preds_when:
2472 case unat_when:
2473 case lc_when:
2474 case fpsr_when:
2475 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2476 break;
2477 case rp_psprel:
2478 case pfs_psprel:
2479 case preds_psprel:
2480 case unat_psprel:
2481 case lc_psprel:
2482 case fpsr_psprel:
2483 case spill_base:
2484 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2485 break;
2486 case rp_sprel:
2487 case pfs_sprel:
2488 case preds_sprel:
2489 case unat_sprel:
2490 case lc_sprel:
2491 case fpsr_sprel:
2492 case priunat_sprel:
2493 case bsp_sprel:
2494 case bspstore_sprel:
2495 case rnat_sprel:
2496 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2497 break;
2498 case gr_gr:
2499 if (ptr->r.record.p.r.gr < REG_NUM)
2500 {
2501 const unw_rec_list *cur = ptr;
2502
2503 gr_mask = cur->r.record.p.grmask;
2504 while ((cur = cur->r.record.p.next) != NULL)
2505 gr_mask |= cur->r.record.p.grmask;
2506 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2507 }
2508 break;
2509 case br_gr:
2510 if (ptr->r.record.p.r.gr < REG_NUM)
2511 {
2512 const unw_rec_list *cur = ptr;
2513
2514 gr_mask = cur->r.record.p.brmask;
2515 while ((cur = cur->r.record.p.next) != NULL)
2516 gr_mask |= cur->r.record.p.brmask;
2517 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2518 }
2519 break;
2520 case spill_mask:
2521 as_bad (_("spill_mask record unimplemented."));
2522 break;
2523 case priunat_when_gr:
2524 case priunat_when_mem:
2525 case bsp_when:
2526 case bspstore_when:
2527 case rnat_when:
2528 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2529 break;
2530 case priunat_psprel:
2531 case bsp_psprel:
2532 case bspstore_psprel:
2533 case rnat_psprel:
2534 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2535 break;
2536 case unwabi:
2537 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2538 break;
2539 case epilogue:
2540 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2541 break;
2542 case label_state:
2543 case copy_state:
2544 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2545 break;
2546 case spill_psprel:
2547 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2548 ptr->r.record.x.reg, ptr->r.record.x.t,
2549 ptr->r.record.x.where.pspoff);
2550 break;
2551 case spill_sprel:
2552 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2553 ptr->r.record.x.reg, ptr->r.record.x.t,
2554 ptr->r.record.x.where.spoff);
2555 break;
2556 case spill_reg:
2557 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2558 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2559 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2560 break;
2561 case spill_psprel_p:
2562 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2563 ptr->r.record.x.ab, ptr->r.record.x.reg,
2564 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2565 break;
2566 case spill_sprel_p:
2567 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2568 ptr->r.record.x.ab, ptr->r.record.x.reg,
2569 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2570 break;
2571 case spill_reg_p:
2572 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2573 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2574 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2575 ptr->r.record.x.t);
2576 break;
2577 default:
2578       as_bad (_("record type not valid"));
2579 break;
2580 }
2581 }
2582
2583 /* Given a unw_rec_list list, process all the records with
2584 the specified function. */
2585 static void
2586 process_unw_records (unw_rec_list *list, vbyte_func f)
2587 {
2588 unw_rec_list *ptr;
2589 for (ptr = list; ptr; ptr = ptr->next)
2590 process_one_record (ptr, f);
2591 }
2592
2593 /* Determine the size of a record list in bytes. */
2594 static int
2595 calc_record_size (unw_rec_list *list)
2596 {
2597 vbyte_count = 0;
2598 process_unw_records (list, count_output);
2599 return vbyte_count;
2600 }
2601
2602 /* Return the number of bits set in the input value.
2603 Perhaps this has a better place... */
2604 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2605 # define popcount __builtin_popcount
2606 #else
2607 static int
2608 popcount (unsigned x)
2609 {
2610 static const unsigned char popcnt[16] =
2611 {
2612 0, 1, 1, 2,
2613 1, 2, 2, 3,
2614 1, 2, 2, 3,
2615 2, 3, 3, 4
2616 };
2617
2618 if (x < NELEMS (popcnt))
2619 return popcnt[x];
2620 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2621 }
2622 #endif
2623
2624 /* Update IMASK bitmask to reflect the fact that one or more registers
2625 of type TYPE are saved starting at instruction with index T. If N
2626 bits are set in REGMASK, it is assumed that instructions T through
2627 T+N-1 save these registers.
2628
2629 TYPE values:
2630 0: no save
2631 1: instruction saves next fp reg
2632 2: instruction saves next general reg
2633 3: instruction saves next branch reg */
2634 static void
2635 set_imask (unw_rec_list *region,
2636 unsigned long regmask,
2637 unsigned long t,
2638 unsigned int type)
2639 {
2640 unsigned char *imask;
2641 unsigned long imask_size;
2642 unsigned int i;
2643 int pos;
2644
2645 imask = region->r.record.r.mask.i;
2646 imask_size = region->r.record.r.imask_size;
2647 if (!imask)
2648 {
2649 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2650 imask = XCNEWVEC (unsigned char, imask_size);
2651
2652 region->r.record.r.imask_size = imask_size;
2653 region->r.record.r.mask.i = imask;
2654 }
2655
2656 i = (t / 4) + 1;
2657 pos = 2 * (3 - t % 4);
2658 while (regmask)
2659 {
2660 if (i >= imask_size)
2661 {
2662 as_bad (_("Ignoring attempt to spill beyond end of region"));
2663 return;
2664 }
2665
2666 imask[i] |= (type & 0x3) << pos;
2667
2668 regmask &= (regmask - 1);
2669 pos -= 2;
2670 if (pos < 0)
2671 {
2672	  pos = 6;
2673 ++i;
2674 }
2675 }
2676 }
2677
2678 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2679 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2680 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2681 for frag sizes. */
2682
2683 static unsigned long
2684 slot_index (unsigned long slot_addr,
2685 fragS *slot_frag,
2686 unsigned long first_addr,
2687 fragS *first_frag,
2688 int before_relax)
2689 {
2690 unsigned long s_index = 0;
2691
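  /* Bundles are 16 bytes long and hold three instruction slots, hence
     the recurring factor of 3 applied to byte counts shifted right by 4
     below; the low bits of a slot address select the slot within its
     bundle.  */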
2692 /* First time we are called, the initial address and frag are invalid. */
2693 if (first_addr == 0)
2694 return 0;
2695
2696 /* If the two addresses are in different frags, then we need to add in
2697 the remaining size of this frag, and then the entire size of intermediate
2698 frags. */
2699 while (slot_frag != first_frag)
2700 {
2701 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2702
2703 if (! before_relax)
2704 {
2705 /* We can get the final addresses only during and after
2706 relaxation. */
2707 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2708 s_index += 3 * ((first_frag->fr_next->fr_address
2709 - first_frag->fr_address
2710 - first_frag->fr_fix) >> 4);
2711 }
2712 else
2713 /* We don't know what the final addresses will be. We try our
2714 best to estimate. */
2715 switch (first_frag->fr_type)
2716 {
2717 default:
2718 break;
2719
2720 case rs_space:
2721 as_fatal (_("Only constant space allocation is supported"));
2722 break;
2723
2724 case rs_align:
2725 case rs_align_code:
2726 case rs_align_test:
2727 /* Take alignment into account. Assume the worst case
2728 before relaxation. */
2729 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2730 break;
2731
2732 case rs_org:
2733 if (first_frag->fr_symbol)
2734 {
2735 as_fatal (_("Only constant offsets are supported"));
2736 break;
2737 }
2738 /* Fall through. */
2739 case rs_fill:
2740 s_index += 3 * (first_frag->fr_offset >> 4);
2741 break;
2742 }
2743
2744 /* Add in the full size of the frag converted to instruction slots. */
2745 s_index += 3 * (first_frag->fr_fix >> 4);
2746 /* Subtract away the initial part before first_addr. */
2747 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2748 + ((first_addr & 0x3) - (start_addr & 0x3)));
2749
2750 /* Move to the beginning of the next frag. */
2751 first_frag = first_frag->fr_next;
2752 first_addr = (unsigned long) &first_frag->fr_literal;
2753
2754 /* This can happen if there is section switching in the middle of a
2755 function, causing the frag chain for the function to be broken.
2756 It is too difficult to recover safely from this problem, so we just
2757 exit with an error. */
2758 if (first_frag == NULL)
2759 as_fatal (_("Section switching in code is not supported."));
2760 }
2761
2762 /* Add in the used part of the last frag. */
2763 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2764 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2765 return s_index;
2766 }
2767
2768 /* Optimize unwind record directives. */
2769
2770 static unw_rec_list *
2771 optimize_unw_records (unw_rec_list *list)
2772 {
2773 if (!list)
2774 return NULL;
2775
2776 /* If the only unwind record is ".prologue" or ".prologue" followed
2777 by ".body", then we can optimize the unwind directives away. */
2778 if (list->r.type == prologue
2779 && (list->next->r.type == endp
2780 || (list->next->r.type == body && list->next->next->r.type == endp)))
2781 return NULL;
2782
2783 return list;
2784 }
2785
2786 /* Given a complete record list, process any records which have
2787    unresolved fields (i.e., length counts for a prologue).  After
2788 this has been run, all necessary information should be available
2789 within each record to generate an image. */
2790
2791 static void
2792 fixup_unw_records (unw_rec_list *list, int before_relax)
2793 {
2794 unw_rec_list *ptr, *region = 0;
2795 unsigned long first_addr = 0, rlen = 0, t;
2796 fragS *first_frag = 0;
2797
2798 for (ptr = list; ptr; ptr = ptr->next)
2799 {
2800 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2801 as_bad (_("Insn slot not set in unwind record."));
2802 t = slot_index (ptr->slot_number, ptr->slot_frag,
2803 first_addr, first_frag, before_relax);
2804 switch (ptr->r.type)
2805 {
2806 case prologue:
2807 case prologue_gr:
2808 case body:
2809 {
2810 unw_rec_list *last;
2811 int size;
2812 unsigned long last_addr = 0;
2813 fragS *last_frag = NULL;
2814
2815 first_addr = ptr->slot_number;
2816 first_frag = ptr->slot_frag;
2817 /* Find either the next body/prologue start, or the end of
2818 the function, and determine the size of the region. */
2819 for (last = ptr->next; last != NULL; last = last->next)
2820 if (last->r.type == prologue || last->r.type == prologue_gr
2821 || last->r.type == body || last->r.type == endp)
2822 {
2823 last_addr = last->slot_number;
2824 last_frag = last->slot_frag;
2825 break;
2826 }
2827 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2828 before_relax);
2829 rlen = ptr->r.record.r.rlen = size;
2830 if (ptr->r.type == body)
2831 /* End of region. */
2832 region = 0;
2833 else
2834 region = ptr;
2835 break;
2836 }
2837 case epilogue:
2838 if (t < rlen)
2839 ptr->r.record.b.t = rlen - 1 - t;
2840 else
2841 /* This happens when a memory-stack-less procedure uses a
2842 ".restore sp" directive at the end of a region to pop
2843 the frame state. */
2844 ptr->r.record.b.t = 0;
2845 break;
2846
2847 case mem_stack_f:
2848 case mem_stack_v:
2849 case rp_when:
2850 case pfs_when:
2851 case preds_when:
2852 case unat_when:
2853 case lc_when:
2854 case fpsr_when:
2855 case priunat_when_gr:
2856 case priunat_when_mem:
2857 case bsp_when:
2858 case bspstore_when:
2859 case rnat_when:
2860 ptr->r.record.p.t = t;
2861 break;
2862
2863 case spill_reg:
2864 case spill_sprel:
2865 case spill_psprel:
2866 case spill_reg_p:
2867 case spill_sprel_p:
2868 case spill_psprel_p:
2869 ptr->r.record.x.t = t;
2870 break;
2871
2872 case frgr_mem:
2873 if (!region)
2874 {
2875 as_bad (_("frgr_mem record before region record!"));
2876 return;
2877 }
2878 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2879 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2880 set_imask (region, ptr->r.record.p.frmask, t, 1);
2881 set_imask (region, ptr->r.record.p.grmask, t, 2);
2882 break;
2883 case fr_mem:
2884 if (!region)
2885 {
2886 as_bad (_("fr_mem record before region record!"));
2887 return;
2888 }
2889 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2890 set_imask (region, ptr->r.record.p.frmask, t, 1);
2891 break;
2892 case gr_mem:
2893 if (!region)
2894 {
2895 as_bad (_("gr_mem record before region record!"));
2896 return;
2897 }
2898 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2899 set_imask (region, ptr->r.record.p.grmask, t, 2);
2900 break;
2901 case br_mem:
2902 if (!region)
2903 {
2904 as_bad (_("br_mem record before region record!"));
2905 return;
2906 }
2907 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2908 set_imask (region, ptr->r.record.p.brmask, t, 3);
2909 break;
2910
2911 case gr_gr:
2912 if (!region)
2913 {
2914 as_bad (_("gr_gr record before region record!"));
2915 return;
2916 }
2917 set_imask (region, ptr->r.record.p.grmask, t, 2);
2918 break;
2919 case br_gr:
2920 if (!region)
2921 {
2922 as_bad (_("br_gr record before region record!"));
2923 return;
2924 }
2925 set_imask (region, ptr->r.record.p.brmask, t, 3);
2926 break;
2927
2928 default:
2929 break;
2930 }
2931 }
2932 }
2933
2934 /* Estimate the size of a frag before relaxing. We only have one type of frag
2935 to handle here, which is the unwind info frag. */
2936
2937 int
2938 ia64_estimate_size_before_relax (fragS *frag,
2939 asection *segtype ATTRIBUTE_UNUSED)
2940 {
2941 unw_rec_list *list;
2942 int len, size, pad;
2943
2944 /* ??? This code is identical to the first part of ia64_convert_frag. */
2945 list = (unw_rec_list *) frag->fr_opcode;
2946 fixup_unw_records (list, 0);
2947
2948 len = calc_record_size (list);
2949 /* pad to pointer-size boundary. */
2950 pad = len % md.pointer_size;
2951 if (pad != 0)
2952 len += md.pointer_size - pad;
2953 /* Add 8 for the header. */
2954 size = len + 8;
2955 /* Add a pointer for the personality offset. */
2956 if (frag->fr_offset)
2957 size += md.pointer_size;
2958
2959 /* fr_var carries the max_chars that we created the fragment with.
2960 We must, of course, have allocated enough memory earlier. */
2961 gas_assert (frag->fr_var >= size);
2962
2963 return frag->fr_fix + size;
2964 }
2965
2966 /* This function converts a rs_machine_dependent variant frag into a
2967 normal fill frag with the unwind image from the record list. */
2968 void
2969 ia64_convert_frag (fragS *frag)
2970 {
2971 unw_rec_list *list;
2972 int len, size, pad;
2973 valueT flag_value;
2974
2975 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2976 list = (unw_rec_list *) frag->fr_opcode;
2977 fixup_unw_records (list, 0);
2978
2979 len = calc_record_size (list);
2980 /* pad to pointer-size boundary. */
2981 pad = len % md.pointer_size;
2982 if (pad != 0)
2983 len += md.pointer_size - pad;
2984 /* Add 8 for the header. */
2985 size = len + 8;
2986 /* Add a pointer for the personality offset. */
2987 if (frag->fr_offset)
2988 size += md.pointer_size;
2989
2990 /* fr_var carries the max_chars that we created the fragment with.
2991 We must, of course, have allocated enough memory earlier. */
2992 gas_assert (frag->fr_var >= size);
2993
2994 /* Initialize the header area. fr_offset is initialized with
2995 unwind.personality_routine. */
2996 if (frag->fr_offset)
2997 {
2998 if (md.flags & EF_IA_64_ABI64)
2999 flag_value = (bfd_vma) 3 << 32;
3000 else
3001 /* 32-bit unwind info block. */
3002 flag_value = (bfd_vma) 0x1003 << 32;
3003 }
3004 else
3005 flag_value = 0;
3006
3007 md_number_to_chars (frag->fr_literal,
3008 (((bfd_vma) 1 << 48) /* Version. */
3009 | flag_value /* U & E handler flags. */
3010 | (len / md.pointer_size)), /* Length. */
3011 8);
3012
3013 /* Skip the header. */
3014 vbyte_mem_ptr = frag->fr_literal + 8;
3015 process_unw_records (list, output_vbyte_mem);
3016
3017 /* Fill the padding bytes with zeros. */
3018 if (pad != 0)
3019 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3020 md.pointer_size - pad);
3021 /* Fill the unwind personality with zeros. */
3022 if (frag->fr_offset)
3023 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3024 md.pointer_size);
3025
3026 frag->fr_fix += size;
3027 frag->fr_type = rs_fill;
3028 frag->fr_var = 0;
3029 frag->fr_offset = 0;
3030 }
3031
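/* Parse the qualifying-predicate operand of a predicated unwind
   directive into *QP (diagnosing bad or pointless predicates), then
   leave the following operand, if present, in *E.  PO names the
   directive for diagnostics.  Returns the separator that ended the
   scan.  */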
3032 static int
3033 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3034 {
3035 int sep = parse_operand_and_eval (e, ',');
3036
3037 *qp = e->X_add_number - REG_P;
3038 if (e->X_op != O_register || *qp > 63)
3039 {
3040 as_bad (_("First operand to .%s must be a predicate"), po);
3041 *qp = 0;
3042 }
3043 else if (*qp == 0)
3044 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3045 if (sep == ',')
3046 sep = parse_operand_and_eval (e, ',');
3047 else
3048 e->X_op = O_absent;
3049 return sep;
3050 }
3051
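/* Convert a register expression into the (AB, REG) pair used by
   spill/restore unwind records: AB 0 = preserved general registers
   r4-r7, 1 = preserved floating-point registers, 2 = preserved branch
   registers b1-b5, 3 = one of the individually encoded special
   registers handled in the switch below.  Anything else is diagnosed
   as not being a preserved register.  */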
3052 static void
3053 convert_expr_to_ab_reg (const expressionS *e,
3054 unsigned int *ab,
3055 unsigned int *regp,
3056 const char *po,
3057 int n)
3058 {
3059 unsigned int reg = e->X_add_number;
3060
3061 *ab = *regp = 0; /* Anything valid is good here. */
3062
3063 if (e->X_op != O_register)
3064 reg = REG_GR; /* Anything invalid is good here. */
3065
3066 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3067 {
3068 *ab = 0;
3069 *regp = reg - REG_GR;
3070 }
3071 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3072 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3073 {
3074 *ab = 1;
3075 *regp = reg - REG_FR;
3076 }
3077 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3078 {
3079 *ab = 2;
3080 *regp = reg - REG_BR;
3081 }
3082 else
3083 {
3084 *ab = 3;
3085 switch (reg)
3086 {
3087 case REG_PR: *regp = 0; break;
3088 case REG_PSP: *regp = 1; break;
3089 case REG_PRIUNAT: *regp = 2; break;
3090 case REG_BR + 0: *regp = 3; break;
3091 case REG_AR + AR_BSP: *regp = 4; break;
3092 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3093 case REG_AR + AR_RNAT: *regp = 6; break;
3094 case REG_AR + AR_UNAT: *regp = 7; break;
3095 case REG_AR + AR_FPSR: *regp = 8; break;
3096 case REG_AR + AR_PFS: *regp = 9; break;
3097 case REG_AR + AR_LC: *regp = 10; break;
3098
3099 default:
3100 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3101 break;
3102 }
3103 }
3104 }
3105
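/* Convert a register expression into the (XY, REG) target encoding used
   by .spillreg: XY 0 = general register, 1 = floating-point register,
   2 = branch register.  The target must be writable, so r0, f0 and f1
   are rejected.  */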
3106 static void
3107 convert_expr_to_xy_reg (const expressionS *e,
3108 unsigned int *xy,
3109 unsigned int *regp,
3110 const char *po,
3111 int n)
3112 {
3113 unsigned int reg = e->X_add_number;
3114
3115 *xy = *regp = 0; /* Anything valid is good here. */
3116
3117 if (e->X_op != O_register)
3118 reg = REG_GR; /* Anything invalid is good here. */
3119
3120 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3121 {
3122 *xy = 0;
3123 *regp = reg - REG_GR;
3124 }
3125 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3126 {
3127 *xy = 1;
3128 *regp = reg - REG_FR;
3129 }
3130 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3131 {
3132 *xy = 2;
3133 *regp = reg - REG_BR;
3134 }
3135 else
3136 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3137 }
3138
3139 static void
3140 dot_align (int arg)
3141 {
3142 /* The current frag is an alignment frag. */
3143 align_frag = frag_now;
3144 s_align_bytes (arg);
3145 }
3146
3147 static void
3148 dot_radix (int dummy ATTRIBUTE_UNUSED)
3149 {
3150 char *radix;
3151 int ch;
3152
3153 SKIP_WHITESPACE ();
3154
3155 if (is_it_end_of_statement ())
3156 return;
3157 ch = get_symbol_name (&radix);
3158 ia64_canonicalize_symbol_name (radix);
3159 if (strcasecmp (radix, "C"))
3160 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3161 (void) restore_line_pointer (ch);
3162 demand_empty_rest_of_line ();
3163 }
3164
3165 /* Helper function for .loc directives. If the assembler is not generating
3166 line number info, then we need to remember which instructions have a .loc
3167 directive, and only call dwarf2_gen_line_info for those instructions. */
3168
3169 static void
3170 dot_loc (int x)
3171 {
3172 CURR_SLOT.loc_directive_seen = 1;
3173 dwarf2_directive_loc (x);
3174 }
3175
3176 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3177 static void
3178 dot_special_section (int which)
3179 {
3180 set_section ((char *) special_section_name[which]);
3181 }
3182
3183 /* Return -1 for warning and 0 for error. */
3184
3185 static int
3186 unwind_diagnostic (const char * region, const char *directive)
3187 {
3188 if (md.unwind_check == unwind_check_warning)
3189 {
3190 as_warn (_(".%s outside of %s"), directive, region);
3191 return -1;
3192 }
3193 else
3194 {
3195 as_bad (_(".%s outside of %s"), directive, region);
3196 ignore_rest_of_line ();
3197 return 0;
3198 }
3199 }
3200
3201 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3202 a procedure but the unwind directive check is set to warning, 0 if
3203 a directive isn't in a procedure and the unwind directive check is set
3204 to error. */
3205
3206 static int
3207 in_procedure (const char *directive)
3208 {
3209 if (unwind.proc_pending.sym
3210 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3211 return 1;
3212 return unwind_diagnostic ("procedure", directive);
3213 }
3214
3215 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3216 a prologue but the unwind directive check is set to warning, 0 if
3217 a directive isn't in a prologue and the unwind directive check is set
3218 to error. */
3219
3220 static int
3221 in_prologue (const char *directive)
3222 {
3223 int in = in_procedure (directive);
3224
3225 if (in > 0 && !unwind.prologue)
3226 in = unwind_diagnostic ("prologue", directive);
3227 check_pending_save ();
3228 return in;
3229 }
3230
3231 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3232 a body but the unwind directive check is set to warning, 0 if
3233 a directive isn't in a body and the unwind directive check is set
3234 to error. */
3235
3236 static int
3237 in_body (const char *directive)
3238 {
3239 int in = in_procedure (directive);
3240
3241 if (in > 0 && !unwind.body)
3242 in = unwind_diagnostic ("body region", directive);
3243 return in;
3244 }
3245
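/* Append PTR (when non-NULL) to the current procedure's unwind record
   list.  SEP is the separator returned by the operand parser: a ','
   means an (as yet unsupported) tag operand follows, and anything other
   than NOT_A_CHAR means the rest of the line must be empty.  */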
3246 static void
3247 add_unwind_entry (unw_rec_list *ptr, int sep)
3248 {
3249 if (ptr)
3250 {
3251 if (unwind.tail)
3252 unwind.tail->next = ptr;
3253 else
3254 unwind.list = ptr;
3255 unwind.tail = ptr;
3256
3257 /* The current entry can in fact be a chain of unwind entries. */
3258 if (unwind.current_entry == NULL)
3259 unwind.current_entry = ptr;
3260 }
3261
3266 if (sep == ',')
3267 {
3268 char *name;
3269 /* Parse a tag permitted for the current directive. */
3270 int ch;
3271
3272 SKIP_WHITESPACE ();
3273 ch = get_symbol_name (&name);
3274 /* FIXME: For now, just issue a warning that this isn't implemented. */
3275 {
3276 static int warned;
3277
3278 if (!warned)
3279 {
3280 warned = 1;
3281 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3282 }
3283 }
3284 (void) restore_line_pointer (ch);
3285 }
3286 if (sep != NOT_A_CHAR)
3287 demand_empty_rest_of_line ();
3288 }
3289
3290 static void
3291 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3292 {
3293 expressionS e;
3294 int sep;
3295
3296 if (!in_prologue ("fframe"))
3297 return;
3298
3299 sep = parse_operand_and_eval (&e, ',');
3300
3301 if (e.X_op != O_constant)
3302 {
3303 as_bad (_("First operand to .fframe must be a constant"));
3304 e.X_add_number = 0;
3305 }
3306 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3307 }
3308
3309 static void
3310 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3311 {
3312 expressionS e;
3313 unsigned reg;
3314 int sep;
3315
3316 if (!in_prologue ("vframe"))
3317 return;
3318
3319 sep = parse_operand_and_eval (&e, ',');
3320 reg = e.X_add_number - REG_GR;
3321 if (e.X_op != O_register || reg > 127)
3322 {
3323 as_bad (_("First operand to .vframe must be a general register"));
3324 reg = 0;
3325 }
3326 add_unwind_entry (output_mem_stack_v (), sep);
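  /* If .prologue already set aside a GR for psp (mask bit 2), the
     register given here must be prologue_gr plus the number of
     registers covered by the higher-order mask bits.  */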
3327 if (! (unwind.prologue_mask & 2))
3328 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3329 else if (reg != unwind.prologue_gr
3330 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3331 as_warn (_("Operand of .vframe contradicts .prologue"));
3332 }
3333
3334 static void
3335 dot_vframesp (int psp)
3336 {
3337 expressionS e;
3338 int sep;
3339
3340 if (psp)
3341 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3342
3343 if (!in_prologue ("vframesp"))
3344 return;
3345
3346 sep = parse_operand_and_eval (&e, ',');
3347 if (e.X_op != O_constant)
3348 {
3349 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3350 e.X_add_number = 0;
3351 }
3352 add_unwind_entry (output_mem_stack_v (), sep);
3353 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3354 }
3355
3356 static void
3357 dot_save (int dummy ATTRIBUTE_UNUSED)
3358 {
3359 expressionS e1, e2;
3360 unsigned reg1, reg2;
3361 int sep;
3362
3363 if (!in_prologue ("save"))
3364 return;
3365
3366 sep = parse_operand_and_eval (&e1, ',');
3367 if (sep == ',')
3368 sep = parse_operand_and_eval (&e2, ',');
3369 else
3370 e2.X_op = O_absent;
3371
3372 reg1 = e1.X_add_number;
3373   /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3374 if (e1.X_op != O_register)
3375 {
3376 as_bad (_("First operand to .save not a register"));
3377 reg1 = REG_PR; /* Anything valid is good here. */
3378 }
3379 reg2 = e2.X_add_number - REG_GR;
3380 if (e2.X_op != O_register || reg2 > 127)
3381 {
3382 as_bad (_("Second operand to .save not a valid register"));
3383 reg2 = 0;
3384 }
3385 switch (reg1)
3386 {
3387 case REG_AR + AR_BSP:
3388 add_unwind_entry (output_bsp_when (), sep);
3389 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3390 break;
3391 case REG_AR + AR_BSPSTORE:
3392 add_unwind_entry (output_bspstore_when (), sep);
3393 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3394 break;
3395 case REG_AR + AR_RNAT:
3396 add_unwind_entry (output_rnat_when (), sep);
3397 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3398 break;
3399 case REG_AR + AR_UNAT:
3400 add_unwind_entry (output_unat_when (), sep);
3401 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3402 break;
3403 case REG_AR + AR_FPSR:
3404 add_unwind_entry (output_fpsr_when (), sep);
3405 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3406 break;
3407 case REG_AR + AR_PFS:
3408 add_unwind_entry (output_pfs_when (), sep);
3409 if (! (unwind.prologue_mask & 4))
3410 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3411 else if (reg2 != unwind.prologue_gr
3412 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3413 as_warn (_("Second operand of .save contradicts .prologue"));
3414 break;
3415 case REG_AR + AR_LC:
3416 add_unwind_entry (output_lc_when (), sep);
3417 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3418 break;
3419 case REG_BR:
3420 add_unwind_entry (output_rp_when (), sep);
3421 if (! (unwind.prologue_mask & 8))
3422 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3423 else if (reg2 != unwind.prologue_gr)
3424 as_warn (_("Second operand of .save contradicts .prologue"));
3425 break;
3426 case REG_PR:
3427 add_unwind_entry (output_preds_when (), sep);
3428 if (! (unwind.prologue_mask & 1))
3429 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3430 else if (reg2 != unwind.prologue_gr
3431 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3432 as_warn (_("Second operand of .save contradicts .prologue"));
3433 break;
3434 case REG_PRIUNAT:
3435 add_unwind_entry (output_priunat_when_gr (), sep);
3436 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3437 break;
3438 default:
3439 as_bad (_("First operand to .save not a valid register"));
3440 add_unwind_entry (NULL, sep);
3441 break;
3442 }
3443 }
3444
3445 static void
3446 dot_restore (int dummy ATTRIBUTE_UNUSED)
3447 {
3448 expressionS e1;
3449 unsigned long ecount; /* # of _additional_ regions to pop */
3450 int sep;
3451
3452 if (!in_body ("restore"))
3453 return;
3454
3455 sep = parse_operand_and_eval (&e1, ',');
3456 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3457 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3458
3459 if (sep == ',')
3460 {
3461 expressionS e2;
3462
3463 sep = parse_operand_and_eval (&e2, ',');
3464 if (e2.X_op != O_constant || e2.X_add_number < 0)
3465 {
3466 as_bad (_("Second operand to .restore must be a constant >= 0"));
3467 e2.X_add_number = 0;
3468 }
3469 ecount = e2.X_add_number;
3470 }
3471 else
3472 ecount = unwind.prologue_count - 1;
3473
3474 if (ecount >= unwind.prologue_count)
3475 {
3476 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3477 ecount + 1, unwind.prologue_count);
3478 ecount = 0;
3479 }
3480
3481 add_unwind_entry (output_epilogue (ecount), sep);
3482
3483 if (ecount < unwind.prologue_count)
3484 unwind.prologue_count -= ecount + 1;
3485 else
3486 unwind.prologue_count = 0;
3487 }
3488
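/* Implements .restorereg and .restorereg.p: emit a spill_reg record
   with target register 0 for the named preserved register.  */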
3489 static void
3490 dot_restorereg (int pred)
3491 {
3492 unsigned int qp, ab, reg;
3493 expressionS e;
3494 int sep;
3495 const char * const po = pred ? "restorereg.p" : "restorereg";
3496
3497 if (!in_procedure (po))
3498 return;
3499
3500 if (pred)
3501 sep = parse_predicate_and_operand (&e, &qp, po);
3502 else
3503 {
3504 sep = parse_operand_and_eval (&e, ',');
3505 qp = 0;
3506 }
3507 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3508
3509 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3510 }
3511
3512 static const char *special_linkonce_name[] =
3513 {
3514 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3515 };
3516
3517 static void
3518 start_unwind_section (const segT text_seg, int sec_index)
3519 {
3520 /*
3521 Use a slightly ugly scheme to derive the unwind section names from
3522 the text section name:
3523
3524      text sect.  unwind table sect.
3525      name:       name:                      comments:
3526      ----------  -----------------          --------------------------------
3527      .text       .IA_64.unwind
3528      .text.foo   .IA_64.unwind.text.foo
3529      .foo        .IA_64.unwind.foo
3530      .gnu.linkonce.t.foo
3531                  .gnu.linkonce.ia64unw.foo
3532      _info       .IA_64.unwind_info         gas issues error message (ditto)
3533      _infoFOO    .IA_64.unwind_infoFOO      gas issues error message (ditto)
3534
3535 This mapping is done so that:
3536
3537 (a) An object file with unwind info only in .text will use
3538 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3539 This follows the letter of the ABI and also ensures backwards
3540 compatibility with older toolchains.
3541
3542 (b) An object file with unwind info in multiple text sections
3543 will use separate unwind sections for each text section.
3544 This allows us to properly set the "sh_info" and "sh_link"
3545 fields in SHT_IA_64_UNWIND as required by the ABI and also
3546 lets GNU ld support programs with multiple segments
3547 containing unwind info (as might be the case for certain
3548 embedded applications).
3549
3550 (c) An error is issued if there would be a name clash.
3551 */
3552
3553 const char *text_name, *sec_text_name;
3554 char *sec_name;
3555 const char *prefix = special_section_name [sec_index];
3556 const char *suffix;
3557
3558 sec_text_name = segment_name (text_seg);
3559 text_name = sec_text_name;
3560 if (startswith (text_name, "_info"))
3561 {
3562 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3563 text_name);
3564 ignore_rest_of_line ();
3565 return;
3566 }
3567 if (strcmp (text_name, ".text") == 0)
3568 text_name = "";
3569
3570 /* Build the unwind section name by appending the (possibly stripped)
3571 text section name to the unwind prefix. */
3572 suffix = text_name;
3573 if (startswith (text_name, ".gnu.linkonce.t."))
3574 {
3575 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3576 suffix += sizeof (".gnu.linkonce.t.") - 1;
3577 }
3578
3579 sec_name = concat (prefix, suffix, NULL);
3580
3581 /* Handle COMDAT group. */
3582 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3583 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3584 {
3585 char *section;
3586 const char *group_name = elf_group_name (text_seg);
3587
3588 if (group_name == NULL)
3589 {
3590 as_bad (_("Group section `%s' has no group signature"),
3591 sec_text_name);
3592 ignore_rest_of_line ();
3593 free (sec_name);
3594 return;
3595 }
3596
3597 /* We have to construct a fake section directive. */
3598 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3599 set_section (section);
3600 free (section);
3601 }
3602 else
3603 {
3604 set_section (sec_name);
3605 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3606 }
3607
3608 elf_linked_to_section (now_seg) = text_seg;
3609 free (sec_name);
3610 }
3611
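/* Finalize and emit the unwind descriptor area for the current
   procedure: close the record list with an endp marker, resolve slot
   numbers, and, if any descriptors (or a personality routine) are
   required, switch to the matching unwind-info section and output the
   image, remembering its start in unwind.info.  */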
3612 static void
3613 generate_unwind_image (const segT text_seg)
3614 {
3615 int size, pad;
3616 unw_rec_list *list;
3617
3618 /* Mark the end of the unwind info, so that we can compute the size of the
3619 last unwind region. */
3620 add_unwind_entry (output_endp (), NOT_A_CHAR);
3621
3622 /* Force out pending instructions, to make sure all unwind records have
3623 a valid slot_number field. */
3624 ia64_flush_insns ();
3625
3626 /* Generate the unwind record. */
3627 list = optimize_unw_records (unwind.list);
3628 fixup_unw_records (list, 1);
3629 size = calc_record_size (list);
3630
3631 if (size > 0 || unwind.force_unwind_entry)
3632 {
3633 unwind.force_unwind_entry = 0;
3634 /* pad to pointer-size boundary. */
3635 pad = size % md.pointer_size;
3636 if (pad != 0)
3637 size += md.pointer_size - pad;
3638 /* Add 8 for the header. */
3639 size += 8;
3640 /* Add a pointer for the personality offset. */
3641 if (unwind.personality_routine)
3642 size += md.pointer_size;
3643 }
3644
3645 /* If there are unwind records, switch sections, and output the info. */
3646 if (size != 0)
3647 {
3648 expressionS exp;
3649 bfd_reloc_code_real_type reloc;
3650
3651 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3652
3653 /* Make sure the section has 4 byte alignment for ILP32 and
3654 8 byte alignment for LP64. */
3655 frag_align (md.pointer_size_shift, 0, 0);
3656 record_alignment (now_seg, md.pointer_size_shift);
3657
3658 /* Set expression which points to start of unwind descriptor area. */
3659 unwind.info = expr_build_dot ();
3660
3661 frag_var (rs_machine_dependent, size, size, 0, 0,
3662 (offsetT) (long) unwind.personality_routine,
3663 (char *) list);
3664
3665 /* Add the personality address to the image. */
3666 if (unwind.personality_routine != 0)
3667 {
3668 exp.X_op = O_symbol;
3669 exp.X_add_symbol = unwind.personality_routine;
3670 exp.X_add_number = 0;
3671
3672 if (md.flags & EF_IA_64_BE)
3673 {
3674 if (md.flags & EF_IA_64_ABI64)
3675 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3676 else
3677 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3678 }
3679 else
3680 {
3681 if (md.flags & EF_IA_64_ABI64)
3682 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3683 else
3684 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3685 }
3686
3687 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3688 md.pointer_size, &exp, 0, reloc);
3689 unwind.personality_routine = 0;
3690 }
3691 }
3692
3693 free_saved_prologue_counts ();
3694 unwind.list = unwind.tail = unwind.current_entry = NULL;
3695 }
3696
3697 static void
3698 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3699 {
3700 if (!in_procedure ("handlerdata"))
3701 return;
3702 unwind.force_unwind_entry = 1;
3703
3704 /* Remember which segment we're in so we can switch back after .endp */
3705 unwind.saved_text_seg = now_seg;
3706 unwind.saved_text_subseg = now_subseg;
3707
3708 /* Generate unwind info into unwind-info section and then leave that
3709 section as the currently active one so dataXX directives go into
3710 the language specific data area of the unwind info block. */
3711 generate_unwind_image (now_seg);
3712 demand_empty_rest_of_line ();
3713 }
3714
3715 static void
3716 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3717 {
3718 if (!in_procedure ("unwentry"))
3719 return;
3720 unwind.force_unwind_entry = 1;
3721 demand_empty_rest_of_line ();
3722 }
3723
3724 static void
3725 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3726 {
3727 expressionS e;
3728 unsigned reg;
3729
3730 if (!in_prologue ("altrp"))
3731 return;
3732
3733 parse_operand_and_eval (&e, 0);
3734 reg = e.X_add_number - REG_BR;
3735 if (e.X_op != O_register || reg > 7)
3736 {
3737 as_bad (_("First operand to .altrp not a valid branch register"));
3738 reg = 0;
3739 }
3740 add_unwind_entry (output_rp_br (reg), 0);
3741 }
3742
3743 static void
3744 dot_savemem (int psprel)
3745 {
3746 expressionS e1, e2;
3747 int sep;
3748 int reg1, val;
3749 const char * const po = psprel ? "savepsp" : "savesp";
3750
3751 if (!in_prologue (po))
3752 return;
3753
3754 sep = parse_operand_and_eval (&e1, ',');
3755 if (sep == ',')
3756 sep = parse_operand_and_eval (&e2, ',');
3757 else
3758 e2.X_op = O_absent;
3759
3760 reg1 = e1.X_add_number;
3761 val = e2.X_add_number;
3762
3763   /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3764 if (e1.X_op != O_register)
3765 {
3766 as_bad (_("First operand to .%s not a register"), po);
3767 reg1 = REG_PR; /* Anything valid is good here. */
3768 }
3769 if (e2.X_op != O_constant)
3770 {
3771 as_bad (_("Second operand to .%s not a constant"), po);
3772 val = 0;
3773 }
3774
3775 switch (reg1)
3776 {
3777 case REG_AR + AR_BSP:
3778 add_unwind_entry (output_bsp_when (), sep);
3779 add_unwind_entry ((psprel
3780 ? output_bsp_psprel
3781 : output_bsp_sprel) (val), NOT_A_CHAR);
3782 break;
3783 case REG_AR + AR_BSPSTORE:
3784 add_unwind_entry (output_bspstore_when (), sep);
3785 add_unwind_entry ((psprel
3786 ? output_bspstore_psprel
3787 : output_bspstore_sprel) (val), NOT_A_CHAR);
3788 break;
3789 case REG_AR + AR_RNAT:
3790 add_unwind_entry (output_rnat_when (), sep);
3791 add_unwind_entry ((psprel
3792 ? output_rnat_psprel
3793 : output_rnat_sprel) (val), NOT_A_CHAR);
3794 break;
3795 case REG_AR + AR_UNAT:
3796 add_unwind_entry (output_unat_when (), sep);
3797 add_unwind_entry ((psprel
3798 ? output_unat_psprel
3799 : output_unat_sprel) (val), NOT_A_CHAR);
3800 break;
3801 case REG_AR + AR_FPSR:
3802 add_unwind_entry (output_fpsr_when (), sep);
3803 add_unwind_entry ((psprel
3804 ? output_fpsr_psprel
3805 : output_fpsr_sprel) (val), NOT_A_CHAR);
3806 break;
3807 case REG_AR + AR_PFS:
3808 add_unwind_entry (output_pfs_when (), sep);
3809 add_unwind_entry ((psprel
3810 ? output_pfs_psprel
3811 : output_pfs_sprel) (val), NOT_A_CHAR);
3812 break;
3813 case REG_AR + AR_LC:
3814 add_unwind_entry (output_lc_when (), sep);
3815 add_unwind_entry ((psprel
3816 ? output_lc_psprel
3817 : output_lc_sprel) (val), NOT_A_CHAR);
3818 break;
3819 case REG_BR:
3820 add_unwind_entry (output_rp_when (), sep);
3821 add_unwind_entry ((psprel
3822 ? output_rp_psprel
3823 : output_rp_sprel) (val), NOT_A_CHAR);
3824 break;
3825 case REG_PR:
3826 add_unwind_entry (output_preds_when (), sep);
3827 add_unwind_entry ((psprel
3828 ? output_preds_psprel
3829 : output_preds_sprel) (val), NOT_A_CHAR);
3830 break;
3831 case REG_PRIUNAT:
3832 add_unwind_entry (output_priunat_when_mem (), sep);
3833 add_unwind_entry ((psprel
3834 ? output_priunat_psprel
3835 : output_priunat_sprel) (val), NOT_A_CHAR);
3836 break;
3837 default:
3838 as_bad (_("First operand to .%s not a valid register"), po);
3839 add_unwind_entry (NULL, sep);
3840 break;
3841 }
3842 }
3843
3844 static void
3845 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3846 {
3847 expressionS e;
3848 unsigned grmask;
3849 int sep;
3850
3851 if (!in_prologue ("save.g"))
3852 return;
3853
3854 sep = parse_operand_and_eval (&e, ',');
3855
3856 grmask = e.X_add_number;
3857 if (e.X_op != O_constant
3858 || e.X_add_number <= 0
3859 || e.X_add_number > 0xf)
3860 {
3861 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3862 grmask = 0;
3863 }
3864
3865 if (sep == ',')
3866 {
3867 unsigned reg;
3868 int n = popcount (grmask);
3869
3870 parse_operand_and_eval (&e, 0);
3871 reg = e.X_add_number - REG_GR;
3872 if (e.X_op != O_register || reg > 127)
3873 {
3874 as_bad (_("Second operand to .save.g must be a general register"));
3875 reg = 0;
3876 }
3877 else if (reg > 128U - n)
3878 {
3879 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3880 reg = 0;
3881 }
3882 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3883 }
3884 else
3885 add_unwind_entry (output_gr_mem (grmask), 0);
3886 }
3887
3888 static void
3889 dot_savef (int dummy ATTRIBUTE_UNUSED)
3890 {
3891 expressionS e;
3892
3893 if (!in_prologue ("save.f"))
3894 return;
3895
3896 parse_operand_and_eval (&e, 0);
3897
3898 if (e.X_op != O_constant
3899 || e.X_add_number <= 0
3900 || e.X_add_number > 0xfffff)
3901 {
3902 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3903 e.X_add_number = 0;
3904 }
3905 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3906 }
3907
3908 static void
3909 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3910 {
3911 expressionS e;
3912 unsigned brmask;
3913 int sep;
3914
3915 if (!in_prologue ("save.b"))
3916 return;
3917
3918 sep = parse_operand_and_eval (&e, ',');
3919
3920 brmask = e.X_add_number;
3921 if (e.X_op != O_constant
3922 || e.X_add_number <= 0
3923 || e.X_add_number > 0x1f)
3924 {
3925 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3926 brmask = 0;
3927 }
3928
3929 if (sep == ',')
3930 {
3931 unsigned reg;
3932 int n = popcount (brmask);
3933
3934 parse_operand_and_eval (&e, 0);
3935 reg = e.X_add_number - REG_GR;
3936 if (e.X_op != O_register || reg > 127)
3937 {
3938 as_bad (_("Second operand to .save.b must be a general register"));
3939 reg = 0;
3940 }
3941 else if (reg > 128U - n)
3942 {
3943 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3944 reg = 0;
3945 }
3946 add_unwind_entry (output_br_gr (brmask, reg), 0);
3947 }
3948 else
3949 add_unwind_entry (output_br_mem (brmask), 0);
3950 }
3951
3952 static void
3953 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3954 {
3955 expressionS e1, e2;
3956
3957 if (!in_prologue ("save.gf"))
3958 return;
3959
3960 if (parse_operand_and_eval (&e1, ',') == ',')
3961 parse_operand_and_eval (&e2, 0);
3962 else
3963 e2.X_op = O_absent;
3964
3965 if (e1.X_op != O_constant
3966 || e1.X_add_number < 0
3967 || e1.X_add_number > 0xf)
3968 {
3969 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3970 e1.X_op = O_absent;
3971 e1.X_add_number = 0;
3972 }
3973 if (e2.X_op != O_constant
3974 || e2.X_add_number < 0
3975 || e2.X_add_number > 0xfffff)
3976 {
3977 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3978 e2.X_op = O_absent;
3979 e2.X_add_number = 0;
3980 }
3981 if (e1.X_op == O_constant
3982 && e2.X_op == O_constant
3983 && e1.X_add_number == 0
3984 && e2.X_add_number == 0)
3985     as_bad (_("Operands to .save.gf may not both be zero"));
3986
3987 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3988 }
3989
3990 static void
3991 dot_spill (int dummy ATTRIBUTE_UNUSED)
3992 {
3993 expressionS e;
3994
3995 if (!in_prologue ("spill"))
3996 return;
3997
3998 parse_operand_and_eval (&e, 0);
3999
4000 if (e.X_op != O_constant)
4001 {
4002 as_bad (_("Operand to .spill must be a constant"));
4003 e.X_add_number = 0;
4004 }
4005 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4006 }
4007
4008 static void
4009 dot_spillreg (int pred)
4010 {
4011 int sep;
4012 unsigned int qp, ab, xy, reg, treg;
4013 expressionS e;
4014 const char * const po = pred ? "spillreg.p" : "spillreg";
4015
4016 if (!in_procedure (po))
4017 return;
4018
4019 if (pred)
4020 sep = parse_predicate_and_operand (&e, &qp, po);
4021 else
4022 {
4023 sep = parse_operand_and_eval (&e, ',');
4024 qp = 0;
4025 }
4026 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4027
4028 if (sep == ',')
4029 sep = parse_operand_and_eval (&e, ',');
4030 else
4031 e.X_op = O_absent;
4032 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4033
4034 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4035 }
4036
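/* Implements .spillsp and .spillpsp together with their predicated .p
   forms; the predicated variants arrive with a negative argument (the
   bitwise complement of the psprel flag), which is recovered below via
   ~psprel.  */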
4037 static void
4038 dot_spillmem (int psprel)
4039 {
4040 expressionS e;
4041 int pred = (psprel < 0), sep;
4042 unsigned int qp, ab, reg;
4043 const char * po;
4044
4045 if (pred)
4046 {
4047 psprel = ~psprel;
4048 po = psprel ? "spillpsp.p" : "spillsp.p";
4049 }
4050 else
4051 po = psprel ? "spillpsp" : "spillsp";
4052
4053 if (!in_procedure (po))
4054 return;
4055
4056 if (pred)
4057 sep = parse_predicate_and_operand (&e, &qp, po);
4058 else
4059 {
4060 sep = parse_operand_and_eval (&e, ',');
4061 qp = 0;
4062 }
4063 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4064
4065 if (sep == ',')
4066 sep = parse_operand_and_eval (&e, ',');
4067 else
4068 e.X_op = O_absent;
4069 if (e.X_op != O_constant)
4070 {
4071 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4072 e.X_add_number = 0;
4073 }
4074
4075 if (psprel)
4076 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4077 else
4078 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4079 }
4080
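/* Bookkeeping for .label_state/.copy_state: remember the prologue
   nesting count under a label number so it can be re-established
   later.  */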
4081 static unsigned int
4082 get_saved_prologue_count (unsigned long lbl)
4083 {
4084 label_prologue_count *lpc = unwind.saved_prologue_counts;
4085
4086 while (lpc != NULL && lpc->label_number != lbl)
4087 lpc = lpc->next;
4088
4089 if (lpc != NULL)
4090 return lpc->prologue_count;
4091
4092 as_bad (_("Missing .label_state %ld"), lbl);
4093 return 1;
4094 }
4095
4096 static void
4097 save_prologue_count (unsigned long lbl, unsigned int count)
4098 {
4099 label_prologue_count *lpc = unwind.saved_prologue_counts;
4100
4101 while (lpc != NULL && lpc->label_number != lbl)
4102 lpc = lpc->next;
4103
4104 if (lpc != NULL)
4105 lpc->prologue_count = count;
4106 else
4107 {
4108 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4109
4110 new_lpc->next = unwind.saved_prologue_counts;
4111 new_lpc->label_number = lbl;
4112 new_lpc->prologue_count = count;
4113 unwind.saved_prologue_counts = new_lpc;
4114 }
4115 }
4116
4117 static void
4118 free_saved_prologue_counts (void)
4119 {
4120 label_prologue_count *lpc = unwind.saved_prologue_counts;
4121 label_prologue_count *next;
4122
4123 while (lpc != NULL)
4124 {
4125 next = lpc->next;
4126 free (lpc);
4127 lpc = next;
4128 }
4129
4130 unwind.saved_prologue_counts = NULL;
4131 }
4132
4133 static void
4134 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4135 {
4136 expressionS e;
4137
4138 if (!in_body ("label_state"))
4139 return;
4140
4141 parse_operand_and_eval (&e, 0);
4142 if (e.X_op == O_constant)
4143 save_prologue_count (e.X_add_number, unwind.prologue_count);
4144 else
4145 {
4146 as_bad (_("Operand to .label_state must be a constant"));
4147 e.X_add_number = 0;
4148 }
4149 add_unwind_entry (output_label_state (e.X_add_number), 0);
4150 }
4151
4152 static void
4153 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4154 {
4155 expressionS e;
4156
4157 if (!in_body ("copy_state"))
4158 return;
4159
4160 parse_operand_and_eval (&e, 0);
4161 if (e.X_op == O_constant)
4162 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4163 else
4164 {
4165 as_bad (_("Operand to .copy_state must be a constant"));
4166 e.X_add_number = 0;
4167 }
4168 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4169 }
4170
4171 static void
4172 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4173 {
4174 expressionS e1, e2;
4175 unsigned char sep;
4176
4177 if (!in_prologue ("unwabi"))
4178 return;
4179
4180 sep = parse_operand_and_eval (&e1, ',');
4181 if (sep == ',')
4182 parse_operand_and_eval (&e2, 0);
4183 else
4184 e2.X_op = O_absent;
4185
4186 if (e1.X_op != O_constant)
4187 {
4188 as_bad (_("First operand to .unwabi must be a constant"));
4189 e1.X_add_number = 0;
4190 }
4191
4192 if (e2.X_op != O_constant)
4193 {
4194 as_bad (_("Second operand to .unwabi must be a constant"));
4195 e2.X_add_number = 0;
4196 }
4197
4198 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4199 }
4200
4201 static void
4202 dot_personality (int dummy ATTRIBUTE_UNUSED)
4203 {
4204 char *name, *p, c;
4205
4206 if (!in_procedure ("personality"))
4207 return;
4208 SKIP_WHITESPACE ();
4209 c = get_symbol_name (&name);
4210 p = input_line_pointer;
4211 unwind.personality_routine = symbol_find_or_make (name);
4212 unwind.force_unwind_entry = 1;
4213 *p = c;
4214 SKIP_WHITESPACE_AFTER_NAME ();
4215 demand_empty_rest_of_line ();
4216 }
4217
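/* Implements .proc: record the procedure's entry-point symbols
   (complaining about a missing .endp for any previous .proc), align to
   a bundle boundary, and reset the per-procedure unwind state.  */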
4218 static void
4219 dot_proc (int dummy ATTRIBUTE_UNUSED)
4220 {
4221 char *name, *p, c;
4222 symbolS *sym;
4223 proc_pending *pending, *last_pending;
4224
4225 if (unwind.proc_pending.sym)
4226 {
4227 (md.unwind_check == unwind_check_warning
4228 ? as_warn
4229 : as_bad) (_("Missing .endp after previous .proc"));
4230 while (unwind.proc_pending.next)
4231 {
4232 pending = unwind.proc_pending.next;
4233 unwind.proc_pending.next = pending->next;
4234 free (pending);
4235 }
4236 }
4237 last_pending = NULL;
4238
4239 /* Parse names of main and alternate entry points and mark them as
4240 function symbols: */
4241 while (1)
4242 {
4243 SKIP_WHITESPACE ();
4244 c = get_symbol_name (&name);
4245 p = input_line_pointer;
4246 if (!*name)
4247 as_bad (_("Empty argument of .proc"));
4248 else
4249 {
4250 sym = symbol_find_or_make (name);
4251 if (S_IS_DEFINED (sym))
4252 as_bad (_("`%s' was already defined"), name);
4253 else if (!last_pending)
4254 {
4255 unwind.proc_pending.sym = sym;
4256 last_pending = &unwind.proc_pending;
4257 }
4258 else
4259 {
4260 pending = XNEW (proc_pending);
4261 pending->sym = sym;
4262 last_pending = last_pending->next = pending;
4263 }
4264 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4265 }
4266 *p = c;
4267 SKIP_WHITESPACE_AFTER_NAME ();
4268 if (*input_line_pointer != ',')
4269 break;
4270 ++input_line_pointer;
4271 }
4272 if (!last_pending)
4273 {
4274 unwind.proc_pending.sym = expr_build_dot ();
4275 last_pending = &unwind.proc_pending;
4276 }
4277 last_pending->next = NULL;
4278 demand_empty_rest_of_line ();
4279 do_align (4, NULL, 0, 0);
4280
4281 unwind.prologue = 0;
4282 unwind.prologue_count = 0;
4283 unwind.body = 0;
4284 unwind.insn = 0;
4285 unwind.list = unwind.tail = unwind.current_entry = NULL;
4286 unwind.personality_routine = 0;
4287 }
4288
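/* Handle the .body directive: close the current prologue region (after
   checking for a pending save) and emit a body region unwind record.  */
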
4289 static void
4290 dot_body (int dummy ATTRIBUTE_UNUSED)
4291 {
4292 if (!in_procedure ("body"))
4293 return;
4294 if (!unwind.prologue && !unwind.body && unwind.insn)
4295 as_warn (_("Initial .body should precede any instructions"));
4296 check_pending_save ();
4297
4298 unwind.prologue = 0;
4299 unwind.prologue_mask = 0;
4300 unwind.body = 1;
4301
4302 add_unwind_entry (output_body (), 0);
4303 }
4304
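/* Handle the .prologue directive.  With no operands it simply opens a new
   prologue region; the optional operands are a 4-bit mask of saved state
   and the first of the general registers used to hold that state, e.g.
   ".prologue 12, r32" (the values in this example are illustrative only).  */
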
4305 static void
4306 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4307 {
4308 unsigned mask = 0, grsave = 0;
4309
4310 if (!in_procedure ("prologue"))
4311 return;
4312 if (unwind.prologue)
4313 {
4314 as_bad (_(".prologue within prologue"));
4315 ignore_rest_of_line ();
4316 return;
4317 }
4318 if (!unwind.body && unwind.insn)
4319 as_warn (_("Initial .prologue should precede any instructions"));
4320
4321 if (!is_it_end_of_statement ())
4322 {
4323 expressionS e;
4324 int n, sep = parse_operand_and_eval (&e, ',');
4325
4326 if (e.X_op != O_constant
4327 || e.X_add_number < 0
4328 || e.X_add_number > 0xf)
4329 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4330 else if (e.X_add_number == 0)
4331 as_warn (_("Pointless use of zero first operand to .prologue"));
4332 else
4333 mask = e.X_add_number;
4334
4335 n = popcount (mask);
4336
4337 if (sep == ',')
4338 parse_operand_and_eval (&e, 0);
4339 else
4340 e.X_op = O_absent;
4341
4342 if (e.X_op == O_constant
4343 && e.X_add_number >= 0
4344 && e.X_add_number < 128)
4345 {
4346 if (md.unwind_check == unwind_check_error)
4347 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4348 grsave = e.X_add_number;
4349 }
4350 else if (e.X_op != O_register
4351 || (grsave = e.X_add_number - REG_GR) > 127)
4352 {
4353 as_bad (_("Second operand to .prologue must be a general register"));
4354 grsave = 0;
4355 }
4356 else if (grsave > 128U - n)
4357 {
4358 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4359 grsave = 0;
4360 }
4361 }
4362
4363 if (mask)
4364 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4365 else
4366 add_unwind_entry (output_prologue (), 0);
4367
4368 unwind.prologue = 1;
4369 unwind.prologue_mask = mask;
4370 unwind.prologue_gr = grsave;
4371 unwind.body = 0;
4372 ++unwind.prologue_count;
4373 }
4374
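/* Handle the .endp directive: generate the unwind image if .handlerdata
   did not already do so, emit the unwind table entry (procedure start,
   procedure end, unwind info pointer), set the size of each entry point
   symbol, and check the operands against the pending .proc symbols.  */
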
4375 static void
4376 dot_endp (int dummy ATTRIBUTE_UNUSED)
4377 {
4378 expressionS e;
4379 int bytes_per_address;
4380 long where;
4381 segT saved_seg;
4382 subsegT saved_subseg;
4383 proc_pending *pending;
4384 int unwind_check = md.unwind_check;
4385
4386 md.unwind_check = unwind_check_error;
4387 if (!in_procedure ("endp"))
4388 return;
4389 md.unwind_check = unwind_check;
4390
4391 if (unwind.saved_text_seg)
4392 {
4393 saved_seg = unwind.saved_text_seg;
4394 saved_subseg = unwind.saved_text_subseg;
4395 unwind.saved_text_seg = NULL;
4396 }
4397 else
4398 {
4399 saved_seg = now_seg;
4400 saved_subseg = now_subseg;
4401 }
4402
4403 insn_group_break (1, 0, 0);
4404
4405 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4406 if (!unwind.info)
4407 generate_unwind_image (saved_seg);
4408
4409 if (unwind.info || unwind.force_unwind_entry)
4410 {
4411 symbolS *proc_end;
4412
4413 subseg_set (md.last_text_seg, 0);
4414 proc_end = expr_build_dot ();
4415
4416 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4417
4418 /* Make sure the section has 4-byte alignment for ILP32 and
4419 8-byte alignment for LP64.  */
4420 record_alignment (now_seg, md.pointer_size_shift);
4421
4422 /* Need space for 3 pointers for procedure start, procedure end,
4423 and unwind info. */
4424 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4425 where = frag_now_fix () - (3 * md.pointer_size);
4426 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4427
4428 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4429 e.X_op = O_pseudo_fixup;
4430 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4431 e.X_add_number = 0;
4432 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4433 && S_IS_DEFINED (unwind.proc_pending.sym))
4434 e.X_add_symbol
4435 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4436 symbol_get_frag (unwind.proc_pending.sym),
4437 S_GET_VALUE (unwind.proc_pending.sym));
4438 else
4439 e.X_add_symbol = unwind.proc_pending.sym;
4440 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4441 BFD_RELOC_NONE);
4442
4443 e.X_op = O_pseudo_fixup;
4444 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4445 e.X_add_number = 0;
4446 e.X_add_symbol = proc_end;
4447 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4448 bytes_per_address, &e, BFD_RELOC_NONE);
4449
4450 if (unwind.info)
4451 {
4452 e.X_op = O_pseudo_fixup;
4453 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4454 e.X_add_number = 0;
4455 e.X_add_symbol = unwind.info;
4456 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4457 bytes_per_address, &e, BFD_RELOC_NONE);
4458 }
4459 }
4460 subseg_set (saved_seg, saved_subseg);
4461
4462 /* Set symbol sizes. */
4463 pending = &unwind.proc_pending;
4464 if (S_GET_NAME (pending->sym))
4465 {
4466 do
4467 {
4468 symbolS *sym = pending->sym;
4469
4470 if (!S_IS_DEFINED (sym))
4471 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4472 else if (S_GET_SIZE (sym) == 0
4473 && symbol_get_obj (sym)->size == NULL)
4474 {
4475 fragS *frag = symbol_get_frag (sym);
4476
4477 if (frag)
4478 {
4479 if (frag == frag_now && SEG_NORMAL (now_seg))
4480 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4481 else
4482 {
4483 symbol_get_obj (sym)->size = XNEW (expressionS);
4484 symbol_get_obj (sym)->size->X_op = O_subtract;
4485 symbol_get_obj (sym)->size->X_add_symbol
4486 = symbol_new (FAKE_LABEL_NAME, now_seg,
4487 frag_now, frag_now_fix ());
4488 symbol_get_obj (sym)->size->X_op_symbol = sym;
4489 symbol_get_obj (sym)->size->X_add_number = 0;
4490 }
4491 }
4492 }
4493 } while ((pending = pending->next) != NULL);
4494 }
4495
4496 /* Parse names of main and alternate entry points. */
4497 while (1)
4498 {
4499 char *name, *p, c;
4500
4501 SKIP_WHITESPACE ();
4502 c = get_symbol_name (&name);
4503 p = input_line_pointer;
4504 if (!*name)
4505 (md.unwind_check == unwind_check_warning
4506 ? as_warn
4507 : as_bad) (_("Empty argument of .endp"));
4508 else
4509 {
4510 symbolS *sym = symbol_find (name);
4511
4512 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4513 {
4514 if (sym == pending->sym)
4515 {
4516 pending->sym = NULL;
4517 break;
4518 }
4519 }
4520 if (!sym || !pending)
4521 as_warn (_("`%s' was not specified with previous .proc"), name);
4522 }
4523 *p = c;
4524 SKIP_WHITESPACE_AFTER_NAME ();
4525 if (*input_line_pointer != ',')
4526 break;
4527 ++input_line_pointer;
4528 }
4529 demand_empty_rest_of_line ();
4530
4531 /* Deliberately only checking for the main entry point here; the
4532 language spec even says all arguments to .endp are ignored. */
4533 if (unwind.proc_pending.sym
4534 && S_GET_NAME (unwind.proc_pending.sym)
4535 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4536 as_warn (_("`%s' should be an operand to this .endp"),
4537 S_GET_NAME (unwind.proc_pending.sym));
4538 while (unwind.proc_pending.next)
4539 {
4540 pending = unwind.proc_pending.next;
4541 unwind.proc_pending.next = pending->next;
4542 free (pending);
4543 }
4544 unwind.proc_pending.sym = unwind.info = NULL;
4545 }
4546
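/* Handle the explicit bundle template directives (.mii, .mlx, .mmi, etc.):
   remember the requested template for the slot being assembled.  */
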
4547 static void
4548 dot_template (int template_val)
4549 {
4550 CURR_SLOT.user_template = template_val;
4551 }
4552
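/* Handle the .regstk directive, which declares the register stack layout
   as four constants: ins, locs, outs, rots.  If no operands are given,
   all four values default to zero.  For example, ".regstk 2, 3, 1, 0"
   (illustrative values) describes 2 input, 3 local, 1 output and no
   rotating registers.  */
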
4553 static void
4554 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4555 {
4556 int ins, locs, outs, rots;
4557
4558 if (is_it_end_of_statement ())
4559 ins = locs = outs = rots = 0;
4560 else
4561 {
4562 ins = get_absolute_expression ();
4563 if (*input_line_pointer++ != ',')
4564 goto err;
4565 locs = get_absolute_expression ();
4566 if (*input_line_pointer++ != ',')
4567 goto err;
4568 outs = get_absolute_expression ();
4569 if (*input_line_pointer++ != ',')
4570 goto err;
4571 rots = get_absolute_expression ();
4572 }
4573 set_regstack (ins, locs, outs, rots);
4574 return;
4575
4576 err:
4577 as_bad (_("Comma expected"));
4578 ignore_rest_of_line ();
4579 }
4580
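/* Handle the .rotr/.rotf/.rotp directives, which give names to groups of
   rotating general, floating-point, or predicate registers.  Each operand
   has the form "name[count]", e.g. ".rotr in[2], out[2]" (the names and
   counts here are illustrative only).  */
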
4581 static void
4582 dot_rot (int type)
4583 {
4584 offsetT num_regs;
4585 valueT num_alloced = 0;
4586 struct dynreg **drpp, *dr;
4587 int ch, base_reg = 0;
4588 char *name, *start;
4589 size_t len;
4590
4591 switch (type)
4592 {
4593 case DYNREG_GR: base_reg = REG_GR + 32; break;
4594 case DYNREG_FR: base_reg = REG_FR + 32; break;
4595 case DYNREG_PR: base_reg = REG_P + 16; break;
4596 default: break;
4597 }
4598
4599 /* First, remove existing names from hash table. */
4600 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4601 {
4602 str_hash_delete (md.dynreg_hash, dr->name);
4603 /* FIXME: Free dr->name. */
4604 dr->num_regs = 0;
4605 }
4606
4607 drpp = &md.dynreg[type];
4608 while (1)
4609 {
4610 ch = get_symbol_name (&start);
4611 len = strlen (ia64_canonicalize_symbol_name (start));
4612 *input_line_pointer = ch;
4613
4614 SKIP_WHITESPACE_AFTER_NAME ();
4615 if (*input_line_pointer != '[')
4616 {
4617 as_bad (_("Expected '['"));
4618 goto err;
4619 }
4620 ++input_line_pointer; /* skip '[' */
4621
4622 num_regs = get_absolute_expression ();
4623
4624 if (*input_line_pointer++ != ']')
4625 {
4626 as_bad (_("Expected ']'"));
4627 goto err;
4628 }
4629 if (num_regs <= 0)
4630 {
4631 as_bad (_("Number of elements must be positive"));
4632 goto err;
4633 }
4634 SKIP_WHITESPACE ();
4635
4636 num_alloced += num_regs;
4637 switch (type)
4638 {
4639 case DYNREG_GR:
4640 if (num_alloced > md.rot.num_regs)
4641 {
4642 as_bad (_("Used more than the declared %d rotating registers"),
4643 md.rot.num_regs);
4644 goto err;
4645 }
4646 break;
4647 case DYNREG_FR:
4648 if (num_alloced > 96)
4649 {
4650 as_bad (_("Used more than the available 96 rotating registers"));
4651 goto err;
4652 }
4653 break;
4654 case DYNREG_PR:
4655 if (num_alloced > 48)
4656 {
4657 as_bad (_("Used more than the available 48 rotating registers"));
4658 goto err;
4659 }
4660 break;
4661
4662 default:
4663 break;
4664 }
4665
4666 if (!*drpp)
4667 {
4668 *drpp = XOBNEW (&notes, struct dynreg);
4669 memset (*drpp, 0, sizeof (*dr));
4670 }
4671
4672 name = XOBNEWVEC (&notes, char, len + 1);
4673 memcpy (name, start, len);
4674 name[len] = '\0';
4675
4676 dr = *drpp;
4677 dr->name = name;
4678 dr->num_regs = num_regs;
4679 dr->base = base_reg;
4680 drpp = &dr->next;
4681 base_reg += num_regs;
4682
4683 if (str_hash_insert (md.dynreg_hash, name, dr, 0) != NULL)
4684 {
4685 as_bad (_("Attempt to redefine register set `%s'"), name);
4686 obstack_free (&notes, name);
4687 goto err;
4688 }
4689
4690 if (*input_line_pointer != ',')
4691 break;
4692 ++input_line_pointer; /* skip comma */
4693 SKIP_WHITESPACE ();
4694 }
4695 demand_empty_rest_of_line ();
4696 return;
4697
4698 err:
4699 ignore_rest_of_line ();
4700 }
4701
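/* Handle the .lsb (byteorder == 0) and .msb (byteorder == 1) directives:
   record the byte order for the current section and switch the data output
   routines to match.  A byteorder of -1 means "use the section's recorded
   (or default) byte order".  */
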
4702 static void
4703 dot_byteorder (int byteorder)
4704 {
4705 segment_info_type *seginfo = seg_info (now_seg);
4706
4707 if (byteorder == -1)
4708 {
4709 if (seginfo->tc_segment_info_data.endian == 0)
4710 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4711 byteorder = seginfo->tc_segment_info_data.endian == 1;
4712 }
4713 else
4714 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4715
4716 if (target_big_endian != byteorder)
4717 {
4718 target_big_endian = byteorder;
4719 if (target_big_endian)
4720 {
4721 ia64_number_to_chars = number_to_chars_bigendian;
4722 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4723 }
4724 else
4725 {
4726 ia64_number_to_chars = number_to_chars_littleendian;
4727 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4728 }
4729 }
4730 }
4731
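/* Handle the .psr directive, which accepts a comma-separated list of the
   options "lsb", "msb", "abi32" and "abi64" and adjusts the ELF header
   flags accordingly.  */
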
4732 static void
4733 dot_psr (int dummy ATTRIBUTE_UNUSED)
4734 {
4735 char *option;
4736 int ch;
4737
4738 while (1)
4739 {
4740 ch = get_symbol_name (&option);
4741 if (strcmp (option, "lsb") == 0)
4742 md.flags &= ~EF_IA_64_BE;
4743 else if (strcmp (option, "msb") == 0)
4744 md.flags |= EF_IA_64_BE;
4745 else if (strcmp (option, "abi32") == 0)
4746 md.flags &= ~EF_IA_64_ABI64;
4747 else if (strcmp (option, "abi64") == 0)
4748 md.flags |= EF_IA_64_ABI64;
4749 else
4750 as_bad (_("Unknown psr option `%s'"), option);
4751 *input_line_pointer = ch;
4752
4753 SKIP_WHITESPACE_AFTER_NAME ();
4754 if (*input_line_pointer != ',')
4755 break;
4756
4757 ++input_line_pointer;
4758 SKIP_WHITESPACE ();
4759 }
4760 demand_empty_rest_of_line ();
4761 }
4762
4763 static void
4764 dot_ln (int dummy ATTRIBUTE_UNUSED)
4765 {
4766 new_logical_line (0, get_absolute_expression ());
4767 demand_empty_rest_of_line ();
4768 }
4769
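/* Common helper for the .xdataN/.xrealN/.xstringZ directives: temporarily
   switch to the named section (the first operand), emit the data using
   BUILDER, optionally with automatic alignment suppressed (UA), and then
   switch back to the previous section.  */
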
4770 static void
4771 cross_section (int ref, void (*builder) (int), int ua)
4772 {
4773 char *start, *end;
4774 int saved_auto_align;
4775 unsigned int section_count;
4776 char *name;
4777 char c;
4778
4779 SKIP_WHITESPACE ();
4780 start = input_line_pointer;
4781 c = get_symbol_name (&name);
4782 if (input_line_pointer == start)
4783 {
4784 as_bad (_("Missing section name"));
4785 ignore_rest_of_line ();
4786 return;
4787 }
4788 * input_line_pointer = c;
4789 SKIP_WHITESPACE_AFTER_NAME ();
4790 end = input_line_pointer;
4791 if (*input_line_pointer != ',')
4792 {
4793 as_bad (_("Comma expected after section name"));
4794 ignore_rest_of_line ();
4795 return;
4796 }
4797 *end = '\0';
4798 end = input_line_pointer + 1; /* skip comma */
4799 input_line_pointer = start;
4800 md.keep_pending_output = 1;
4801 section_count = bfd_count_sections (stdoutput);
4802 obj_elf_section (0);
4803 if (section_count != bfd_count_sections (stdoutput))
4804 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4805 input_line_pointer = end;
4806 saved_auto_align = md.auto_align;
4807 if (ua)
4808 md.auto_align = 0;
4809 (*builder) (ref);
4810 if (ua)
4811 md.auto_align = saved_auto_align;
4812 obj_elf_previous (0);
4813 md.keep_pending_output = 0;
4814 }
4815
4816 static void
4817 dot_xdata (int size)
4818 {
4819 cross_section (size, cons, 0);
4820 }
4821
4822 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4823
4824 static void
4825 stmt_float_cons (int kind)
4826 {
4827 size_t alignment;
4828
4829 switch (kind)
4830 {
4831 case 'd':
4832 alignment = 3;
4833 break;
4834
4835 case 'x':
4836 case 'X':
4837 alignment = 4;
4838 break;
4839
4840 case 'f':
4841 default:
4842 alignment = 2;
4843 break;
4844 }
4845 do_align (alignment, NULL, 0, 0);
4846 float_cons (kind);
4847 }
4848
4849 static void
4850 stmt_cons_ua (int size)
4851 {
4852 int saved_auto_align = md.auto_align;
4853
4854 md.auto_align = 0;
4855 cons (size);
4856 md.auto_align = saved_auto_align;
4857 }
4858
4859 static void
4860 dot_xfloat_cons (int kind)
4861 {
4862 cross_section (kind, stmt_float_cons, 0);
4863 }
4864
4865 static void
4866 dot_xstringer (int zero)
4867 {
4868 cross_section (zero, stringer, 0);
4869 }
4870
4871 static void
4872 dot_xdata_ua (int size)
4873 {
4874 cross_section (size, cons, 1);
4875 }
4876
4877 static void
4878 dot_xfloat_cons_ua (int kind)
4879 {
4880 cross_section (kind, float_cons, 1);
4881 }
4882
4883 /* .reg.val <regname>,value */
4884
4885 static void
4886 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4887 {
4888 expressionS reg;
4889
4890 expression_and_evaluate (&reg);
4891 if (reg.X_op != O_register)
4892 {
4893 as_bad (_("Register name expected"));
4894 ignore_rest_of_line ();
4895 }
4896 else if (*input_line_pointer++ != ',')
4897 {
4898 as_bad (_("Comma expected"));
4899 ignore_rest_of_line ();
4900 }
4901 else
4902 {
4903 valueT value = get_absolute_expression ();
4904 int regno = reg.X_add_number;
4905 if (regno <= REG_GR || regno > REG_GR + 127)
4906 as_warn (_("Register value annotation ignored"));
4907 else
4908 {
4909 gr_values[regno - REG_GR].known = 1;
4910 gr_values[regno - REG_GR].value = value;
4911 gr_values[regno - REG_GR].path = md.path;
4912 }
4913 }
4914 demand_empty_rest_of_line ();
4915 }
4916
4917 /*
4918 .serialize.data
4919 .serialize.instruction
4920 */
4921 static void
4922 dot_serialize (int type)
4923 {
4924 insn_group_break (0, 0, 0);
4925 if (type)
4926 instruction_serialization ();
4927 else
4928 data_serialization ();
4929 insn_group_break (0, 0, 0);
4930 demand_empty_rest_of_line ();
4931 }
4932
4933 /* select dv checking mode
4934 .auto
4935 .explicit
4936 .default
4937
4938 A stop is inserted when changing modes
4939 */
4940
4941 static void
4942 dot_dv_mode (int type)
4943 {
4944 if (md.manual_bundling)
4945 as_warn (_("Directive invalid within a bundle"));
4946
4947 if (type == 'E' || type == 'A')
4948 md.mode_explicitly_set = 0;
4949 else
4950 md.mode_explicitly_set = 1;
4951
4952 md.detect_dv = 1;
4953 switch (type)
4954 {
4955 case 'A':
4956 case 'a':
4957 if (md.explicit_mode)
4958 insn_group_break (1, 0, 0);
4959 md.explicit_mode = 0;
4960 break;
4961 case 'E':
4962 case 'e':
4963 if (!md.explicit_mode)
4964 insn_group_break (1, 0, 0);
4965 md.explicit_mode = 1;
4966 break;
4967 default:
4968 case 'd':
4969 if (md.explicit_mode != md.default_explicit_mode)
4970 insn_group_break (1, 0, 0);
4971 md.explicit_mode = md.default_explicit_mode;
4972 md.mode_explicitly_set = 0;
4973 break;
4974 }
4975 }
4976
4977 static void
4978 print_prmask (valueT mask)
4979 {
4980 int regno;
4981 const char *comma = "";
4982 for (regno = 0; regno < 64; regno++)
4983 {
4984 if (mask & ((valueT) 1 << regno))
4985 {
4986 fprintf (stderr, "%s p%d", comma, regno);
4987 comma = ",";
4988 }
4989 }
4990 }
4991
4992 /*
4993 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4994 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4995 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4996 .pred.safe_across_calls p1 [, p2 [,...]]
4997 */
4998
4999 static void
5000 dot_pred_rel (int type)
5001 {
5002 valueT mask = 0;
5003 int count = 0;
5004 int p1 = -1, p2 = -1;
5005
5006 if (type == 0)
5007 {
5008 if (*input_line_pointer == '"')
5009 {
5010 int len;
5011 char *form = demand_copy_C_string (&len);
5012
5013 if (strcmp (form, "mutex") == 0)
5014 type = 'm';
5015 else if (strcmp (form, "clear") == 0)
5016 type = 'c';
5017 else if (strcmp (form, "imply") == 0)
5018 type = 'i';
5019 obstack_free (&notes, form);
5020 }
5021 else if (*input_line_pointer == '@')
5022 {
5023 char *form;
5024 char c;
5025
5026 ++input_line_pointer;
5027 c = get_symbol_name (&form);
5028
5029 if (strcmp (form, "mutex") == 0)
5030 type = 'm';
5031 else if (strcmp (form, "clear") == 0)
5032 type = 'c';
5033 else if (strcmp (form, "imply") == 0)
5034 type = 'i';
5035 (void) restore_line_pointer (c);
5036 }
5037 else
5038 {
5039 as_bad (_("Missing predicate relation type"));
5040 ignore_rest_of_line ();
5041 return;
5042 }
5043 if (type == 0)
5044 {
5045 as_bad (_("Unrecognized predicate relation type"));
5046 ignore_rest_of_line ();
5047 return;
5048 }
5049 if (*input_line_pointer == ',')
5050 ++input_line_pointer;
5051 SKIP_WHITESPACE ();
5052 }
5053
5054 while (1)
5055 {
5056 valueT bits = 1;
5057 int sep, regno;
5058 expressionS pr, *pr1, *pr2;
5059
5060 sep = parse_operand_and_eval (&pr, ',');
5061 if (pr.X_op == O_register
5062 && pr.X_add_number >= REG_P
5063 && pr.X_add_number <= REG_P + 63)
5064 {
5065 regno = pr.X_add_number - REG_P;
5066 bits <<= regno;
5067 count++;
5068 if (p1 == -1)
5069 p1 = regno;
5070 else if (p2 == -1)
5071 p2 = regno;
5072 }
5073 else if (type != 'i'
5074 && pr.X_op == O_subtract
5075 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5076 && pr1->X_op == O_register
5077 && pr1->X_add_number >= REG_P
5078 && pr1->X_add_number <= REG_P + 63
5079 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5080 && pr2->X_op == O_register
5081 && pr2->X_add_number >= REG_P
5082 && pr2->X_add_number <= REG_P + 63)
5083 {
5084 /* It's a range. */
5085 int stop;
5086
5087 regno = pr1->X_add_number - REG_P;
5088 stop = pr2->X_add_number - REG_P;
5089 if (regno >= stop)
5090 {
5091 as_bad (_("Bad register range"));
5092 ignore_rest_of_line ();
5093 return;
5094 }
5095 bits = ((bits << stop) << 1) - (bits << regno);
5096 count += stop - regno + 1;
5097 }
5098 else
5099 {
5100 as_bad (_("Predicate register expected"));
5101 ignore_rest_of_line ();
5102 return;
5103 }
5104 if (mask & bits)
5105 as_warn (_("Duplicate predicate register ignored"));
5106 mask |= bits;
5107 if (sep != ',')
5108 break;
5109 }
5110
5111 switch (type)
5112 {
5113 case 'c':
5114 if (count == 0)
5115 mask = ~(valueT) 0;
5116 clear_qp_mutex (mask);
5117 clear_qp_implies (mask, (valueT) 0);
5118 break;
5119 case 'i':
5120 if (count != 2 || p1 == -1 || p2 == -1)
5121 as_bad (_("Predicate source and target required"));
5122 else if (p1 == 0 || p2 == 0)
5123 as_bad (_("Use of p0 is not valid in this context"));
5124 else
5125 add_qp_imply (p1, p2);
5126 break;
5127 case 'm':
5128 if (count < 2)
5129 {
5130 as_bad (_("At least two PR arguments expected"));
5131 break;
5132 }
5133 else if (mask & 1)
5134 {
5135 as_bad (_("Use of p0 is not valid in this context"));
5136 break;
5137 }
5138 add_qp_mutex (mask);
5139 break;
5140 case 's':
5141 /* Note that we don't override any existing relations.  */
5142 if (count == 0)
5143 {
5144 as_bad (_("At least one PR argument expected"));
5145 break;
5146 }
5147 if (md.debug_dv)
5148 {
5149 fprintf (stderr, "Safe across calls: ");
5150 print_prmask (mask);
5151 fprintf (stderr, "\n");
5152 }
5153 qp_safe_across_calls = mask;
5154 break;
5155 }
5156 demand_empty_rest_of_line ();
5157 }
5158
5159 /* .entry label [, label [, ...]]
5160 Hint to DV code that the given labels are to be considered entry points.
5161 Otherwise, only global labels are considered entry points. */
5162
5163 static void
5164 dot_entry (int dummy ATTRIBUTE_UNUSED)
5165 {
5166 char *name;
5167 int c;
5168 symbolS *symbolP;
5169
5170 do
5171 {
5172 c = get_symbol_name (&name);
5173 symbolP = symbol_find_or_make (name);
5174
5175 if (str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), symbolP, 0))
5176 as_bad (_("duplicate entry hint %s"), name);
5177
5178 *input_line_pointer = c;
5179 SKIP_WHITESPACE_AFTER_NAME ();
5180 c = *input_line_pointer;
5181 if (c == ',')
5182 {
5183 input_line_pointer++;
5184 SKIP_WHITESPACE ();
5185 if (*input_line_pointer == '\n')
5186 c = '\n';
5187 }
5188 }
5189 while (c == ',');
5190
5191 demand_empty_rest_of_line ();
5192 }
5193
5194 /* .mem.offset offset, base
5195 "base" is used to distinguish between offsets from a different base. */
5196
5197 static void
5198 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5199 {
5200 md.mem_offset.hint = 1;
5201 md.mem_offset.offset = get_absolute_expression ();
5202 if (*input_line_pointer != ',')
5203 {
5204 as_bad (_("Comma expected"));
5205 ignore_rest_of_line ();
5206 return;
5207 }
5208 ++input_line_pointer;
5209 md.mem_offset.base = get_absolute_expression ();
5210 demand_empty_rest_of_line ();
5211 }
5212
5213 /* ia64-specific pseudo-ops: */
5214 const pseudo_typeS md_pseudo_table[] =
5215 {
5216 { "radix", dot_radix, 0 },
5217 { "lcomm", s_lcomm_bytes, 1 },
5218 { "loc", dot_loc, 0 },
5219 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5220 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5221 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5222 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5223 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5224 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5225 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5226 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5227 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5228 { "proc", dot_proc, 0 },
5229 { "body", dot_body, 0 },
5230 { "prologue", dot_prologue, 0 },
5231 { "endp", dot_endp, 0 },
5232
5233 { "fframe", dot_fframe, 0 },
5234 { "vframe", dot_vframe, 0 },
5235 { "vframesp", dot_vframesp, 0 },
5236 { "vframepsp", dot_vframesp, 1 },
5237 { "save", dot_save, 0 },
5238 { "restore", dot_restore, 0 },
5239 { "restorereg", dot_restorereg, 0 },
5240 { "restorereg.p", dot_restorereg, 1 },
5241 { "handlerdata", dot_handlerdata, 0 },
5242 { "unwentry", dot_unwentry, 0 },
5243 { "altrp", dot_altrp, 0 },
5244 { "savesp", dot_savemem, 0 },
5245 { "savepsp", dot_savemem, 1 },
5246 { "save.g", dot_saveg, 0 },
5247 { "save.f", dot_savef, 0 },
5248 { "save.b", dot_saveb, 0 },
5249 { "save.gf", dot_savegf, 0 },
5250 { "spill", dot_spill, 0 },
5251 { "spillreg", dot_spillreg, 0 },
5252 { "spillsp", dot_spillmem, 0 },
5253 { "spillpsp", dot_spillmem, 1 },
5254 { "spillreg.p", dot_spillreg, 1 },
5255 { "spillsp.p", dot_spillmem, ~0 },
5256 { "spillpsp.p", dot_spillmem, ~1 },
5257 { "label_state", dot_label_state, 0 },
5258 { "copy_state", dot_copy_state, 0 },
5259 { "unwabi", dot_unwabi, 0 },
5260 { "personality", dot_personality, 0 },
5261 { "mii", dot_template, 0x0 },
5262 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5263 { "mlx", dot_template, 0x2 },
5264 { "mmi", dot_template, 0x4 },
5265 { "mfi", dot_template, 0x6 },
5266 { "mmf", dot_template, 0x7 },
5267 { "mib", dot_template, 0x8 },
5268 { "mbb", dot_template, 0x9 },
5269 { "bbb", dot_template, 0xb },
5270 { "mmb", dot_template, 0xc },
5271 { "mfb", dot_template, 0xe },
5272 { "align", dot_align, 0 },
5273 { "regstk", dot_regstk, 0 },
5274 { "rotr", dot_rot, DYNREG_GR },
5275 { "rotf", dot_rot, DYNREG_FR },
5276 { "rotp", dot_rot, DYNREG_PR },
5277 { "lsb", dot_byteorder, 0 },
5278 { "msb", dot_byteorder, 1 },
5279 { "psr", dot_psr, 0 },
5280 { "alias", dot_alias, 0 },
5281 { "secalias", dot_alias, 1 },
5282 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5283
5284 { "xdata1", dot_xdata, 1 },
5285 { "xdata2", dot_xdata, 2 },
5286 { "xdata4", dot_xdata, 4 },
5287 { "xdata8", dot_xdata, 8 },
5288 { "xdata16", dot_xdata, 16 },
5289 { "xreal4", dot_xfloat_cons, 'f' },
5290 { "xreal8", dot_xfloat_cons, 'd' },
5291 { "xreal10", dot_xfloat_cons, 'x' },
5292 { "xreal16", dot_xfloat_cons, 'X' },
5293 { "xstring", dot_xstringer, 8 + 0 },
5294 { "xstringz", dot_xstringer, 8 + 1 },
5295
5296 /* unaligned versions: */
5297 { "xdata2.ua", dot_xdata_ua, 2 },
5298 { "xdata4.ua", dot_xdata_ua, 4 },
5299 { "xdata8.ua", dot_xdata_ua, 8 },
5300 { "xdata16.ua", dot_xdata_ua, 16 },
5301 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5302 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5303 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5304 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5305
5306 /* annotations/DV checking support */
5307 { "entry", dot_entry, 0 },
5308 { "mem.offset", dot_mem_offset, 0 },
5309 { "pred.rel", dot_pred_rel, 0 },
5310 { "pred.rel.clear", dot_pred_rel, 'c' },
5311 { "pred.rel.imply", dot_pred_rel, 'i' },
5312 { "pred.rel.mutex", dot_pred_rel, 'm' },
5313 { "pred.safe_across_calls", dot_pred_rel, 's' },
5314 { "reg.val", dot_reg_val, 0 },
5315 { "serialize.data", dot_serialize, 0 },
5316 { "serialize.instruction", dot_serialize, 1 },
5317 { "auto", dot_dv_mode, 'a' },
5318 { "explicit", dot_dv_mode, 'e' },
5319 { "default", dot_dv_mode, 'd' },
5320
5321 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5322 IA-64 aligns data allocation pseudo-ops by default, so we have to
5323 tell it that these ones are supposed to be unaligned. Long term,
5324 should rewrite so that only IA-64 specific data allocation pseudo-ops
5325 are aligned by default. */
5326 {"2byte", stmt_cons_ua, 2},
5327 {"4byte", stmt_cons_ua, 4},
5328 {"8byte", stmt_cons_ua, 8},
5329
5330 #ifdef TE_VMS
5331 {"vms_common", obj_elf_vms_common, 0},
5332 #endif
5333
5334 { NULL, 0, 0 }
5335 };
5336
5337 static const struct pseudo_opcode
5338 {
5339 const char *name;
5340 void (*handler) (int);
5341 int arg;
5342 }
5343 pseudo_opcode[] =
5344 {
5345 /* these are more like pseudo-ops, but don't start with a dot */
5346 { "data1", cons, 1 },
5347 { "data2", cons, 2 },
5348 { "data4", cons, 4 },
5349 { "data8", cons, 8 },
5350 { "data16", cons, 16 },
5351 { "real4", stmt_float_cons, 'f' },
5352 { "real8", stmt_float_cons, 'd' },
5353 { "real10", stmt_float_cons, 'x' },
5354 { "real16", stmt_float_cons, 'X' },
5355 { "string", stringer, 8 + 0 },
5356 { "stringz", stringer, 8 + 1 },
5357
5358 /* unaligned versions: */
5359 { "data2.ua", stmt_cons_ua, 2 },
5360 { "data4.ua", stmt_cons_ua, 4 },
5361 { "data8.ua", stmt_cons_ua, 8 },
5362 { "data16.ua", stmt_cons_ua, 16 },
5363 { "real4.ua", float_cons, 'f' },
5364 { "real8.ua", float_cons, 'd' },
5365 { "real10.ua", float_cons, 'x' },
5366 { "real16.ua", float_cons, 'X' },
5367 };
5368
5369 /* Declare a register by creating a symbol for it and entering it in
5370 the symbol table. */
5371
5372 static symbolS *
5373 declare_register (const char *name, unsigned int regnum)
5374 {
5375 symbolS *sym;
5376
5377 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5378
5379 if (str_hash_insert (md.reg_hash, S_GET_NAME (sym), sym, 0) != NULL)
5380 as_fatal (_("duplicate %s"), name);
5381
5382 return sym;
5383 }
5384
5385 static void
5386 declare_register_set (const char *prefix,
5387 unsigned int num_regs,
5388 unsigned int base_regnum)
5389 {
5390 char name[8];
5391 unsigned int i;
5392
5393 for (i = 0; i < num_regs; ++i)
5394 {
5395 snprintf (name, sizeof (name), "%s%u", prefix, i);
5396 declare_register (name, base_regnum + i);
5397 }
5398 }
5399
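/* Return the total number of bits occupied by operand OPND in the
   instruction encoding.  */
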
5400 static unsigned int
5401 operand_width (enum ia64_opnd opnd)
5402 {
5403 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5404 unsigned int bits = 0;
5405 int i;
5406
5408 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5409 bits += odesc->field[i].bits;
5410
5411 return bits;
5412 }
5413
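/* Check whether expression E matches operand RES_INDEX of IDESC, returning
   OPERAND_MATCH, OPERAND_OUT_OF_RANGE, or OPERAND_MISMATCH.  For
   relocatable operands, the required fixup is queued on the current slot
   as a side effect.  */
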
5414 static enum operand_match_result
5415 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5416 {
5417 enum ia64_opnd opnd = idesc->operands[res_index];
5418 int bits, relocatable = 0;
5419 struct insn_fix *fix;
5420 bfd_signed_vma val;
5421
5422 switch (opnd)
5423 {
5424 /* constants: */
5425
5426 case IA64_OPND_AR_CCV:
5427 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5428 return OPERAND_MATCH;
5429 break;
5430
5431 case IA64_OPND_AR_CSD:
5432 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5433 return OPERAND_MATCH;
5434 break;
5435
5436 case IA64_OPND_AR_PFS:
5437 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5438 return OPERAND_MATCH;
5439 break;
5440
5441 case IA64_OPND_GR0:
5442 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5443 return OPERAND_MATCH;
5444 break;
5445
5446 case IA64_OPND_IP:
5447 if (e->X_op == O_register && e->X_add_number == REG_IP)
5448 return OPERAND_MATCH;
5449 break;
5450
5451 case IA64_OPND_PR:
5452 if (e->X_op == O_register && e->X_add_number == REG_PR)
5453 return OPERAND_MATCH;
5454 break;
5455
5456 case IA64_OPND_PR_ROT:
5457 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5458 return OPERAND_MATCH;
5459 break;
5460
5461 case IA64_OPND_PSR:
5462 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5463 return OPERAND_MATCH;
5464 break;
5465
5466 case IA64_OPND_PSR_L:
5467 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5468 return OPERAND_MATCH;
5469 break;
5470
5471 case IA64_OPND_PSR_UM:
5472 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5473 return OPERAND_MATCH;
5474 break;
5475
5476 case IA64_OPND_C1:
5477 if (e->X_op == O_constant)
5478 {
5479 if (e->X_add_number == 1)
5480 return OPERAND_MATCH;
5481 else
5482 return OPERAND_OUT_OF_RANGE;
5483 }
5484 break;
5485
5486 case IA64_OPND_C8:
5487 if (e->X_op == O_constant)
5488 {
5489 if (e->X_add_number == 8)
5490 return OPERAND_MATCH;
5491 else
5492 return OPERAND_OUT_OF_RANGE;
5493 }
5494 break;
5495
5496 case IA64_OPND_C16:
5497 if (e->X_op == O_constant)
5498 {
5499 if (e->X_add_number == 16)
5500 return OPERAND_MATCH;
5501 else
5502 return OPERAND_OUT_OF_RANGE;
5503 }
5504 break;
5505
5506 /* register operands: */
5507
5508 case IA64_OPND_AR3:
5509 if (e->X_op == O_register && e->X_add_number >= REG_AR
5510 && e->X_add_number < REG_AR + 128)
5511 return OPERAND_MATCH;
5512 break;
5513
5514 case IA64_OPND_B1:
5515 case IA64_OPND_B2:
5516 if (e->X_op == O_register && e->X_add_number >= REG_BR
5517 && e->X_add_number < REG_BR + 8)
5518 return OPERAND_MATCH;
5519 break;
5520
5521 case IA64_OPND_CR3:
5522 if (e->X_op == O_register && e->X_add_number >= REG_CR
5523 && e->X_add_number < REG_CR + 128)
5524 return OPERAND_MATCH;
5525 break;
5526
5527 case IA64_OPND_DAHR3:
5528 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5529 && e->X_add_number < REG_DAHR + 8)
5530 return OPERAND_MATCH;
5531 break;
5532
5533 case IA64_OPND_F1:
5534 case IA64_OPND_F2:
5535 case IA64_OPND_F3:
5536 case IA64_OPND_F4:
5537 if (e->X_op == O_register && e->X_add_number >= REG_FR
5538 && e->X_add_number < REG_FR + 128)
5539 return OPERAND_MATCH;
5540 break;
5541
5542 case IA64_OPND_P1:
5543 case IA64_OPND_P2:
5544 if (e->X_op == O_register && e->X_add_number >= REG_P
5545 && e->X_add_number < REG_P + 64)
5546 return OPERAND_MATCH;
5547 break;
5548
5549 case IA64_OPND_R1:
5550 case IA64_OPND_R2:
5551 case IA64_OPND_R3:
5552 if (e->X_op == O_register && e->X_add_number >= REG_GR
5553 && e->X_add_number < REG_GR + 128)
5554 return OPERAND_MATCH;
5555 break;
5556
5557 case IA64_OPND_R3_2:
5558 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5559 {
5560 if (e->X_add_number < REG_GR + 4)
5561 return OPERAND_MATCH;
5562 else if (e->X_add_number < REG_GR + 128)
5563 return OPERAND_OUT_OF_RANGE;
5564 }
5565 break;
5566
5567 /* indirect operands: */
5568 case IA64_OPND_CPUID_R3:
5569 case IA64_OPND_DBR_R3:
5570 case IA64_OPND_DTR_R3:
5571 case IA64_OPND_ITR_R3:
5572 case IA64_OPND_IBR_R3:
5573 case IA64_OPND_MSR_R3:
5574 case IA64_OPND_PKR_R3:
5575 case IA64_OPND_PMC_R3:
5576 case IA64_OPND_PMD_R3:
5577 case IA64_OPND_DAHR_R3:
5578 case IA64_OPND_RR_R3:
5579 if (e->X_op == O_index && e->X_op_symbol
5580 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5581 == opnd - IA64_OPND_CPUID_R3))
5582 return OPERAND_MATCH;
5583 break;
5584
5585 case IA64_OPND_MR3:
5586 if (e->X_op == O_index && !e->X_op_symbol)
5587 return OPERAND_MATCH;
5588 break;
5589
5590 /* immediate operands: */
5591 case IA64_OPND_CNT2a:
5592 case IA64_OPND_LEN4:
5593 case IA64_OPND_LEN6:
5594 bits = operand_width (idesc->operands[res_index]);
5595 if (e->X_op == O_constant)
5596 {
5597 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5598 return OPERAND_MATCH;
5599 else
5600 return OPERAND_OUT_OF_RANGE;
5601 }
5602 break;
5603
5604 case IA64_OPND_CNT2b:
5605 if (e->X_op == O_constant)
5606 {
5607 if ((bfd_vma) (e->X_add_number - 1) < 3)
5608 return OPERAND_MATCH;
5609 else
5610 return OPERAND_OUT_OF_RANGE;
5611 }
5612 break;
5613
5614 case IA64_OPND_CNT2c:
5615 val = e->X_add_number;
5616 if (e->X_op == O_constant)
5617 {
5618 if ((val == 0 || val == 7 || val == 15 || val == 16))
5619 return OPERAND_MATCH;
5620 else
5621 return OPERAND_OUT_OF_RANGE;
5622 }
5623 break;
5624
5625 case IA64_OPND_SOR:
5626 /* SOR must be an integer multiple of 8 */
5627 if (e->X_op == O_constant && e->X_add_number & 0x7)
5628 return OPERAND_OUT_OF_RANGE;
5629 /* Fall through. */
5630 case IA64_OPND_SOF:
5631 case IA64_OPND_SOL:
5632 if (e->X_op == O_constant)
5633 {
5634 if ((bfd_vma) e->X_add_number <= 96)
5635 return OPERAND_MATCH;
5636 else
5637 return OPERAND_OUT_OF_RANGE;
5638 }
5639 break;
5640
5641 case IA64_OPND_IMMU62:
5642 if (e->X_op == O_constant)
5643 {
5644 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5645 return OPERAND_MATCH;
5646 else
5647 return OPERAND_OUT_OF_RANGE;
5648 }
5649 else
5650 {
5651 /* FIXME -- need 62-bit relocation type */
5652 as_bad (_("62-bit relocation not yet implemented"));
5653 }
5654 break;
5655
5656 case IA64_OPND_IMMU64:
5657 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5658 || e->X_op == O_subtract)
5659 {
5660 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5661 fix->code = BFD_RELOC_IA64_IMM64;
5662 if (e->X_op != O_subtract)
5663 {
5664 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5665 if (e->X_op == O_pseudo_fixup)
5666 e->X_op = O_symbol;
5667 }
5668
5669 fix->opnd = idesc->operands[res_index];
5670 fix->expr = *e;
5671 fix->is_pcrel = 0;
5672 ++CURR_SLOT.num_fixups;
5673 return OPERAND_MATCH;
5674 }
5675 else if (e->X_op == O_constant)
5676 return OPERAND_MATCH;
5677 break;
5678
5679 case IA64_OPND_IMMU5b:
5680 if (e->X_op == O_constant)
5681 {
5682 val = e->X_add_number;
5683 if (val >= 32 && val <= 63)
5684 return OPERAND_MATCH;
5685 else
5686 return OPERAND_OUT_OF_RANGE;
5687 }
5688 break;
5689
5690 case IA64_OPND_CCNT5:
5691 case IA64_OPND_CNT5:
5692 case IA64_OPND_CNT6:
5693 case IA64_OPND_CPOS6a:
5694 case IA64_OPND_CPOS6b:
5695 case IA64_OPND_CPOS6c:
5696 case IA64_OPND_IMMU2:
5697 case IA64_OPND_IMMU7a:
5698 case IA64_OPND_IMMU7b:
5699 case IA64_OPND_IMMU16:
5700 case IA64_OPND_IMMU19:
5701 case IA64_OPND_IMMU21:
5702 case IA64_OPND_IMMU24:
5703 case IA64_OPND_MBTYPE4:
5704 case IA64_OPND_MHTYPE8:
5705 case IA64_OPND_POS6:
5706 bits = operand_width (idesc->operands[res_index]);
5707 if (e->X_op == O_constant)
5708 {
5709 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5710 return OPERAND_MATCH;
5711 else
5712 return OPERAND_OUT_OF_RANGE;
5713 }
5714 break;
5715
5716 case IA64_OPND_IMMU9:
5717 bits = operand_width (idesc->operands[res_index]);
5718 if (e->X_op == O_constant)
5719 {
5720 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5721 {
5722 int lobits = e->X_add_number & 0x3;
5723 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5724 e->X_add_number |= (bfd_vma) 0x3;
5725 return OPERAND_MATCH;
5726 }
5727 else
5728 return OPERAND_OUT_OF_RANGE;
5729 }
5730 break;
5731
5732 case IA64_OPND_IMM44:
5733 /* The least significant 16 bits must be zero.  */
5734 if ((e->X_add_number & 0xffff) != 0)
5735 /* XXX technically, this is wrong: we should not be issuing warning
5736 messages until we're sure this instruction pattern is going to
5737 be used! */
5738 as_warn (_("lower 16 bits of mask ignored"));
5739
5740 if (e->X_op == O_constant)
5741 {
5742 if (((e->X_add_number >= 0
5743 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5744 || (e->X_add_number < 0
5745 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5746 {
5747 /* sign-extend */
5748 if (e->X_add_number >= 0
5749 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5750 {
5751 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5752 }
5753 return OPERAND_MATCH;
5754 }
5755 else
5756 return OPERAND_OUT_OF_RANGE;
5757 }
5758 break;
5759
5760 case IA64_OPND_IMM17:
5761 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5762 if (e->X_op == O_constant)
5763 {
5764 if (((e->X_add_number >= 0
5765 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5766 || (e->X_add_number < 0
5767 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5768 {
5769 /* sign-extend */
5770 if (e->X_add_number >= 0
5771 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5772 {
5773 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5774 }
5775 return OPERAND_MATCH;
5776 }
5777 else
5778 return OPERAND_OUT_OF_RANGE;
5779 }
5780 break;
5781
5782 case IA64_OPND_IMM14:
5783 case IA64_OPND_IMM22:
5784 relocatable = 1;
5785 /* Fall through. */
5786 case IA64_OPND_IMM1:
5787 case IA64_OPND_IMM8:
5788 case IA64_OPND_IMM8U4:
5789 case IA64_OPND_IMM8M1:
5790 case IA64_OPND_IMM8M1U4:
5791 case IA64_OPND_IMM8M1U8:
5792 case IA64_OPND_IMM9a:
5793 case IA64_OPND_IMM9b:
5794 bits = operand_width (idesc->operands[res_index]);
5795 if (relocatable && (e->X_op == O_symbol
5796 || e->X_op == O_subtract
5797 || e->X_op == O_pseudo_fixup))
5798 {
5799 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5800
5801 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5802 fix->code = BFD_RELOC_IA64_IMM14;
5803 else
5804 fix->code = BFD_RELOC_IA64_IMM22;
5805
5806 if (e->X_op != O_subtract)
5807 {
5808 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5809 if (e->X_op == O_pseudo_fixup)
5810 e->X_op = O_symbol;
5811 }
5812
5813 fix->opnd = idesc->operands[res_index];
5814 fix->expr = *e;
5815 fix->is_pcrel = 0;
5816 ++CURR_SLOT.num_fixups;
5817 return OPERAND_MATCH;
5818 }
5819 else if (e->X_op != O_constant
5820 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5821 return OPERAND_MISMATCH;
5822
5823 if (opnd == IA64_OPND_IMM8M1U4)
5824 {
5825 /* Zero is not valid for unsigned compares that take an adjusted
5826 constant immediate range. */
5827 if (e->X_add_number == 0)
5828 return OPERAND_OUT_OF_RANGE;
5829
5830 /* Sign-extend 32-bit unsigned numbers, so that the following range
5831 checks will work. */
5832 val = e->X_add_number;
5833 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5834 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5835
5836 /* Check for 0x100000000. This is valid because
5837 0x100000000-1 is the same as ((uint32_t) -1). */
5838 if (val == ((bfd_signed_vma) 1 << 32))
5839 return OPERAND_MATCH;
5840
5841 val = val - 1;
5842 }
5843 else if (opnd == IA64_OPND_IMM8M1U8)
5844 {
5845 /* Zero is not valid for unsigned compares that take an adjusted
5846 constant immediate range. */
5847 if (e->X_add_number == 0)
5848 return OPERAND_OUT_OF_RANGE;
5849
5850 /* Check for 0x10000000000000000. */
5851 if (e->X_op == O_big)
5852 {
5853 if (generic_bignum[0] == 0
5854 && generic_bignum[1] == 0
5855 && generic_bignum[2] == 0
5856 && generic_bignum[3] == 0
5857 && generic_bignum[4] == 1)
5858 return OPERAND_MATCH;
5859 else
5860 return OPERAND_OUT_OF_RANGE;
5861 }
5862 else
5863 val = e->X_add_number - 1;
5864 }
5865 else if (opnd == IA64_OPND_IMM8M1)
5866 val = e->X_add_number - 1;
5867 else if (opnd == IA64_OPND_IMM8U4)
5868 {
5869 /* Sign-extend 32-bit unsigned numbers, so that the following range
5870 checks will work. */
5871 val = e->X_add_number;
5872 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5873 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5874 }
5875 else
5876 val = e->X_add_number;
5877
5878 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5879 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5880 return OPERAND_MATCH;
5881 else
5882 return OPERAND_OUT_OF_RANGE;
5883
5884 case IA64_OPND_INC3:
5885 /* +/- 1, 4, 8, 16 */
5886 val = e->X_add_number;
5887 if (val < 0)
5888 val = -val;
5889 if (e->X_op == O_constant)
5890 {
5891 if ((val == 1 || val == 4 || val == 8 || val == 16))
5892 return OPERAND_MATCH;
5893 else
5894 return OPERAND_OUT_OF_RANGE;
5895 }
5896 break;
5897
5898 case IA64_OPND_TGT25:
5899 case IA64_OPND_TGT25b:
5900 case IA64_OPND_TGT25c:
5901 case IA64_OPND_TGT64:
5902 if (e->X_op == O_symbol)
5903 {
5904 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5905 if (opnd == IA64_OPND_TGT25)
5906 fix->code = BFD_RELOC_IA64_PCREL21F;
5907 else if (opnd == IA64_OPND_TGT25b)
5908 fix->code = BFD_RELOC_IA64_PCREL21M;
5909 else if (opnd == IA64_OPND_TGT25c)
5910 fix->code = BFD_RELOC_IA64_PCREL21B;
5911 else if (opnd == IA64_OPND_TGT64)
5912 fix->code = BFD_RELOC_IA64_PCREL60B;
5913 else
5914 abort ();
5915
5916 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5917 fix->opnd = idesc->operands[res_index];
5918 fix->expr = *e;
5919 fix->is_pcrel = 1;
5920 ++CURR_SLOT.num_fixups;
5921 return OPERAND_MATCH;
5922 }
5923 /* Fall through. */
5924 case IA64_OPND_TAG13:
5925 case IA64_OPND_TAG13b:
5926 switch (e->X_op)
5927 {
5928 case O_constant:
5929 return OPERAND_MATCH;
5930
5931 case O_symbol:
5932 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5933 /* There are no external relocs for TAG13/TAG13b fields, so we
5934 create a dummy reloc. This will not live past md_apply_fix. */
5935 fix->code = BFD_RELOC_UNUSED;
5936 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5937 fix->opnd = idesc->operands[res_index];
5938 fix->expr = *e;
5939 fix->is_pcrel = 1;
5940 ++CURR_SLOT.num_fixups;
5941 return OPERAND_MATCH;
5942
5943 default:
5944 break;
5945 }
5946 break;
5947
5948 case IA64_OPND_LDXMOV:
5949 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5950 fix->code = BFD_RELOC_IA64_LDXMOV;
5951 fix->opnd = idesc->operands[res_index];
5952 fix->expr = *e;
5953 fix->is_pcrel = 0;
5954 ++CURR_SLOT.num_fixups;
5955 return OPERAND_MATCH;
5956
5957 case IA64_OPND_STRD5b:
5958 if (e->X_op == O_constant)
5959 {
5960 /* 5-bit signed scaled by 64 */
5961 if (e->X_add_number <= (0xf << 6)
5962 && e->X_add_number >= -(0x10 << 6))
5963 {
5965 /* Must be a multiple of 64 */
5966 if ((e->X_add_number & 0x3f) != 0)
5967 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5968
5969 e->X_add_number &= ~0x3f;
5970 return OPERAND_MATCH;
5971 }
5972 else
5973 return OPERAND_OUT_OF_RANGE;
5974 }
5975 break;
5976 case IA64_OPND_CNT6a:
5977 if (e->X_op == O_constant)
5978 {
5979 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5980 if ((e->X_add_number <= 64)
5981 && (e->X_add_number > 0))
5982 {
5983 return OPERAND_MATCH;
5984 }
5985 else
5986 return OPERAND_OUT_OF_RANGE;
5987 }
5988 break;
5989
5990 default:
5991 break;
5992 }
5993 return OPERAND_MISMATCH;
5994 }
5995
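/* Parse a single operand into E.  If MORE is nonzero, a trailing ',' or
   MORE character is consumed.  The character that terminated the operand
   is returned.  */
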
5996 static int
5997 parse_operand (expressionS *e, int more)
5998 {
5999 int sep = '\0';
6000
6001 memset (e, 0, sizeof (*e));
6002 e->X_op = O_absent;
6003 SKIP_WHITESPACE ();
6004 expression (e);
6005 sep = *input_line_pointer;
6006 if (more && (sep == ',' || sep == more))
6007 ++input_line_pointer;
6008 return sep;
6009 }
6010
6011 static int
6012 parse_operand_and_eval (expressionS *e, int more)
6013 {
6014 int sep = parse_operand (e, more);
6015 resolve_expression (e);
6016 return sep;
6017 }
6018
6019 static int
6020 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6021 {
6022 int sep = parse_operand (e, more);
6023 switch (op)
6024 {
6025 case IA64_OPND_IMM14:
6026 case IA64_OPND_IMM22:
6027 case IA64_OPND_IMMU64:
6028 case IA64_OPND_TGT25:
6029 case IA64_OPND_TGT25b:
6030 case IA64_OPND_TGT25c:
6031 case IA64_OPND_TGT64:
6032 case IA64_OPND_TAG13:
6033 case IA64_OPND_TAG13b:
6034 case IA64_OPND_LDXMOV:
6035 break;
6036 default:
6037 resolve_expression (e);
6038 break;
6039 }
6040 return sep;
6041 }
6042
6043 /* Returns the next entry in the opcode table that matches the one in
6044 IDESC, and frees the entry in IDESC. If no matching entry is
6045 found, NULL is returned instead. */
6046
6047 static struct ia64_opcode *
6048 get_next_opcode (struct ia64_opcode *idesc)
6049 {
6050 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6051 ia64_free_opcode (idesc);
6052 return next;
6053 }
6054
6055 /* Parse the operands for the opcode and find the opcode variant that
6056 matches the specified operands, or NULL if no match is possible. */
6057
6058 static struct ia64_opcode *
6059 parse_operands (struct ia64_opcode *idesc)
6060 {
6061 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6062 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6063 int reg1, reg2;
6064 char reg_class;
6065 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6066 enum operand_match_result result;
6067 char mnemonic[129];
6068 char *first_arg = 0, *end, *saved_input_pointer;
6069 unsigned int sof;
6070
6071 gas_assert (strlen (idesc->name) <= 128);
6072
6073 strcpy (mnemonic, idesc->name);
6074 if (idesc->operands[2] == IA64_OPND_SOF
6075 || idesc->operands[1] == IA64_OPND_SOF)
6076 {
6077 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6078 can't parse the first operand until we have parsed the
6079 remaining operands of the "alloc" instruction. */
6080 SKIP_WHITESPACE ();
6081 first_arg = input_line_pointer;
6082 end = strchr (input_line_pointer, '=');
6083 if (!end)
6084 {
6085 as_bad (_("Expected separator `='"));
6086 return 0;
6087 }
6088 input_line_pointer = end + 1;
6089 ++i;
6090 ++num_outputs;
6091 }
6092
6093 for (; ; ++i)
6094 {
6095 if (i < NELEMS (CURR_SLOT.opnd))
6096 {
6097 enum ia64_opnd op = IA64_OPND_NIL;
6098 if (i < NELEMS (idesc->operands))
6099 op = idesc->operands[i];
6100 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=', op);
6101 if (CURR_SLOT.opnd[i].X_op == O_absent)
6102 break;
6103 }
6104 else
6105 {
6106 expressionS dummy;
6107
6108 sep = parse_operand (&dummy, '=');
6109 if (dummy.X_op == O_absent)
6110 break;
6111 }
6112
6113 ++num_operands;
6114
6115 if (sep != '=' && sep != ',')
6116 break;
6117
6118 if (sep == '=')
6119 {
6120 if (num_outputs > 0)
6121 as_bad (_("Duplicate equal sign (=) in instruction"));
6122 else
6123 num_outputs = i + 1;
6124 }
6125 }
6126 if (sep != '\0')
6127 {
6128 as_bad (_("Illegal operand separator `%c'"), sep);
6129 return 0;
6130 }
6131
6132 if (idesc->operands[2] == IA64_OPND_SOF
6133 || idesc->operands[1] == IA64_OPND_SOF)
6134 {
6135 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6136 Note, however, that due to that mapping operand numbers in error
6137 messages for any of the constant operands will not be correct. */
6138 know (strcmp (idesc->name, "alloc") == 0);
6139 /* The first operand hasn't been parsed/initialized, yet (but
6140 num_operands intentionally doesn't account for that). */
6141 i = num_operands > 4 ? 2 : 1;
6142 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6143 ? CURR_SLOT.opnd[n].X_add_number \
6144 : 0)
6145 sof = set_regstack (FORCE_CONST(i),
6146 FORCE_CONST(i + 1),
6147 FORCE_CONST(i + 2),
6148 FORCE_CONST(i + 3));
6149 #undef FORCE_CONST
6150
6151 /* now we can parse the first arg: */
6152 saved_input_pointer = input_line_pointer;
6153 input_line_pointer = first_arg;
6154 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6155 idesc->operands[0]);
6156 if (sep != '=')
6157 --num_outputs; /* force error */
6158 input_line_pointer = saved_input_pointer;
6159
6160 CURR_SLOT.opnd[i].X_add_number = sof;
6161 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6162 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6163 CURR_SLOT.opnd[i + 1].X_add_number
6164 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6165 else
6166 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6167 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6168 }
6169
6170 highest_unmatched_operand = -4;
6171 curr_out_of_range_pos = -1;
6172 error_pos = 0;
6173 for (; idesc; idesc = get_next_opcode (idesc))
6174 {
6175 if (num_outputs != idesc->num_outputs)
6176 continue; /* mismatch in # of outputs */
6177 if (highest_unmatched_operand < 0)
6178 highest_unmatched_operand |= 1;
6179 if (num_operands > NELEMS (idesc->operands)
6180 || (num_operands < NELEMS (idesc->operands)
6181 && idesc->operands[num_operands])
6182 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6183 continue; /* mismatch in number of arguments */
6184 if (highest_unmatched_operand < 0)
6185 highest_unmatched_operand |= 2;
6186
6187 CURR_SLOT.num_fixups = 0;
6188
6189 /* Try to match all operands. If we see an out-of-range operand,
6190 then continue trying to match the rest of the operands, since if
6191 the rest match, then this idesc will give the best error message. */
6192
6193 out_of_range_pos = -1;
6194 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6195 {
6196 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6197 if (result != OPERAND_MATCH)
6198 {
6199 if (result != OPERAND_OUT_OF_RANGE)
6200 break;
6201 if (out_of_range_pos < 0)
6202 /* remember position of the first out-of-range operand: */
6203 out_of_range_pos = i;
6204 }
6205 }
6206
6207 /* If we did not match all operands, or if at least one operand was
6208 out-of-range, then this idesc does not match. Keep track of which
6209 idesc matched the most operands before failing. If we have two
6210 idescs that failed at the same position, and one had an out-of-range
6211 operand, then prefer the out-of-range operand. Thus if we have
6212 "add r0=0x1000000,r1" we get an error saying the constant is out
6213 of range instead of an error saying that the constant should have been
6214 a register. */
6215
6216 if (i != num_operands || out_of_range_pos >= 0)
6217 {
6218 if (i > highest_unmatched_operand
6219 || (i == highest_unmatched_operand
6220 && out_of_range_pos > curr_out_of_range_pos))
6221 {
6222 highest_unmatched_operand = i;
6223 if (out_of_range_pos >= 0)
6224 {
6225 expected_operand = idesc->operands[out_of_range_pos];
6226 error_pos = out_of_range_pos;
6227 }
6228 else
6229 {
6230 expected_operand = idesc->operands[i];
6231 error_pos = i;
6232 }
6233 curr_out_of_range_pos = out_of_range_pos;
6234 }
6235 continue;
6236 }
6237
6238 break;
6239 }
6240 if (!idesc)
6241 {
6242 if (expected_operand)
6243 as_bad (_("Operand %u of `%s' should be %s"),
6244 error_pos + 1, mnemonic,
6245 elf64_ia64_operands[expected_operand].desc);
6246 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6247 as_bad (_("Wrong number of output operands"));
6248 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6249 as_bad (_("Wrong number of input operands"));
6250 else
6251 as_bad (_("Operand mismatch"));
6252 return 0;
6253 }
6254
6255 /* Check that the instruction doesn't use
6256 - r0, f0, or f1 as output operands
6257 - the same predicate twice as output operands
6258 - r0 as address of a base update load or store
6259 - the same GR as output and address of a base update load
6260 - two even- or two odd-numbered FRs as output operands of a floating
6261 point parallel load.
6262 At most two (conflicting) output (or output-like) operands can exist,
6263 (floating point parallel loads have three outputs, but the base register,
6264 if updated, cannot conflict with the actual outputs). */
6265 reg2 = reg1 = -1;
6266 for (i = 0; i < num_operands; ++i)
6267 {
6268 int regno = 0;
6269
6270 reg_class = 0;
6271 switch (idesc->operands[i])
6272 {
6273 case IA64_OPND_R1:
6274 case IA64_OPND_R2:
6275 case IA64_OPND_R3:
6276 if (i < num_outputs)
6277 {
6278 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6279 reg_class = 'r';
6280 else if (reg1 < 0)
6281 reg1 = CURR_SLOT.opnd[i].X_add_number;
6282 else if (reg2 < 0)
6283 reg2 = CURR_SLOT.opnd[i].X_add_number;
6284 }
6285 break;
6286 case IA64_OPND_P1:
6287 case IA64_OPND_P2:
6288 if (i < num_outputs)
6289 {
6290 if (reg1 < 0)
6291 reg1 = CURR_SLOT.opnd[i].X_add_number;
6292 else if (reg2 < 0)
6293 reg2 = CURR_SLOT.opnd[i].X_add_number;
6294 }
6295 break;
6296 case IA64_OPND_F1:
6297 case IA64_OPND_F2:
6298 case IA64_OPND_F3:
6299 case IA64_OPND_F4:
6300 if (i < num_outputs)
6301 {
6302 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6303 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6304 {
6305 reg_class = 'f';
6306 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6307 }
6308 else if (reg1 < 0)
6309 reg1 = CURR_SLOT.opnd[i].X_add_number;
6310 else if (reg2 < 0)
6311 reg2 = CURR_SLOT.opnd[i].X_add_number;
6312 }
6313 break;
6314 case IA64_OPND_MR3:
6315 if (idesc->flags & IA64_OPCODE_POSTINC)
6316 {
6317 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6318 reg_class = 'm';
6319 else if (reg1 < 0)
6320 reg1 = CURR_SLOT.opnd[i].X_add_number;
6321 else if (reg2 < 0)
6322 reg2 = CURR_SLOT.opnd[i].X_add_number;
6323 }
6324 break;
6325 default:
6326 break;
6327 }
6328 switch (reg_class)
6329 {
6330 case 0:
6331 break;
6332 default:
6333 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6334 break;
6335 case 'm':
6336 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6337 break;
6338 }
6339 }
6340 if (reg1 == reg2)
6341 {
6342 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6343 {
6344 reg1 -= REG_GR;
6345 reg_class = 'r';
6346 }
6347 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6348 {
6349 reg1 -= REG_P;
6350 reg_class = 'p';
6351 }
6352 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6353 {
6354 reg1 -= REG_FR;
6355 reg_class = 'f';
6356 }
6357 else
6358 reg_class = 0;
6359 if (reg_class)
6360 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6361 }
6362 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6363 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6364 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6365 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6366 && ! ((reg1 ^ reg2) & 1))
6367 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6368 reg1 - REG_FR, reg2 - REG_FR);
6369 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6370 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6371 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6372 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6373 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6374 reg1 - REG_FR, reg2 - REG_FR);
6375 return idesc;
6376 }
6377
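/* Encode the instruction queued in SLOT into the 41-bit word at *INSNP.
   For the long-immediate forms (IMMU64, IMMU62, TGT64) the upper bits are
   written through *insnp++ into the current (L) slot, while the opcode
   word itself ends up in the following (X) slot.  */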
6378 static void
6379 build_insn (struct slot *slot, bfd_vma *insnp)
6380 {
6381 const struct ia64_operand *odesc, *o2desc;
6382 struct ia64_opcode *idesc = slot->idesc;
6383 bfd_vma insn;
6384 bfd_signed_vma val;
6385 const char *err;
6386 int i;
6387
6388 insn = idesc->opcode | slot->qp_regno;
6389
6390 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6391 {
6392 if (slot->opnd[i].X_op == O_register
6393 || slot->opnd[i].X_op == O_constant
6394 || slot->opnd[i].X_op == O_index)
6395 val = slot->opnd[i].X_add_number;
6396 else if (slot->opnd[i].X_op == O_big)
6397 {
6398 /* This must be the value 0x10000000000000000. */
6399 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6400 val = 0;
6401 }
6402 else
6403 val = 0;
6404
6405 switch (idesc->operands[i])
6406 {
6407 case IA64_OPND_IMMU64:
6408 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6409 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6410 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6411 | (((val >> 63) & 0x1) << 36));
6412 continue;
6413
6414 case IA64_OPND_IMMU62:
6415 val &= 0x3fffffffffffffffULL;
6416 if (val != slot->opnd[i].X_add_number)
6417 as_warn (_("Value truncated to 62 bits"));
6418 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6419 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6420 continue;
6421
6422 case IA64_OPND_TGT64:
6423 val >>= 4;
6424 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6425 insn |= ((((val >> 59) & 0x1) << 36)
6426 | (((val >> 0) & 0xfffff) << 13));
6427 continue;
6428
6429 case IA64_OPND_AR3:
6430 val -= REG_AR;
6431 break;
6432
6433 case IA64_OPND_B1:
6434 case IA64_OPND_B2:
6435 val -= REG_BR;
6436 break;
6437
6438 case IA64_OPND_CR3:
6439 val -= REG_CR;
6440 break;
6441
6442 case IA64_OPND_DAHR3:
6443 val -= REG_DAHR;
6444 break;
6445
6446 case IA64_OPND_F1:
6447 case IA64_OPND_F2:
6448 case IA64_OPND_F3:
6449 case IA64_OPND_F4:
6450 val -= REG_FR;
6451 break;
6452
6453 case IA64_OPND_P1:
6454 case IA64_OPND_P2:
6455 val -= REG_P;
6456 break;
6457
6458 case IA64_OPND_R1:
6459 case IA64_OPND_R2:
6460 case IA64_OPND_R3:
6461 case IA64_OPND_R3_2:
6462 case IA64_OPND_CPUID_R3:
6463 case IA64_OPND_DBR_R3:
6464 case IA64_OPND_DTR_R3:
6465 case IA64_OPND_ITR_R3:
6466 case IA64_OPND_IBR_R3:
6467 case IA64_OPND_MR3:
6468 case IA64_OPND_MSR_R3:
6469 case IA64_OPND_PKR_R3:
6470 case IA64_OPND_PMC_R3:
6471 case IA64_OPND_PMD_R3:
6472 case IA64_OPND_DAHR_R3:
6473 case IA64_OPND_RR_R3:
6474 val -= REG_GR;
6475 break;
6476
6477 default:
6478 break;
6479 }
6480
6481 odesc = elf64_ia64_operands + idesc->operands[i];
6482 err = (*odesc->insert) (odesc, val, &insn);
6483 if (err)
6484 as_bad_where (slot->src_file, slot->src_line,
6485 _("Bad operand value: %s"), err);
6486 if (idesc->flags & IA64_OPCODE_PSEUDO)
6487 {
6488 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6489 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6490 {
6491 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6492 (*o2desc->insert) (o2desc, val, &insn);
6493 }
6494 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6495 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6496 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6497 {
6498 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6499 (*o2desc->insert) (o2desc, 64 - val, &insn);
6500 }
6501 }
6502 }
6503 *insnp = insn;
6504 }
6505
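/* Pack up to three queued slots into one 16-byte bundle: choose a template
   (the user's, if given, otherwise the best match), fill unused slots with
   unit-appropriate NOPs, bind labels, fixups and unwind records to their
   final slot positions, and emit the two 64-bit bundle halves little-endian.  */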
6506 static void
6507 emit_one_bundle (void)
6508 {
6509 int manual_bundling_off = 0, manual_bundling = 0;
6510 enum ia64_unit required_unit, insn_unit = 0;
6511 enum ia64_insn_type type[3], insn_type;
6512 unsigned int template_val, orig_template;
6513 bfd_vma insn[3] = { -1, -1, -1 };
6514 struct ia64_opcode *idesc;
6515 int end_of_insn_group = 0, user_template = -1;
6516 int n, i, j, first, curr, last_slot;
6517 bfd_vma t0 = 0, t1 = 0;
6518 struct label_fix *lfix;
6519 bool mark_label;
6520 struct insn_fix *ifix;
6521 char mnemonic[16];
6522 fixS *fix;
6523 char *f;
6524 int addr_mod;
6525
6526 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6527 know (first >= 0 && first < NUM_SLOTS);
6528 n = MIN (3, md.num_slots_in_use);
6529
6530 /* Determine template: use user_template if specified, best match
6531 otherwise: */
6532
6533 if (md.slot[first].user_template >= 0)
6534 user_template = template_val = md.slot[first].user_template;
6535 else
6536 {
6537 /* Auto select appropriate template. */
6538 memset (type, 0, sizeof (type));
6539 curr = first;
6540 for (i = 0; i < n; ++i)
6541 {
6542 if (md.slot[curr].label_fixups && i != 0)
6543 break;
6544 type[i] = md.slot[curr].idesc->type;
6545 curr = (curr + 1) % NUM_SLOTS;
6546 }
6547 template_val = best_template[type[0]][type[1]][type[2]];
6548 }
6549
6550 /* initialize instructions with appropriate nops: */
6551 for (i = 0; i < 3; ++i)
6552 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6553
6554 f = frag_more (16);
6555
6556 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6557 from the start of the frag. */
6558 addr_mod = frag_now_fix () & 15;
6559 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6560 as_bad (_("instruction address is not a multiple of 16"));
6561 frag_now->insn_addr = addr_mod;
6562 frag_now->has_code = 1;
6563
6564 /* now fill in slots with as many insns as possible: */
6565 curr = first;
6566 idesc = md.slot[curr].idesc;
6567 end_of_insn_group = 0;
6568 last_slot = -1;
6569 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6570 {
6571 /* If we have unwind records, we may need to update some now. */
6572 unw_rec_list *ptr = md.slot[curr].unwind_record;
6573 unw_rec_list *end_ptr = NULL;
6574
6575 if (ptr)
6576 {
6577 /* Find the last prologue/body record in the list for the current
6578 insn, and set the slot number for all records up to that point.
6579 This needs to be done now, because prologue/body records refer to
6580 the current point, not the point after the instruction has been
6581 issued. This matters because there may have been nops emitted
6582 meanwhile. Any non-prologue non-body record followed by a
6583 prologue/body record must also refer to the current point. */
6584 unw_rec_list *last_ptr;
6585
6586 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6587 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6588 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6589 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6590 || ptr->r.type == body)
6591 last_ptr = ptr;
6592 if (last_ptr)
6593 {
6594 /* Make last_ptr point one after the last prologue/body
6595 record. */
6596 last_ptr = last_ptr->next;
6597 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6598 ptr = ptr->next)
6599 {
6600 ptr->slot_number = (unsigned long) f + i;
6601 ptr->slot_frag = frag_now;
6602 }
6603 /* Remove the initialized records, so that we won't accidentally
6604 update them again if we insert a nop and continue. */
6605 md.slot[curr].unwind_record = last_ptr;
6606 }
6607 }
6608
6609 manual_bundling_off = md.slot[curr].manual_bundling_off;
6610 if (md.slot[curr].manual_bundling_on)
6611 {
6612 if (curr == first)
6613 manual_bundling = 1;
6614 else
6615 break; /* Need to start a new bundle. */
6616 }
6617
6618 /* If this instruction specifies a template, then it must be the first
6619 instruction of a bundle. */
6620 if (curr != first && md.slot[curr].user_template >= 0)
6621 break;
6622
6623 if (idesc->flags & IA64_OPCODE_SLOT2)
6624 {
6625 if (manual_bundling && !manual_bundling_off)
6626 {
6627 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6628 _("`%s' must be last in bundle"), idesc->name);
6629 if (i < 2)
6630 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6631 }
6632 i = 2;
6633 }
6634 if (idesc->flags & IA64_OPCODE_LAST)
6635 {
6636 int required_slot;
6637 unsigned int required_template;
6638
6639 /* If we need a stop bit after an M slot, our only choice is
6640 template 5 (M;;MI). If we need a stop bit after a B
6641 slot, our only choice is to place it at the end of the
6642 bundle, because the only available templates are MIB,
6643 MBB, BBB, MMB, and MFB. We don't handle anything other
6644 than M and B slots because these are the only kind of
6645 instructions that can have the IA64_OPCODE_LAST bit set. */
6646 required_template = template_val;
6647 switch (idesc->type)
6648 {
6649 case IA64_TYPE_M:
6650 required_slot = 0;
6651 required_template = 5;
6652 break;
6653
6654 case IA64_TYPE_B:
6655 required_slot = 2;
6656 break;
6657
6658 default:
6659 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6660 _("Internal error: don't know how to force %s to end of instruction group"),
6661 idesc->name);
6662 required_slot = i;
6663 break;
6664 }
6665 if (manual_bundling
6666 && (i > required_slot
6667 || (required_slot == 2 && !manual_bundling_off)
6668 || (user_template >= 0
6669 /* Changing from MMI to M;MI is OK. */
6670 && (template_val ^ required_template) > 1)))
6671 {
6672 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6673 _("`%s' must be last in instruction group"),
6674 idesc->name);
6675 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6676 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6677 }
6678 if (required_slot < i)
6679 /* Can't fit this instruction. */
6680 break;
6681
6682 i = required_slot;
6683 if (required_template != template_val)
6684 {
6685 /* If we switch the template, we need to reset the NOPs
6686 after slot i. The slot-types of the instructions ahead
6687 of i never change, so we don't need to worry about
6688 changing NOPs in front of this slot. */
6689 for (j = i; j < 3; ++j)
6690 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6691
6692 /* We just picked a template that includes the stop bit in the
6693 middle, so we don't need another one emitted later. */
6694 md.slot[curr].end_of_insn_group = 0;
6695 }
6696 template_val = required_template;
6697 }
6698 if (curr != first && md.slot[curr].label_fixups)
6699 {
6700 if (manual_bundling)
6701 {
6702 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6703 _("Label must be first in a bundle"));
6704 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6705 }
6706 /* This insn must go into the first slot of a bundle. */
6707 break;
6708 }
6709
6710 if (end_of_insn_group && md.num_slots_in_use >= 1)
6711 {
6712 /* We need an instruction group boundary in the middle of a
6713 bundle. See if we can switch to another template with
6714 an appropriate boundary. */
6715
6716 orig_template = template_val;
6717 if (i == 1 && (user_template == 4
6718 || (user_template < 0
6719 && (ia64_templ_desc[template_val].exec_unit[0]
6720 == IA64_UNIT_M))))
6721 {
6722 template_val = 5;
6723 end_of_insn_group = 0;
6724 }
6725 else if (i == 2 && (user_template == 0
6726 || (user_template < 0
6727 && (ia64_templ_desc[template_val].exec_unit[1]
6728 == IA64_UNIT_I)))
6729 /* This test makes sure we don't switch the template if
6730 the next instruction is one that needs to be first in
6731 an instruction group. Since all those instructions are
6732 in the M group, there is no way such an instruction can
6733 fit in this bundle even if we switch the template. The
6734 reason we have to check for this is that otherwise we
6735 may end up generating "MI;;I M.." which has the deadly
6736 effect that the second M instruction is no longer the
6737 first in the group! --davidm 99/12/16 */
6738 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6739 {
6740 template_val = 1;
6741 end_of_insn_group = 0;
6742 }
6743 else if (i == 1
6744 && user_template == 0
6745 && !(idesc->flags & IA64_OPCODE_FIRST))
6746 /* Use the next slot. */
6747 continue;
6748 else if (curr != first)
6749 /* can't fit this insn */
6750 break;
6751
6752 if (template_val != orig_template)
6753 /* if we switch the template, we need to reset the NOPs
6754 after slot i. The slot-types of the instructions ahead
6755 of i never change, so we don't need to worry about
6756 changing NOPs in front of this slot. */
6757 for (j = i; j < 3; ++j)
6758 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6759 }
6760 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6761
6762 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
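      /* For example, a bare `nop 0' landing in an I slot is rewritten as
	 `nop.i 0' here, the unit suffix being taken from the template's
	 required unit (an illustrative sketch of the common case).  */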
6763 if (idesc->type == IA64_TYPE_DYN)
6764 {
6765 enum ia64_opnd opnd1, opnd2;
6766
6767 if ((strcmp (idesc->name, "nop") == 0)
6768 || (strcmp (idesc->name, "break") == 0))
6769 insn_unit = required_unit;
6770 else if (strcmp (idesc->name, "hint") == 0)
6771 {
6772 insn_unit = required_unit;
6773 if (required_unit == IA64_UNIT_B)
6774 {
6775 switch (md.hint_b)
6776 {
6777 case hint_b_ok:
6778 break;
6779 case hint_b_warning:
6780 as_warn (_("hint in B unit may be treated as nop"));
6781 break;
6782 case hint_b_error:
6783 /* When manual bundling is off and there is no
6784 user template, we choose a different unit so
6785 that hint won't go into the current slot. We
6786 will fill the current bundle with nops and
6787 try to put hint into the next bundle. */
6788 if (!manual_bundling && user_template < 0)
6789 insn_unit = IA64_UNIT_I;
6790 else
6791 as_bad (_("hint in B unit can't be used"));
6792 break;
6793 }
6794 }
6795 }
6796 else if (strcmp (idesc->name, "chk.s") == 0
6797 || strcmp (idesc->name, "mov") == 0)
6798 {
6799 insn_unit = IA64_UNIT_M;
6800 if (required_unit == IA64_UNIT_I
6801 || (required_unit == IA64_UNIT_F && template_val == 6))
6802 insn_unit = IA64_UNIT_I;
6803 }
6804 else
6805 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6806
6807 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6808 idesc->name, "?imbfxx"[insn_unit]);
6809 opnd1 = idesc->operands[0];
6810 opnd2 = idesc->operands[1];
6811 ia64_free_opcode (idesc);
6812 idesc = ia64_find_opcode (mnemonic);
6813 /* moves to/from ARs have collisions */
6814 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6815 {
6816 while (idesc != NULL
6817 && (idesc->operands[0] != opnd1
6818 || idesc->operands[1] != opnd2))
6819 idesc = get_next_opcode (idesc);
6820 }
6821 md.slot[curr].idesc = idesc;
6822 }
6823 else
6824 {
6825 insn_type = idesc->type;
6826 insn_unit = IA64_UNIT_NIL;
6827 switch (insn_type)
6828 {
6829 case IA64_TYPE_A:
6830 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6831 insn_unit = required_unit;
6832 break;
6833 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6834 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6835 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6836 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6837 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6838 default: break;
6839 }
6840 }
6841
6842 if (insn_unit != required_unit)
6843 continue; /* Try next slot. */
6844
6845 /* Now is a good time to fix up the labels for this insn. */
6846 mark_label = false;
6847 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6848 {
6849 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6850 symbol_set_frag (lfix->sym, frag_now);
6851 mark_label |= lfix->dw2_mark_labels;
6852 }
6853 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6854 {
6855 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6856 symbol_set_frag (lfix->sym, frag_now);
6857 }
6858
6859 if (debug_type == DEBUG_DWARF2
6860 || md.slot[curr].loc_directive_seen
6861 || mark_label)
6862 {
6863 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6864
6865 md.slot[curr].loc_directive_seen = 0;
6866 if (mark_label)
6867 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6868
6869 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6870 }
6871
6872 build_insn (md.slot + curr, insn + i);
6873
6874 ptr = md.slot[curr].unwind_record;
6875 if (ptr)
6876 {
6877 /* Set slot numbers for all remaining unwind records belonging to the
6878 current insn. There cannot be any prologue/body unwind records
6879 here. */
6880 for (; ptr != end_ptr; ptr = ptr->next)
6881 {
6882 ptr->slot_number = (unsigned long) f + i;
6883 ptr->slot_frag = frag_now;
6884 }
6885 md.slot[curr].unwind_record = NULL;
6886 }
6887
6888 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6889 {
6890 unsigned long where;
6891
6892 ifix = md.slot[curr].fixup + j;
6893 where = frag_now_fix () - 16 + i;
6894 #ifdef TE_HPUX
6895 /* Fix offset for PCREL60B relocation on HP-UX. */
6896 if (ifix->code == BFD_RELOC_IA64_PCREL60B)
6897 ++where;
6898 #endif
6899
6900 fix = fix_new_exp (frag_now, where, 8,
6901 &ifix->expr, ifix->is_pcrel, ifix->code);
6902 fix->tc_fix_data.opnd = ifix->opnd;
6903 fix->fx_file = md.slot[curr].src_file;
6904 fix->fx_line = md.slot[curr].src_line;
6905 }
6906
6907 end_of_insn_group = md.slot[curr].end_of_insn_group;
6908
6909 /* This adjustment to "i" must occur after the fix, otherwise the fix
6910 is assigned to the wrong slot, and the VMS linker complains. */
6911 if (required_unit == IA64_UNIT_L)
6912 {
6913 know (i == 1);
6914 /* skip one slot for long/X-unit instructions */
6915 ++i;
6916 }
6917 --md.num_slots_in_use;
6918 last_slot = i;
6919
6920 /* clear slot: */
6921 ia64_free_opcode (md.slot[curr].idesc);
6922 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6923 md.slot[curr].user_template = -1;
6924
6925 if (manual_bundling_off)
6926 {
6927 manual_bundling = 0;
6928 break;
6929 }
6930 curr = (curr + 1) % NUM_SLOTS;
6931 idesc = md.slot[curr].idesc;
6932 }
6933
6934 /* A user template was specified, but the first following instruction did
6935 not fit. This can happen with or without manual bundling. */
6936 if (md.num_slots_in_use > 0 && last_slot < 0)
6937 {
6938 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6939 _("`%s' does not fit into %s template"),
6940 idesc->name, ia64_templ_desc[template_val].name);
6941 /* Drop first insn so we don't livelock. */
6942 --md.num_slots_in_use;
6943 know (curr == first);
6944 ia64_free_opcode (md.slot[curr].idesc);
6945 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6946 md.slot[curr].user_template = -1;
6947 }
6948 else if (manual_bundling > 0)
6949 {
6950 if (md.num_slots_in_use > 0)
6951 {
6952 if (last_slot >= 2)
6953 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6954 _("`%s' does not fit into bundle"), idesc->name);
6955 else
6956 {
6957 const char *where;
6958
6959 if (template_val == 2)
6960 where = "X slot";
6961 else if (last_slot == 0)
6962 where = "slots 2 or 3";
6963 else
6964 where = "slot 3";
6965 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6966 _("`%s' can't go in %s of %s template"),
6967 idesc->name, where, ia64_templ_desc[template_val].name);
6968 }
6969 }
6970 else
6971 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6972 _("Missing '}' at end of file"));
6973 }
6974
6975 know (md.num_slots_in_use < NUM_SLOTS);
6976
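  /* An IA-64 bundle is 128 bits: a 5-bit template field in bits 0-4 and
     three 41-bit slots in bits 5-45, 46-86 and 87-127.  The stop bit is
     folded into the low bit of the template field; slot 0 and the low 18
     bits of slot 1 form t0, the remaining 23 bits of slot 1 and slot 2
     form t1.  */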
6977 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6978 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6979
6980 number_to_chars_littleendian (f + 0, t0, 8);
6981 number_to_chars_littleendian (f + 8, t1, 8);
6982 }
6983
6984 int
6985 md_parse_option (int c, const char *arg)
6986 {
6987
6988 switch (c)
6989 {
6990 /* Switches from the Intel assembler. */
6991 case 'm':
6992 if (strcmp (arg, "ilp64") == 0
6993 || strcmp (arg, "lp64") == 0
6994 || strcmp (arg, "p64") == 0)
6995 {
6996 md.flags |= EF_IA_64_ABI64;
6997 }
6998 else if (strcmp (arg, "ilp32") == 0)
6999 {
7000 md.flags &= ~EF_IA_64_ABI64;
7001 }
7002 else if (strcmp (arg, "le") == 0)
7003 {
7004 md.flags &= ~EF_IA_64_BE;
7005 default_big_endian = 0;
7006 }
7007 else if (strcmp (arg, "be") == 0)
7008 {
7009 md.flags |= EF_IA_64_BE;
7010 default_big_endian = 1;
7011 }
7012 else if (startswith (arg, "unwind-check="))
7013 {
7014 arg += 13;
7015 if (strcmp (arg, "warning") == 0)
7016 md.unwind_check = unwind_check_warning;
7017 else if (strcmp (arg, "error") == 0)
7018 md.unwind_check = unwind_check_error;
7019 else
7020 return 0;
7021 }
7022 else if (startswith (arg, "hint.b="))
7023 {
7024 arg += 7;
7025 if (strcmp (arg, "ok") == 0)
7026 md.hint_b = hint_b_ok;
7027 else if (strcmp (arg, "warning") == 0)
7028 md.hint_b = hint_b_warning;
7029 else if (strcmp (arg, "error") == 0)
7030 md.hint_b = hint_b_error;
7031 else
7032 return 0;
7033 }
7034 else if (startswith (arg, "tune="))
7035 {
7036 arg += 5;
7037 if (strcmp (arg, "itanium1") == 0)
7038 md.tune = itanium1;
7039 else if (strcmp (arg, "itanium2") == 0)
7040 md.tune = itanium2;
7041 else
7042 return 0;
7043 }
7044 else
7045 return 0;
7046 break;
7047
7048 case 'N':
7049 if (strcmp (arg, "so") == 0)
7050 {
7051 /* Suppress signon message. */
7052 }
7053 else if (strcmp (arg, "pi") == 0)
7054 {
7055 /* Reject privileged instructions. FIXME */
7056 }
7057 else if (strcmp (arg, "us") == 0)
7058 {
7059 /* Allow union of signed and unsigned range. FIXME */
7060 }
7061 else if (strcmp (arg, "close_fcalls") == 0)
7062 {
7063 /* Do not resolve global function calls. */
7064 }
7065 else
7066 return 0;
7067 break;
7068
7069 case 'C':
7070 /* temp[="prefix"] Insert temporary labels into the object file
7071 symbol table prefixed by "prefix".
7072 Default prefix is ":temp:".
7073 */
7074 break;
7075
7076 case 'a':
7077 /* indirect=<tgt> Assume unannotated indirect branches behave
7078 according to <tgt> --
7079 exit: branch out from the current context (default)
7080 labels: all labels in context may be branch targets
7081 */
7082 if (!startswith (arg, "indirect="))
7083 return 0;
7084 break;
7085
7086 case 'x':
7087 /* -X conflicts with an ignored option, use -x instead */
7088 md.detect_dv = 1;
7089 if (!arg || strcmp (arg, "explicit") == 0)
7090 {
7091 /* set default mode to explicit */
7092 md.default_explicit_mode = 1;
7093 break;
7094 }
7095 else if (strcmp (arg, "auto") == 0)
7096 {
7097 md.default_explicit_mode = 0;
7098 }
7099 else if (strcmp (arg, "none") == 0)
7100 {
7101 md.detect_dv = 0;
7102 }
7103 else if (strcmp (arg, "debug") == 0)
7104 {
7105 md.debug_dv = 1;
7106 }
7107 else if (strcmp (arg, "debugx") == 0)
7108 {
7109 md.default_explicit_mode = 1;
7110 md.debug_dv = 1;
7111 }
7112 else if (strcmp (arg, "debugn") == 0)
7113 {
7114 md.debug_dv = 1;
7115 md.detect_dv = 0;
7116 }
7117 else
7118 {
7119 as_bad (_("Unrecognized option '-x%s'"), arg);
7120 }
7121 break;
7122
7123 case 'S':
7124 /* nops Print nops statistics. */
7125 break;
7126
7127 /* GNU specific switches for gcc. */
7128 case OPTION_MCONSTANT_GP:
7129 md.flags |= EF_IA_64_CONS_GP;
7130 break;
7131
7132 case OPTION_MAUTO_PIC:
7133 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7134 break;
7135
7136 default:
7137 return 0;
7138 }
7139
7140 return 1;
7141 }
7142
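/* A hedged example of combining these switches (the file name is purely
   illustrative):  as -mlp64 -mle -munwind-check=error -xexplicit foo.s  */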
7143 void
7144 md_show_usage (FILE *stream)
7145 {
7146 fputs (_("\
7147 IA-64 options:\n\
7148 --mconstant-gp mark output file as using the constant-GP model\n\
7149 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7150 --mauto-pic mark output file as using the constant-GP model\n\
7151 without function descriptors (sets ELF header flag\n\
7152 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7153 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7154 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7155 -mtune=[itanium1|itanium2]\n\
7156 tune for a specific CPU (default -mtune=itanium2)\n\
7157 -munwind-check=[warning|error]\n\
7158 unwind directive check (default -munwind-check=warning)\n\
7159 -mhint.b=[ok|warning|error]\n\
7160 hint.b check (default -mhint.b=error)\n\
7161 -x | -xexplicit turn on dependency violation checking\n"), stream);
7162 /* Note for translators: "automagically" can be translated as "automatically" here. */
7163 fputs (_("\
7164 -xauto automagically remove dependency violations (default)\n\
7165 -xnone turn off dependency violation checking\n\
7166 -xdebug debug dependency violation checker\n\
7167 -xdebugn debug dependency violation checker but turn off\n\
7168 dependency violation checking\n\
7169 -xdebugx debug dependency violation checker and turn on\n\
7170 dependency violation checking\n"),
7171 stream);
7172 }
7173
7174 void
7175 ia64_after_parse_args (void)
7176 {
7177 if (debug_type == DEBUG_STABS)
7178 as_fatal (_("--gstabs is not supported for ia64"));
7179 }
7180
7181 /* Return true if TYPE fits in TEMPL at SLOT. */
7182
7183 static int
7184 match (int templ, int type, int slot)
7185 {
7186 enum ia64_unit unit;
7187 int result;
7188
7189 unit = ia64_templ_desc[templ].exec_unit[slot];
7190 switch (type)
7191 {
7192 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7193 case IA64_TYPE_A:
7194 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7195 break;
7196 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7197 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7198 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7199 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7200 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7201 default: result = 0; break;
7202 }
7203 return result;
7204 }
7205
7206 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7207 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7208 type M or I would fit in TEMPL at SLOT. */
7209
7210 static inline int
7211 extra_goodness (int templ, int slot)
7212 {
7213 switch (md.tune)
7214 {
7215 case itanium1:
7216 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7217 return 2;
7218 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7219 return 1;
7220 else
7221 return 0;
7222 break;
7223 case itanium2:
7224 if (match (templ, IA64_TYPE_M, slot)
7225 || match (templ, IA64_TYPE_I, slot))
7226 /* Favor M- and I-unit NOPs. We definitely want to avoid F-unit and
7227 B-unit NOPs, which may cause split-issue or less-than-optimal
7228 branch prediction. */
7229 return 2;
7230 else
7231 return 0;
7232 break;
7233 default:
7234 abort ();
7235 return 0;
7236 }
7237 }
7238
7239 /* This function is called once, at assembler startup time. It sets
7240 up all the tables, etc. that the MD part of the assembler will need
7241 that can be determined before arguments are parsed. */
7242 void
7243 md_begin (void)
7244 {
7245 int i, j, k, t, goodness, best, ok;
7246
7247 md.auto_align = 1;
7248 md.explicit_mode = md.default_explicit_mode;
7249
7250 bfd_set_section_alignment (text_section, 4);
7251
7252 /* Make sure function pointers get initialized. */
7253 target_big_endian = -1;
7254 dot_byteorder (default_big_endian);
7255
7256 alias_hash = str_htab_create ();
7257 alias_name_hash = str_htab_create ();
7258 secalias_hash = str_htab_create ();
7259 secalias_name_hash = str_htab_create ();
7260
7261 pseudo_func[FUNC_DTP_MODULE].u.sym =
7262 symbol_new (".<dtpmod>", undefined_section,
7263 &zero_address_frag, FUNC_DTP_MODULE);
7264
7265 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7266 symbol_new (".<dtprel>", undefined_section,
7267 &zero_address_frag, FUNC_DTP_RELATIVE);
7268
7269 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7270 symbol_new (".<fptr>", undefined_section,
7271 &zero_address_frag, FUNC_FPTR_RELATIVE);
7272
7273 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7274 symbol_new (".<gprel>", undefined_section,
7275 &zero_address_frag, FUNC_GP_RELATIVE);
7276
7277 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7278 symbol_new (".<ltoff>", undefined_section,
7279 &zero_address_frag, FUNC_LT_RELATIVE);
7280
7281 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7282 symbol_new (".<ltoffx>", undefined_section,
7283 &zero_address_frag, FUNC_LT_RELATIVE_X);
7284
7285 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7286 symbol_new (".<pcrel>", undefined_section,
7287 &zero_address_frag, FUNC_PC_RELATIVE);
7288
7289 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7290 symbol_new (".<pltoff>", undefined_section,
7291 &zero_address_frag, FUNC_PLT_RELATIVE);
7292
7293 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7294 symbol_new (".<secrel>", undefined_section,
7295 &zero_address_frag, FUNC_SEC_RELATIVE);
7296
7297 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7298 symbol_new (".<segrel>", undefined_section,
7299 &zero_address_frag, FUNC_SEG_RELATIVE);
7300
7301 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7302 symbol_new (".<tprel>", undefined_section,
7303 &zero_address_frag, FUNC_TP_RELATIVE);
7304
7305 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7306 symbol_new (".<ltv>", undefined_section,
7307 &zero_address_frag, FUNC_LTV_RELATIVE);
7308
7309 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7310 symbol_new (".<ltoff.fptr>", undefined_section,
7311 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7312
7313 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7314 symbol_new (".<ltoff.dtpmod>", undefined_section,
7315 &zero_address_frag, FUNC_LT_DTP_MODULE);
7316
7317 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7318 symbol_new (".<ltoff.dptrel>", undefined_section,
7319 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7320
7321 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7322 symbol_new (".<ltoff.tprel>", undefined_section,
7323 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7324
7325 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7326 symbol_new (".<iplt>", undefined_section,
7327 &zero_address_frag, FUNC_IPLT_RELOC);
7328
7329 #ifdef TE_VMS
7330 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7331 symbol_new (".<slotcount>", undefined_section,
7332 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7333 #endif
7334
7335 if (md.tune != itanium1)
7336 {
7337 /* Convert MFI NOP bundles into MMI NOP bundles. */
7338 le_nop[0] = 0x8;
7339 le_nop_stop[0] = 0x9;
7340 }
7341
7342 /* Compute the table of best templates. We compute goodness as a
7343 base 4 value, in which each match counts for 3. Match-failures
7344 result in NOPs and we use extra_goodness() to pick the execution
7345 units that are best suited for issuing the NOP. */
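  /* A rough worked example for itanium2: a template that matches the first
     two insns but needs a NOP in slot 2 scores 3 + 3 + 2 = 8 when an M- or
     I-unit NOP fits there, which beats any template matching only one insn
     (at most 3 + 2 + 2 = 7).  */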
7346 for (i = 0; i < IA64_NUM_TYPES; ++i)
7347 for (j = 0; j < IA64_NUM_TYPES; ++j)
7348 for (k = 0; k < IA64_NUM_TYPES; ++k)
7349 {
7350 best = 0;
7351 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7352 {
7353 goodness = 0;
7354 if (match (t, i, 0))
7355 {
7356 if (match (t, j, 1))
7357 {
7358 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7359 goodness = 3 + 3 + 3;
7360 else
7361 goodness = 3 + 3 + extra_goodness (t, 2);
7362 }
7363 else if (match (t, j, 2))
7364 goodness = 3 + 3 + extra_goodness (t, 1);
7365 else
7366 {
7367 goodness = 3;
7368 goodness += extra_goodness (t, 1);
7369 goodness += extra_goodness (t, 2);
7370 }
7371 }
7372 else if (match (t, i, 1))
7373 {
7374 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7375 goodness = 3 + 3;
7376 else
7377 goodness = 3 + extra_goodness (t, 2);
7378 }
7379 else if (match (t, i, 2))
7380 goodness = 3 + extra_goodness (t, 1);
7381
7382 if (goodness > best)
7383 {
7384 best = goodness;
7385 best_template[i][j][k] = t;
7386 }
7387 }
7388 }
7389
7390 #ifdef DEBUG_TEMPLATES
7391 /* For debugging changes to the best_template calculations. We don't care
7392 about combinations with invalid instructions, so start the loops at 1. */
7393 for (i = 1; i < IA64_NUM_TYPES; ++i)
7394 for (j = 1; j < IA64_NUM_TYPES; ++j)
7395 for (k = 1; k < IA64_NUM_TYPES; ++k)
7396 {
7397 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7398 'x', 'd' };
7399 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7400 type_letter[k],
7401 ia64_templ_desc[best_template[i][j][k]].name);
7402 }
7403 #endif
7404
7405 for (i = 0; i < NUM_SLOTS; ++i)
7406 md.slot[i].user_template = -1;
7407
7408 md.pseudo_hash = str_htab_create ();
7409 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7410 if (str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7411 pseudo_opcode + i, 0) != NULL)
7412 as_fatal (_("duplicate %s"), pseudo_opcode[i].name);
7413
7414 md.reg_hash = str_htab_create ();
7415 md.dynreg_hash = str_htab_create ();
7416 md.const_hash = str_htab_create ();
7417 md.entry_hash = str_htab_create ();
7418
7419 /* general registers: */
7420 declare_register_set ("r", 128, REG_GR);
7421 declare_register ("gp", REG_GR + 1);
7422 declare_register ("sp", REG_GR + 12);
7423 declare_register ("tp", REG_GR + 13);
7424 declare_register_set ("ret", 4, REG_GR + 8);
7425
7426 /* floating point registers: */
7427 declare_register_set ("f", 128, REG_FR);
7428 declare_register_set ("farg", 8, REG_FR + 8);
7429 declare_register_set ("fret", 8, REG_FR + 8);
7430
7431 /* branch registers: */
7432 declare_register_set ("b", 8, REG_BR);
7433 declare_register ("rp", REG_BR + 0);
7434
7435 /* predicate registers: */
7436 declare_register_set ("p", 64, REG_P);
7437 declare_register ("pr", REG_PR);
7438 declare_register ("pr.rot", REG_PR_ROT);
7439
7440 /* application registers: */
7441 declare_register_set ("ar", 128, REG_AR);
7442 for (i = 0; i < NELEMS (ar); ++i)
7443 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7444
7445 /* control registers: */
7446 declare_register_set ("cr", 128, REG_CR);
7447 for (i = 0; i < NELEMS (cr); ++i)
7448 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7449
7450 /* dahr registers: */
7451 declare_register_set ("dahr", 8, REG_DAHR);
7452
7453 declare_register ("ip", REG_IP);
7454 declare_register ("cfm", REG_CFM);
7455 declare_register ("psr", REG_PSR);
7456 declare_register ("psr.l", REG_PSR_L);
7457 declare_register ("psr.um", REG_PSR_UM);
7458
7459 for (i = 0; i < NELEMS (indirect_reg); ++i)
7460 {
7461 unsigned int regnum = indirect_reg[i].regnum;
7462
7463 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7464 }
7465
7466 /* pseudo-registers used to specify unwind info: */
7467 declare_register ("psp", REG_PSP);
7468
7469 for (i = 0; i < NELEMS (const_bits); ++i)
7470 if (str_hash_insert (md.const_hash, const_bits[i].name, const_bits + i, 0))
7471 as_fatal (_("duplicate %s"), const_bits[i].name);
7472
7473 /* Set the architecture and machine depending on defaults and command line
7474 options. */
7475 if (md.flags & EF_IA_64_ABI64)
7476 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7477 else
7478 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7479
7480 if (! ok)
7481 as_warn (_("Could not set architecture and machine"));
7482
7483 /* Set the pointer size and pointer shift size depending on md.flags */
7484
7485 if (md.flags & EF_IA_64_ABI64)
7486 {
7487 md.pointer_size = 8; /* pointers are 8 bytes */
7488 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7489 }
7490 else
7491 {
7492 md.pointer_size = 4; /* pointers are 4 bytes */
7493 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7494 }
7495
7496 md.mem_offset.hint = 0;
7497 md.path = 0;
7498 md.maxpaths = 0;
7499 md.entry_labels = NULL;
7500 }
7501
7502 /* Set the default options in md. Cannot do this in md_begin because
7503 that is called after md_parse_option which is where we set the
7504 options in md based on command line options. */
7505
7506 void
7507 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7508 {
7509 md.flags = MD_FLAGS_DEFAULT;
7510 #ifndef TE_VMS
7511 /* Don't turn on dependency checking for VMS; it doesn't work. */
7512 md.detect_dv = 1;
7513 #endif
7514 /* FIXME: We should change it to unwind_check_error someday. */
7515 md.unwind_check = unwind_check_warning;
7516 md.hint_b = hint_b_error;
7517 md.tune = itanium2;
7518 }
7519
7520 /* Return a string for the target object file format. */
7521
7522 const char *
7523 ia64_target_format (void)
7524 {
7525 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7526 {
7527 if (md.flags & EF_IA_64_BE)
7528 {
7529 if (md.flags & EF_IA_64_ABI64)
7530 #if defined(TE_AIX50)
7531 return "elf64-ia64-aix-big";
7532 #elif defined(TE_HPUX)
7533 return "elf64-ia64-hpux-big";
7534 #else
7535 return "elf64-ia64-big";
7536 #endif
7537 else
7538 #if defined(TE_AIX50)
7539 return "elf32-ia64-aix-big";
7540 #elif defined(TE_HPUX)
7541 return "elf32-ia64-hpux-big";
7542 #else
7543 return "elf32-ia64-big";
7544 #endif
7545 }
7546 else
7547 {
7548 if (md.flags & EF_IA_64_ABI64)
7549 #if defined (TE_AIX50)
7550 return "elf64-ia64-aix-little";
7551 #elif defined (TE_VMS)
7552 {
7553 md.flags |= EF_IA_64_ARCHVER_1;
7554 return "elf64-ia64-vms";
7555 }
7556 #else
7557 return "elf64-ia64-little";
7558 #endif
7559 else
7560 #ifdef TE_AIX50
7561 return "elf32-ia64-aix-little";
7562 #else
7563 return "elf32-ia64-little";
7564 #endif
7565 }
7566 }
7567 else
7568 return "unknown-format";
7569 }
7570
7571 void
7572 ia64_end_of_source (void)
7573 {
7574 /* terminate insn group upon reaching end of file: */
7575 insn_group_break (1, 0, 0);
7576
7577 /* emits slots we haven't written yet: */
7578 ia64_flush_insns ();
7579
7580 bfd_set_private_flags (stdoutput, md.flags);
7581
7582 md.mem_offset.hint = 0;
7583 }
7584
7585 void
7586 ia64_start_line (void)
7587 {
7588 static int first;
7589
7590 if (!first) {
7591 /* Make sure we don't reference input_line_pointer[-1] when that's
7592 not valid. */
7593 first = 1;
7594 return;
7595 }
7596
7597 if (md.qp.X_op == O_register)
7598 as_bad (_("qualifying predicate not followed by instruction"));
7599 md.qp.X_op = O_absent;
7600
7601 if (ignore_input ())
7602 return;
7603
7604 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7605 {
7606 if (md.detect_dv && !md.explicit_mode)
7607 {
7608 static int warned;
7609
7610 if (!warned)
7611 {
7612 warned = 1;
7613 as_warn (_("Explicit stops are ignored in auto mode"));
7614 }
7615 }
7616 else
7617 insn_group_break (1, 0, 0);
7618 }
7619 else if (input_line_pointer[-1] == '{')
7620 {
7621 if (md.manual_bundling)
7622 as_warn (_("Found '{' when manual bundling is already turned on"));
7623 else
7624 CURR_SLOT.manual_bundling_on = 1;
7625 md.manual_bundling = 1;
7626
7627 /* Bundling is only acceptable in explicit mode
7628 or when in default automatic mode. */
7629 if (md.detect_dv && !md.explicit_mode)
7630 {
7631 if (!md.mode_explicitly_set
7632 && !md.default_explicit_mode)
7633 dot_dv_mode ('E');
7634 else
7635 as_warn (_("Found '{' after explicit switch to automatic mode"));
7636 }
7637 }
7638 else if (input_line_pointer[-1] == '}')
7639 {
7640 if (!md.manual_bundling)
7641 as_warn (_("Found '}' when manual bundling is off"));
7642 else
7643 PREV_SLOT.manual_bundling_off = 1;
7644 md.manual_bundling = 0;
7645
7646 /* switch back to automatic mode, if applicable */
7647 if (md.detect_dv
7648 && md.explicit_mode
7649 && !md.mode_explicitly_set
7650 && !md.default_explicit_mode)
7651 dot_dv_mode ('A');
7652 }
7653 }
7654
7655 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7656 labels. */
7657 static int defining_tag = 0;
7658
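/* Lines the generic reader cannot classify come through here: a leading
   qualifying predicate such as the `(p7)' in `(p7) br.cond.dptk .L1', or a
   tag definition such as `[mytag:]' (both examples are illustrative).  */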
7659 int
7660 ia64_unrecognized_line (int ch)
7661 {
7662 switch (ch)
7663 {
7664 case '(':
7665 expression_and_evaluate (&md.qp);
7666 if (*input_line_pointer++ != ')')
7667 {
7668 as_bad (_("Expected ')'"));
7669 return 0;
7670 }
7671 if (md.qp.X_op != O_register)
7672 {
7673 as_bad (_("Qualifying predicate expected"));
7674 return 0;
7675 }
7676 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7677 {
7678 as_bad (_("Predicate register expected"));
7679 return 0;
7680 }
7681 return 1;
7682
7683 case '[':
7684 {
7685 char *s;
7686 char c;
7687 symbolS *tag;
7688 int temp;
7689
7690 if (md.qp.X_op == O_register)
7691 {
7692 as_bad (_("Tag must come before qualifying predicate."));
7693 return 0;
7694 }
7695
7696 /* This implements just enough of read_a_source_file in read.c to
7697 recognize labels. */
7698 if (is_name_beginner (*input_line_pointer))
7699 {
7700 c = get_symbol_name (&s);
7701 }
7702 else if (LOCAL_LABELS_FB
7703 && ISDIGIT (*input_line_pointer))
7704 {
7705 temp = 0;
7706 while (ISDIGIT (*input_line_pointer))
7707 temp = (temp * 10) + *input_line_pointer++ - '0';
7708 fb_label_instance_inc (temp);
7709 s = fb_label_name (temp, 0);
7710 c = *input_line_pointer;
7711 }
7712 else
7713 {
7714 s = NULL;
7715 c = '\0';
7716 }
7717 if (c != ':')
7718 {
7719 /* Put ':' back for error messages' sake. */
7720 *input_line_pointer++ = ':';
7721 as_bad (_("Expected ':'"));
7722 return 0;
7723 }
7724
7725 defining_tag = 1;
7726 tag = colon (s);
7727 defining_tag = 0;
7728 /* Put ':' back for error messages' sake. */
7729 *input_line_pointer++ = ':';
7730 if (*input_line_pointer++ != ']')
7731 {
7732 as_bad (_("Expected ']'"));
7733 return 0;
7734 }
7735 if (! tag)
7736 {
7737 as_bad (_("Tag name expected"));
7738 return 0;
7739 }
7740 return 1;
7741 }
7742
7743 default:
7744 break;
7745 }
7746
7747 /* Not a valid line. */
7748 return 0;
7749 }
7750
7751 void
7752 ia64_frob_label (struct symbol *sym)
7753 {
7754 struct label_fix *fix;
7755
7756 /* Tags need special handling since they are not bundle breaks like
7757 labels. */
7758 if (defining_tag)
7759 {
7760 fix = XOBNEW (&notes, struct label_fix);
7761 fix->sym = sym;
7762 fix->next = CURR_SLOT.tag_fixups;
7763 fix->dw2_mark_labels = false;
7764 CURR_SLOT.tag_fixups = fix;
7765
7766 return;
7767 }
7768
7769 if (bfd_section_flags (now_seg) & SEC_CODE)
7770 {
7771 md.last_text_seg = now_seg;
7772 fix = XOBNEW (&notes, struct label_fix);
7773 fix->sym = sym;
7774 fix->next = CURR_SLOT.label_fixups;
7775 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7776 CURR_SLOT.label_fixups = fix;
7777
7778 /* Keep track of how many code entry points we've seen. */
7779 if (md.path == md.maxpaths)
7780 {
7781 md.maxpaths += 20;
7782 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7783 md.maxpaths);
7784 }
7785 md.entry_labels[md.path++] = S_GET_NAME (sym);
7786 }
7787 }
7788
7789 #ifdef TE_HPUX
7790 /* The HP-UX linker will give unresolved symbol errors for symbols
7791 that are declared but unused. This routine removes declared,
7792 unused symbols from an object. */
7793 int
7794 ia64_frob_symbol (struct symbol *sym)
7795 {
7796 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7797 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7798 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7799 && ! S_IS_EXTERNAL (sym)))
7800 return 1;
7801 return 0;
7802 }
7803 #endif
7804
7805 void
7806 ia64_flush_pending_output (void)
7807 {
7808 if (!md.keep_pending_output
7809 && bfd_section_flags (now_seg) & SEC_CODE)
7810 {
7811 /* ??? This causes many unnecessary stop bits to be emitted.
7812 Unfortunately, it isn't clear if it is safe to remove this. */
7813 insn_group_break (1, 0, 0);
7814 ia64_flush_insns ();
7815 }
7816 }
7817
7818 /* Do ia64-specific expression optimization. All that's done here is
7819 to transform index expressions that are either due to the indexing
7820 of rotating registers or due to the indexing of indirect register
7821 sets. */
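/* E.g. `pmc[r3]' (an indirect register set indexed by a GR) or `sum[2]'
   after a `.rotr sum[4]' declaration both reach this point as O_index
   expressions; the names are illustrative only.  */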
7822 int
7823 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7824 {
7825 if (op != O_index)
7826 return 0;
7827 resolve_expression (l);
7828 if (l->X_op == O_register)
7829 {
7830 unsigned num_regs = l->X_add_number >> 16;
7831
7832 resolve_expression (r);
7833 if (num_regs)
7834 {
7835 /* Left side is a .rotX-allocated register. */
7836 if (r->X_op != O_constant)
7837 {
7838 as_bad (_("Rotating register index must be a non-negative constant"));
7839 r->X_add_number = 0;
7840 }
7841 else if ((valueT) r->X_add_number >= num_regs)
7842 {
7843 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7844 r->X_add_number = 0;
7845 }
7846 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7847 return 1;
7848 }
7849 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7850 {
7851 if (r->X_op != O_register
7852 || r->X_add_number < REG_GR
7853 || r->X_add_number > REG_GR + 127)
7854 {
7855 as_bad (_("Indirect register index must be a general register"));
7856 r->X_add_number = REG_GR;
7857 }
7858 l->X_op = O_index;
7859 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7860 l->X_add_number = r->X_add_number;
7861 return 1;
7862 }
7863 }
7864 as_bad (_("Index can only be applied to rotating or indirect registers"));
7865 /* Fall back to a register whose use has as few side effects as
7866 possible, to minimize subsequent error messages. */
7867 l->X_op = O_register;
7868 l->X_add_number = REG_GR + 3;
7869 return 1;
7870 }
7871
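/* Resolve target-specific names: relocation pseudo-functions such as
   `@gprel(sym)' (including nested forms like `@ltoff(@fptr(sym))'), known
   register names and constants, stacked registers (inN/locN/outN), and
   rotating register sets declared with .rotr/.rotf/.rotp.  */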
7872 int
7873 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7874 {
7875 struct const_desc *cdesc;
7876 struct dynreg *dr = 0;
7877 unsigned int idx;
7878 struct symbol *sym;
7879 char *end;
7880
7881 if (*name == '@')
7882 {
7883 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7884
7885 /* Find what relocation pseudo-function we're dealing with. */
7886 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7887 if (pseudo_func[idx].name
7888 && pseudo_func[idx].name[0] == name[1]
7889 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7890 {
7891 pseudo_type = pseudo_func[idx].type;
7892 break;
7893 }
7894 switch (pseudo_type)
7895 {
7896 case PSEUDO_FUNC_RELOC:
7897 end = input_line_pointer;
7898 if (*nextcharP != '(')
7899 {
7900 as_bad (_("Expected '('"));
7901 break;
7902 }
7903 /* Skip '('. */
7904 ++input_line_pointer;
7905 expression (e);
7906 if (*input_line_pointer != ')')
7907 {
7908 as_bad (_("Missing ')'"));
7909 goto done;
7910 }
7911 /* Skip ')'. */
7912 ++input_line_pointer;
7913 #ifdef TE_VMS
7914 if (idx == FUNC_SLOTCOUNT_RELOC)
7915 {
7916 /* @slotcount can accept any expression. Canonicalize. */
7917 e->X_add_symbol = make_expr_symbol (e);
7918 e->X_op = O_symbol;
7919 e->X_add_number = 0;
7920 }
7921 #endif
7922 if (e->X_op != O_symbol)
7923 {
7924 if (e->X_op != O_pseudo_fixup)
7925 {
7926 as_bad (_("Not a symbolic expression"));
7927 goto done;
7928 }
7929 if (idx != FUNC_LT_RELATIVE)
7930 {
7931 as_bad (_("Illegal combination of relocation functions"));
7932 goto done;
7933 }
7934 switch (S_GET_VALUE (e->X_op_symbol))
7935 {
7936 case FUNC_FPTR_RELATIVE:
7937 idx = FUNC_LT_FPTR_RELATIVE; break;
7938 case FUNC_DTP_MODULE:
7939 idx = FUNC_LT_DTP_MODULE; break;
7940 case FUNC_DTP_RELATIVE:
7941 idx = FUNC_LT_DTP_RELATIVE; break;
7942 case FUNC_TP_RELATIVE:
7943 idx = FUNC_LT_TP_RELATIVE; break;
7944 default:
7945 as_bad (_("Illegal combination of relocation functions"));
7946 goto done;
7947 }
7948 }
7949 /* Make sure gas doesn't get rid of local symbols that are used
7950 in relocs. */
7951 e->X_op = O_pseudo_fixup;
7952 e->X_op_symbol = pseudo_func[idx].u.sym;
7953 done:
7954 *nextcharP = *input_line_pointer;
7955 break;
7956
7957 case PSEUDO_FUNC_CONST:
7958 e->X_op = O_constant;
7959 e->X_add_number = pseudo_func[idx].u.ival;
7960 break;
7961
7962 case PSEUDO_FUNC_REG:
7963 e->X_op = O_register;
7964 e->X_add_number = pseudo_func[idx].u.ival;
7965 break;
7966
7967 default:
7968 return 0;
7969 }
7970 return 1;
7971 }
7972
7973 /* first see if NAME is a known register name: */
7974 sym = str_hash_find (md.reg_hash, name);
7975 if (sym)
7976 {
7977 e->X_op = O_register;
7978 e->X_add_number = S_GET_VALUE (sym);
7979 return 1;
7980 }
7981
7982 cdesc = str_hash_find (md.const_hash, name);
7983 if (cdesc)
7984 {
7985 e->X_op = O_constant;
7986 e->X_add_number = cdesc->value;
7987 return 1;
7988 }
7989
7990 /* check for inN, locN, or outN: */
7991 idx = 0;
7992 switch (name[0])
7993 {
7994 case 'i':
7995 if (name[1] == 'n' && ISDIGIT (name[2]))
7996 {
7997 dr = &md.in;
7998 idx = 2;
7999 }
8000 break;
8001
8002 case 'l':
8003 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8004 {
8005 dr = &md.loc;
8006 idx = 3;
8007 }
8008 break;
8009
8010 case 'o':
8011 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8012 {
8013 dr = &md.out;
8014 idx = 3;
8015 }
8016 break;
8017
8018 default:
8019 break;
8020 }
8021
8022 /* Ignore register numbers with leading zeroes, except zero itself. */
8023 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8024 {
8025 unsigned long regnum;
8026
8027 /* The name is inN, locN, or outN; parse the register number. */
8028 regnum = strtoul (name + idx, &end, 10);
8029 if (end > name + idx && *end == '\0' && regnum < 96)
8030 {
8031 if (regnum >= dr->num_regs)
8032 {
8033 if (!dr->num_regs)
8034 as_bad (_("No current frame"));
8035 else
8036 as_bad (_("Register number out of range 0..%u"),
8037 dr->num_regs - 1);
8038 regnum = 0;
8039 }
8040 e->X_op = O_register;
8041 e->X_add_number = dr->base + regnum;
8042 return 1;
8043 }
8044 }
8045
8046 end = xstrdup (name);
8047 name = ia64_canonicalize_symbol_name (end);
8048 if ((dr = str_hash_find (md.dynreg_hash, name)))
8049 {
8050 /* We've got ourselves the name of a rotating register set.
8051 Store the base register number in the low 16 bits of
8052 X_add_number and the size of the register set in the top 16
8053 bits. */
8054 e->X_op = O_register;
8055 e->X_add_number = dr->base | (dr->num_regs << 16);
8056 free (end);
8057 return 1;
8058 }
8059 free (end);
8060 return 0;
8061 }
8062
8063 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
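/* E.g. `in0#' names the symbol `in0' rather than the stacked register in0;
   the trailing `#' is stripped here (illustrative example).  */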
8064
8065 char *
8066 ia64_canonicalize_symbol_name (char *name)
8067 {
8068 size_t len = strlen (name), full = len;
8069
8070 while (len > 0 && name[len - 1] == '#')
8071 --len;
8072 if (len <= 0)
8073 {
8074 if (full > 0)
8075 as_bad (_("Standalone `#' is illegal"));
8076 }
8077 else if (len < full - 1)
8078 as_warn (_("Redundant `#' suffix operators"));
8079 name[len] = '\0';
8080 return name;
8081 }
8082
8083 /* Return true if idesc is a conditional branch instruction. This excludes
8084 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8085 because they always read/write resources regardless of the value of the
8086 qualifying predicate. br.ia must always use p0, and hence is always
8087 taken. Thus this function returns true for branches which can fall
8088 through, and which use no resources if they do fall through. */
8089
8090 static int
8091 is_conditional_branch (struct ia64_opcode *idesc)
8092 {
8093 /* br is a conditional branch. Everything that starts with br. except
8094 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8095 Everything that starts with brl is a conditional branch. */
8096 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8097 && (idesc->name[2] == '\0'
8098 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8099 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8100 || idesc->name[2] == 'l'
8101 /* br.cond, br.call, br.clr */
8102 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8103 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8104 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8105 }
8106
8107 /* Return whether the given opcode is a taken branch. If there's any doubt,
8108 returns zero. */
8109
8110 static int
8111 is_taken_branch (struct ia64_opcode *idesc)
8112 {
8113 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8114 || startswith (idesc->name, "br.ia"));
8115 }
8116
8117 /* Return whether the given opcode is an interruption or rfi. If there's any
8118 doubt, returns zero. */
8119
8120 static int
8121 is_interruption_or_rfi (struct ia64_opcode *idesc)
8122 {
8123 if (strcmp (idesc->name, "rfi") == 0)
8124 return 1;
8125 return 0;
8126 }
8127
8128 /* Returns the index of the given dependency in the opcode's list of chks, or
8129 -1 if there is no dependency. */
8130
8131 static int
8132 depends_on (int depind, struct ia64_opcode *idesc)
8133 {
8134 int i;
8135 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8136 for (i = 0; i < dep->nchks; i++)
8137 {
8138 if (depind == DEP (dep->chks[i]))
8139 return i;
8140 }
8141 return -1;
8142 }
8143
8144 /* Determine a set of specific resources used for a particular resource
8145 class. Returns the number of specific resources identified. For those
8146 cases which are not determinable statically, the resource returned is
8147 marked nonspecific.
8148
8149 Meanings of value in 'NOTE':
8150 1) only read/write when the register number is explicitly encoded in the
8151 insn.
8152 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8153 accesses CFM when qualifying predicate is in the rotating region.
8154 3) general register value is used to specify an indirect register; not
8155 determinable statically.
8156 4) only read the given resource when bits 7:0 of the indirect index
8157 register value does not match the register number of the resource; not
8158 determinable statically.
8159 5) all rules are implementation specific.
8160 6) only when both the index specified by the reader and the index specified
8161 by the writer have the same value in bits 63:61; not determinable
8162 statically.
8163 7) only access the specified resource when the corresponding mask bit is
8164 set
8165 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8166 only read when these insns reference FR2-31
8167 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8168 written when these insns write FR32-127
8169 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8170 instruction
8171 11) The target predicates are written independently of PR[qp], but source
8172 registers are only read if PR[qp] is true. Since the state of PR[qp]
8173 cannot statically be determined, all source registers are marked used.
8174 12) This insn only reads the specified predicate register when that
8175 register is the PR[qp].
8176 13) This reference to ld-c only applies to the GR whose value is loaded
8177 with data returned from memory, not the post-incremented address register.
8178 14) The RSE resource includes the implementation-specific RSE internal
8179 state resources. At least one (and possibly more) of these resources are
8180 read by each instruction listed in IC:rse-readers. At least one (and
8181 possibly more) of these resources are written by each insn listed in
8182 IC:rse-writers.
8183 15+16) Represents reserved instructions, which the assembler does not
8184 generate.
8185 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8186 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8187
8188 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8189 this code; there are no dependency violations based on memory access.
8190 */
8191
8192 #define MAX_SPECS 256
8193 #define DV_CHK 1
8194 #define DV_REG 0
8195
8196 static int
8197 specify_resource (const struct ia64_dependency *dep,
8198 struct ia64_opcode *idesc,
8199 /* is this a DV chk or a DV reg? */
8200 int type,
8201 /* returned specific resources */
8202 struct rsrc specs[MAX_SPECS],
8203 /* resource note for this insn's usage */
8204 int note,
8205 /* which execution path to examine */
8206 int path)
8207 {
8208 int count = 0;
8209 int i;
8210 int rsrc_write = 0;
8211 struct rsrc tmpl;
8212
8213 if (dep->mode == IA64_DV_WAW
8214 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8215 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8216 rsrc_write = 1;
8217
8218 /* template for any resources we identify */
8219 tmpl.dependency = dep;
8220 tmpl.note = note;
8221 tmpl.insn_srlz = tmpl.data_srlz = 0;
8222 tmpl.qp_regno = CURR_SLOT.qp_regno;
8223 tmpl.link_to_qp_branch = 1;
8224 tmpl.mem_offset.hint = 0;
8225 tmpl.mem_offset.offset = 0;
8226 tmpl.mem_offset.base = 0;
8227 tmpl.specific = 1;
8228 tmpl.index = -1;
8229 tmpl.cmp_type = CMP_NONE;
8230 tmpl.depind = 0;
8231 tmpl.file = NULL;
8232 tmpl.line = 0;
8233 tmpl.path = 0;
8234
8235 #define UNHANDLED \
8236 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8237 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8238 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8239
8240 /* we don't need to track these */
8241 if (dep->semantics == IA64_DVS_NONE)
8242 return 0;
8243
8244 switch (dep->specifier)
8245 {
8246 case IA64_RS_AR_K:
8247 if (note == 1)
8248 {
8249 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8250 {
8251 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8252 if (regno >= 0 && regno <= 7)
8253 {
8254 specs[count] = tmpl;
8255 specs[count++].index = regno;
8256 }
8257 }
8258 }
8259 else if (note == 0)
8260 {
8261 for (i = 0; i < 8; i++)
8262 {
8263 specs[count] = tmpl;
8264 specs[count++].index = i;
8265 }
8266 }
8267 else
8268 {
8269 UNHANDLED;
8270 }
8271 break;
8272
8273 case IA64_RS_AR_UNAT:
8274 /* This is a mov =AR or mov AR= instruction. */
8275 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8276 {
8277 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8278 if (regno == AR_UNAT)
8279 {
8280 specs[count++] = tmpl;
8281 }
8282 }
8283 else
8284 {
8285 /* This is a spill/fill, or other instruction that modifies the
8286 unat register. */
8287
8288 /* Unless we can determine the specific bits used, mark the whole
8289 thing; bits 8:3 of the memory address indicate the bit used in
8290 UNAT. The .mem.offset hint may be used to eliminate a small
8291 subset of conflicts. */
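	      /* Worked example (illustrative only): a spill to address
		 0x1048 has ((0x1048 >> 3) & 0x3F) == 0x09, so it would
		 touch UNAT bit 9; the same computation is applied below
		 when a .mem.offset hint is available.  */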
8292 specs[count] = tmpl;
8293 if (md.mem_offset.hint)
8294 {
8295 if (md.debug_dv)
8296 fprintf (stderr, " Using hint for spill/fill\n");
8297 /* The index isn't actually used; just set it to something
8298 approximating the bit index. */
8299 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8300 specs[count].mem_offset.hint = 1;
8301 specs[count].mem_offset.offset = md.mem_offset.offset;
8302 specs[count++].mem_offset.base = md.mem_offset.base;
8303 }
8304 else
8305 {
8306 specs[count++].specific = 0;
8307 }
8308 }
8309 break;
8310
8311 case IA64_RS_AR:
8312 if (note == 1)
8313 {
8314 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8315 {
8316 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8317 if ((regno >= 8 && regno <= 15)
8318 || (regno >= 20 && regno <= 23)
8319 || (regno >= 31 && regno <= 39)
8320 || (regno >= 41 && regno <= 47)
8321 || (regno >= 67 && regno <= 111))
8322 {
8323 specs[count] = tmpl;
8324 specs[count++].index = regno;
8325 }
8326 }
8327 }
8328 else
8329 {
8330 UNHANDLED;
8331 }
8332 break;
8333
8334 case IA64_RS_ARb:
8335 if (note == 1)
8336 {
8337 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8338 {
8339 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8340 if ((regno >= 48 && regno <= 63)
8341 || (regno >= 112 && regno <= 127))
8342 {
8343 specs[count] = tmpl;
8344 specs[count++].index = regno;
8345 }
8346 }
8347 }
8348 else if (note == 0)
8349 {
8350 for (i = 48; i < 64; i++)
8351 {
8352 specs[count] = tmpl;
8353 specs[count++].index = i;
8354 }
8355 for (i = 112; i < 128; i++)
8356 {
8357 specs[count] = tmpl;
8358 specs[count++].index = i;
8359 }
8360 }
8361 else
8362 {
8363 UNHANDLED;
8364 }
8365 break;
8366
8367 case IA64_RS_BR:
8368 if (note != 1)
8369 {
8370 UNHANDLED;
8371 }
8372 else
8373 {
8374 if (rsrc_write)
8375 {
8376 for (i = 0; i < idesc->num_outputs; i++)
8377 if (idesc->operands[i] == IA64_OPND_B1
8378 || idesc->operands[i] == IA64_OPND_B2)
8379 {
8380 specs[count] = tmpl;
8381 specs[count++].index =
8382 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8383 }
8384 }
8385 else
8386 {
8387 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8388 if (idesc->operands[i] == IA64_OPND_B1
8389 || idesc->operands[i] == IA64_OPND_B2)
8390 {
8391 specs[count] = tmpl;
8392 specs[count++].index =
8393 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8394 }
8395 }
8396 }
8397 break;
8398
8399 case IA64_RS_CPUID: /* four or more registers */
8400 if (note == 3)
8401 {
8402 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8403 {
8404 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8405 if (regno >= 0 && regno < NELEMS (gr_values)
8406 && KNOWN (regno))
8407 {
8408 specs[count] = tmpl;
8409 specs[count++].index = gr_values[regno].value & 0xFF;
8410 }
8411 else
8412 {
8413 specs[count] = tmpl;
8414 specs[count++].specific = 0;
8415 }
8416 }
8417 }
8418 else
8419 {
8420 UNHANDLED;
8421 }
8422 break;
8423
8424 case IA64_RS_DBR: /* four or more registers */
8425 if (note == 3)
8426 {
8427 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8428 {
8429 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8430 if (regno >= 0 && regno < NELEMS (gr_values)
8431 && KNOWN (regno))
8432 {
8433 specs[count] = tmpl;
8434 specs[count++].index = gr_values[regno].value & 0xFF;
8435 }
8436 else
8437 {
8438 specs[count] = tmpl;
8439 specs[count++].specific = 0;
8440 }
8441 }
8442 }
8443 else if (note == 0 && !rsrc_write)
8444 {
8445 specs[count] = tmpl;
8446 specs[count++].specific = 0;
8447 }
8448 else
8449 {
8450 UNHANDLED;
8451 }
8452 break;
8453
8454 case IA64_RS_IBR: /* four or more registers */
8455 if (note == 3)
8456 {
8457 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8458 {
8459 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8460 if (regno >= 0 && regno < NELEMS (gr_values)
8461 && KNOWN (regno))
8462 {
8463 specs[count] = tmpl;
8464 specs[count++].index = gr_values[regno].value & 0xFF;
8465 }
8466 else
8467 {
8468 specs[count] = tmpl;
8469 specs[count++].specific = 0;
8470 }
8471 }
8472 }
8473 else
8474 {
8475 UNHANDLED;
8476 }
8477 break;
8478
8479 case IA64_RS_MSR:
8480 if (note == 5)
8481 {
8482 /* These are implementation specific. Force all references to
8483 conflict with all other references. */
8484 specs[count] = tmpl;
8485 specs[count++].specific = 0;
8486 }
8487 else
8488 {
8489 UNHANDLED;
8490 }
8491 break;
8492
8493 case IA64_RS_PKR: /* 16 or more registers */
8494 if (note == 3 || note == 4)
8495 {
8496 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8497 {
8498 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8499 if (regno >= 0 && regno < NELEMS (gr_values)
8500 && KNOWN (regno))
8501 {
8502 if (note == 3)
8503 {
8504 specs[count] = tmpl;
8505 specs[count++].index = gr_values[regno].value & 0xFF;
8506 }
8507 else
8508 for (i = 0; i < NELEMS (gr_values); i++)
8509 {
8510 /* Uses all registers *except* the one in R3. */
8511 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8512 {
8513 specs[count] = tmpl;
8514 specs[count++].index = i;
8515 }
8516 }
8517 }
8518 else
8519 {
8520 specs[count] = tmpl;
8521 specs[count++].specific = 0;
8522 }
8523 }
8524 }
8525 else if (note == 0)
8526 {
8527 /* probe et al. */
8528 specs[count] = tmpl;
8529 specs[count++].specific = 0;
8530 }
8531 break;
8532
8533 case IA64_RS_PMC: /* four or more registers */
8534 if (note == 3)
8535 {
8536 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8537 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8538
8539 {
8540 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8541 ? 1 : !rsrc_write);
8542 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8543 if (regno >= 0 && regno < NELEMS (gr_values)
8544 && KNOWN (regno))
8545 {
8546 specs[count] = tmpl;
8547 specs[count++].index = gr_values[regno].value & 0xFF;
8548 }
8549 else
8550 {
8551 specs[count] = tmpl;
8552 specs[count++].specific = 0;
8553 }
8554 }
8555 }
8556 else
8557 {
8558 UNHANDLED;
8559 }
8560 break;
8561
8562 case IA64_RS_PMD: /* four or more registers */
8563 if (note == 3)
8564 {
8565 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8566 {
8567 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8568 if (regno >= 0 && regno < NELEMS (gr_values)
8569 && KNOWN (regno))
8570 {
8571 specs[count] = tmpl;
8572 specs[count++].index = gr_values[regno].value & 0xFF;
8573 }
8574 else
8575 {
8576 specs[count] = tmpl;
8577 specs[count++].specific = 0;
8578 }
8579 }
8580 }
8581 else
8582 {
8583 UNHANDLED;
8584 }
8585 break;
8586
8587 case IA64_RS_RR: /* eight registers */
8588 if (note == 6)
8589 {
8590 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8591 {
8592 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8593 if (regno >= 0 && regno < NELEMS (gr_values)
8594 && KNOWN (regno))
8595 {
8596 specs[count] = tmpl;
8597 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8598 }
8599 else
8600 {
8601 specs[count] = tmpl;
8602 specs[count++].specific = 0;
8603 }
8604 }
8605 }
8606 else if (note == 0 && !rsrc_write)
8607 {
8608 specs[count] = tmpl;
8609 specs[count++].specific = 0;
8610 }
8611 else
8612 {
8613 UNHANDLED;
8614 }
8615 break;
8616
8617 case IA64_RS_CR_IRR:
8618 if (note == 0)
8619 {
8620 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8621 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8622 if (rsrc_write
8623 && idesc->operands[1] == IA64_OPND_CR3
8624 && regno == CR_IVR)
8625 {
8626 for (i = 0; i < 4; i++)
8627 {
8628 specs[count] = tmpl;
8629 specs[count++].index = CR_IRR0 + i;
8630 }
8631 }
8632 }
8633 else if (note == 1)
8634 {
8635 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8636 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8637 && regno >= CR_IRR0
8638 && regno <= CR_IRR3)
8639 {
8640 specs[count] = tmpl;
8641 specs[count++].index = regno;
8642 }
8643 }
8644 else
8645 {
8646 UNHANDLED;
8647 }
8648 break;
8649
8650 case IA64_RS_CR_IIB:
8651 if (note != 0)
8652 {
8653 UNHANDLED;
8654 }
8655 else
8656 {
8657 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8658 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8659 && (regno == CR_IIB0 || regno == CR_IIB1))
8660 {
8661 specs[count] = tmpl;
8662 specs[count++].index = regno;
8663 }
8664 }
8665 break;
8666
8667 case IA64_RS_CR_LRR:
8668 if (note != 1)
8669 {
8670 UNHANDLED;
8671 }
8672 else
8673 {
8674 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8675 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8676 && (regno == CR_LRR0 || regno == CR_LRR1))
8677 {
8678 specs[count] = tmpl;
8679 specs[count++].index = regno;
8680 }
8681 }
8682 break;
8683
8684 case IA64_RS_CR:
8685 if (note == 1)
8686 {
8687 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8688 {
8689 specs[count] = tmpl;
8690 specs[count++].index =
8691 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8692 }
8693 }
8694 else
8695 {
8696 UNHANDLED;
8697 }
8698 break;
8699
8700 case IA64_RS_DAHR:
8701 if (note == 0)
8702 {
8703 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8704 {
8705 specs[count] = tmpl;
8706 specs[count++].index =
8707 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8708 }
8709 }
8710 else
8711 {
8712 UNHANDLED;
8713 }
8714 break;
8715
8716 case IA64_RS_FR:
8717 case IA64_RS_FRb:
8718 if (note != 1)
8719 {
8720 UNHANDLED;
8721 }
8722 else if (rsrc_write)
8723 {
8724 if (dep->specifier == IA64_RS_FRb
8725 && idesc->operands[0] == IA64_OPND_F1)
8726 {
8727 specs[count] = tmpl;
8728 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8729 }
8730 }
8731 else
8732 {
8733 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8734 {
8735 if (idesc->operands[i] == IA64_OPND_F2
8736 || idesc->operands[i] == IA64_OPND_F3
8737 || idesc->operands[i] == IA64_OPND_F4)
8738 {
8739 specs[count] = tmpl;
8740 specs[count++].index =
8741 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8742 }
8743 }
8744 }
8745 break;
8746
8747 case IA64_RS_GR:
8748 if (note == 13)
8749 {
8750 /* This reference applies only to the GR whose value is loaded with
8751 data returned from memory. */
8752 specs[count] = tmpl;
8753 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8754 }
8755 else if (note == 1)
8756 {
8757 if (rsrc_write)
8758 {
8759 for (i = 0; i < idesc->num_outputs; i++)
8760 if (idesc->operands[i] == IA64_OPND_R1
8761 || idesc->operands[i] == IA64_OPND_R2
8762 || idesc->operands[i] == IA64_OPND_R3)
8763 {
8764 specs[count] = tmpl;
8765 specs[count++].index =
8766 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8767 }
8768 if (idesc->flags & IA64_OPCODE_POSTINC)
8769 for (i = 0; i < NELEMS (idesc->operands); i++)
8770 if (idesc->operands[i] == IA64_OPND_MR3)
8771 {
8772 specs[count] = tmpl;
8773 specs[count++].index =
8774 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8775 }
8776 }
8777 else
8778 {
8779 /* Look for anything that reads a GR. */
8780 for (i = 0; i < NELEMS (idesc->operands); i++)
8781 {
8782 if (idesc->operands[i] == IA64_OPND_MR3
8783 || idesc->operands[i] == IA64_OPND_CPUID_R3
8784 || idesc->operands[i] == IA64_OPND_DBR_R3
8785 || idesc->operands[i] == IA64_OPND_IBR_R3
8786 || idesc->operands[i] == IA64_OPND_MSR_R3
8787 || idesc->operands[i] == IA64_OPND_PKR_R3
8788 || idesc->operands[i] == IA64_OPND_PMC_R3
8789 || idesc->operands[i] == IA64_OPND_PMD_R3
8790 || idesc->operands[i] == IA64_OPND_DAHR_R3
8791 || idesc->operands[i] == IA64_OPND_RR_R3
8792 || ((i >= idesc->num_outputs)
8793 && (idesc->operands[i] == IA64_OPND_R1
8794 || idesc->operands[i] == IA64_OPND_R2
8795 || idesc->operands[i] == IA64_OPND_R3
8796 /* addl source register. */
8797 || idesc->operands[i] == IA64_OPND_R3_2)))
8798 {
8799 specs[count] = tmpl;
8800 specs[count++].index =
8801 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8802 }
8803 }
8804 }
8805 }
8806 else
8807 {
8808 UNHANDLED;
8809 }
8810 break;
8811
8812 /* This is the same as IA64_RS_PRr, except that the register range is
8813 from 1 - 15, and there are no rotating register reads/writes here. */
8814 case IA64_RS_PR:
8815 if (note == 0)
8816 {
8817 for (i = 1; i < 16; i++)
8818 {
8819 specs[count] = tmpl;
8820 specs[count++].index = i;
8821 }
8822 }
8823 else if (note == 7)
8824 {
8825 valueT mask = 0;
8826 /* Mark only those registers indicated by the mask. */
8827 if (rsrc_write)
8828 {
8829 mask = CURR_SLOT.opnd[2].X_add_number;
8830 for (i = 1; i < 16; i++)
8831 if (mask & ((valueT) 1 << i))
8832 {
8833 specs[count] = tmpl;
8834 specs[count++].index = i;
8835 }
8836 }
8837 else
8838 {
8839 UNHANDLED;
8840 }
8841 }
8842 else if (note == 11) /* note 11 implies note 1 as well */
8843 {
8844 if (rsrc_write)
8845 {
8846 for (i = 0; i < idesc->num_outputs; i++)
8847 {
8848 if (idesc->operands[i] == IA64_OPND_P1
8849 || idesc->operands[i] == IA64_OPND_P2)
8850 {
8851 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8852 if (regno >= 1 && regno < 16)
8853 {
8854 specs[count] = tmpl;
8855 specs[count++].index = regno;
8856 }
8857 }
8858 }
8859 }
8860 else
8861 {
8862 UNHANDLED;
8863 }
8864 }
8865 else if (note == 12)
8866 {
8867 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8868 {
8869 specs[count] = tmpl;
8870 specs[count++].index = CURR_SLOT.qp_regno;
8871 }
8872 }
8873 else if (note == 1)
8874 {
8875 if (rsrc_write)
8876 {
8877 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8878 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8879 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8880 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8881
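	      /* For the ".or.andcm" forms the first target is an OR-type
		 write and the second an AND-type write (reversed for
		 ".and.orcm"); recording cmp_type here lets resources_match
		 ignore apparent WAW conflicts between parallel compares of
		 the same type.  */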
8882 if ((idesc->operands[0] == IA64_OPND_P1
8883 || idesc->operands[0] == IA64_OPND_P2)
8884 && p1 >= 1 && p1 < 16)
8885 {
8886 specs[count] = tmpl;
8887 specs[count].cmp_type =
8888 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8889 specs[count++].index = p1;
8890 }
8891 if ((idesc->operands[1] == IA64_OPND_P1
8892 || idesc->operands[1] == IA64_OPND_P2)
8893 && p2 >= 1 && p2 < 16)
8894 {
8895 specs[count] = tmpl;
8896 specs[count].cmp_type =
8897 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8898 specs[count++].index = p2;
8899 }
8900 }
8901 else
8902 {
8903 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8904 {
8905 specs[count] = tmpl;
8906 specs[count++].index = CURR_SLOT.qp_regno;
8907 }
8908 if (idesc->operands[1] == IA64_OPND_PR)
8909 {
8910 for (i = 1; i < 16; i++)
8911 {
8912 specs[count] = tmpl;
8913 specs[count++].index = i;
8914 }
8915 }
8916 }
8917 }
8918 else
8919 {
8920 UNHANDLED;
8921 }
8922 break;
8923
8924 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8925 simplified cases of this. */
8926 case IA64_RS_PRr:
8927 if (note == 0)
8928 {
8929 for (i = 16; i < 63; i++)
8930 {
8931 specs[count] = tmpl;
8932 specs[count++].index = i;
8933 }
8934 }
8935 else if (note == 7)
8936 {
8937 valueT mask = 0;
8938 /* Mark only those registers indicated by the mask. */
8939 if (rsrc_write
8940 && idesc->operands[0] == IA64_OPND_PR)
8941 {
8942 mask = CURR_SLOT.opnd[2].X_add_number;
8943 if (mask & ((valueT) 1 << 16))
8944 for (i = 16; i < 63; i++)
8945 {
8946 specs[count] = tmpl;
8947 specs[count++].index = i;
8948 }
8949 }
8950 else if (rsrc_write
8951 && idesc->operands[0] == IA64_OPND_PR_ROT)
8952 {
8953 for (i = 16; i < 63; i++)
8954 {
8955 specs[count] = tmpl;
8956 specs[count++].index = i;
8957 }
8958 }
8959 else
8960 {
8961 UNHANDLED;
8962 }
8963 }
8964 else if (note == 11) /* note 11 implies note 1 as well */
8965 {
8966 if (rsrc_write)
8967 {
8968 for (i = 0; i < idesc->num_outputs; i++)
8969 {
8970 if (idesc->operands[i] == IA64_OPND_P1
8971 || idesc->operands[i] == IA64_OPND_P2)
8972 {
8973 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8974 if (regno >= 16 && regno < 63)
8975 {
8976 specs[count] = tmpl;
8977 specs[count++].index = regno;
8978 }
8979 }
8980 }
8981 }
8982 else
8983 {
8984 UNHANDLED;
8985 }
8986 }
8987 else if (note == 12)
8988 {
8989 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8990 {
8991 specs[count] = tmpl;
8992 specs[count++].index = CURR_SLOT.qp_regno;
8993 }
8994 }
8995 else if (note == 1)
8996 {
8997 if (rsrc_write)
8998 {
8999 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9000 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9001 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9002 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9003
9004 if ((idesc->operands[0] == IA64_OPND_P1
9005 || idesc->operands[0] == IA64_OPND_P2)
9006 && p1 >= 16 && p1 < 63)
9007 {
9008 specs[count] = tmpl;
9009 specs[count].cmp_type =
9010 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9011 specs[count++].index = p1;
9012 }
9013 if ((idesc->operands[1] == IA64_OPND_P1
9014 || idesc->operands[1] == IA64_OPND_P2)
9015 && p2 >= 16 && p2 < 63)
9016 {
9017 specs[count] = tmpl;
9018 specs[count].cmp_type =
9019 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9020 specs[count++].index = p2;
9021 }
9022 }
9023 else
9024 {
9025 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9026 {
9027 specs[count] = tmpl;
9028 specs[count++].index = CURR_SLOT.qp_regno;
9029 }
9030 if (idesc->operands[1] == IA64_OPND_PR)
9031 {
9032 for (i = 16; i < 63; i++)
9033 {
9034 specs[count] = tmpl;
9035 specs[count++].index = i;
9036 }
9037 }
9038 }
9039 }
9040 else
9041 {
9042 UNHANDLED;
9043 }
9044 break;
9045
9046 case IA64_RS_PSR:
9047 /* Verify that the instruction is using the PSR bit indicated in
9048 dep->regindex. */
9049 if (note == 0)
9050 {
9051 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9052 {
9053 if (dep->regindex < 6)
9054 {
9055 specs[count++] = tmpl;
9056 }
9057 }
9058 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9059 {
9060 if (dep->regindex < 32
9061 || dep->regindex == 35
9062 || dep->regindex == 36
9063 || (!rsrc_write && dep->regindex == PSR_CPL))
9064 {
9065 specs[count++] = tmpl;
9066 }
9067 }
9068 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9069 {
9070 if (dep->regindex < 32
9071 || dep->regindex == 35
9072 || dep->regindex == 36
9073 || (rsrc_write && dep->regindex == PSR_CPL))
9074 {
9075 specs[count++] = tmpl;
9076 }
9077 }
9078 else
9079 {
9080 /* Several PSR bits have very specific dependencies. */
9081 switch (dep->regindex)
9082 {
9083 default:
9084 specs[count++] = tmpl;
9085 break;
9086 case PSR_IC:
9087 if (rsrc_write)
9088 {
9089 specs[count++] = tmpl;
9090 }
9091 else
9092 {
9093 /* Only certain CR accesses use PSR.ic */
9094 if (idesc->operands[0] == IA64_OPND_CR3
9095 || idesc->operands[1] == IA64_OPND_CR3)
9096 {
9097 int reg_index =
9098 ((idesc->operands[0] == IA64_OPND_CR3)
9099 ? 0 : 1);
9100 int regno =
9101 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9102
9103 switch (regno)
9104 {
9105 default:
9106 break;
9107 case CR_ITIR:
9108 case CR_IFS:
9109 case CR_IIM:
9110 case CR_IIP:
9111 case CR_IPSR:
9112 case CR_ISR:
9113 case CR_IFA:
9114 case CR_IHA:
9115 case CR_IIB0:
9116 case CR_IIB1:
9117 case CR_IIPA:
9118 specs[count++] = tmpl;
9119 break;
9120 }
9121 }
9122 }
9123 break;
9124 case PSR_CPL:
9125 if (rsrc_write)
9126 {
9127 specs[count++] = tmpl;
9128 }
9129 else
9130 {
9131 /* Only some AR accesses use cpl */
9132 if (idesc->operands[0] == IA64_OPND_AR3
9133 || idesc->operands[1] == IA64_OPND_AR3)
9134 {
9135 int reg_index =
9136 ((idesc->operands[0] == IA64_OPND_AR3)
9137 ? 0 : 1);
9138 int regno =
9139 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9140
9141 if (regno == AR_ITC
9142 || regno == AR_RUC
9143 || (reg_index == 0
9144 && (regno == AR_RSC
9145 || (regno >= AR_K0
9146 && regno <= AR_K7))))
9147 {
9148 specs[count++] = tmpl;
9149 }
9150 }
9151 else
9152 {
9153 specs[count++] = tmpl;
9154 }
9155 break;
9156 }
9157 }
9158 }
9159 }
9160 else if (note == 7)
9161 {
9162 valueT mask = 0;
9163 if (idesc->operands[0] == IA64_OPND_IMMU24)
9164 {
9165 mask = CURR_SLOT.opnd[0].X_add_number;
9166 }
9167 else
9168 {
9169 UNHANDLED;
9170 }
9171 if (mask & ((valueT) 1 << dep->regindex))
9172 {
9173 specs[count++] = tmpl;
9174 }
9175 }
9176 else if (note == 8)
9177 {
9178 int min = dep->regindex == PSR_DFL ? 2 : 32;
9179 int max = dep->regindex == PSR_DFL ? 31 : 127;
9180 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9181 for (i = 0; i < NELEMS (idesc->operands); i++)
9182 {
9183 if (idesc->operands[i] == IA64_OPND_F1
9184 || idesc->operands[i] == IA64_OPND_F2
9185 || idesc->operands[i] == IA64_OPND_F3
9186 || idesc->operands[i] == IA64_OPND_F4)
9187 {
9188 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9189 if (reg >= min && reg <= max)
9190 {
9191 specs[count++] = tmpl;
9192 }
9193 }
9194 }
9195 }
9196 else if (note == 9)
9197 {
9198 int min = dep->regindex == PSR_MFL ? 2 : 32;
9199 int max = dep->regindex == PSR_MFL ? 31 : 127;
9200 /* mfh is read on writes to FR32-127; mfl is read on writes to
9201 FR2-31 */
9202 for (i = 0; i < idesc->num_outputs; i++)
9203 {
9204 if (idesc->operands[i] == IA64_OPND_F1)
9205 {
9206 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9207 if (reg >= min && reg <= max)
9208 {
9209 specs[count++] = tmpl;
9210 }
9211 }
9212 }
9213 }
9214 else if (note == 10)
9215 {
9216 for (i = 0; i < NELEMS (idesc->operands); i++)
9217 {
9218 if (idesc->operands[i] == IA64_OPND_R1
9219 || idesc->operands[i] == IA64_OPND_R2
9220 || idesc->operands[i] == IA64_OPND_R3)
9221 {
9222 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9223 if (regno >= 16 && regno <= 31)
9224 {
9225 specs[count++] = tmpl;
9226 }
9227 }
9228 }
9229 }
9230 else
9231 {
9232 UNHANDLED;
9233 }
9234 break;
9235
9236 case IA64_RS_AR_FPSR:
9237 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9238 {
9239 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9240 if (regno == AR_FPSR)
9241 {
9242 specs[count++] = tmpl;
9243 }
9244 }
9245 else
9246 {
9247 specs[count++] = tmpl;
9248 }
9249 break;
9250
9251 case IA64_RS_ARX:
9252 /* Handle all AR[REG] resources */
9253 if (note == 0 || note == 1)
9254 {
9255 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9256 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9257 && regno == dep->regindex)
9258 {
9259 specs[count++] = tmpl;
9260 }
9261 /* other AR[REG] resources may be affected by AR accesses */
9262 else if (idesc->operands[0] == IA64_OPND_AR3)
9263 {
9264 /* AR[] writes */
9265 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9266 switch (dep->regindex)
9267 {
9268 default:
9269 break;
9270 case AR_BSP:
9271 case AR_RNAT:
9272 if (regno == AR_BSPSTORE)
9273 {
9274 specs[count++] = tmpl;
9275 }
9276 /* Fall through. */
9277 case AR_RSC:
9278 if (!rsrc_write &&
9279 (regno == AR_BSPSTORE
9280 || regno == AR_RNAT))
9281 {
9282 specs[count++] = tmpl;
9283 }
9284 break;
9285 }
9286 }
9287 else if (idesc->operands[1] == IA64_OPND_AR3)
9288 {
9289 /* AR[] reads */
9290 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9291 switch (dep->regindex)
9292 {
9293 default:
9294 break;
9295 case AR_RSC:
9296 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9297 {
9298 specs[count++] = tmpl;
9299 }
9300 break;
9301 }
9302 }
9303 else
9304 {
9305 specs[count++] = tmpl;
9306 }
9307 }
9308 else
9309 {
9310 UNHANDLED;
9311 }
9312 break;
9313
9314 case IA64_RS_CRX:
9315 /* Handle all CR[REG] resources.
9316 ??? FIXME: Rule 17 isn't really handled correctly. */
9317 if (note == 0 || note == 1 || note == 17)
9318 {
9319 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9320 {
9321 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9322 if (regno == dep->regindex)
9323 {
9324 specs[count++] = tmpl;
9325 }
9326 else if (!rsrc_write)
9327 {
9328 /* Reads from CR[IVR] affect other resources. */
9329 if (regno == CR_IVR)
9330 {
9331 if ((dep->regindex >= CR_IRR0
9332 && dep->regindex <= CR_IRR3)
9333 || dep->regindex == CR_TPR)
9334 {
9335 specs[count++] = tmpl;
9336 }
9337 }
9338 }
9339 }
9340 else
9341 {
9342 specs[count++] = tmpl;
9343 }
9344 }
9345 else
9346 {
9347 UNHANDLED;
9348 }
9349 break;
9350
9351 case IA64_RS_INSERVICE:
9352 /* look for write of EOI (67) or read of IVR (65) */
9353 if ((idesc->operands[0] == IA64_OPND_CR3
9354 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9355 || (idesc->operands[1] == IA64_OPND_CR3
9356 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9357 {
9358 specs[count++] = tmpl;
9359 }
9360 break;
9361
9362 case IA64_RS_GR0:
9363 if (note == 1)
9364 {
9365 specs[count++] = tmpl;
9366 }
9367 else
9368 {
9369 UNHANDLED;
9370 }
9371 break;
9372
9373 case IA64_RS_CFM:
9374 if (note != 2)
9375 {
9376 specs[count++] = tmpl;
9377 }
9378 else
9379 {
9380 /* Check if any of the registers accessed are in the rotating region.
9381 mov to/from pr accesses CFM only when qp_regno is in the rotating
9382 region */
9383 for (i = 0; i < NELEMS (idesc->operands); i++)
9384 {
9385 if (idesc->operands[i] == IA64_OPND_R1
9386 || idesc->operands[i] == IA64_OPND_R2
9387 || idesc->operands[i] == IA64_OPND_R3)
9388 {
9389 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9390 /* Assumes that md.rot.num_regs is always valid */
9391 if (md.rot.num_regs > 0
9392 && num > 31
9393 && num < 31 + md.rot.num_regs)
9394 {
9395 specs[count] = tmpl;
9396 specs[count++].specific = 0;
9397 }
9398 }
9399 else if (idesc->operands[i] == IA64_OPND_F1
9400 || idesc->operands[i] == IA64_OPND_F2
9401 || idesc->operands[i] == IA64_OPND_F3
9402 || idesc->operands[i] == IA64_OPND_F4)
9403 {
9404 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9405 if (num > 31)
9406 {
9407 specs[count] = tmpl;
9408 specs[count++].specific = 0;
9409 }
9410 }
9411 else if (idesc->operands[i] == IA64_OPND_P1
9412 || idesc->operands[i] == IA64_OPND_P2)
9413 {
9414 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9415 if (num > 15)
9416 {
9417 specs[count] = tmpl;
9418 specs[count++].specific = 0;
9419 }
9420 }
9421 }
9422 if (CURR_SLOT.qp_regno > 15)
9423 {
9424 specs[count] = tmpl;
9425 specs[count++].specific = 0;
9426 }
9427 }
9428 break;
9429
9430 /* This is the same as IA64_RS_PRr, except simplified to account for
9431 the fact that there is only one register. */
9432 case IA64_RS_PR63:
9433 if (note == 0)
9434 {
9435 specs[count++] = tmpl;
9436 }
9437 else if (note == 7)
9438 {
9439 valueT mask = 0;
9440 if (idesc->operands[2] == IA64_OPND_IMM17)
9441 mask = CURR_SLOT.opnd[2].X_add_number;
9442 if (mask & ((valueT) 1 << 63))
9443 specs[count++] = tmpl;
9444 }
9445 else if (note == 11)
9446 {
9447 if ((idesc->operands[0] == IA64_OPND_P1
9448 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9449 || (idesc->operands[1] == IA64_OPND_P2
9450 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9451 {
9452 specs[count++] = tmpl;
9453 }
9454 }
9455 else if (note == 12)
9456 {
9457 if (CURR_SLOT.qp_regno == 63)
9458 {
9459 specs[count++] = tmpl;
9460 }
9461 }
9462 else if (note == 1)
9463 {
9464 if (rsrc_write)
9465 {
9466 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9467 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9468 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9469 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9470
9471 if (p1 == 63
9472 && (idesc->operands[0] == IA64_OPND_P1
9473 || idesc->operands[0] == IA64_OPND_P2))
9474 {
9475 specs[count] = tmpl;
9476 specs[count++].cmp_type =
9477 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9478 }
9479 if (p2 == 63
9480 && (idesc->operands[1] == IA64_OPND_P1
9481 || idesc->operands[1] == IA64_OPND_P2))
9482 {
9483 specs[count] = tmpl;
9484 specs[count++].cmp_type =
9485 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9486 }
9487 }
9488 else
9489 {
9490 if (CURR_SLOT.qp_regno == 63)
9491 {
9492 specs[count++] = tmpl;
9493 }
9494 }
9495 }
9496 else
9497 {
9498 UNHANDLED;
9499 }
9500 break;
9501
9502 case IA64_RS_RSE:
9503 /* FIXME we can identify some individual RSE written resources, but RSE
9504 read resources have not yet been completely identified, so for now
9505 treat RSE as a single resource */
9506 if (startswith (idesc->name, "mov"))
9507 {
9508 if (rsrc_write)
9509 {
9510 if (idesc->operands[0] == IA64_OPND_AR3
9511 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9512 {
9513 specs[count++] = tmpl;
9514 }
9515 }
9516 else
9517 {
9518 if (idesc->operands[0] == IA64_OPND_AR3)
9519 {
9520 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9521 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9522 {
9523 specs[count++] = tmpl;
9524 }
9525 }
9526 else if (idesc->operands[1] == IA64_OPND_AR3)
9527 {
9528 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9529 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9530 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9531 {
9532 specs[count++] = tmpl;
9533 }
9534 }
9535 }
9536 }
9537 else
9538 {
9539 specs[count++] = tmpl;
9540 }
9541 break;
9542
9543 case IA64_RS_ANY:
9544 /* FIXME -- do any of these need to be non-specific? */
9545 specs[count++] = tmpl;
9546 break;
9547
9548 default:
9549 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9550 break;
9551 }
9552
9553 return count;
9554 }
9555
9556 /* Clear branch flags on marked resources. This breaks the link between the
9557 QP of the marking instruction and a subsequent branch on the same QP. */
9558
9559 static void
9560 clear_qp_branch_flag (valueT mask)
9561 {
9562 int i;
9563 for (i = 0; i < regdepslen; i++)
9564 {
9565 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9566 if ((bit & mask) != 0)
9567 {
9568 regdeps[i].link_to_qp_branch = 0;
9569 }
9570 }
9571 }
9572
9573 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9574 any mutexes which contain one of the PRs and create new ones when
9575 needed. */
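/* Illustrative example: if {p1,p2,p3} are currently recorded as mutex on the
   same execution path and MASK covers p2 and p3, the old relation is
   discarded (only p1 would remain in it) and a fresh {p2,p3} mutex is
   registered in its place.  */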
9576
9577 static int
9578 update_qp_mutex (valueT mask)
9579 {
9580 int i;
9581 int add = 0;
9582
9583 i = 0;
9584 while (i < qp_mutexeslen)
9585 {
9586 if ((qp_mutexes[i].prmask & mask) != 0)
9587 {
9588 /* If it destroys and creates the same mutex, do nothing. */
9589 if (qp_mutexes[i].prmask == mask
9590 && qp_mutexes[i].path == md.path)
9591 {
9592 i++;
9593 add = -1;
9594 }
9595 else
9596 {
9597 int keep = 0;
9598
9599 if (md.debug_dv)
9600 {
9601 fprintf (stderr, " Clearing mutex relation");
9602 print_prmask (qp_mutexes[i].prmask);
9603 fprintf (stderr, "\n");
9604 }
9605
9606 /* Deal with an old mutex containing three or more PRs only if
9607 the new mutex is on the same execution path as it.
9608
9609 FIXME: The 3+ mutex support is incomplete.
9610 dot_pred_rel () may be a better place to fix it. */
9611 if (qp_mutexes[i].path == md.path)
9612 {
9613 /* If it is a proper subset of the mutex, create a
9614 new mutex. */
9615 if (add == 0
9616 && (qp_mutexes[i].prmask & mask) == mask)
9617 add = 1;
9618
9619 qp_mutexes[i].prmask &= ~mask;
9620 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9621 {
9622 /* Keep the trimmed mutex if more than one
9623 PR is left in it. */
9624 keep = 1;
9625 i++;
9626 }
9627 }
9628
9629 if (keep == 0)
9630 /* Remove the mutex. */
9631 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9632 }
9633 }
9634 else
9635 ++i;
9636 }
9637
9638 if (add == 1)
9639 add_qp_mutex (mask);
9640
9641 return add;
9642 }
9643
9644 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9645
9646 Any change to a PR clears the mutex relations which include that PR. */
9647
9648 static void
9649 clear_qp_mutex (valueT mask)
9650 {
9651 int i;
9652
9653 i = 0;
9654 while (i < qp_mutexeslen)
9655 {
9656 if ((qp_mutexes[i].prmask & mask) != 0)
9657 {
9658 if (md.debug_dv)
9659 {
9660 fprintf (stderr, " Clearing mutex relation");
9661 print_prmask (qp_mutexes[i].prmask);
9662 fprintf (stderr, "\n");
9663 }
9664 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9665 }
9666 else
9667 ++i;
9668 }
9669 }
9670
9671 /* Clear implies relations which contain PRs in the given masks.
9672 P1_MASK indicates the source of the implies relation, while P2_MASK
9673 indicates the implied PR. */
9674
9675 static void
9676 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9677 {
9678 int i;
9679
9680 i = 0;
9681 while (i < qp_implieslen)
9682 {
9683 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9684 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9685 {
9686 if (md.debug_dv)
9687 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9688 qp_implies[i].p1, qp_implies[i].p2);
9689 qp_implies[i] = qp_implies[--qp_implieslen];
9690 }
9691 else
9692 ++i;
9693 }
9694 }
9695
9696 /* Add the PRs specified to the list of implied relations. */
9697
9698 static void
9699 add_qp_imply (int p1, int p2)
9700 {
9701 valueT mask;
9702 valueT bit;
9703 int i;
9704
9705 /* p0 is not meaningful here. */
9706 if (p1 == 0 || p2 == 0)
9707 abort ();
9708
9709 if (p1 == p2)
9710 return;
9711
9712 /* If it exists already, ignore it. */
9713 for (i = 0; i < qp_implieslen; i++)
9714 {
9715 if (qp_implies[i].p1 == p1
9716 && qp_implies[i].p2 == p2
9717 && qp_implies[i].path == md.path
9718 && !qp_implies[i].p2_branched)
9719 return;
9720 }
9721
9722 if (qp_implieslen == qp_impliestotlen)
9723 {
9724 qp_impliestotlen += 20;
9725 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9726 }
9727 if (md.debug_dv)
9728 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9729 qp_implies[qp_implieslen].p1 = p1;
9730 qp_implies[qp_implieslen].p2 = p2;
9731 qp_implies[qp_implieslen].path = md.path;
9732 qp_implies[qp_implieslen++].p2_branched = 0;
9733
9734 /* Add in the implied transitive relations; for everything that p2 implies,
9735 make p1 imply that, too; for everything that implies p1, make it imply p2
9736 as well. */
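  /* Illustrative example: after add_qp_imply (p1, p2), if p2 already implied
     p4 then p1 -> p4 is recorded too, and if {p2, p5} were mutex then
     {p1, p5} is registered as a mutex below as well.  */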
9737 for (i = 0; i < qp_implieslen; i++)
9738 {
9739 if (qp_implies[i].p1 == p2)
9740 add_qp_imply (p1, qp_implies[i].p2);
9741 if (qp_implies[i].p2 == p1)
9742 add_qp_imply (qp_implies[i].p1, p2);
9743 }
9744 /* Add in mutex relations implied by this implies relation; for each mutex
9745 relation containing p2, duplicate it and replace p2 with p1. */
9746 bit = (valueT) 1 << p1;
9747 mask = (valueT) 1 << p2;
9748 for (i = 0; i < qp_mutexeslen; i++)
9749 {
9750 if (qp_mutexes[i].prmask & mask)
9751 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9752 }
9753 }
9754
9755 /* Add the PRs specified in the mask to the mutex list; this means that only
9756 one of the PRs can be true at any time. PR0 should never be included in
9757 the mask. */
9758
9759 static void
9760 add_qp_mutex (valueT mask)
9761 {
9762 if (mask & 0x1)
9763 abort ();
9764
9765 if (qp_mutexeslen == qp_mutexestotlen)
9766 {
9767 qp_mutexestotlen += 20;
9768 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9769 }
9770 if (md.debug_dv)
9771 {
9772 fprintf (stderr, " Registering mutex on");
9773 print_prmask (mask);
9774 fprintf (stderr, "\n");
9775 }
9776 qp_mutexes[qp_mutexeslen].path = md.path;
9777 qp_mutexes[qp_mutexeslen++].prmask = mask;
9778 }
9779
9780 static int
9781 has_suffix_p (const char *name, const char *suffix)
9782 {
9783 size_t namelen = strlen (name);
9784 size_t sufflen = strlen (suffix);
9785
9786 if (namelen <= sufflen)
9787 return 0;
9788 return strcmp (name + namelen - sufflen, suffix) == 0;
9789 }
9790
9791 static void
9792 clear_register_values (void)
9793 {
9794 int i;
9795 if (md.debug_dv)
9796 fprintf (stderr, " Clearing register values\n");
9797 for (i = 1; i < NELEMS (gr_values); i++)
9798 gr_values[i].known = 0;
9799 }
9800
9801 /* Keep track of register values/changes which affect DV tracking.
9802
9803 Optimization note: we should add a flag to the classes of insns that
9804 otherwise require examining a group of strings to identify them. */
9805
9806 static void
9807 note_register_values (struct ia64_opcode *idesc)
9808 {
9809 valueT qp_changemask = 0;
9810 int i;
9811
9812 /* Invalidate values for registers being written to. */
9813 for (i = 0; i < idesc->num_outputs; i++)
9814 {
9815 if (idesc->operands[i] == IA64_OPND_R1
9816 || idesc->operands[i] == IA64_OPND_R2
9817 || idesc->operands[i] == IA64_OPND_R3)
9818 {
9819 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9820 if (regno > 0 && regno < NELEMS (gr_values))
9821 gr_values[regno].known = 0;
9822 }
9823 else if (idesc->operands[i] == IA64_OPND_R3_2)
9824 {
9825 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9826 if (regno > 0 && regno < 4)
9827 gr_values[regno].known = 0;
9828 }
9829 else if (idesc->operands[i] == IA64_OPND_P1
9830 || idesc->operands[i] == IA64_OPND_P2)
9831 {
9832 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9833 qp_changemask |= (valueT) 1 << regno;
9834 }
9835 else if (idesc->operands[i] == IA64_OPND_PR)
9836 {
9837 if (idesc->operands[2] & (valueT) 0x10000)
9838 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9839 else
9840 qp_changemask = idesc->operands[2];
9841 break;
9842 }
9843 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9844 {
9845 if (idesc->operands[1] & ((valueT) 1 << 43))
9846 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9847 else
9848 qp_changemask = idesc->operands[1];
9849 qp_changemask &= ~(valueT) 0xFFFF;
9850 break;
9851 }
9852 }
9853
9854 /* Always clear qp branch flags on any PR change. */
9855 /* FIXME there may be exceptions for certain compares. */
9856 clear_qp_branch_flag (qp_changemask);
9857
9858 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9859 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9860 {
9861 qp_changemask |= ~(valueT) 0xFFFF;
9862 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9863 {
9864 for (i = 32; i < 32 + md.rot.num_regs; i++)
9865 gr_values[i].known = 0;
9866 }
9867 clear_qp_mutex (qp_changemask);
9868 clear_qp_implies (qp_changemask, qp_changemask);
9869 }
9870 /* After a call, all register values are undefined, except those marked
9871 as "safe". */
9872 else if (startswith (idesc->name, "br.call")
9873 || startswith (idesc->name, "brl.call"))
9874 {
9875 /* FIXME keep GR values which are marked as "safe_across_calls" */
9876 clear_register_values ();
9877 clear_qp_mutex (~qp_safe_across_calls);
9878 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9879 clear_qp_branch_flag (~qp_safe_across_calls);
9880 }
9881 else if (is_interruption_or_rfi (idesc)
9882 || is_taken_branch (idesc))
9883 {
9884 clear_register_values ();
9885 clear_qp_mutex (~(valueT) 0);
9886 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9887 }
9888 /* Look for mutex and implies relations. */
9889 else if ((idesc->operands[0] == IA64_OPND_P1
9890 || idesc->operands[0] == IA64_OPND_P2)
9891 && (idesc->operands[1] == IA64_OPND_P1
9892 || idesc->operands[1] == IA64_OPND_P2))
9893 {
9894 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9895 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9896 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9897 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9898
9899 /* If both PRs are PR0, we can't really do anything. */
9900 if (p1 == 0 && p2 == 0)
9901 {
9902 if (md.debug_dv)
9903 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9904 }
9905 /* In general, clear mutexes and implies which include P1 or P2,
9906 with the following exceptions. */
9907 else if (has_suffix_p (idesc->name, ".or.andcm")
9908 || has_suffix_p (idesc->name, ".and.orcm"))
9909 {
9910 clear_qp_implies (p2mask, p1mask);
9911 }
9912 else if (has_suffix_p (idesc->name, ".andcm")
9913 || has_suffix_p (idesc->name, ".and"))
9914 {
9915 clear_qp_implies (0, p1mask | p2mask);
9916 }
9917 else if (has_suffix_p (idesc->name, ".orcm")
9918 || has_suffix_p (idesc->name, ".or"))
9919 {
9920 clear_qp_mutex (p1mask | p2mask);
9921 clear_qp_implies (p1mask | p2mask, 0);
9922 }
9923 else
9924 {
9925 int added = 0;
9926
9927 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9928
9929 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9930 if (p1 == 0 || p2 == 0)
9931 clear_qp_mutex (p1mask | p2mask);
9932 else
9933 added = update_qp_mutex (p1mask | p2mask);
9934
9935 if (CURR_SLOT.qp_regno == 0
9936 || has_suffix_p (idesc->name, ".unc"))
9937 {
9938 if (added == 0 && p1 && p2)
9939 add_qp_mutex (p1mask | p2mask);
9940 if (CURR_SLOT.qp_regno != 0)
9941 {
9942 if (p1)
9943 add_qp_imply (p1, CURR_SLOT.qp_regno);
9944 if (p2)
9945 add_qp_imply (p2, CURR_SLOT.qp_regno);
9946 }
9947 }
9948 }
9949 }
9950 /* Look for mov imm insns into GRs. */
9951 else if (idesc->operands[0] == IA64_OPND_R1
9952 && (idesc->operands[1] == IA64_OPND_IMM22
9953 || idesc->operands[1] == IA64_OPND_IMMU64)
9954 && CURR_SLOT.opnd[1].X_op == O_constant
9955 && (strcmp (idesc->name, "mov") == 0
9956 || strcmp (idesc->name, "movl") == 0))
9957 {
9958 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9959 if (regno > 0 && regno < NELEMS (gr_values))
9960 {
9961 gr_values[regno].known = 1;
9962 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9963 gr_values[regno].path = md.path;
9964 if (md.debug_dv)
9965 {
9966 fprintf (stderr, " Know gr%d = ", regno);
9967 fprintf_vma (stderr, gr_values[regno].value);
9968 fputs ("\n", stderr);
9969 }
9970 }
9971 }
9972 /* Look for dep.z imm insns. */
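  /* Illustrative example: for "dep.z r4 = 0x5, 8, 4" the tracked value is
     (0x5 & ((1 << 4) - 1)) << 8 == 0x500, matching the computation below.  */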
9973 else if (idesc->operands[0] == IA64_OPND_R1
9974 && idesc->operands[1] == IA64_OPND_IMM8
9975 && strcmp (idesc->name, "dep.z") == 0)
9976 {
9977 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9978 if (regno > 0 && regno < NELEMS (gr_values))
9979 {
9980 valueT value = CURR_SLOT.opnd[1].X_add_number;
9981
9982 if (CURR_SLOT.opnd[3].X_add_number < 64)
9983 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9984 value <<= CURR_SLOT.opnd[2].X_add_number;
9985 gr_values[regno].known = 1;
9986 gr_values[regno].value = value;
9987 gr_values[regno].path = md.path;
9988 if (md.debug_dv)
9989 {
9990 fprintf (stderr, " Know gr%d = ", regno);
9991 fprintf_vma (stderr, gr_values[regno].value);
9992 fputs ("\n", stderr);
9993 }
9994 }
9995 }
9996 else
9997 {
9998 clear_qp_mutex (qp_changemask);
9999 clear_qp_implies (qp_changemask, qp_changemask);
10000 }
10001 }
10002
10003 /* Return whether the given predicate registers are currently mutex. */
10004
10005 static int
10006 qp_mutex (int p1, int p2, int path)
10007 {
10008 int i;
10009 valueT mask;
10010
10011 if (p1 != p2)
10012 {
10013 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10014 for (i = 0; i < qp_mutexeslen; i++)
10015 {
10016 if (qp_mutexes[i].path >= path
10017 && (qp_mutexes[i].prmask & mask) == mask)
10018 return 1;
10019 }
10020 }
10021 return 0;
10022 }
10023
10024 /* Return whether the given resource is in the given insn's list of chks.
10025 Return 1 if the conflict is absolutely determined, 2 if it is a potential
10026 conflict, and 0 if there is no conflict. */
10027
10028 static int
10029 resources_match (struct rsrc *rs,
10030 struct ia64_opcode *idesc,
10031 int note,
10032 int qp_regno,
10033 int path)
10034 {
10035 struct rsrc specs[MAX_SPECS];
10036 int count;
10037
10038 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10039 we don't need to check. One exception is note 11, which indicates that
10040 target predicates are written regardless of PR[qp]. */
10041 if (qp_mutex (rs->qp_regno, qp_regno, path)
10042 && note != 11)
10043 return 0;
10044
10045 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10046 while (count-- > 0)
10047 {
10048 /* UNAT checking is a bit more specific than other resources */
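	  /* When both the marked resource and the current use carry a
	     .mem.offset hint with the same base, they can only conflict if
	     bits 8:3 of the two offsets select the same UNAT bit
	     (illustrative: offsets 0x08 and 0x48 select bits 1 and 9, so
	     they cannot conflict).  */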
10049 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10050 && specs[count].mem_offset.hint
10051 && rs->mem_offset.hint)
10052 {
10053 if (rs->mem_offset.base == specs[count].mem_offset.base)
10054 {
10055 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10056 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10057 return 1;
10058 else
10059 continue;
10060 }
10061 }
10062
10063 /* Skip apparent PR write conflicts where both writes are an AND or both
10064 writes are an OR. */
10065 if (rs->dependency->specifier == IA64_RS_PR
10066 || rs->dependency->specifier == IA64_RS_PRr
10067 || rs->dependency->specifier == IA64_RS_PR63)
10068 {
10069 if (specs[count].cmp_type != CMP_NONE
10070 && specs[count].cmp_type == rs->cmp_type)
10071 {
10072 if (md.debug_dv)
10073 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10074 dv_mode[rs->dependency->mode],
10075 rs->dependency->specifier != IA64_RS_PR63 ?
10076 specs[count].index : 63);
10077 continue;
10078 }
10079 if (md.debug_dv)
10080 fprintf (stderr,
10081 " %s on parallel compare conflict %s vs %s on PR%d\n",
10082 dv_mode[rs->dependency->mode],
10083 dv_cmp_type[rs->cmp_type],
10084 dv_cmp_type[specs[count].cmp_type],
10085 rs->dependency->specifier != IA64_RS_PR63 ?
10086 specs[count].index : 63);
10087
10088 }
10089
10090 /* If either resource is not specific, conservatively assume a
10091 conflict. */
10092 if (!specs[count].specific || !rs->specific)
10093 return 2;
10094 else if (specs[count].index == rs->index)
10095 return 1;
10096 }
10097
10098 return 0;
10099 }
10100
10101 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10102 insert a stop to create the break. Update all resource dependencies
10103 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10104 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10105 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10106 instruction. */
10107
10108 static void
10109 insn_group_break (int insert_stop, int qp_regno, int save_current)
10110 {
10111 int i;
10112
10113 if (insert_stop && md.num_slots_in_use > 0)
10114 PREV_SLOT.end_of_insn_group = 1;
10115
10116 if (md.debug_dv)
10117 {
10118 fprintf (stderr, " Insn group break%s",
10119 (insert_stop ? " (w/stop)" : ""));
10120 if (qp_regno != 0)
10121 fprintf (stderr, " effective for QP=%d", qp_regno);
10122 fprintf (stderr, "\n");
10123 }
10124
10125 i = 0;
10126 while (i < regdepslen)
10127 {
10128 const struct ia64_dependency *dep = regdeps[i].dependency;
10129
10130 if (qp_regno != 0
10131 && regdeps[i].qp_regno != qp_regno)
10132 {
10133 ++i;
10134 continue;
10135 }
10136
10137 if (save_current
10138 && CURR_SLOT.src_file == regdeps[i].file
10139 && CURR_SLOT.src_line == regdeps[i].line)
10140 {
10141 ++i;
10142 continue;
10143 }
10144
10145 /* clear dependencies which are automatically cleared by a stop, or
10146 those that have reached the appropriate state of insn serialization */
10147 if (dep->semantics == IA64_DVS_IMPLIED
10148 || dep->semantics == IA64_DVS_IMPLIEDF
10149 || regdeps[i].insn_srlz == STATE_SRLZ)
10150 {
10151 print_dependency ("Removing", i);
10152 regdeps[i] = regdeps[--regdepslen];
10153 }
10154 else
10155 {
10156 if (dep->semantics == IA64_DVS_DATA
10157 || dep->semantics == IA64_DVS_INSTR
10158 || dep->semantics == IA64_DVS_SPECIFIC)
10159 {
10160 if (regdeps[i].insn_srlz == STATE_NONE)
10161 regdeps[i].insn_srlz = STATE_STOP;
10162 if (regdeps[i].data_srlz == STATE_NONE)
10163 regdeps[i].data_srlz = STATE_STOP;
10164 }
10165 ++i;
10166 }
10167 }
10168 }
10169
10170 /* Add the given resource usage spec to the list of active dependencies. */
10171
10172 static void
10173 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10174 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10175 struct rsrc *spec,
10176 int depind,
10177 int path)
10178 {
10179 if (regdepslen == regdepstotlen)
10180 {
10181 regdepstotlen += 20;
10182 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10183 }
10184
10185 regdeps[regdepslen] = *spec;
10186 regdeps[regdepslen].depind = depind;
10187 regdeps[regdepslen].path = path;
10188 regdeps[regdepslen].file = CURR_SLOT.src_file;
10189 regdeps[regdepslen].line = CURR_SLOT.src_line;
10190
10191 print_dependency ("Adding", regdepslen);
10192
10193 ++regdepslen;
10194 }
10195
10196 static void
10197 print_dependency (const char *action, int depind)
10198 {
10199 if (md.debug_dv)
10200 {
10201 fprintf (stderr, " %s %s '%s'",
10202 action, dv_mode[(regdeps[depind].dependency)->mode],
10203 (regdeps[depind].dependency)->name);
10204 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10205 fprintf (stderr, " (%d)", regdeps[depind].index);
10206 if (regdeps[depind].mem_offset.hint)
10207 {
10208 fputs (" ", stderr);
10209 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10210 fputs ("+", stderr);
10211 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10212 }
10213 fprintf (stderr, "\n");
10214 }
10215 }
10216
10217 static void
10218 instruction_serialization (void)
10219 {
10220 int i;
10221 if (md.debug_dv)
10222 fprintf (stderr, " Instruction serialization\n");
10223 for (i = 0; i < regdepslen; i++)
10224 if (regdeps[i].insn_srlz == STATE_STOP)
10225 regdeps[i].insn_srlz = STATE_SRLZ;
10226 }
10227
10228 static void
10229 data_serialization (void)
10230 {
10231 int i = 0;
10232 if (md.debug_dv)
10233 fprintf (stderr, " Data serialization\n");
10234 while (i < regdepslen)
10235 {
10236 if (regdeps[i].data_srlz == STATE_STOP
10237 /* Note: as of 991210, all "other" dependencies are cleared by a
10238 data serialization. This might change with new tables */
10239 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10240 {
10241 print_dependency ("Removing", i);
10242 regdeps[i] = regdeps[--regdepslen];
10243 }
10244 else
10245 ++i;
10246 }
10247 }
10248
10249 /* Insert stops and serializations as needed to avoid DVs. */
10250
10251 static void
10252 remove_marked_resource (struct rsrc *rs)
10253 {
10254 switch (rs->dependency->semantics)
10255 {
10256 case IA64_DVS_SPECIFIC:
10257 if (md.debug_dv)
10258 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10259 /* Fall through. */
10260 case IA64_DVS_INSTR:
10261 if (md.debug_dv)
10262 fprintf (stderr, "Inserting instr serialization\n");
10263 if (rs->insn_srlz < STATE_STOP)
10264 insn_group_break (1, 0, 0);
10265 if (rs->insn_srlz < STATE_SRLZ)
10266 {
10267 struct slot oldslot = CURR_SLOT;
10268 /* Manually jam a srlz.i insn into the stream */
10269 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10270 CURR_SLOT.user_template = -1;
10271 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10272 instruction_serialization ();
10273 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10274 if (++md.num_slots_in_use >= NUM_SLOTS)
10275 emit_one_bundle ();
10276 CURR_SLOT = oldslot;
10277 }
10278 insn_group_break (1, 0, 0);
10279 break;
10280 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10281 "other" types of DV are eliminated
10282 by a data serialization */
10283 case IA64_DVS_DATA:
10284 if (md.debug_dv)
10285 fprintf (stderr, "Inserting data serialization\n");
10286 if (rs->data_srlz < STATE_STOP)
10287 insn_group_break (1, 0, 0);
10288 {
10289 struct slot oldslot = CURR_SLOT;
10290 /* Manually jam a srlz.d insn into the stream */
10291 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10292 CURR_SLOT.user_template = -1;
10293 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10294 data_serialization ();
10295 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10296 if (++md.num_slots_in_use >= NUM_SLOTS)
10297 emit_one_bundle ();
10298 CURR_SLOT = oldslot;
10299 }
10300 break;
10301 case IA64_DVS_IMPLIED:
10302 case IA64_DVS_IMPLIEDF:
10303 if (md.debug_dv)
10304 fprintf (stderr, "Inserting stop\n");
10305 insn_group_break (1, 0, 0);
10306 break;
10307 default:
10308 break;
10309 }
10310 }
10311
10312 /* Check the resources used by the given opcode against the current dependency
10313 list.
10314
10315 The check is run once for each execution path encountered. In this case,
10316 a unique execution path is the sequence of instructions following a code
10317 entry point, e.g. the following has three execution paths, one starting
10318 at L0, one at L1, and one at L2.
10319
10320 L0: nop
10321 L1: add
10322 L2: add
10323 br.ret
10324 */
10325
10326 static void
10327 check_dependencies (struct ia64_opcode *idesc)
10328 {
10329 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10330 int path;
10331 int i;
10332
10333 /* Note that the number of marked resources may change within the
10334 loop if in auto mode. */
10335 i = 0;
10336 while (i < regdepslen)
10337 {
10338 struct rsrc *rs = &regdeps[i];
10339 const struct ia64_dependency *dep = rs->dependency;
10340 int chkind;
10341 int note;
10342 int start_over = 0;
10343
10344 if (dep->semantics == IA64_DVS_NONE
10345 || (chkind = depends_on (rs->depind, idesc)) == -1)
10346 {
10347 ++i;
10348 continue;
10349 }
10350
10351 note = NOTE (opdeps->chks[chkind]);
10352
10353 /* Check this resource against each execution path seen thus far. */
10354 for (path = 0; path <= md.path; path++)
10355 {
10356 int matchtype;
10357
10358 /* If the dependency wasn't on the path being checked, ignore it. */
10359 if (rs->path < path)
10360 continue;
10361
10362 /* If the QP for this insn implies a QP which has branched, don't
10363 bother checking. Ed. NOTE: I don't think this check is terribly
10364 useful; what's the point of generating code which will only be
10365 reached if its QP is zero?
10366 This code was specifically inserted to handle the following code,
10367 based on notes from Intel's DV checking code, where p1 implies p2.
10368
10369 mov r4 = 2
10370 (p2) br.cond L
10371 (p1) mov r4 = 7
10372 */
10373 if (CURR_SLOT.qp_regno != 0)
10374 {
10375 int skip = 0;
10376 int implies;
10377 for (implies = 0; implies < qp_implieslen; implies++)
10378 {
10379 if (qp_implies[implies].path >= path
10380 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10381 && qp_implies[implies].p2_branched)
10382 {
10383 skip = 1;
10384 break;
10385 }
10386 }
10387 if (skip)
10388 continue;
10389 }
10390
10391 if ((matchtype = resources_match (rs, idesc, note,
10392 CURR_SLOT.qp_regno, path)) != 0)
10393 {
10394 char msg[1024];
10395 char pathmsg[256] = "";
10396 char indexmsg[256] = "";
10397 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10398
10399 if (path != 0)
10400 snprintf (pathmsg, sizeof (pathmsg),
10401 " when entry is at label '%s'",
10402 md.entry_labels[path - 1]);
10403 if (matchtype == 1 && rs->index >= 0)
10404 snprintf (indexmsg, sizeof (indexmsg),
10405 ", specific resource number is %d",
10406 rs->index);
10407 snprintf (msg, sizeof (msg),
10408 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10409 idesc->name,
10410 (certain ? "violates" : "may violate"),
10411 dv_mode[dep->mode], dep->name,
10412 dv_sem[dep->semantics],
10413 pathmsg, indexmsg);
10414
10415 if (md.explicit_mode)
10416 {
10417 as_warn ("%s", msg);
10418 if (path < md.path)
10419 as_warn (_("Only the first path encountering the conflict is reported"));
10420 as_warn_where (rs->file, rs->line,
10421 _("This is the location of the conflicting usage"));
10422 /* Don't bother checking other paths, to avoid duplicating
10423 the same warning */
10424 break;
10425 }
10426 else
10427 {
10428 if (md.debug_dv)
10429 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10430
10431 remove_marked_resource (rs);
10432
10433 /* since the set of dependencies has changed, start over */
10434 /* FIXME -- since we're removing dvs as we go, we
10435 probably don't really need to start over... */
10436 start_over = 1;
10437 break;
10438 }
10439 }
10440 }
10441 if (start_over)
10442 i = 0;
10443 else
10444 ++i;
10445 }
10446 }
10447
10448 /* Register new dependencies based on the given opcode. */
10449
10450 static void
10451 mark_resources (struct ia64_opcode *idesc)
10452 {
10453 int i;
10454 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10455 int add_only_qp_reads = 0;
10456
10457 /* A conditional branch only uses its resources if it is taken; if it is
10458 taken, we stop following that path, so only QP reads are registered
10459 for the not-taken case. The other branch types effectively *always*
10460 write their resources. */
10461 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10462 {
10463 add_only_qp_reads = 1;
10464 }
10465
10466 if (md.debug_dv)
10467 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10468
10469 for (i = 0; i < opdeps->nregs; i++)
10470 {
10471 const struct ia64_dependency *dep;
10472 struct rsrc specs[MAX_SPECS];
10473 int note;
10474 int path;
10475 int count;
10476
10477 dep = ia64_find_dependency (opdeps->regs[i]);
10478 note = NOTE (opdeps->regs[i]);
10479
10480 if (add_only_qp_reads
10481 && !(dep->mode == IA64_DV_WAR
10482 && (dep->specifier == IA64_RS_PR
10483 || dep->specifier == IA64_RS_PRr
10484 || dep->specifier == IA64_RS_PR63)))
10485 continue;
10486
10487 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10488
10489 while (count-- > 0)
10490 {
10491 mark_resource (idesc, dep, &specs[count],
10492 DEP (opdeps->regs[i]), md.path);
10493 }
10494
10495 /* The execution path may affect register values, which may in turn
10496 affect which indirect-access resources are accessed. */
10497 switch (dep->specifier)
10498 {
10499 default:
10500 break;
10501 case IA64_RS_CPUID:
10502 case IA64_RS_DBR:
10503 case IA64_RS_IBR:
10504 case IA64_RS_MSR:
10505 case IA64_RS_PKR:
10506 case IA64_RS_PMC:
10507 case IA64_RS_PMD:
10508 case IA64_RS_RR:
10509 for (path = 0; path < md.path; path++)
10510 {
10511 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10512 while (count-- > 0)
10513 mark_resource (idesc, dep, &specs[count],
10514 DEP (opdeps->regs[i]), path);
10515 }
10516 break;
10517 }
10518 }
10519 }
10520
10521 /* Remove dependencies when they no longer apply. */
10522
10523 static void
10524 update_dependencies (struct ia64_opcode *idesc)
10525 {
10526 int i;
10527
10528 if (strcmp (idesc->name, "srlz.i") == 0)
10529 {
10530 instruction_serialization ();
10531 }
10532 else if (strcmp (idesc->name, "srlz.d") == 0)
10533 {
10534 data_serialization ();
10535 }
10536 else if (is_interruption_or_rfi (idesc)
10537 || is_taken_branch (idesc))
10538 {
10539 /* Although technically the taken branch doesn't clear dependencies
10540 which require a srlz.[id], we don't follow the branch; the next
10541 instruction is assumed to start with a clean slate. */
10542 regdepslen = 0;
10543 md.path = 0;
10544 }
10545 else if (is_conditional_branch (idesc)
10546 && CURR_SLOT.qp_regno != 0)
10547 {
10548 int is_call = strstr (idesc->name, ".call") != NULL;
10549
10550 for (i = 0; i < qp_implieslen; i++)
10551 {
10552 /* If the conditional branch's predicate is implied by the predicate
10553 in an existing dependency, remove that dependency. */
10554 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10555 {
10556 int depind = 0;
10557 /* Note that this implied predicate takes a branch so that if
10558 a later insn generates a DV but its predicate implies this
10559 one, we can avoid the false DV warning. */
10560 qp_implies[i].p2_branched = 1;
10561 while (depind < regdepslen)
10562 {
10563 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10564 {
10565 print_dependency ("Removing", depind);
10566 regdeps[depind] = regdeps[--regdepslen];
10567 }
10568 else
10569 ++depind;
10570 }
10571 }
10572 }
10573 /* Any marked resources which have this same predicate should be
10574 cleared, provided that the QP hasn't been modified between the
10575 marking instruction and the branch. */
10576 if (is_call)
10577 {
10578 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10579 }
10580 else
10581 {
10582 i = 0;
10583 while (i < regdepslen)
10584 {
10585 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10586 && regdeps[i].link_to_qp_branch
10587 && (regdeps[i].file != CURR_SLOT.src_file
10588 || regdeps[i].line != CURR_SLOT.src_line))
10589 {
10590 /* Treat like a taken branch */
10591 print_dependency ("Removing", i);
10592 regdeps[i] = regdeps[--regdepslen];
10593 }
10594 else
10595 ++i;
10596 }
10597 }
10598 }
10599 }
10600
10601 /* Examine the current instruction for dependency violations. */
10602
10603 static int
10604 check_dv (struct ia64_opcode *idesc)
10605 {
10606 if (md.debug_dv)
10607 {
10608 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10609 idesc->name, CURR_SLOT.src_line,
10610 idesc->dependencies->nchks,
10611 idesc->dependencies->nregs);
10612 }
10613
10614 /* Look through the list of currently marked resources; if the current
10615 instruction has a dependency in its chks list that uses one of those
10616 resources, check it against the specific resources used.  */
10617 check_dependencies (idesc);
10618
10619 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10620 then add them to the list of marked resources. */
10621 mark_resources (idesc);
10622
10623 /* There are several types of dependency semantics, and each has its own
10624 requirements for being cleared
10625
10626 Instruction serialization (insns separated by interruption, rfi, or
10627 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10628
10629 Data serialization (instruction serialization, or writer + srlz.d +
10630 reader, where writer and srlz.d are in separate groups) clears
10631 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10632 always be the case).
10633
10634 Instruction group break (groups separated by stop, taken branch,
10635 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10636 */
10637 update_dependencies (idesc);
10638
10639 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10640 warning. Keep track of as many as possible that are useful. */
10641 note_register_values (idesc);
10642
10643 /* We don't need or want this anymore. */
10644 md.mem_offset.hint = 0;
10645
10646 return 0;
10647 }
10648
10649 /* Translate one line of assembly.  Pseudo ops and labels have already
10650 been handled and do not appear here.  */
10651 void
10652 md_assemble (char *str)
10653 {
10654 char *saved_input_line_pointer, *temp;
10655 const char *mnemonic;
10656 const struct pseudo_opcode *pdesc;
10657 struct ia64_opcode *idesc;
10658 unsigned char qp_regno;
10659 unsigned int flags;
10660 int ch;
10661
10662 saved_input_line_pointer = input_line_pointer;
10663 input_line_pointer = str;
10664
10665 /* extract the opcode (mnemonic): */
10666
10667 ch = get_symbol_name (&temp);
10668 mnemonic = temp;
10669 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10670 if (pdesc)
10671 {
10672 (void) restore_line_pointer (ch);
10673 (*pdesc->handler) (pdesc->arg);
10674 goto done;
10675 }
10676
10677 /* Find the instruction descriptor matching the arguments. */
10678
10679 idesc = ia64_find_opcode (mnemonic);
10680 (void) restore_line_pointer (ch);
10681 if (!idesc)
10682 {
10683 as_bad (_("Unknown opcode `%s'"), mnemonic);
10684 goto done;
10685 }
10686
10687 idesc = parse_operands (idesc);
10688 if (!idesc)
10689 goto done;
10690
10691 /* Handle the dynamic ops we can handle now: */
10692 if (idesc->type == IA64_TYPE_DYN)
10693 {
10694 if (strcmp (idesc->name, "add") == 0)
10695 {
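/* Pick the concrete opcode: "addl" accepts a 22-bit immediate but
   requires the source GR to be r0-r3, while "adds" accepts any GR
   but only a 14-bit immediate.  */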
10696 if (CURR_SLOT.opnd[2].X_op == O_register
10697 && CURR_SLOT.opnd[2].X_add_number < 4)
10698 mnemonic = "addl";
10699 else
10700 mnemonic = "adds";
10701 ia64_free_opcode (idesc);
10702 idesc = ia64_find_opcode (mnemonic);
10703 }
10704 else if (strcmp (idesc->name, "mov") == 0)
10705 {
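/* Moves to/from an application register must issue on the unit that
   can access that AR: some ARs are reachable only from the I-unit,
   others only from the M-unit, so rewrite "mov" as "mov.i" or
   "mov.m" accordingly.  */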
10706 enum ia64_opnd opnd1, opnd2;
10707 int rop;
10708
10709 opnd1 = idesc->operands[0];
10710 opnd2 = idesc->operands[1];
10711 if (opnd1 == IA64_OPND_AR3)
10712 rop = 0;
10713 else if (opnd2 == IA64_OPND_AR3)
10714 rop = 1;
10715 else
10716 abort ();
10717 if (CURR_SLOT.opnd[rop].X_op == O_register)
10718 {
10719 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10720 mnemonic = "mov.i";
10721 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10722 mnemonic = "mov.m";
10723 else
10724 rop = -1;
10725 }
10726 else
10727 abort ();
10728 if (rop >= 0)
10729 {
10730 ia64_free_opcode (idesc);
10731 idesc = ia64_find_opcode (mnemonic);
10732 while (idesc != NULL
10733 && (idesc->operands[0] != opnd1
10734 || idesc->operands[1] != opnd2))
10735 idesc = get_next_opcode (idesc);
10736 }
10737 }
10738 }
10739 else if (strcmp (idesc->name, "mov.i") == 0
10740 || strcmp (idesc->name, "mov.m") == 0)
10741 {
10742 enum ia64_opnd opnd1, opnd2;
10743 int rop;
10744
10745 opnd1 = idesc->operands[0];
10746 opnd2 = idesc->operands[1];
10747 if (opnd1 == IA64_OPND_AR3)
10748 rop = 0;
10749 else if (opnd2 == IA64_OPND_AR3)
10750 rop = 1;
10751 else
10752 abort ();
10753 if (CURR_SLOT.opnd[rop].X_op == O_register)
10754 {
10755 char unit = 'a';
10756 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10757 unit = 'i';
10758 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10759 unit = 'm';
10760 if (unit != 'a' && unit != idesc->name [4])
10761 as_bad (_("AR %d can only be accessed by %c-unit"),
10762 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10763 TOUPPER (unit));
10764 }
10765 }
10766 else if (strcmp (idesc->name, "hint.b") == 0)
10767 {
10768 switch (md.hint_b)
10769 {
10770 case hint_b_ok:
10771 break;
10772 case hint_b_warning:
10773 as_warn (_("hint.b may be treated as nop"));
10774 break;
10775 case hint_b_error:
10776 as_bad (_("hint.b shouldn't be used"));
10777 break;
10778 }
10779 }
10780
10781 qp_regno = 0;
10782 if (md.qp.X_op == O_register)
10783 {
10784 qp_regno = md.qp.X_add_number - REG_P;
10785 md.qp.X_op = O_absent;
10786 }
10787
10788 flags = idesc->flags;
10789
10790 if ((flags & IA64_OPCODE_FIRST) != 0)
10791 {
10792 /* The alignment frag has to end with a stop bit only if the
10793 next instruction after the alignment directive has to be
10794 the first instruction in an instruction group. */
10795 if (align_frag)
10796 {
10797 while (align_frag->fr_type != rs_align_code)
10798 {
10799 align_frag = align_frag->fr_next;
10800 if (!align_frag)
10801 break;
10802 }
10803 /* align_frag can be NULL if there are directives in
10804 between. */
10805 if (align_frag && align_frag->fr_next == frag_now)
10806 align_frag->tc_frag_data = 1;
10807 }
10808
10809 insn_group_break (1, 0, 0);
10810 }
10811 align_frag = NULL;
10812
10813 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10814 {
10815 as_bad (_("`%s' cannot be predicated"), idesc->name);
10816 goto done;
10817 }
10818
10819 /* Build the instruction. */
10820 CURR_SLOT.qp_regno = qp_regno;
10821 CURR_SLOT.idesc = idesc;
10822 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10823 dwarf2_where (&CURR_SLOT.debug_line);
10824 dwarf2_consume_line_info ();
10825
10826 /* Add unwind entries, if there are any. */
10827 if (unwind.current_entry)
10828 {
10829 CURR_SLOT.unwind_record = unwind.current_entry;
10830 unwind.current_entry = NULL;
10831 }
10832 if (unwind.pending_saves)
10833 {
10834 if (unwind.pending_saves->next)
10835 {
10836 /* Attach the next pending save to the next slot so that its
10837 slot number will get set correctly. */
10838 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10839 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10840 }
10841 else
10842 unwind.pending_saves = NULL;
10843 }
10844 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10845 unwind.insn = 1;
10846
10847 /* Check for dependency violations. */
10848 if (md.detect_dv)
10849 check_dv (idesc);
10850
10851 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10852 if (++md.num_slots_in_use >= NUM_SLOTS)
10853 emit_one_bundle ();
10854
10855 if ((flags & IA64_OPCODE_LAST) != 0)
10856 insn_group_break (1, 0, 0);
10857
10858 md.last_text_seg = now_seg;
10859
10860 done:
10861 input_line_pointer = saved_input_line_pointer;
10862 }
10863
10864 /* Called when symbol NAME cannot be found in the symbol table.
10865 Should be used for dynamic valued symbols only. */
10866
10867 symbolS *
10868 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10869 {
10870 return 0;
10871 }
10872
10873 /* Called for any expression that cannot be recognized.  When the
10874 function is called, `input_line_pointer' will point to the start of
10875 the expression.  */
10876
10877 void
10878 md_operand (expressionS *e)
10879 {
10880 switch (*input_line_pointer)
10881 {
10882 case '[':
10883 ++input_line_pointer;
10884 expression_and_evaluate (e);
10885 if (*input_line_pointer != ']')
10886 {
10887 as_bad (_("Closing bracket missing"));
10888 goto err;
10889 }
10890 else
10891 {
10892 if (e->X_op != O_register
10893 || e->X_add_number < REG_GR
10894 || e->X_add_number > REG_GR + 127)
10895 {
10896 as_bad (_("Index must be a general register"));
10897 e->X_add_number = REG_GR;
10898 }
10899
10900 ++input_line_pointer;
10901 e->X_op = O_index;
10902 }
10903 break;
10904
10905 default:
10906 break;
10907 }
10908 return;
10909
10910 err:
10911 ignore_rest_of_line ();
10912 }
10913
10914 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10915 a section symbol plus some offset.  For relocs involving @fptr(),
10916 we don't want such adjustments since we need to have the
10917 original symbol's name in the reloc.  */
10918 int
10919 ia64_fix_adjustable (fixS *fix)
10920 {
10921 /* Prevent all adjustments to global symbols */
10922 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10923 return 0;
10924
10925 switch (fix->fx_r_type)
10926 {
10927 case BFD_RELOC_IA64_FPTR64I:
10928 case BFD_RELOC_IA64_FPTR32MSB:
10929 case BFD_RELOC_IA64_FPTR32LSB:
10930 case BFD_RELOC_IA64_FPTR64MSB:
10931 case BFD_RELOC_IA64_FPTR64LSB:
10932 case BFD_RELOC_IA64_LTOFF_FPTR22:
10933 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10934 return 0;
10935 default:
10936 break;
10937 }
10938
10939 return 1;
10940 }
10941
10942 int
10943 ia64_force_relocation (fixS *fix)
10944 {
10945 switch (fix->fx_r_type)
10946 {
10947 case BFD_RELOC_IA64_FPTR64I:
10948 case BFD_RELOC_IA64_FPTR32MSB:
10949 case BFD_RELOC_IA64_FPTR32LSB:
10950 case BFD_RELOC_IA64_FPTR64MSB:
10951 case BFD_RELOC_IA64_FPTR64LSB:
10952
10953 case BFD_RELOC_IA64_LTOFF22:
10954 case BFD_RELOC_IA64_LTOFF64I:
10955 case BFD_RELOC_IA64_LTOFF_FPTR22:
10956 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10957 case BFD_RELOC_IA64_PLTOFF22:
10958 case BFD_RELOC_IA64_PLTOFF64I:
10959 case BFD_RELOC_IA64_PLTOFF64MSB:
10960 case BFD_RELOC_IA64_PLTOFF64LSB:
10961
10962 case BFD_RELOC_IA64_LTOFF22X:
10963 case BFD_RELOC_IA64_LDXMOV:
10964 return 1;
10965
10966 default:
10967 break;
10968 }
10969
10970 return generic_force_reloc (fix);
10971 }
10972
10973 /* Decide from what point a pc-relative relocation is relative to,
10974 relative to the pc-relative fixup. Er, relatively speaking. */
10975 long
10976 ia64_pcrel_from_section (fixS *fix, segT sec)
10977 {
10978 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10979
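/* In code sections, IA-64 pc-relative relocations are taken relative to
   the start of the 16-byte bundle containing the fixup, so mask off the
   slot number held in the low four bits.  */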
10980 if (bfd_section_flags (sec) & SEC_CODE)
10981 off &= ~0xfUL;
10982
10983 return off;
10984 }
10985
10986
10987 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10988 void
10989 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10990 {
10991 expressionS exp;
10992
10993 exp.X_op = O_pseudo_fixup;
10994 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10995 exp.X_add_number = 0;
10996 exp.X_add_symbol = symbol;
10997 emit_expr (&exp, size);
10998 }
10999
11000 /* This is called whenever some data item (not an instruction) needs a
11001 fixup.  We pick the right reloc code depending on the byte order
11002 currently in effect.  */
11003 void
11004 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11005 bfd_reloc_code_real_type code)
11006 {
11007 fixS *fix;
11008
11009 switch (nbytes)
11010 {
11011 /* There are no relocs for 8 and 16 bit quantities, but we allow
11012 them here since they will work fine as long as the expression
11013 is fully defined at the end of the pass over the source file.  */
11014 case 1: code = BFD_RELOC_8; break;
11015 case 2: code = BFD_RELOC_16; break;
11016 case 4:
11017 if (target_big_endian)
11018 code = BFD_RELOC_IA64_DIR32MSB;
11019 else
11020 code = BFD_RELOC_IA64_DIR32LSB;
11021 break;
11022
11023 case 8:
11024 /* In 32-bit mode, data8 could mean function descriptors too. */
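/* An IA-64 function descriptor is a (function address, gp) pair, so an
   @iplt() descriptor occupies 16 bytes under the 64-bit ABI but only
   8 bytes under the 32-bit ABI, which is why data8 is accepted here.  */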
11025 if (exp->X_op == O_pseudo_fixup
11026 && exp->X_op_symbol
11027 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11028 && !(md.flags & EF_IA_64_ABI64))
11029 {
11030 if (target_big_endian)
11031 code = BFD_RELOC_IA64_IPLTMSB;
11032 else
11033 code = BFD_RELOC_IA64_IPLTLSB;
11034 exp->X_op = O_symbol;
11035 break;
11036 }
11037 else
11038 {
11039 if (target_big_endian)
11040 code = BFD_RELOC_IA64_DIR64MSB;
11041 else
11042 code = BFD_RELOC_IA64_DIR64LSB;
11043 break;
11044 }
11045
11046 case 16:
11047 if (exp->X_op == O_pseudo_fixup
11048 && exp->X_op_symbol
11049 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11050 {
11051 if (target_big_endian)
11052 code = BFD_RELOC_IA64_IPLTMSB;
11053 else
11054 code = BFD_RELOC_IA64_IPLTLSB;
11055 exp->X_op = O_symbol;
11056 break;
11057 }
11058 /* FALLTHRU */
11059
11060 default:
11061 as_bad (_("Unsupported fixup size %d"), nbytes);
11062 ignore_rest_of_line ();
11063 return;
11064 }
11065
11066 if (exp->X_op == O_pseudo_fixup)
11067 {
11068 exp->X_op = O_symbol;
11069 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11070 /* ??? If code unchanged, unsupported. */
11071 }
11072
11073 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11074 /* We need to store the byte order in effect in case we're going
11075 to fix an 8 or 16 bit relocation (for which there are no real
11076 relocs available).  See md_apply_fix().  */
11077 fix->tc_fix_data.bigendian = target_big_endian;
11078 }
11079
11080 /* Return the actual relocation we wish to associate with the pseudo
11081 reloc described by SYM and R_TYPE. SYM should be one of the
11082 symbols in the pseudo_func array, or NULL. */
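/* For example, the 22-bit immediate in `addl r2 = @gprel(sym), gp'
   arrives here as BFD_RELOC_IA64_IMM22 and is mapped to
   BFD_RELOC_IA64_GPREL22 below.  */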
11083
11084 static bfd_reloc_code_real_type
11085 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11086 {
11087 bfd_reloc_code_real_type newr = 0;
11088 const char *type = NULL, *suffix = "";
11089
11090 if (sym == NULL)
11091 {
11092 return r_type;
11093 }
11094
11095 switch (S_GET_VALUE (sym))
11096 {
11097 case FUNC_FPTR_RELATIVE:
11098 switch (r_type)
11099 {
11100 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11101 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11102 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11103 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11104 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11105 default: type = "FPTR"; break;
11106 }
11107 break;
11108
11109 case FUNC_GP_RELATIVE:
11110 switch (r_type)
11111 {
11112 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11113 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11114 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11115 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11116 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11117 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11118 default: type = "GPREL"; break;
11119 }
11120 break;
11121
11122 case FUNC_LT_RELATIVE:
11123 switch (r_type)
11124 {
11125 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11126 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11127 default: type = "LTOFF"; break;
11128 }
11129 break;
11130
11131 case FUNC_LT_RELATIVE_X:
11132 switch (r_type)
11133 {
11134 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11135 default: type = "LTOFF"; suffix = "X"; break;
11136 }
11137 break;
11138
11139 case FUNC_PC_RELATIVE:
11140 switch (r_type)
11141 {
11142 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11143 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11144 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11145 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11146 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11147 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11148 default: type = "PCREL"; break;
11149 }
11150 break;
11151
11152 case FUNC_PLT_RELATIVE:
11153 switch (r_type)
11154 {
11155 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11156 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11157 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11158 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11159 default: type = "PLTOFF"; break;
11160 }
11161 break;
11162
11163 case FUNC_SEC_RELATIVE:
11164 switch (r_type)
11165 {
11166 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11167 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11168 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11169 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11170 default: type = "SECREL"; break;
11171 }
11172 break;
11173
11174 case FUNC_SEG_RELATIVE:
11175 switch (r_type)
11176 {
11177 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11178 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11179 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11180 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11181 default: type = "SEGREL"; break;
11182 }
11183 break;
11184
11185 case FUNC_LTV_RELATIVE:
11186 switch (r_type)
11187 {
11188 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11189 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11190 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11191 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11192 default: type = "LTV"; break;
11193 }
11194 break;
11195
11196 case FUNC_LT_FPTR_RELATIVE:
11197 switch (r_type)
11198 {
11199 case BFD_RELOC_IA64_IMM22:
11200 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11201 case BFD_RELOC_IA64_IMM64:
11202 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11203 case BFD_RELOC_IA64_DIR32MSB:
11204 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11205 case BFD_RELOC_IA64_DIR32LSB:
11206 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11207 case BFD_RELOC_IA64_DIR64MSB:
11208 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11209 case BFD_RELOC_IA64_DIR64LSB:
11210 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11211 default:
11212 type = "LTOFF_FPTR"; break;
11213 }
11214 break;
11215
11216 case FUNC_TP_RELATIVE:
11217 switch (r_type)
11218 {
11219 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11220 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11221 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11222 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11223 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11224 default: type = "TPREL"; break;
11225 }
11226 break;
11227
11228 case FUNC_LT_TP_RELATIVE:
11229 switch (r_type)
11230 {
11231 case BFD_RELOC_IA64_IMM22:
11232 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11233 default:
11234 type = "LTOFF_TPREL"; break;
11235 }
11236 break;
11237
11238 case FUNC_DTP_MODULE:
11239 switch (r_type)
11240 {
11241 case BFD_RELOC_IA64_DIR64MSB:
11242 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11243 case BFD_RELOC_IA64_DIR64LSB:
11244 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11245 default:
11246 type = "DTPMOD"; break;
11247 }
11248 break;
11249
11250 case FUNC_LT_DTP_MODULE:
11251 switch (r_type)
11252 {
11253 case BFD_RELOC_IA64_IMM22:
11254 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11255 default:
11256 type = "LTOFF_DTPMOD"; break;
11257 }
11258 break;
11259
11260 case FUNC_DTP_RELATIVE:
11261 switch (r_type)
11262 {
11263 case BFD_RELOC_IA64_DIR32MSB:
11264 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11265 case BFD_RELOC_IA64_DIR32LSB:
11266 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11267 case BFD_RELOC_IA64_DIR64MSB:
11268 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11269 case BFD_RELOC_IA64_DIR64LSB:
11270 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11271 case BFD_RELOC_IA64_IMM14:
11272 newr = BFD_RELOC_IA64_DTPREL14; break;
11273 case BFD_RELOC_IA64_IMM22:
11274 newr = BFD_RELOC_IA64_DTPREL22; break;
11275 case BFD_RELOC_IA64_IMM64:
11276 newr = BFD_RELOC_IA64_DTPREL64I; break;
11277 default:
11278 type = "DTPREL"; break;
11279 }
11280 break;
11281
11282 case FUNC_LT_DTP_RELATIVE:
11283 switch (r_type)
11284 {
11285 case BFD_RELOC_IA64_IMM22:
11286 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11287 default:
11288 type = "LTOFF_DTPREL"; break;
11289 }
11290 break;
11291
11292 case FUNC_IPLT_RELOC:
11293 switch (r_type)
11294 {
11295 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11296 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11297 default: type = "IPLT"; break;
11298 }
11299 break;
11300
11301 #ifdef TE_VMS
11302 case FUNC_SLOTCOUNT_RELOC:
11303 return DUMMY_RELOC_IA64_SLOTCOUNT;
11304 #endif
11305
11306 default:
11307 abort ();
11308 }
11309
11310 if (newr)
11311 return newr;
11312 else
11313 {
11314 int width;
11315
11316 if (!type)
11317 abort ();
11318 switch (r_type)
11319 {
11320 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11321 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11322 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11323 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11324 case BFD_RELOC_UNUSED: width = 13; break;
11325 case BFD_RELOC_IA64_IMM14: width = 14; break;
11326 case BFD_RELOC_IA64_IMM22: width = 22; break;
11327 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11328 default: abort ();
11329 }
11330
11331 /* This should be an error, but since previously there wasn't any
11332 diagnostic here, don't make it fail because of this for now. */
11333 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11334 return r_type;
11335 }
11336 }
11337
11338 /* Here is where we generate the appropriate reloc for pseudo relocation
11339 functions.  */
11340 void
11341 ia64_validate_fix (fixS *fix)
11342 {
11343 switch (fix->fx_r_type)
11344 {
11345 case BFD_RELOC_IA64_FPTR64I:
11346 case BFD_RELOC_IA64_FPTR32MSB:
11347 case BFD_RELOC_IA64_FPTR64LSB:
11348 case BFD_RELOC_IA64_LTOFF_FPTR22:
11349 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11350 if (fix->fx_offset != 0)
11351 as_bad_where (fix->fx_file, fix->fx_line,
11352 _("No addend allowed in @fptr() relocation"));
11353 break;
11354 default:
11355 break;
11356 }
11357 }
11358
11359 static void
11360 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11361 {
11362 bfd_vma insn[3], t0, t1, control_bits;
11363 const char *err;
11364 char *fixpos;
11365 long slot;
11366
11367 slot = fix->fx_where & 0x3;
11368 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11369
11370 /* Bundles are always in little-endian byte order */
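/* A bundle is 128 bits: a 5-bit template in bits 0-4 followed by three
   41-bit instruction slots (bits 5-45, 46-86, and 87-127), which is the
   split performed below.  */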
11371 t0 = bfd_getl64 (fixpos);
11372 t1 = bfd_getl64 (fixpos + 8);
11373 control_bits = t0 & 0x1f;
11374 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11375 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11376 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11377
11378 err = NULL;
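/* The long forms (movl's IMMU64, the 62-bit X-unit immediate, and brl's
   TGT64) are special-cased because their immediates span both the L slot
   (slot 1) and the X slot (slot 2) of an MLX bundle; all other operands
   are encoded within a single slot via the operand's insert hook.  */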
11379 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11380 {
11381 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11382 insn[2] |= (((value & 0x7f) << 13)
11383 | (((value >> 7) & 0x1ff) << 27)
11384 | (((value >> 16) & 0x1f) << 22)
11385 | (((value >> 21) & 0x1) << 21)
11386 | (((value >> 63) & 0x1) << 36));
11387 }
11388 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11389 {
11390 if (value & ~0x3fffffffffffffffULL)
11391 err = _("integer operand out of range");
11392 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11393 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11394 }
11395 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11396 {
11397 value >>= 4;
11398 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11399 insn[2] |= ((((value >> 59) & 0x1) << 36)
11400 | (((value >> 0) & 0xfffff) << 13));
11401 }
11402 else
11403 err = (*odesc->insert) (odesc, value, insn + slot);
11404
11405 if (err)
11406 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11407
11408 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11409 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11410 number_to_chars_littleendian (fixpos + 0, t0, 8);
11411 number_to_chars_littleendian (fixpos + 8, t1, 8);
11412 }
11413
11414 /* Attempt to simplify or even eliminate a fixup. The return value is
11415 ignored; perhaps it was once meaningful, but now it is historical.
11416 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11417
11418 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11419 (if possible). */
11420
11421 void
11422 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11423 {
11424 char *fixpos;
11425 valueT value = *valP;
11426
11427 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11428
11429 if (fix->fx_pcrel)
11430 {
11431 switch (fix->fx_r_type)
11432 {
11433 case BFD_RELOC_IA64_PCREL21B: break;
11434 case BFD_RELOC_IA64_PCREL21BI: break;
11435 case BFD_RELOC_IA64_PCREL21F: break;
11436 case BFD_RELOC_IA64_PCREL21M: break;
11437 case BFD_RELOC_IA64_PCREL60B: break;
11438 case BFD_RELOC_IA64_PCREL22: break;
11439 case BFD_RELOC_IA64_PCREL64I: break;
11440 case BFD_RELOC_IA64_PCREL32MSB: break;
11441 case BFD_RELOC_IA64_PCREL32LSB: break;
11442 case BFD_RELOC_IA64_PCREL64MSB: break;
11443 case BFD_RELOC_IA64_PCREL64LSB: break;
11444 default:
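/* Convert a plain data reloc into its pc-relative counterpart,
   e.g. BFD_RELOC_IA64_DIR64LSB becomes BFD_RELOC_IA64_PCREL64LSB.  */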
11445 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11446 fix->fx_r_type);
11447 break;
11448 }
11449 }
11450 if (fix->fx_addsy)
11451 {
11452 switch ((unsigned) fix->fx_r_type)
11453 {
11454 case BFD_RELOC_UNUSED:
11455 /* This must be a TAG13 or TAG13b operand. There are no external
11456 relocs defined for them, so we must give an error. */
11457 as_bad_where (fix->fx_file, fix->fx_line,
11458 _("%s must have a constant value"),
11459 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11460 fix->fx_done = 1;
11461 return;
11462
11463 case BFD_RELOC_IA64_TPREL14:
11464 case BFD_RELOC_IA64_TPREL22:
11465 case BFD_RELOC_IA64_TPREL64I:
11466 case BFD_RELOC_IA64_LTOFF_TPREL22:
11467 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11468 case BFD_RELOC_IA64_DTPREL14:
11469 case BFD_RELOC_IA64_DTPREL22:
11470 case BFD_RELOC_IA64_DTPREL64I:
11471 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11472 S_SET_THREAD_LOCAL (fix->fx_addsy);
11473 break;
11474
11475 #ifdef TE_VMS
11476 case DUMMY_RELOC_IA64_SLOTCOUNT:
11477 as_bad_where (fix->fx_file, fix->fx_line,
11478 _("cannot resolve @slotcount parameter"));
11479 fix->fx_done = 1;
11480 return;
11481 #endif
11482
11483 default:
11484 break;
11485 }
11486 }
11487 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11488 {
11489 #ifdef TE_VMS
11490 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11491 {
11492 /* For @slotcount, convert a difference of addresses into a
11493 difference of slots.  */
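/* A bundle is 16 bytes and holds three slots, and a slot address is the
   bundle address plus the slot number (0-2).  A low nibble of 0-2 is
   therefore a direct slot difference, while 0xe or 0xf means the slot
   subtraction borrowed a whole bundle (three slots) minus 2 or 1.  */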
11494 valueT v;
11495
11496 v = (value >> 4) * 3;
11497 switch (value & 0x0f)
11498 {
11499 case 0:
11500 case 1:
11501 case 2:
11502 v += value & 0x0f;
11503 break;
11504 case 0x0f:
11505 v += 2;
11506 break;
11507 case 0x0e:
11508 v += 1;
11509 break;
11510 default:
11511 as_bad (_("invalid @slotcount value"));
11512 }
11513 value = v;
11514 }
11515 #endif
11516
11517 if (fix->tc_fix_data.bigendian)
11518 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11519 else
11520 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11521 fix->fx_done = 1;
11522 }
11523 else
11524 {
11525 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11526 fix->fx_done = 1;
11527 }
11528 }
11529
11530 /* Generate the BFD reloc to be stuck in the object file from the
11531 fixup used internally in the assembler. */
11532
11533 arelent *
11534 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11535 {
11536 arelent *reloc;
11537
11538 reloc = XNEW (arelent);
11539 reloc->sym_ptr_ptr = XNEW (asymbol *);
11540 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11541 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11542 reloc->addend = fixp->fx_offset;
11543 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11544
11545 if (!reloc->howto)
11546 {
11547 as_bad_where (fixp->fx_file, fixp->fx_line,
11548 _("Cannot represent %s relocation in object file"),
11549 bfd_get_reloc_code_name (fixp->fx_r_type));
11550 free (reloc);
11551 return NULL;
11552 }
11553 return reloc;
11554 }
11555
11556 /* Turn a string in input_line_pointer into a floating point constant
11557 of type TYPE, and store the appropriate bytes in *LIT. The number
11558 of LITTLENUMS emitted is stored in *SIZE. An error message is
11559 returned, or NULL on OK. */
11560
11561 const char *
11562 md_atof (int type, char *lit, int *size)
11563 {
11564 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11565 char *t;
11566 int prec;
11567
11568 switch (type)
11569 {
11570 /* IEEE floats */
11571 case 'f':
11572 case 'F':
11573 case 's':
11574 case 'S':
11575 prec = 2;
11576 break;
11577
11578 case 'd':
11579 case 'D':
11580 case 'r':
11581 case 'R':
11582 prec = 4;
11583 break;
11584
11585 case 'x':
11586 case 'X':
11587 case 'p':
11588 case 'P':
11589 prec = 5;
11590 break;
11591
11592 default:
11593 *size = 0;
11594 return _("Unrecognized or unsupported floating point constant");
11595 }
11596 t = atof_ieee (input_line_pointer, type, words);
11597 if (t)
11598 input_line_pointer = t;
11599
11600 (*ia64_float_to_chars) (lit, words, prec);
11601
11602 if (type == 'X')
11603 {
11604 /* It is a 10-byte floating point value followed by 6 bytes of padding.  */
11605 memset (&lit [10], 0, 6);
11606 *size = 8 * sizeof (LITTLENUM_TYPE);
11607 }
11608 else
11609 *size = prec * sizeof (LITTLENUM_TYPE);
11610
11611 return NULL;
11612 }
11613
11614 /* Handle ia64 specific semantics of the align directive. */
11615
11616 void
11617 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11618 const char *fill ATTRIBUTE_UNUSED,
11619 int len ATTRIBUTE_UNUSED,
11620 int max ATTRIBUTE_UNUSED)
11621 {
11622 if (subseg_text_p (now_seg))
11623 ia64_flush_insns ();
11624 }
11625
11626 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11627 of an rs_align_code fragment. */
11628
11629 void
11630 ia64_handle_align (fragS *fragp)
11631 {
11632 int bytes;
11633 char *p;
11634 const unsigned char *nop_type;
11635
11636 if (fragp->fr_type != rs_align_code)
11637 return;
11638
11639 /* Check if this frag has to end with a stop bit. */
11640 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11641
11642 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11643 p = fragp->fr_literal + fragp->fr_fix;
11644
11645 /* If no padding is needed, check whether we still need a stop bit.  */
11646 if (!bytes && fragp->tc_frag_data)
11647 {
11648 if (fragp->fr_fix < 16)
11649 #if 1
11650 /* FIXME: It won't work with
11651 .align 16
11652 alloc r32=ar.pfs,1,2,4,0
11653 */
11654 ;
11655 #else
11656 as_bad_where (fragp->fr_file, fragp->fr_line,
11657 _("Can't add stop bit to mark end of instruction group"));
11658 #endif
11659 else
11660 /* Bundles are always in little-endian byte order. Make sure
11661 the previous bundle has the stop bit. */
11662 *(p - 16) |= 1;
11663 }
11664
11665 /* Make sure we are on a 16-byte boundary, in case someone has been
11666 putting data into a text section. */
11667 if (bytes & 15)
11668 {
11669 int fix = bytes & 15;
11670 memset (p, 0, fix);
11671 p += fix;
11672 bytes -= fix;
11673 fragp->fr_fix += fix;
11674 }
11675
11676 /* Instruction bundles are always little-endian. */
11677 memcpy (p, nop_type, 16);
11678 fragp->fr_var = 16;
11679 }
11680
11681 static void
11682 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11683 int prec)
11684 {
11685 while (prec--)
11686 {
11687 number_to_chars_bigendian (lit, (long) (*words++),
11688 sizeof (LITTLENUM_TYPE));
11689 lit += sizeof (LITTLENUM_TYPE);
11690 }
11691 }
11692
11693 static void
11694 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11695 int prec)
11696 {
11697 while (prec--)
11698 {
11699 number_to_chars_littleendian (lit, (long) (words[prec]),
11700 sizeof (LITTLENUM_TYPE));
11701 lit += sizeof (LITTLENUM_TYPE);
11702 }
11703 }
11704
11705 void
11706 ia64_elf_section_change_hook (void)
11707 {
11708 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11709 && elf_linked_to_section (now_seg) == NULL)
11710 elf_linked_to_section (now_seg) = text_section;
11711 dot_byteorder (-1);
11712 }
11713
11714 /* Check if a label should be made global. */
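/* A label defined with a double colon, e.g. `foo::', is made global;
   the second `:' is still in the input stream when we get here.  */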
11715 void
11716 ia64_check_label (symbolS *label)
11717 {
11718 if (*input_line_pointer == ':')
11719 {
11720 S_SET_EXTERNAL (label);
11721 input_line_pointer++;
11722 }
11723 }
11724
11725 /* Used to remember where .alias and .secalias directives are seen. We
11726 will rename symbol and section names when we are about to output
11727 the relocatable file. */
11728 struct alias
11729 {
11730 const char *file; /* The file where the directive is seen. */
11731 unsigned int line; /* The line number the directive is at. */
11732 const char *name; /* The original name of the symbol. */
11733 };
11734
11735 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11736 .secalias. Otherwise, it is .alias. */
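/* For example, `.alias foo, "FOO$BAR"' causes the symbol foo to be
   emitted under the name FOO$BAR when the object file is written;
   .secalias does the same for a section name.  */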
11737 static void
11738 dot_alias (int section)
11739 {
11740 char *name, *alias;
11741 char delim;
11742 char *end_name;
11743 int len;
11744 struct alias *h;
11745 const char *a;
11746 htab_t ahash, nhash;
11747 const char *kind;
11748
11749 delim = get_symbol_name (&name);
11750 end_name = input_line_pointer;
11751 *end_name = delim;
11752
11753 if (name == end_name)
11754 {
11755 as_bad (_("expected symbol name"));
11756 ignore_rest_of_line ();
11757 return;
11758 }
11759
11760 SKIP_WHITESPACE_AFTER_NAME ();
11761
11762 if (*input_line_pointer != ',')
11763 {
11764 *end_name = 0;
11765 as_bad (_("expected comma after \"%s\""), name);
11766 *end_name = delim;
11767 ignore_rest_of_line ();
11768 return;
11769 }
11770
11771 input_line_pointer++;
11772 *end_name = 0;
11773 ia64_canonicalize_symbol_name (name);
11774
11775 /* We call demand_copy_C_string to check whether the alias string is valid.
11776 There should be a closing `"' and no `\0' in the string.  */
11777 alias = demand_copy_C_string (&len);
11778 if (alias == NULL)
11779 {
11780 ignore_rest_of_line ();
11781 return;
11782 }
11783
11784 /* Make a copy of the name string.  */
11785 len = strlen (name) + 1;
11786 obstack_grow (&notes, name, len);
11787 name = obstack_finish (&notes);
11788
11789 if (section)
11790 {
11791 kind = "section";
11792 ahash = secalias_hash;
11793 nhash = secalias_name_hash;
11794 }
11795 else
11796 {
11797 kind = "symbol";
11798 ahash = alias_hash;
11799 nhash = alias_name_hash;
11800 }
11801
11802 /* Check if alias has been used before. */
11803
11804 h = (struct alias *) str_hash_find (ahash, alias);
11805 if (h)
11806 {
11807 if (strcmp (h->name, name))
11808 as_bad (_("`%s' is already the alias of %s `%s'"),
11809 alias, kind, h->name);
11810 obstack_free (&notes, name);
11811 obstack_free (&notes, alias);
11812 goto out;
11813 }
11814
11815 /* Check if name already has an alias. */
11816 a = (const char *) str_hash_find (nhash, name);
11817 if (a)
11818 {
11819 if (strcmp (a, alias))
11820 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11821 obstack_free (&notes, name);
11822 obstack_free (&notes, alias);
11823 goto out;
11824 }
11825
11826 h = XNEW (struct alias);
11827 h->file = as_where (&h->line);
11828 h->name = name;
11829
11830 str_hash_insert (ahash, alias, h, 0);
11831 str_hash_insert (nhash, name, alias, 0);
11832
11833 out:
11834 demand_empty_rest_of_line ();
11835 }
11836
11837 /* Rename the original symbol to its alias.  */
11838 static int
11839 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11840 {
11841 string_tuple_t *tuple = *((string_tuple_t **) slot);
11842 struct alias *h = (struct alias *) tuple->value;
11843 symbolS *sym = symbol_find (h->name);
11844
11845 if (sym == NULL)
11846 {
11847 #ifdef TE_VMS
11848 /* VMS uses .alias extensively to alias CRTL functions to the same
11849 names with a decc$ prefix.  Sometimes the function gets optimized
11850 away and a warning results, which should be suppressed.  */
11851 if (!startswith (tuple->key, "decc$"))
11852 #endif
11853 as_warn_where (h->file, h->line,
11854 _("symbol `%s' aliased to `%s' is not used"),
11855 h->name, tuple->key);
11856 }
11857 else
11858 S_SET_NAME (sym, (char *) tuple->key);
11859
11860 return 1;
11861 }
11862
11863 /* Called from write_object_file. */
11864 void
11865 ia64_adjust_symtab (void)
11866 {
11867 htab_traverse (alias_hash, do_alias, NULL);
11868 }
11869
11870 /* Rename the original section to its alias.  */
11871 static int
11872 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11873 {
11874 string_tuple_t *tuple = *((string_tuple_t **) slot);
11875 struct alias *h = (struct alias *) tuple->value;
11876 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11877
11878 if (sec == NULL)
11879 as_warn_where (h->file, h->line,
11880 _("section `%s' aliased to `%s' is not used"),
11881 h->name, tuple->key);
11882 else
11883 sec->name = tuple->key;
11884
11885 return 1;
11886 }
11887
11888 /* Called from write_object_file. */
11889 void
11890 ia64_frob_file (void)
11891 {
11892 htab_traverse (secalias_hash, do_secalias, NULL);
11893 }
11894
11895 #ifdef TE_VMS
11896 #define NT_VMS_MHD 1
11897 #define NT_VMS_LNM 2
11898
11899 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11900 .note section.  */
11901
11902 /* Manufacture a VMS-like time string. */
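/* ctime() yields e.g. "Thu Feb 24 15:00:00 2005\n"; the fields are
   sliced out and reassembled below as "24-Feb-2005 15:00".  */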
11903 static void
11904 get_vms_time (char *Now)
11905 {
11906 char *pnt;
11907 time_t timeb;
11908
11909 time (&timeb);
11910 pnt = ctime (&timeb);
11911 pnt[3] = 0;
11912 pnt[7] = 0;
11913 pnt[10] = 0;
11914 pnt[16] = 0;
11915 pnt[24] = 0;
11916 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11917 }
11918
11919 void
11920 ia64_vms_note (void)
11921 {
11922 char *p;
11923 asection *seg = now_seg;
11924 subsegT subseg = now_subseg;
11925 asection *secp = NULL;
11926 char *bname;
11927 char buf [256];
11928 symbolS *sym;
11929
11930 /* Create the .note section. */
11931
11932 secp = subseg_new (".note", 0);
11933 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11934
11935 /* Module header note (MHD). */
11936 bname = xstrdup (lbasename (out_file_name));
11937 if ((p = strrchr (bname, '.')))
11938 *p = '\0';
11939
11940 /* VMS note header is 24 bytes long. */
11941 p = frag_more (8 + 8 + 8);
11942 number_to_chars_littleendian (p + 0, 8, 8);
11943 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11944 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11945
11946 p = frag_more (8);
11947 strcpy (p, "IPF/VMS");
11948
11949 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11950 get_vms_time (p);
11951 strcpy (p + 17, "24-FEB-2005 15:00");
11952 p += 17 + 17;
11953 strcpy (p, bname);
11954 p += strlen (bname) + 1;
11955 free (bname);
11956 strcpy (p, "V1.0");
11957
11958 frag_align (3, 0, 0);
11959
11960 /* Language processor name note. */
11961 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11962 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11963
11964 p = frag_more (8 + 8 + 8);
11965 number_to_chars_littleendian (p + 0, 8, 8);
11966 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11967 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11968
11969 p = frag_more (8);
11970 strcpy (p, "IPF/VMS");
11971
11972 p = frag_more (strlen (buf) + 1);
11973 strcpy (p, buf);
11974
11975 frag_align (3, 0, 0);
11976
11977 secp = subseg_new (".vms_display_name_info", 0);
11978 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11979
11980 /* This symbol should be passed on the command line and be variable
11981 according to language. */
11982 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11983 absolute_section, &zero_address_frag, 0);
11984 symbol_table_insert (sym);
11985 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11986
11987 p = frag_more (4);
11988 /* Format 3 of VMS demangler Spec. */
11989 number_to_chars_littleendian (p, 3, 4);
11990
11991 p = frag_more (4);
11992 /* Placeholder for the symbol table index of the above symbol.  */
11993 number_to_chars_littleendian (p, -1, 4);
11994
11995 frag_align (3, 0, 0);
11996
11997 /* We probably can't restore the current segment, for there likely
11998 isn't one yet... */
11999 if (seg && subseg)
12000 subseg_set (seg, subseg);
12001 }
12002
12003 #endif /* TE_VMS */