1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bool dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static htab_t alias_hash;
189 static htab_t alias_name_hash;
190 static htab_t secalias_hash;
191 static htab_t secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 htab_t pseudo_hash; /* pseudo opcode hash table */
232 htab_t reg_hash; /* register name hash table */
233 htab_t dynreg_hash; /* dynamic register hash table */
234 htab_t const_hash; /* constant hash table */
235 htab_t entry_hash; /* code entry hint hash table */
236
237 /* If X_op is != O_absent, the register name for the instruction's
238 qualifying predicate. If NULL, p0 is assumed for instructions
239 that can be predicated. */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
275 track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
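/* For reference: the two nop bundles above differ only in their first
   byte (0x0c vs. 0x0d).  The low bits of that byte hold the bundle
   template, and the least significant bit of the MFI template pair
   encodes the trailing stop; hence 0x0d for the "with stop-bit"
   variant.  */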
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
548 start with an at sign (@)). */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
647 habit of setting temporary sentinels). */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
796 /* The input value is a negated offset from psp, and specifies an address
797 psp - offset. That address is encoded as psp + 16 - (4 * encoded). Thus we
798 must add 16 and divide by 4 to get the encoded value. */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
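/* A small worked example of the encoding above (illustration only, not
   part of the assembler): a slot at address psp - 32 is passed in as
   OFFSET == 32 and encodes as (32 + 16) / 4 == 12; decoding inverts
   this as psp + 16 - 4 * 12 == psp - 32.  */
#if 0
static void
encoded_psp_offset_example (void)
{
  unsigned int offset = 32;				/* address is psp - 32 */
  unsigned int encoded = ENCODED_PSP_OFFSET (offset);	/* == 12 */

  gas_assert (encoded == 12);
  gas_assert (16 - 4 * (int) encoded == -(int) offset);
}
#endif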
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
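/* Illustration only (not compiled): a ".regstk 2, 3, 4, 0" directive is
   expected to reach this function as set_regstack (2, 3, 4, 0), giving
   a frame of 9 registers with the inputs at r32-r33, the locals at
   r34-r36, the outputs at r37-r40, and no rotating registers.  */
#if 0
static void
set_regstack_example (void)
{
  unsigned int sof = set_regstack (2, 3, 4, 0);

  gas_assert (sof == 9);
  gas_assert (md.in.base == REG_GR + 32 && md.in.num_regs == 2);
  gas_assert (md.loc.base == REG_GR + 34 && md.loc.num_regs == 3);
  gas_assert (md.out.base == REG_GR + 37 && md.out.num_regs == 4);
  gas_assert (md.rot.num_regs == 0);
}
#endif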
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bool mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = false;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_set_section_size (now_seg, size);
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
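/* The variable-length operands above (region lengths, times) are
   appended with output_leb128.  As a rough sketch of the byte format
   involved (assumption: plain unsigned LEB128 when the sign argument is
   zero; this sketch is for illustration and is not used by the
   assembler):  */
#if 0
static int
uleb128_sketch (char *p, unsigned long value)
{
  int n = 0;

  do
    {
      unsigned char byte = value & 0x7f;	/* low seven bits */

      value >>= 7;
      if (value != 0)
	byte |= 0x80;				/* more bytes follow */
      p[n++] = byte;
    }
  while (value != 0);
  return n;		/* e.g. 0x90 encodes as 0x90 0x01 (two bytes) */
}
#endif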
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586 as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711 /* Don't free the first discarded record: it's being used as the
1712 terminator for (currently) br_gr and gr_gr processing, and keeping
1713 it also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
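/* The loop above relies on mask & (~mask + 1), i.e. mask & -mask,
   isolating the least significant set bit, so a multi-bit save mask is
   split into a chain of single-bit records.  A minimal sketch of that
   idiom (illustration only): a mask of 0xa is split into 0x2 followed
   by 0x8.  */
#if 0
static void
split_mask_example (void)
{
  unsigned int mask = 0xa;
  unsigned int lowest = mask & (~mask + 1);	/* == 0x2 */
  unsigned int rest = mask & ~lowest;		/* == 0x8 */

  gas_assert (lowest == 0x2 && rest == 0x8);
}
#endif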
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
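/* Editorial note: throughout the helpers above, sp-relative offsets are
   stored in 4-byte units (offset / 4) -- e.g. output_lc_sprel (32) records
   off.sp = 8 -- whereas psp-relative offsets are encoded through the
   ENCODED_PSP_OFFSET macro defined earlier in this file.  */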
2391
2392 /* Given a unw_rec_list, process the record into its correct output
2393 format using the specified function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record_type_not_valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a unw_rec_list list, process all the records with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
2626
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
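/* Editorial worked example: each instruction slot gets a 2-bit field in
   imask, four slots per byte, with earlier slots in the more significant
   bits (pos = 6, 4, 2, 0 for t % 4 = 0..3) and byte index i = t / 4 + 1.
   So marking a general-register save (type 2) at slot t = 5 sets
   imask[2] |= 2 << 4.  */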
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 pos = 0;
2676 ++i;
2677 }
2678 }
2679 }
2680
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
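/* Editorial worked example: a "slot address" here is the byte address of a
   16-byte bundle with the slot number (0..2) in its low bits, so the index
   arithmetic below counts 3 slots per bundle plus the slot delta.  E.g.
   within a single frag, a slot two bundles (0x20 bytes) beyond FIRST_ADDR
   and in slot 1 rather than slot 0 yields 3 * 2 + 1 = 7.  */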
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
2741 /* Fall through. */
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790 unresolved fields (i.e., length counts for a prologue). After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
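/* Editorial worked example of the size computation below, assuming LP64
   (md.pointer_size == 8): 21 bytes of unwind records pad up to 24, plus 8
   for the header gives 32, plus another 8 if a personality routine offset
   is present gives 40.  */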
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
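/* Editorial illustration of the 8-byte header word assembled below: the
   version (1) goes at bit 48, the flag value computed above at bit 32, and
   the length of the record area in pointer-sized words in the low bits.
   For LP64 with a personality routine and len == 24 this is
   (1 << 48) | (3 << 32) | 3 == 0x0001000300000003.  */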
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
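/* Editorial summary of the (ab, reg) encoding produced below: ab 0 covers
   the preserved general registers r4-r7, ab 1 the preserved floating-point
   registers f2-f5 and f16-f31, ab 2 the preserved branch registers b1-b5,
   and ab 3 a fixed list of special registers (pr = 0, psp = 1, priunat = 2,
   b0 = 3, ar.bsp = 4, ..., ar.lc = 10).  For example, b2 maps to (2, 2)
   and ar.unat to (3, 7).  */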
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3264
3265 /* The current entry can in fact be a chain of unwind entries. */
3266 if (unwind.current_entry == NULL)
3267 unwind.current_entry = ptr;
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
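/* Editorial usage illustration (register spellings are the usual IA-64 gas
   ones, e.g. b0 for the return pointer):
     .save ar.pfs, r36   -- pfs_when at the current slot, plus pfs_gr (36)
     .save b0, r35       -- rp_when, plus rp_gr (35)
   When the corresponding bit was already listed in the .prologue mask, only
   the _when record is added and the register is cross-checked against the
   .prologue operands.  */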
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
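/* Editorial note on the count handling below: a bare ".restore sp" pops
   every nested prologue region (ecount defaults to prologue_count - 1),
   while ".restore sp, N" pops N + 1 regions, so ".restore sp, 0" unwinds
   just the innermost one.  */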
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527 text sect.            unwind table sect.
3528 name:                 name:                       comments:
3529 ----------            -----------------           --------------------------------
3530 .text                 .IA_64.unwind
3531 .text.foo             .IA_64.unwind.text.foo
3532 .foo                  .IA_64.unwind.foo
3533 .gnu.linkonce.t.foo
3534                       .gnu.linkonce.ia64unw.foo
3535 _info                 .IA_64.unwind_info          gas issues error message (ditto)
3536 _infoFOO              .IA_64.unwind_infoFOO       gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (startswith (text_name, "_info"))
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (startswith (text_name, ".gnu.linkonce.t."))
3577 {
3578 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3579 suffix += sizeof (".gnu.linkonce.t.") - 1;
3580 }
3581
3582 sec_name = concat (prefix, suffix, NULL);
3583
3584 /* Handle COMDAT group. */
3585 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3586 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3587 {
3588 char *section;
3589 const char *group_name = elf_group_name (text_seg);
3590
3591 if (group_name == NULL)
3592 {
3593 as_bad (_("Group section `%s' has no group signature"),
3594 sec_text_name);
3595 ignore_rest_of_line ();
3596 free (sec_name);
3597 return;
3598 }
3599
3600 /* We have to construct a fake section directive. */
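/* Illustrative example (editorial): for a text section named
   .gnu.linkonce.t.foo in COMDAT group "foo", the directive argument built
   here for the unwind table section is
   .gnu.linkonce.ia64unw.foo,"aG",@progbits,foo,comdat  */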
3601 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3602 set_section (section);
3603 free (section);
3604 }
3605 else
3606 {
3607 set_section (sec_name);
3608 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3609 }
3610
3611 elf_linked_to_section (now_seg) = text_seg;
3612 free (sec_name);
3613 }
3614
3615 static void
3616 generate_unwind_image (const segT text_seg)
3617 {
3618 int size, pad;
3619 unw_rec_list *list;
3620
3621 /* Mark the end of the unwind info, so that we can compute the size of the
3622 last unwind region. */
3623 add_unwind_entry (output_endp (), NOT_A_CHAR);
3624
3625 /* Force out pending instructions, to make sure all unwind records have
3626 a valid slot_number field. */
3627 ia64_flush_insns ();
3628
3629 /* Generate the unwind record. */
3630 list = optimize_unw_records (unwind.list);
3631 fixup_unw_records (list, 1);
3632 size = calc_record_size (list);
3633
3634 if (size > 0 || unwind.force_unwind_entry)
3635 {
3636 unwind.force_unwind_entry = 0;
3637 /* pad to pointer-size boundary. */
3638 pad = size % md.pointer_size;
3639 if (pad != 0)
3640 size += md.pointer_size - pad;
3641 /* Add 8 for the header. */
3642 size += 8;
3643 /* Add a pointer for the personality offset. */
3644 if (unwind.personality_routine)
3645 size += md.pointer_size;
3646 }
3647
3648 /* If there are unwind records, switch sections, and output the info. */
3649 if (size != 0)
3650 {
3651 expressionS exp;
3652 bfd_reloc_code_real_type reloc;
3653
3654 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3655
3656 /* Make sure the section has 4 byte alignment for ILP32 and
3657 8 byte alignment for LP64. */
3658 frag_align (md.pointer_size_shift, 0, 0);
3659 record_alignment (now_seg, md.pointer_size_shift);
3660
3661 /* Set expression which points to start of unwind descriptor area. */
3662 unwind.info = expr_build_dot ();
3663
3664 frag_var (rs_machine_dependent, size, size, 0, 0,
3665 (offsetT) (long) unwind.personality_routine,
3666 (char *) list);
3667
3668 /* Add the personality address to the image. */
3669 if (unwind.personality_routine != 0)
3670 {
3671 exp.X_op = O_symbol;
3672 exp.X_add_symbol = unwind.personality_routine;
3673 exp.X_add_number = 0;
3674
3675 if (md.flags & EF_IA_64_BE)
3676 {
3677 if (md.flags & EF_IA_64_ABI64)
3678 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3679 else
3680 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3681 }
3682 else
3683 {
3684 if (md.flags & EF_IA_64_ABI64)
3685 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3686 else
3687 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3688 }
3689
3690 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3691 md.pointer_size, &exp, 0, reloc);
3692 unwind.personality_routine = 0;
3693 }
3694 }
3695
3696 free_saved_prologue_counts ();
3697 unwind.list = unwind.tail = unwind.current_entry = NULL;
3698 }
3699
3700 static void
3701 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3702 {
3703 if (!in_procedure ("handlerdata"))
3704 return;
3705 unwind.force_unwind_entry = 1;
3706
3707 /* Remember which segment we're in so we can switch back after .endp */
3708 unwind.saved_text_seg = now_seg;
3709 unwind.saved_text_subseg = now_subseg;
3710
3711 /* Generate unwind info into unwind-info section and then leave that
3712 section as the currently active one so dataXX directives go into
3713 the language specific data area of the unwind info block. */
3714 generate_unwind_image (now_seg);
3715 demand_empty_rest_of_line ();
3716 }
3717
3718 static void
3719 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3720 {
3721 if (!in_procedure ("unwentry"))
3722 return;
3723 unwind.force_unwind_entry = 1;
3724 demand_empty_rest_of_line ();
3725 }
3726
3727 static void
3728 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3729 {
3730 expressionS e;
3731 unsigned reg;
3732
3733 if (!in_prologue ("altrp"))
3734 return;
3735
3736 parse_operand_and_eval (&e, 0);
3737 reg = e.X_add_number - REG_BR;
3738 if (e.X_op != O_register || reg > 7)
3739 {
3740 as_bad (_("First operand to .altrp not a valid branch register"));
3741 reg = 0;
3742 }
3743 add_unwind_entry (output_rp_br (reg), 0);
3744 }
3745
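/* Editorial usage illustration for the .savesp/.savepsp handler below:
     .savesp ar.unat, 16    -- unat_when, plus unat_sprel (off.sp = 4)
     .savepsp pr, 16        -- preds_when, plus preds_psprel
   i.e. the named register was saved at the given sp- or psp-relative
   memory offset rather than in another register.  */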
3746 static void
3747 dot_savemem (int psprel)
3748 {
3749 expressionS e1, e2;
3750 int sep;
3751 int reg1, val;
3752 const char * const po = psprel ? "savepsp" : "savesp";
3753
3754 if (!in_prologue (po))
3755 return;
3756
3757 sep = parse_operand_and_eval (&e1, ',');
3758 if (sep == ',')
3759 sep = parse_operand_and_eval (&e2, ',');
3760 else
3761 e2.X_op = O_absent;
3762
3763 reg1 = e1.X_add_number;
3764 val = e2.X_add_number;
3765
3766 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3767 if (e1.X_op != O_register)
3768 {
3769 as_bad (_("First operand to .%s not a register"), po);
3770 reg1 = REG_PR; /* Anything valid is good here. */
3771 }
3772 if (e2.X_op != O_constant)
3773 {
3774 as_bad (_("Second operand to .%s not a constant"), po);
3775 val = 0;
3776 }
3777
3778 switch (reg1)
3779 {
3780 case REG_AR + AR_BSP:
3781 add_unwind_entry (output_bsp_when (), sep);
3782 add_unwind_entry ((psprel
3783 ? output_bsp_psprel
3784 : output_bsp_sprel) (val), NOT_A_CHAR);
3785 break;
3786 case REG_AR + AR_BSPSTORE:
3787 add_unwind_entry (output_bspstore_when (), sep);
3788 add_unwind_entry ((psprel
3789 ? output_bspstore_psprel
3790 : output_bspstore_sprel) (val), NOT_A_CHAR);
3791 break;
3792 case REG_AR + AR_RNAT:
3793 add_unwind_entry (output_rnat_when (), sep);
3794 add_unwind_entry ((psprel
3795 ? output_rnat_psprel
3796 : output_rnat_sprel) (val), NOT_A_CHAR);
3797 break;
3798 case REG_AR + AR_UNAT:
3799 add_unwind_entry (output_unat_when (), sep);
3800 add_unwind_entry ((psprel
3801 ? output_unat_psprel
3802 : output_unat_sprel) (val), NOT_A_CHAR);
3803 break;
3804 case REG_AR + AR_FPSR:
3805 add_unwind_entry (output_fpsr_when (), sep);
3806 add_unwind_entry ((psprel
3807 ? output_fpsr_psprel
3808 : output_fpsr_sprel) (val), NOT_A_CHAR);
3809 break;
3810 case REG_AR + AR_PFS:
3811 add_unwind_entry (output_pfs_when (), sep);
3812 add_unwind_entry ((psprel
3813 ? output_pfs_psprel
3814 : output_pfs_sprel) (val), NOT_A_CHAR);
3815 break;
3816 case REG_AR + AR_LC:
3817 add_unwind_entry (output_lc_when (), sep);
3818 add_unwind_entry ((psprel
3819 ? output_lc_psprel
3820 : output_lc_sprel) (val), NOT_A_CHAR);
3821 break;
3822 case REG_BR:
3823 add_unwind_entry (output_rp_when (), sep);
3824 add_unwind_entry ((psprel
3825 ? output_rp_psprel
3826 : output_rp_sprel) (val), NOT_A_CHAR);
3827 break;
3828 case REG_PR:
3829 add_unwind_entry (output_preds_when (), sep);
3830 add_unwind_entry ((psprel
3831 ? output_preds_psprel
3832 : output_preds_sprel) (val), NOT_A_CHAR);
3833 break;
3834 case REG_PRIUNAT:
3835 add_unwind_entry (output_priunat_when_mem (), sep);
3836 add_unwind_entry ((psprel
3837 ? output_priunat_psprel
3838 : output_priunat_sprel) (val), NOT_A_CHAR);
3839 break;
3840 default:
3841 as_bad (_("First operand to .%s not a valid register"), po);
3842 add_unwind_entry (NULL, sep);
3843 break;
3844 }
3845 }
3846
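/* Editorial usage illustration for .save.g (and, analogously, .save.b and
   .save.f below): ".save.g 0x3" records that two preserved general
   registers were spilled to memory (a gr_mem record), while
   ".save.g 0x3, r40" records that they were copied to consecutive general
   registers starting at r40 (a gr_gr record chain).  */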
3847 static void
3848 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3849 {
3850 expressionS e;
3851 unsigned grmask;
3852 int sep;
3853
3854 if (!in_prologue ("save.g"))
3855 return;
3856
3857 sep = parse_operand_and_eval (&e, ',');
3858
3859 grmask = e.X_add_number;
3860 if (e.X_op != O_constant
3861 || e.X_add_number <= 0
3862 || e.X_add_number > 0xf)
3863 {
3864 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3865 grmask = 0;
3866 }
3867
3868 if (sep == ',')
3869 {
3870 unsigned reg;
3871 int n = popcount (grmask);
3872
3873 parse_operand_and_eval (&e, 0);
3874 reg = e.X_add_number - REG_GR;
3875 if (e.X_op != O_register || reg > 127)
3876 {
3877 as_bad (_("Second operand to .save.g must be a general register"));
3878 reg = 0;
3879 }
3880 else if (reg > 128U - n)
3881 {
3882 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3883 reg = 0;
3884 }
3885 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3886 }
3887 else
3888 add_unwind_entry (output_gr_mem (grmask), 0);
3889 }
3890
3891 static void
3892 dot_savef (int dummy ATTRIBUTE_UNUSED)
3893 {
3894 expressionS e;
3895
3896 if (!in_prologue ("save.f"))
3897 return;
3898
3899 parse_operand_and_eval (&e, 0);
3900
3901 if (e.X_op != O_constant
3902 || e.X_add_number <= 0
3903 || e.X_add_number > 0xfffff)
3904 {
3905 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3906 e.X_add_number = 0;
3907 }
3908 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3909 }
3910
3911 static void
3912 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3913 {
3914 expressionS e;
3915 unsigned brmask;
3916 int sep;
3917
3918 if (!in_prologue ("save.b"))
3919 return;
3920
3921 sep = parse_operand_and_eval (&e, ',');
3922
3923 brmask = e.X_add_number;
3924 if (e.X_op != O_constant
3925 || e.X_add_number <= 0
3926 || e.X_add_number > 0x1f)
3927 {
3928 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3929 brmask = 0;
3930 }
3931
3932 if (sep == ',')
3933 {
3934 unsigned reg;
3935 int n = popcount (brmask);
3936
3937 parse_operand_and_eval (&e, 0);
3938 reg = e.X_add_number - REG_GR;
3939 if (e.X_op != O_register || reg > 127)
3940 {
3941 as_bad (_("Second operand to .save.b must be a general register"));
3942 reg = 0;
3943 }
3944 else if (reg > 128U - n)
3945 {
3946 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3947 reg = 0;
3948 }
3949 add_unwind_entry (output_br_gr (brmask, reg), 0);
3950 }
3951 else
3952 add_unwind_entry (output_br_mem (brmask), 0);
3953 }
3954
3955 static void
3956 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3957 {
3958 expressionS e1, e2;
3959
3960 if (!in_prologue ("save.gf"))
3961 return;
3962
3963 if (parse_operand_and_eval (&e1, ',') == ',')
3964 parse_operand_and_eval (&e2, 0);
3965 else
3966 e2.X_op = O_absent;
3967
3968 if (e1.X_op != O_constant
3969 || e1.X_add_number < 0
3970 || e1.X_add_number > 0xf)
3971 {
3972 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3973 e1.X_op = O_absent;
3974 e1.X_add_number = 0;
3975 }
3976 if (e2.X_op != O_constant
3977 || e2.X_add_number < 0
3978 || e2.X_add_number > 0xfffff)
3979 {
3980 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3981 e2.X_op = O_absent;
3982 e2.X_add_number = 0;
3983 }
3984 if (e1.X_op == O_constant
3985 && e2.X_op == O_constant
3986 && e1.X_add_number == 0
3987 && e2.X_add_number == 0)
3988 as_bad (_("Operands to .save.gf may not be both zero"));
3989
3990 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3991 }
3992
3993 static void
3994 dot_spill (int dummy ATTRIBUTE_UNUSED)
3995 {
3996 expressionS e;
3997
3998 if (!in_prologue ("spill"))
3999 return;
4000
4001 parse_operand_and_eval (&e, 0);
4002
4003 if (e.X_op != O_constant)
4004 {
4005 as_bad (_("Operand to .spill must be a constant"));
4006 e.X_add_number = 0;
4007 }
4008 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4009 }
4010
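/* Editorial usage illustration for the spill directives handled by
   dot_restorereg above and dot_spillreg/dot_spillmem below; the ".p" forms
   take a qualifying predicate as their first operand:
     .spillreg b1, r40          -- preserved b1 lives in r40 (X2 record)
     .spillreg.p p6, b1, r40    -- the same, qualified by p6 (X4 record)
     .spillsp ar.unat, 16       -- ar.unat saved at sp-relative offset 16
     .restorereg b1             -- b1 is back in its original location  */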
4011 static void
4012 dot_spillreg (int pred)
4013 {
4014 int sep;
4015 unsigned int qp, ab, xy, reg, treg;
4016 expressionS e;
4017 const char * const po = pred ? "spillreg.p" : "spillreg";
4018
4019 if (!in_procedure (po))
4020 return;
4021
4022 if (pred)
4023 sep = parse_predicate_and_operand (&e, &qp, po);
4024 else
4025 {
4026 sep = parse_operand_and_eval (&e, ',');
4027 qp = 0;
4028 }
4029 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4030
4031 if (sep == ',')
4032 sep = parse_operand_and_eval (&e, ',');
4033 else
4034 e.X_op = O_absent;
4035 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4036
4037 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4038 }
4039
4040 static void
4041 dot_spillmem (int psprel)
4042 {
4043 expressionS e;
4044 int pred = (psprel < 0), sep;
4045 unsigned int qp, ab, reg;
4046 const char * po;
4047
4048 if (pred)
4049 {
4050 psprel = ~psprel;
4051 po = psprel ? "spillpsp.p" : "spillsp.p";
4052 }
4053 else
4054 po = psprel ? "spillpsp" : "spillsp";
4055
4056 if (!in_procedure (po))
4057 return;
4058
4059 if (pred)
4060 sep = parse_predicate_and_operand (&e, &qp, po);
4061 else
4062 {
4063 sep = parse_operand_and_eval (&e, ',');
4064 qp = 0;
4065 }
4066 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4067
4068 if (sep == ',')
4069 sep = parse_operand_and_eval (&e, ',');
4070 else
4071 e.X_op = O_absent;
4072 if (e.X_op != O_constant)
4073 {
4074 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4075 e.X_add_number = 0;
4076 }
4077
4078 if (psprel)
4079 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4080 else
4081 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4082 }
4083
4084 static unsigned int
4085 get_saved_prologue_count (unsigned long lbl)
4086 {
4087 label_prologue_count *lpc = unwind.saved_prologue_counts;
4088
4089 while (lpc != NULL && lpc->label_number != lbl)
4090 lpc = lpc->next;
4091
4092 if (lpc != NULL)
4093 return lpc->prologue_count;
4094
4095 as_bad (_("Missing .label_state %ld"), lbl);
4096 return 1;
4097 }
4098
4099 static void
4100 save_prologue_count (unsigned long lbl, unsigned int count)
4101 {
4102 label_prologue_count *lpc = unwind.saved_prologue_counts;
4103
4104 while (lpc != NULL && lpc->label_number != lbl)
4105 lpc = lpc->next;
4106
4107 if (lpc != NULL)
4108 lpc->prologue_count = count;
4109 else
4110 {
4111 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4112
4113 new_lpc->next = unwind.saved_prologue_counts;
4114 new_lpc->label_number = lbl;
4115 new_lpc->prologue_count = count;
4116 unwind.saved_prologue_counts = new_lpc;
4117 }
4118 }
4119
4120 static void
4121 free_saved_prologue_counts (void)
4122 {
4123 label_prologue_count *lpc = unwind.saved_prologue_counts;
4124 label_prologue_count *next;
4125
4126 while (lpc != NULL)
4127 {
4128 next = lpc->next;
4129 free (lpc);
4130 lpc = next;
4131 }
4132
4133 unwind.saved_prologue_counts = NULL;
4134 }
4135
4136 static void
4137 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4138 {
4139 expressionS e;
4140
4141 if (!in_body ("label_state"))
4142 return;
4143
4144 parse_operand_and_eval (&e, 0);
4145 if (e.X_op == O_constant)
4146 save_prologue_count (e.X_add_number, unwind.prologue_count);
4147 else
4148 {
4149 as_bad (_("Operand to .label_state must be a constant"));
4150 e.X_add_number = 0;
4151 }
4152 add_unwind_entry (output_label_state (e.X_add_number), 0);
4153 }
4154
4155 static void
4156 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4157 {
4158 expressionS e;
4159
4160 if (!in_body ("copy_state"))
4161 return;
4162
4163 parse_operand_and_eval (&e, 0);
4164 if (e.X_op == O_constant)
4165 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4166 else
4167 {
4168 as_bad (_("Operand to .copy_state must be a constant"));
4169 e.X_add_number = 0;
4170 }
4171 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4172 }
4173
4174 static void
4175 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4176 {
4177 expressionS e1, e2;
4178 unsigned char sep;
4179
4180 if (!in_prologue ("unwabi"))
4181 return;
4182
4183 sep = parse_operand_and_eval (&e1, ',');
4184 if (sep == ',')
4185 parse_operand_and_eval (&e2, 0);
4186 else
4187 e2.X_op = O_absent;
4188
4189 if (e1.X_op != O_constant)
4190 {
4191 as_bad (_("First operand to .unwabi must be a constant"));
4192 e1.X_add_number = 0;
4193 }
4194
4195 if (e2.X_op != O_constant)
4196 {
4197 as_bad (_("Second operand to .unwabi must be a constant"));
4198 e2.X_add_number = 0;
4199 }
4200
4201 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4202 }
4203
4204 static void
4205 dot_personality (int dummy ATTRIBUTE_UNUSED)
4206 {
4207 char *name, *p, c;
4208
4209 if (!in_procedure ("personality"))
4210 return;
4211 SKIP_WHITESPACE ();
4212 c = get_symbol_name (&name);
4213 p = input_line_pointer;
4214 unwind.personality_routine = symbol_find_or_make (name);
4215 unwind.force_unwind_entry = 1;
4216 *p = c;
4217 SKIP_WHITESPACE_AFTER_NAME ();
4218 demand_empty_rest_of_line ();
4219 }
4220
4221 static void
4222 dot_proc (int dummy ATTRIBUTE_UNUSED)
4223 {
4224 char *name, *p, c;
4225 symbolS *sym;
4226 proc_pending *pending, *last_pending;
4227
4228 if (unwind.proc_pending.sym)
4229 {
4230 (md.unwind_check == unwind_check_warning
4231 ? as_warn
4232 : as_bad) (_("Missing .endp after previous .proc"));
4233 while (unwind.proc_pending.next)
4234 {
4235 pending = unwind.proc_pending.next;
4236 unwind.proc_pending.next = pending->next;
4237 free (pending);
4238 }
4239 }
4240 last_pending = NULL;
4241
4242 /* Parse names of main and alternate entry points and mark them as
4243 function symbols: */
4244 while (1)
4245 {
4246 SKIP_WHITESPACE ();
4247 c = get_symbol_name (&name);
4248 p = input_line_pointer;
4249 if (!*name)
4250 as_bad (_("Empty argument of .proc"));
4251 else
4252 {
4253 sym = symbol_find_or_make (name);
4254 if (S_IS_DEFINED (sym))
4255 as_bad (_("`%s' was already defined"), name);
4256 else if (!last_pending)
4257 {
4258 unwind.proc_pending.sym = sym;
4259 last_pending = &unwind.proc_pending;
4260 }
4261 else
4262 {
4263 pending = XNEW (proc_pending);
4264 pending->sym = sym;
4265 last_pending = last_pending->next = pending;
4266 }
4267 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4268 }
4269 *p = c;
4270 SKIP_WHITESPACE_AFTER_NAME ();
4271 if (*input_line_pointer != ',')
4272 break;
4273 ++input_line_pointer;
4274 }
4275 if (!last_pending)
4276 {
4277 unwind.proc_pending.sym = expr_build_dot ();
4278 last_pending = &unwind.proc_pending;
4279 }
4280 last_pending->next = NULL;
4281 demand_empty_rest_of_line ();
4282 do_align (4, NULL, 0, 0);
4283
4284 unwind.prologue = 0;
4285 unwind.prologue_count = 0;
4286 unwind.body = 0;
4287 unwind.insn = 0;
4288 unwind.list = unwind.tail = unwind.current_entry = NULL;
4289 unwind.personality_routine = 0;
4290 }
4291
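/* .body -- mark the start of the body region of the current procedure,
   ending any prologue description in progress.  */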
4292 static void
4293 dot_body (int dummy ATTRIBUTE_UNUSED)
4294 {
4295 if (!in_procedure ("body"))
4296 return;
4297 if (!unwind.prologue && !unwind.body && unwind.insn)
4298 as_warn (_("Initial .body should precede any instructions"));
4299 check_pending_save ();
4300
4301 unwind.prologue = 0;
4302 unwind.prologue_mask = 0;
4303 unwind.body = 1;
4304
4305 add_unwind_entry (output_body (), 0);
4306 }
4307
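/* .prologue [<mask>, <grsave>] -- mark the start of a prologue region.  The
   optional 4-bit <mask> selects which items of frame state are saved and
   <grsave> is the first of the consecutive general registers receiving
   them, e.g. ".prologue 12, r32" (operand values illustrative).  */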
4308 static void
4309 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4310 {
4311 unsigned mask = 0, grsave = 0;
4312
4313 if (!in_procedure ("prologue"))
4314 return;
4315 if (unwind.prologue)
4316 {
4317 as_bad (_(".prologue within prologue"));
4318 ignore_rest_of_line ();
4319 return;
4320 }
4321 if (!unwind.body && unwind.insn)
4322 as_warn (_("Initial .prologue should precede any instructions"));
4323
4324 if (!is_it_end_of_statement ())
4325 {
4326 expressionS e;
4327 int n, sep = parse_operand_and_eval (&e, ',');
4328
4329 if (e.X_op != O_constant
4330 || e.X_add_number < 0
4331 || e.X_add_number > 0xf)
4332 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4333 else if (e.X_add_number == 0)
4334 as_warn (_("Pointless use of zero first operand to .prologue"));
4335 else
4336 mask = e.X_add_number;
4337
4338 n = popcount (mask);
4339
4340 if (sep == ',')
4341 parse_operand_and_eval (&e, 0);
4342 else
4343 e.X_op = O_absent;
4344
4345 if (e.X_op == O_constant
4346 && e.X_add_number >= 0
4347 && e.X_add_number < 128)
4348 {
4349 if (md.unwind_check == unwind_check_error)
4350 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4351 grsave = e.X_add_number;
4352 }
4353 else if (e.X_op != O_register
4354 || (grsave = e.X_add_number - REG_GR) > 127)
4355 {
4356 as_bad (_("Second operand to .prologue must be a general register"));
4357 grsave = 0;
4358 }
4359 else if (grsave > 128U - n)
4360 {
4361 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4362 grsave = 0;
4363 }
4364 }
4365
4366 if (mask)
4367 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4368 else
4369 add_unwind_entry (output_prologue (), 0);
4370
4371 unwind.prologue = 1;
4372 unwind.prologue_mask = mask;
4373 unwind.prologue_gr = grsave;
4374 unwind.body = 0;
4375 ++unwind.prologue_count;
4376 }
4377
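/* .endp [<name> ...] -- end the current procedure: generate the unwind
   image if none was emitted via .handlerdata, output the unwind table
   entry (procedure start, procedure end and unwind info pointers), and
   set the size of each procedure symbol.  */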
4378 static void
4379 dot_endp (int dummy ATTRIBUTE_UNUSED)
4380 {
4381 expressionS e;
4382 int bytes_per_address;
4383 long where;
4384 segT saved_seg;
4385 subsegT saved_subseg;
4386 proc_pending *pending;
4387 int unwind_check = md.unwind_check;
4388
4389 md.unwind_check = unwind_check_error;
4390 if (!in_procedure ("endp"))
4391 return;
4392 md.unwind_check = unwind_check;
4393
4394 if (unwind.saved_text_seg)
4395 {
4396 saved_seg = unwind.saved_text_seg;
4397 saved_subseg = unwind.saved_text_subseg;
4398 unwind.saved_text_seg = NULL;
4399 }
4400 else
4401 {
4402 saved_seg = now_seg;
4403 saved_subseg = now_subseg;
4404 }
4405
4406 insn_group_break (1, 0, 0);
4407
4408 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4409 if (!unwind.info)
4410 generate_unwind_image (saved_seg);
4411
4412 if (unwind.info || unwind.force_unwind_entry)
4413 {
4414 symbolS *proc_end;
4415
4416 subseg_set (md.last_text_seg, 0);
4417 proc_end = expr_build_dot ();
4418
4419 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4420
4421 /* Make sure that section has 4 byte alignment for ILP32 and
4422 8 byte alignment for LP64. */
4423 record_alignment (now_seg, md.pointer_size_shift);
4424
4425 /* Need space for 3 pointers for procedure start, procedure end,
4426 and unwind info. */
4427 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4428 where = frag_now_fix () - (3 * md.pointer_size);
4429 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4430
4431 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4432 e.X_op = O_pseudo_fixup;
4433 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4434 e.X_add_number = 0;
4435 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4436 && S_IS_DEFINED (unwind.proc_pending.sym))
4437 e.X_add_symbol
4438 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4439 symbol_get_frag (unwind.proc_pending.sym),
4440 S_GET_VALUE (unwind.proc_pending.sym));
4441 else
4442 e.X_add_symbol = unwind.proc_pending.sym;
4443 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4444 BFD_RELOC_NONE);
4445
4446 e.X_op = O_pseudo_fixup;
4447 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4448 e.X_add_number = 0;
4449 e.X_add_symbol = proc_end;
4450 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4451 bytes_per_address, &e, BFD_RELOC_NONE);
4452
4453 if (unwind.info)
4454 {
4455 e.X_op = O_pseudo_fixup;
4456 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4457 e.X_add_number = 0;
4458 e.X_add_symbol = unwind.info;
4459 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4460 bytes_per_address, &e, BFD_RELOC_NONE);
4461 }
4462 }
4463 subseg_set (saved_seg, saved_subseg);
4464
4465 /* Set symbol sizes. */
4466 pending = &unwind.proc_pending;
4467 if (S_GET_NAME (pending->sym))
4468 {
4469 do
4470 {
4471 symbolS *sym = pending->sym;
4472
4473 if (!S_IS_DEFINED (sym))
4474 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4475 else if (S_GET_SIZE (sym) == 0
4476 && symbol_get_obj (sym)->size == NULL)
4477 {
4478 fragS *frag = symbol_get_frag (sym);
4479
4480 if (frag)
4481 {
4482 if (frag == frag_now && SEG_NORMAL (now_seg))
4483 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4484 else
4485 {
4486 symbol_get_obj (sym)->size = XNEW (expressionS);
4487 symbol_get_obj (sym)->size->X_op = O_subtract;
4488 symbol_get_obj (sym)->size->X_add_symbol
4489 = symbol_new (FAKE_LABEL_NAME, now_seg,
4490 frag_now, frag_now_fix ());
4491 symbol_get_obj (sym)->size->X_op_symbol = sym;
4492 symbol_get_obj (sym)->size->X_add_number = 0;
4493 }
4494 }
4495 }
4496 } while ((pending = pending->next) != NULL);
4497 }
4498
4499 /* Parse names of main and alternate entry points. */
4500 while (1)
4501 {
4502 char *name, *p, c;
4503
4504 SKIP_WHITESPACE ();
4505 c = get_symbol_name (&name);
4506 p = input_line_pointer;
4507 if (!*name)
4508 (md.unwind_check == unwind_check_warning
4509 ? as_warn
4510 : as_bad) (_("Empty argument of .endp"));
4511 else
4512 {
4513 symbolS *sym = symbol_find (name);
4514
4515 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4516 {
4517 if (sym == pending->sym)
4518 {
4519 pending->sym = NULL;
4520 break;
4521 }
4522 }
4523 if (!sym || !pending)
4524 as_warn (_("`%s' was not specified with previous .proc"), name);
4525 }
4526 *p = c;
4527 SKIP_WHITESPACE_AFTER_NAME ();
4528 if (*input_line_pointer != ',')
4529 break;
4530 ++input_line_pointer;
4531 }
4532 demand_empty_rest_of_line ();
4533
4534 /* Deliberately only checking for the main entry point here; the
4535 language spec even says all arguments to .endp are ignored. */
4536 if (unwind.proc_pending.sym
4537 && S_GET_NAME (unwind.proc_pending.sym)
4538 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4539 as_warn (_("`%s' should be an operand to this .endp"),
4540 S_GET_NAME (unwind.proc_pending.sym));
4541 while (unwind.proc_pending.next)
4542 {
4543 pending = unwind.proc_pending.next;
4544 unwind.proc_pending.next = pending->next;
4545 free (pending);
4546 }
4547 unwind.proc_pending.sym = unwind.info = NULL;
4548 }
4549
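/* Explicit bundle template directives (.mii, .mlx, .mmi, ...) merely record
   the requested template for the current slot.  */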
4550 static void
4551 dot_template (int template_val)
4552 {
4553 CURR_SLOT.user_template = template_val;
4554 }
4555
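/* .regstk <ins>, <locals>, <outs>, <rotating> -- describe the current
   register stack frame so that the in/loc/out register names refer to the
   proper physical registers; without operands the frame is reset to all
   zeroes.  For example ".regstk 2, 3, 1, 0" (counts illustrative).  */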
4556 static void
4557 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4558 {
4559 int ins, locs, outs, rots;
4560
4561 if (is_it_end_of_statement ())
4562 ins = locs = outs = rots = 0;
4563 else
4564 {
4565 ins = get_absolute_expression ();
4566 if (*input_line_pointer++ != ',')
4567 goto err;
4568 locs = get_absolute_expression ();
4569 if (*input_line_pointer++ != ',')
4570 goto err;
4571 outs = get_absolute_expression ();
4572 if (*input_line_pointer++ != ',')
4573 goto err;
4574 rots = get_absolute_expression ();
4575 }
4576 set_regstack (ins, locs, outs, rots);
4577 return;
4578
4579 err:
4580 as_bad (_("Comma expected"));
4581 ignore_rest_of_line ();
4582 }
4583
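/* .rotr/.rotf/.rotp <name>[<n>] {, <name>[<n>]} -- declare named groups of
   rotating registers, allocated consecutively from r32, f32 or p16
   respectively.  For example ".rotr x[4], y[2]" (names and counts
   illustrative).  */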
4584 static void
4585 dot_rot (int type)
4586 {
4587 offsetT num_regs;
4588 valueT num_alloced = 0;
4589 struct dynreg **drpp, *dr;
4590 int ch, base_reg = 0;
4591 char *name, *start;
4592 size_t len;
4593
4594 switch (type)
4595 {
4596 case DYNREG_GR: base_reg = REG_GR + 32; break;
4597 case DYNREG_FR: base_reg = REG_FR + 32; break;
4598 case DYNREG_PR: base_reg = REG_P + 16; break;
4599 default: break;
4600 }
4601
4602 /* First, remove existing names from hash table. */
4603 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4604 {
4605 str_hash_delete (md.dynreg_hash, dr->name);
4606 /* FIXME: Free dr->name. */
4607 dr->num_regs = 0;
4608 }
4609
4610 drpp = &md.dynreg[type];
4611 while (1)
4612 {
4613 ch = get_symbol_name (&start);
4614 len = strlen (ia64_canonicalize_symbol_name (start));
4615 *input_line_pointer = ch;
4616
4617 SKIP_WHITESPACE_AFTER_NAME ();
4618 if (*input_line_pointer != '[')
4619 {
4620 as_bad (_("Expected '['"));
4621 goto err;
4622 }
4623 ++input_line_pointer; /* skip '[' */
4624
4625 num_regs = get_absolute_expression ();
4626
4627 if (*input_line_pointer++ != ']')
4628 {
4629 as_bad (_("Expected ']'"));
4630 goto err;
4631 }
4632 if (num_regs <= 0)
4633 {
4634 as_bad (_("Number of elements must be positive"));
4635 goto err;
4636 }
4637 SKIP_WHITESPACE ();
4638
4639 num_alloced += num_regs;
4640 switch (type)
4641 {
4642 case DYNREG_GR:
4643 if (num_alloced > md.rot.num_regs)
4644 {
4645 as_bad (_("Used more than the declared %d rotating registers"),
4646 md.rot.num_regs);
4647 goto err;
4648 }
4649 break;
4650 case DYNREG_FR:
4651 if (num_alloced > 96)
4652 {
4653 as_bad (_("Used more than the available 96 rotating registers"));
4654 goto err;
4655 }
4656 break;
4657 case DYNREG_PR:
4658 if (num_alloced > 48)
4659 {
4660 as_bad (_("Used more than the available 48 rotating registers"));
4661 goto err;
4662 }
4663 break;
4664
4665 default:
4666 break;
4667 }
4668
4669 if (!*drpp)
4670 {
4671 *drpp = XOBNEW (&notes, struct dynreg);
4672 memset (*drpp, 0, sizeof (*dr));
4673 }
4674
4675 name = XOBNEWVEC (&notes, char, len + 1);
4676 memcpy (name, start, len);
4677 name[len] = '\0';
4678
4679 dr = *drpp;
4680 dr->name = name;
4681 dr->num_regs = num_regs;
4682 dr->base = base_reg;
4683 drpp = &dr->next;
4684 base_reg += num_regs;
4685
4686 if (str_hash_insert (md.dynreg_hash, name, dr, 0) != NULL)
4687 {
4688 as_bad (_("Attempt to redefine register set `%s'"), name);
4689 obstack_free (&notes, name);
4690 goto err;
4691 }
4692
4693 if (*input_line_pointer != ',')
4694 break;
4695 ++input_line_pointer; /* skip comma */
4696 SKIP_WHITESPACE ();
4697 }
4698 demand_empty_rest_of_line ();
4699 return;
4700
4701 err:
4702 ignore_rest_of_line ();
4703 }
4704
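/* .lsb/.msb -- switch data emission for the current section to little- or
   big-endian byte order; an argument of -1 re-applies the byte order
   recorded for the section (or the target default if none was set).  */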
4705 static void
4706 dot_byteorder (int byteorder)
4707 {
4708 segment_info_type *seginfo = seg_info (now_seg);
4709
4710 if (byteorder == -1)
4711 {
4712 if (seginfo->tc_segment_info_data.endian == 0)
4713 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4714 byteorder = seginfo->tc_segment_info_data.endian == 1;
4715 }
4716 else
4717 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4718
4719 if (target_big_endian != byteorder)
4720 {
4721 target_big_endian = byteorder;
4722 if (target_big_endian)
4723 {
4724 ia64_number_to_chars = number_to_chars_bigendian;
4725 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4726 }
4727 else
4728 {
4729 ia64_number_to_chars = number_to_chars_littleendian;
4730 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4731 }
4732 }
4733 }
4734
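/* .psr <option> [, <option> ...] -- adjust the ELF header flags: "lsb"/"msb"
   control the byte-order flag and "abi32"/"abi64" the ABI-size flag.  */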
4735 static void
4736 dot_psr (int dummy ATTRIBUTE_UNUSED)
4737 {
4738 char *option;
4739 int ch;
4740
4741 while (1)
4742 {
4743 ch = get_symbol_name (&option);
4744 if (strcmp (option, "lsb") == 0)
4745 md.flags &= ~EF_IA_64_BE;
4746 else if (strcmp (option, "msb") == 0)
4747 md.flags |= EF_IA_64_BE;
4748 else if (strcmp (option, "abi32") == 0)
4749 md.flags &= ~EF_IA_64_ABI64;
4750 else if (strcmp (option, "abi64") == 0)
4751 md.flags |= EF_IA_64_ABI64;
4752 else
4753 as_bad (_("Unknown psr option `%s'"), option);
4754 *input_line_pointer = ch;
4755
4756 SKIP_WHITESPACE_AFTER_NAME ();
4757 if (*input_line_pointer != ',')
4758 break;
4759
4760 ++input_line_pointer;
4761 SKIP_WHITESPACE ();
4762 }
4763 demand_empty_rest_of_line ();
4764 }
4765
4766 static void
4767 dot_ln (int dummy ATTRIBUTE_UNUSED)
4768 {
4769 new_logical_line (0, get_absolute_expression ());
4770 demand_empty_rest_of_line ();
4771 }
4772
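/* Emit data into a different section: switch to the section named by the
   first operand, let BUILDER consume the remaining operands (with automatic
   alignment suppressed when UA is nonzero), then restore the previous
   section.  */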
4773 static void
4774 cross_section (int ref, void (*builder) (int), int ua)
4775 {
4776 char *start, *end;
4777 int saved_auto_align;
4778 unsigned int section_count;
4779 char *name;
4780 char c;
4781
4782 SKIP_WHITESPACE ();
4783 start = input_line_pointer;
4784 c = get_symbol_name (&name);
4785 if (input_line_pointer == start)
4786 {
4787 as_bad (_("Missing section name"));
4788 ignore_rest_of_line ();
4789 return;
4790 }
4791 * input_line_pointer = c;
4792 SKIP_WHITESPACE_AFTER_NAME ();
4793 end = input_line_pointer;
4794 if (*input_line_pointer != ',')
4795 {
4796 as_bad (_("Comma expected after section name"));
4797 ignore_rest_of_line ();
4798 return;
4799 }
4800 *end = '\0';
4801 end = input_line_pointer + 1; /* skip comma */
4802 input_line_pointer = start;
4803 md.keep_pending_output = 1;
4804 section_count = bfd_count_sections (stdoutput);
4805 obj_elf_section (0);
4806 if (section_count != bfd_count_sections (stdoutput))
4807 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4808 input_line_pointer = end;
4809 saved_auto_align = md.auto_align;
4810 if (ua)
4811 md.auto_align = 0;
4812 (*builder) (ref);
4813 if (ua)
4814 md.auto_align = saved_auto_align;
4815 obj_elf_previous (0);
4816 md.keep_pending_output = 0;
4817 }
4818
4819 static void
4820 dot_xdata (int size)
4821 {
4822 cross_section (size, cons, 0);
4823 }
4824
4825 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4826
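/* Like float_cons, but align first: to 4 bytes for 'f', 8 bytes for 'd'
   and 16 bytes for 'x'/'X' operands.  */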
4827 static void
4828 stmt_float_cons (int kind)
4829 {
4830 size_t alignment;
4831
4832 switch (kind)
4833 {
4834 case 'd':
4835 alignment = 3;
4836 break;
4837
4838 case 'x':
4839 case 'X':
4840 alignment = 4;
4841 break;
4842
4843 case 'f':
4844 default:
4845 alignment = 2;
4846 break;
4847 }
4848 do_align (alignment, NULL, 0, 0);
4849 float_cons (kind);
4850 }
4851
4852 static void
4853 stmt_cons_ua (int size)
4854 {
4855 int saved_auto_align = md.auto_align;
4856
4857 md.auto_align = 0;
4858 cons (size);
4859 md.auto_align = saved_auto_align;
4860 }
4861
4862 static void
4863 dot_xfloat_cons (int kind)
4864 {
4865 cross_section (kind, stmt_float_cons, 0);
4866 }
4867
4868 static void
4869 dot_xstringer (int zero)
4870 {
4871 cross_section (zero, stringer, 0);
4872 }
4873
4874 static void
4875 dot_xdata_ua (int size)
4876 {
4877 cross_section (size, cons, 1);
4878 }
4879
4880 static void
4881 dot_xfloat_cons_ua (int kind)
4882 {
4883 cross_section (kind, float_cons, 1);
4884 }
4885
4886 /* .reg.val <regname>,value */
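/* Annotate a general register with a value known to the DV checker, for
   instance ".reg.val r14, 0" (register and value illustrative).  */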
4887
4888 static void
4889 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4890 {
4891 expressionS reg;
4892
4893 expression_and_evaluate (&reg);
4894 if (reg.X_op != O_register)
4895 {
4896 as_bad (_("Register name expected"));
4897 ignore_rest_of_line ();
4898 }
4899 else if (*input_line_pointer++ != ',')
4900 {
4901 as_bad (_("Comma expected"));
4902 ignore_rest_of_line ();
4903 }
4904 else
4905 {
4906 valueT value = get_absolute_expression ();
4907 int regno = reg.X_add_number;
4908 if (regno <= REG_GR || regno > REG_GR + 127)
4909 as_warn (_("Register value annotation ignored"));
4910 else
4911 {
4912 gr_values[regno - REG_GR].known = 1;
4913 gr_values[regno - REG_GR].value = value;
4914 gr_values[regno - REG_GR].path = md.path;
4915 }
4916 }
4917 demand_empty_rest_of_line ();
4918 }
4919
4920 /*
4921 .serialize.data
4922 .serialize.instruction
4923 */
4924 static void
4925 dot_serialize (int type)
4926 {
4927 insn_group_break (0, 0, 0);
4928 if (type)
4929 instruction_serialization ();
4930 else
4931 data_serialization ();
4932 insn_group_break (0, 0, 0);
4933 demand_empty_rest_of_line ();
4934 }
4935
4936 /* select dv checking mode
4937 .auto
4938 .explicit
4939 .default
4940
4941 A stop is inserted when changing modes
4942 */
4943
4944 static void
4945 dot_dv_mode (int type)
4946 {
4947 if (md.manual_bundling)
4948 as_warn (_("Directive invalid within a bundle"));
4949
4950 if (type == 'E' || type == 'A')
4951 md.mode_explicitly_set = 0;
4952 else
4953 md.mode_explicitly_set = 1;
4954
4955 md.detect_dv = 1;
4956 switch (type)
4957 {
4958 case 'A':
4959 case 'a':
4960 if (md.explicit_mode)
4961 insn_group_break (1, 0, 0);
4962 md.explicit_mode = 0;
4963 break;
4964 case 'E':
4965 case 'e':
4966 if (!md.explicit_mode)
4967 insn_group_break (1, 0, 0);
4968 md.explicit_mode = 1;
4969 break;
4970 default:
4971 case 'd':
4972 if (md.explicit_mode != md.default_explicit_mode)
4973 insn_group_break (1, 0, 0);
4974 md.explicit_mode = md.default_explicit_mode;
4975 md.mode_explicitly_set = 0;
4976 break;
4977 }
4978 }
4979
4980 static void
4981 print_prmask (valueT mask)
4982 {
4983 int regno;
4984 const char *comma = "";
4985 for (regno = 0; regno < 64; regno++)
4986 {
4987 if (mask & ((valueT) 1 << regno))
4988 {
4989 fprintf (stderr, "%s p%d", comma, regno);
4990 comma = ",";
4991 }
4992 }
4993 }
4994
4995 /*
4996 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4997 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4998 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4999 .pred.safe_across_calls p1 [, p2 [,...]]
5000 */
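/* Typical uses (operands illustrative): ".pred.rel.mutex p6, p7" or
   ".pred.safe_across_calls p1-p5, p16-p63"; a '-' between two predicate
   registers denotes a range.  */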
5001
5002 static void
5003 dot_pred_rel (int type)
5004 {
5005 valueT mask = 0;
5006 int count = 0;
5007 int p1 = -1, p2 = -1;
5008
5009 if (type == 0)
5010 {
5011 if (*input_line_pointer == '"')
5012 {
5013 int len;
5014 char *form = demand_copy_C_string (&len);
5015
5016 if (strcmp (form, "mutex") == 0)
5017 type = 'm';
5018 else if (strcmp (form, "clear") == 0)
5019 type = 'c';
5020 else if (strcmp (form, "imply") == 0)
5021 type = 'i';
5022 obstack_free (&notes, form);
5023 }
5024 else if (*input_line_pointer == '@')
5025 {
5026 char *form;
5027 char c;
5028
5029 ++input_line_pointer;
5030 c = get_symbol_name (&form);
5031
5032 if (strcmp (form, "mutex") == 0)
5033 type = 'm';
5034 else if (strcmp (form, "clear") == 0)
5035 type = 'c';
5036 else if (strcmp (form, "imply") == 0)
5037 type = 'i';
5038 (void) restore_line_pointer (c);
5039 }
5040 else
5041 {
5042 as_bad (_("Missing predicate relation type"));
5043 ignore_rest_of_line ();
5044 return;
5045 }
5046 if (type == 0)
5047 {
5048 as_bad (_("Unrecognized predicate relation type"));
5049 ignore_rest_of_line ();
5050 return;
5051 }
5052 if (*input_line_pointer == ',')
5053 ++input_line_pointer;
5054 SKIP_WHITESPACE ();
5055 }
5056
5057 while (1)
5058 {
5059 valueT bits = 1;
5060 int sep, regno;
5061 expressionS pr, *pr1, *pr2;
5062
5063 sep = parse_operand_and_eval (&pr, ',');
5064 if (pr.X_op == O_register
5065 && pr.X_add_number >= REG_P
5066 && pr.X_add_number <= REG_P + 63)
5067 {
5068 regno = pr.X_add_number - REG_P;
5069 bits <<= regno;
5070 count++;
5071 if (p1 == -1)
5072 p1 = regno;
5073 else if (p2 == -1)
5074 p2 = regno;
5075 }
5076 else if (type != 'i'
5077 && pr.X_op == O_subtract
5078 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5079 && pr1->X_op == O_register
5080 && pr1->X_add_number >= REG_P
5081 && pr1->X_add_number <= REG_P + 63
5082 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5083 && pr2->X_op == O_register
5084 && pr2->X_add_number >= REG_P
5085 && pr2->X_add_number <= REG_P + 63)
5086 {
5087 /* It's a range. */
5088 int stop;
5089
5090 regno = pr1->X_add_number - REG_P;
5091 stop = pr2->X_add_number - REG_P;
5092 if (regno >= stop)
5093 {
5094 as_bad (_("Bad register range"));
5095 ignore_rest_of_line ();
5096 return;
5097 }
5098 bits = ((bits << stop) << 1) - (bits << regno);
5099 count += stop - regno + 1;
5100 }
5101 else
5102 {
5103 as_bad (_("Predicate register expected"));
5104 ignore_rest_of_line ();
5105 return;
5106 }
5107 if (mask & bits)
5108 as_warn (_("Duplicate predicate register ignored"));
5109 mask |= bits;
5110 if (sep != ',')
5111 break;
5112 }
5113
5114 switch (type)
5115 {
5116 case 'c':
5117 if (count == 0)
5118 mask = ~(valueT) 0;
5119 clear_qp_mutex (mask);
5120 clear_qp_implies (mask, (valueT) 0);
5121 break;
5122 case 'i':
5123 if (count != 2 || p1 == -1 || p2 == -1)
5124 as_bad (_("Predicate source and target required"));
5125 else if (p1 == 0 || p2 == 0)
5126 as_bad (_("Use of p0 is not valid in this context"));
5127 else
5128 add_qp_imply (p1, p2);
5129 break;
5130 case 'm':
5131 if (count < 2)
5132 {
5133 as_bad (_("At least two PR arguments expected"));
5134 break;
5135 }
5136 else if (mask & 1)
5137 {
5138 as_bad (_("Use of p0 is not valid in this context"));
5139 break;
5140 }
5141 add_qp_mutex (mask);
5142 break;
5143 case 's':
5144 /* note that we don't override any existing relations */
5145 if (count == 0)
5146 {
5147 as_bad (_("At least one PR argument expected"));
5148 break;
5149 }
5150 if (md.debug_dv)
5151 {
5152 fprintf (stderr, "Safe across calls: ");
5153 print_prmask (mask);
5154 fprintf (stderr, "\n");
5155 }
5156 qp_safe_across_calls = mask;
5157 break;
5158 }
5159 demand_empty_rest_of_line ();
5160 }
5161
5162 /* .entry label [, label [, ...]]
5163 Hint to DV code that the given labels are to be considered entry points.
5164 Otherwise, only global labels are considered entry points. */
5165
5166 static void
5167 dot_entry (int dummy ATTRIBUTE_UNUSED)
5168 {
5169 char *name;
5170 int c;
5171 symbolS *symbolP;
5172
5173 do
5174 {
5175 c = get_symbol_name (&name);
5176 symbolP = symbol_find_or_make (name);
5177
5178 if (str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), symbolP, 0))
5179 as_bad (_("duplicate entry hint %s"), name);
5180
5181 *input_line_pointer = c;
5182 SKIP_WHITESPACE_AFTER_NAME ();
5183 c = *input_line_pointer;
5184 if (c == ',')
5185 {
5186 input_line_pointer++;
5187 SKIP_WHITESPACE ();
5188 if (*input_line_pointer == '\n')
5189 c = '\n';
5190 }
5191 }
5192 while (c == ',');
5193
5194 demand_empty_rest_of_line ();
5195 }
5196
5197 /* .mem.offset offset, base
5198    "base" is used to distinguish offsets that are relative to different bases.  */
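/* For instance, two stores known not to overlap might be preceded by
   ".mem.offset 0, 0" and ".mem.offset 8, 0" respectively (offsets and base
   id illustrative).  */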
5199
5200 static void
5201 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5202 {
5203 md.mem_offset.hint = 1;
5204 md.mem_offset.offset = get_absolute_expression ();
5205 if (*input_line_pointer != ',')
5206 {
5207 as_bad (_("Comma expected"));
5208 ignore_rest_of_line ();
5209 return;
5210 }
5211 ++input_line_pointer;
5212 md.mem_offset.base = get_absolute_expression ();
5213 demand_empty_rest_of_line ();
5214 }
5215
5216 /* ia64-specific pseudo-ops: */
5217 const pseudo_typeS md_pseudo_table[] =
5218 {
5219 { "radix", dot_radix, 0 },
5220 { "lcomm", s_lcomm_bytes, 1 },
5221 { "loc", dot_loc, 0 },
5222 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5223 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5224 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5225 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5226 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5227 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5228 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5229 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5230 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5231 { "proc", dot_proc, 0 },
5232 { "body", dot_body, 0 },
5233 { "prologue", dot_prologue, 0 },
5234 { "endp", dot_endp, 0 },
5235
5236 { "fframe", dot_fframe, 0 },
5237 { "vframe", dot_vframe, 0 },
5238 { "vframesp", dot_vframesp, 0 },
5239 { "vframepsp", dot_vframesp, 1 },
5240 { "save", dot_save, 0 },
5241 { "restore", dot_restore, 0 },
5242 { "restorereg", dot_restorereg, 0 },
5243 { "restorereg.p", dot_restorereg, 1 },
5244 { "handlerdata", dot_handlerdata, 0 },
5245 { "unwentry", dot_unwentry, 0 },
5246 { "altrp", dot_altrp, 0 },
5247 { "savesp", dot_savemem, 0 },
5248 { "savepsp", dot_savemem, 1 },
5249 { "save.g", dot_saveg, 0 },
5250 { "save.f", dot_savef, 0 },
5251 { "save.b", dot_saveb, 0 },
5252 { "save.gf", dot_savegf, 0 },
5253 { "spill", dot_spill, 0 },
5254 { "spillreg", dot_spillreg, 0 },
5255 { "spillsp", dot_spillmem, 0 },
5256 { "spillpsp", dot_spillmem, 1 },
5257 { "spillreg.p", dot_spillreg, 1 },
5258 { "spillsp.p", dot_spillmem, ~0 },
5259 { "spillpsp.p", dot_spillmem, ~1 },
5260 { "label_state", dot_label_state, 0 },
5261 { "copy_state", dot_copy_state, 0 },
5262 { "unwabi", dot_unwabi, 0 },
5263 { "personality", dot_personality, 0 },
5264 { "mii", dot_template, 0x0 },
5265 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5266 { "mlx", dot_template, 0x2 },
5267 { "mmi", dot_template, 0x4 },
5268 { "mfi", dot_template, 0x6 },
5269 { "mmf", dot_template, 0x7 },
5270 { "mib", dot_template, 0x8 },
5271 { "mbb", dot_template, 0x9 },
5272 { "bbb", dot_template, 0xb },
5273 { "mmb", dot_template, 0xc },
5274 { "mfb", dot_template, 0xe },
5275 { "align", dot_align, 0 },
5276 { "regstk", dot_regstk, 0 },
5277 { "rotr", dot_rot, DYNREG_GR },
5278 { "rotf", dot_rot, DYNREG_FR },
5279 { "rotp", dot_rot, DYNREG_PR },
5280 { "lsb", dot_byteorder, 0 },
5281 { "msb", dot_byteorder, 1 },
5282 { "psr", dot_psr, 0 },
5283 { "alias", dot_alias, 0 },
5284 { "secalias", dot_alias, 1 },
5285 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5286
5287 { "xdata1", dot_xdata, 1 },
5288 { "xdata2", dot_xdata, 2 },
5289 { "xdata4", dot_xdata, 4 },
5290 { "xdata8", dot_xdata, 8 },
5291 { "xdata16", dot_xdata, 16 },
5292 { "xreal4", dot_xfloat_cons, 'f' },
5293 { "xreal8", dot_xfloat_cons, 'd' },
5294 { "xreal10", dot_xfloat_cons, 'x' },
5295 { "xreal16", dot_xfloat_cons, 'X' },
5296 { "xstring", dot_xstringer, 8 + 0 },
5297 { "xstringz", dot_xstringer, 8 + 1 },
5298
5299 /* unaligned versions: */
5300 { "xdata2.ua", dot_xdata_ua, 2 },
5301 { "xdata4.ua", dot_xdata_ua, 4 },
5302 { "xdata8.ua", dot_xdata_ua, 8 },
5303 { "xdata16.ua", dot_xdata_ua, 16 },
5304 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5305 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5306 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5307 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5308
5309 /* annotations/DV checking support */
5310 { "entry", dot_entry, 0 },
5311 { "mem.offset", dot_mem_offset, 0 },
5312 { "pred.rel", dot_pred_rel, 0 },
5313 { "pred.rel.clear", dot_pred_rel, 'c' },
5314 { "pred.rel.imply", dot_pred_rel, 'i' },
5315 { "pred.rel.mutex", dot_pred_rel, 'm' },
5316 { "pred.safe_across_calls", dot_pred_rel, 's' },
5317 { "reg.val", dot_reg_val, 0 },
5318 { "serialize.data", dot_serialize, 0 },
5319 { "serialize.instruction", dot_serialize, 1 },
5320 { "auto", dot_dv_mode, 'a' },
5321 { "explicit", dot_dv_mode, 'e' },
5322 { "default", dot_dv_mode, 'd' },
5323
5324 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5325 IA-64 aligns data allocation pseudo-ops by default, so we have to
5326 tell it that these ones are supposed to be unaligned. Long term,
5327 should rewrite so that only IA-64 specific data allocation pseudo-ops
5328 are aligned by default. */
5329 {"2byte", stmt_cons_ua, 2},
5330 {"4byte", stmt_cons_ua, 4},
5331 {"8byte", stmt_cons_ua, 8},
5332
5333 #ifdef TE_VMS
5334 {"vms_common", obj_elf_vms_common, 0},
5335 #endif
5336
5337 { NULL, 0, 0 }
5338 };
5339
5340 static const struct pseudo_opcode
5341 {
5342 const char *name;
5343 void (*handler) (int);
5344 int arg;
5345 }
5346 pseudo_opcode[] =
5347 {
5348 /* these are more like pseudo-ops, but don't start with a dot */
5349 { "data1", cons, 1 },
5350 { "data2", cons, 2 },
5351 { "data4", cons, 4 },
5352 { "data8", cons, 8 },
5353 { "data16", cons, 16 },
5354 { "real4", stmt_float_cons, 'f' },
5355 { "real8", stmt_float_cons, 'd' },
5356 { "real10", stmt_float_cons, 'x' },
5357 { "real16", stmt_float_cons, 'X' },
5358 { "string", stringer, 8 + 0 },
5359 { "stringz", stringer, 8 + 1 },
5360
5361 /* unaligned versions: */
5362 { "data2.ua", stmt_cons_ua, 2 },
5363 { "data4.ua", stmt_cons_ua, 4 },
5364 { "data8.ua", stmt_cons_ua, 8 },
5365 { "data16.ua", stmt_cons_ua, 16 },
5366 { "real4.ua", float_cons, 'f' },
5367 { "real8.ua", float_cons, 'd' },
5368 { "real10.ua", float_cons, 'x' },
5369 { "real16.ua", float_cons, 'X' },
5370 };
5371
5372 /* Declare a register by creating a symbol for it and entering it in
5373 the symbol table. */
5374
5375 static symbolS *
5376 declare_register (const char *name, unsigned int regnum)
5377 {
5378 symbolS *sym;
5379
5380 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5381
5382 if (str_hash_insert (md.reg_hash, S_GET_NAME (sym), sym, 0) != NULL)
5383 as_fatal (_("duplicate %s"), name);
5384
5385 return sym;
5386 }
5387
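/* Declare NUM_REGS registers named PREFIX0 .. PREFIX<NUM_REGS-1>, numbered
   consecutively from BASE_REGNUM.  */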
5388 static void
5389 declare_register_set (const char *prefix,
5390 unsigned int num_regs,
5391 unsigned int base_regnum)
5392 {
5393 char name[8];
5394 unsigned int i;
5395
5396 for (i = 0; i < num_regs; ++i)
5397 {
5398 snprintf (name, sizeof (name), "%s%u", prefix, i);
5399 declare_register (name, base_regnum + i);
5400 }
5401 }
5402
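/* Return the total number of bits used to encode operand OPND.  */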
5403 static unsigned int
5404 operand_width (enum ia64_opnd opnd)
5405 {
5406 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5407 unsigned int bits = 0;
5408 int i;
5409
5410 bits = 0;
5411 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5412 bits += odesc->field[i].bits;
5413
5414 return bits;
5415 }
5416
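/* Check whether expression E is suitable for operand RES_INDEX of IDESC.
   Returns OPERAND_MATCH, OPERAND_OUT_OF_RANGE (right kind of operand, but
   the value is out of range) or OPERAND_MISMATCH.  Matching a relocatable
   operand may append a fixup to CURR_SLOT as a side effect.  */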
5417 static enum operand_match_result
5418 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5419 {
5420 enum ia64_opnd opnd = idesc->operands[res_index];
5421 int bits, relocatable = 0;
5422 struct insn_fix *fix;
5423 bfd_signed_vma val;
5424
5425 switch (opnd)
5426 {
5427 /* constants: */
5428
5429 case IA64_OPND_AR_CCV:
5430 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5431 return OPERAND_MATCH;
5432 break;
5433
5434 case IA64_OPND_AR_CSD:
5435 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5436 return OPERAND_MATCH;
5437 break;
5438
5439 case IA64_OPND_AR_PFS:
5440 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5441 return OPERAND_MATCH;
5442 break;
5443
5444 case IA64_OPND_GR0:
5445 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5446 return OPERAND_MATCH;
5447 break;
5448
5449 case IA64_OPND_IP:
5450 if (e->X_op == O_register && e->X_add_number == REG_IP)
5451 return OPERAND_MATCH;
5452 break;
5453
5454 case IA64_OPND_PR:
5455 if (e->X_op == O_register && e->X_add_number == REG_PR)
5456 return OPERAND_MATCH;
5457 break;
5458
5459 case IA64_OPND_PR_ROT:
5460 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5461 return OPERAND_MATCH;
5462 break;
5463
5464 case IA64_OPND_PSR:
5465 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5466 return OPERAND_MATCH;
5467 break;
5468
5469 case IA64_OPND_PSR_L:
5470 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5471 return OPERAND_MATCH;
5472 break;
5473
5474 case IA64_OPND_PSR_UM:
5475 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5476 return OPERAND_MATCH;
5477 break;
5478
5479 case IA64_OPND_C1:
5480 if (e->X_op == O_constant)
5481 {
5482 if (e->X_add_number == 1)
5483 return OPERAND_MATCH;
5484 else
5485 return OPERAND_OUT_OF_RANGE;
5486 }
5487 break;
5488
5489 case IA64_OPND_C8:
5490 if (e->X_op == O_constant)
5491 {
5492 if (e->X_add_number == 8)
5493 return OPERAND_MATCH;
5494 else
5495 return OPERAND_OUT_OF_RANGE;
5496 }
5497 break;
5498
5499 case IA64_OPND_C16:
5500 if (e->X_op == O_constant)
5501 {
5502 if (e->X_add_number == 16)
5503 return OPERAND_MATCH;
5504 else
5505 return OPERAND_OUT_OF_RANGE;
5506 }
5507 break;
5508
5509 /* register operands: */
5510
5511 case IA64_OPND_AR3:
5512 if (e->X_op == O_register && e->X_add_number >= REG_AR
5513 && e->X_add_number < REG_AR + 128)
5514 return OPERAND_MATCH;
5515 break;
5516
5517 case IA64_OPND_B1:
5518 case IA64_OPND_B2:
5519 if (e->X_op == O_register && e->X_add_number >= REG_BR
5520 && e->X_add_number < REG_BR + 8)
5521 return OPERAND_MATCH;
5522 break;
5523
5524 case IA64_OPND_CR3:
5525 if (e->X_op == O_register && e->X_add_number >= REG_CR
5526 && e->X_add_number < REG_CR + 128)
5527 return OPERAND_MATCH;
5528 break;
5529
5530 case IA64_OPND_DAHR3:
5531 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5532 && e->X_add_number < REG_DAHR + 8)
5533 return OPERAND_MATCH;
5534 break;
5535
5536 case IA64_OPND_F1:
5537 case IA64_OPND_F2:
5538 case IA64_OPND_F3:
5539 case IA64_OPND_F4:
5540 if (e->X_op == O_register && e->X_add_number >= REG_FR
5541 && e->X_add_number < REG_FR + 128)
5542 return OPERAND_MATCH;
5543 break;
5544
5545 case IA64_OPND_P1:
5546 case IA64_OPND_P2:
5547 if (e->X_op == O_register && e->X_add_number >= REG_P
5548 && e->X_add_number < REG_P + 64)
5549 return OPERAND_MATCH;
5550 break;
5551
5552 case IA64_OPND_R1:
5553 case IA64_OPND_R2:
5554 case IA64_OPND_R3:
5555 if (e->X_op == O_register && e->X_add_number >= REG_GR
5556 && e->X_add_number < REG_GR + 128)
5557 return OPERAND_MATCH;
5558 break;
5559
5560 case IA64_OPND_R3_2:
5561 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5562 {
5563 if (e->X_add_number < REG_GR + 4)
5564 return OPERAND_MATCH;
5565 else if (e->X_add_number < REG_GR + 128)
5566 return OPERAND_OUT_OF_RANGE;
5567 }
5568 break;
5569
5570 /* indirect operands: */
5571 case IA64_OPND_CPUID_R3:
5572 case IA64_OPND_DBR_R3:
5573 case IA64_OPND_DTR_R3:
5574 case IA64_OPND_ITR_R3:
5575 case IA64_OPND_IBR_R3:
5576 case IA64_OPND_MSR_R3:
5577 case IA64_OPND_PKR_R3:
5578 case IA64_OPND_PMC_R3:
5579 case IA64_OPND_PMD_R3:
5580 case IA64_OPND_DAHR_R3:
5581 case IA64_OPND_RR_R3:
5582 if (e->X_op == O_index && e->X_op_symbol
5583 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5584 == opnd - IA64_OPND_CPUID_R3))
5585 return OPERAND_MATCH;
5586 break;
5587
5588 case IA64_OPND_MR3:
5589 if (e->X_op == O_index && !e->X_op_symbol)
5590 return OPERAND_MATCH;
5591 break;
5592
5593 /* immediate operands: */
5594 case IA64_OPND_CNT2a:
5595 case IA64_OPND_LEN4:
5596 case IA64_OPND_LEN6:
5597 bits = operand_width (idesc->operands[res_index]);
5598 if (e->X_op == O_constant)
5599 {
5600 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5601 return OPERAND_MATCH;
5602 else
5603 return OPERAND_OUT_OF_RANGE;
5604 }
5605 break;
5606
5607 case IA64_OPND_CNT2b:
5608 if (e->X_op == O_constant)
5609 {
5610 if ((bfd_vma) (e->X_add_number - 1) < 3)
5611 return OPERAND_MATCH;
5612 else
5613 return OPERAND_OUT_OF_RANGE;
5614 }
5615 break;
5616
5617 case IA64_OPND_CNT2c:
5618 val = e->X_add_number;
5619 if (e->X_op == O_constant)
5620 {
5621 if ((val == 0 || val == 7 || val == 15 || val == 16))
5622 return OPERAND_MATCH;
5623 else
5624 return OPERAND_OUT_OF_RANGE;
5625 }
5626 break;
5627
5628 case IA64_OPND_SOR:
5629 /* SOR must be an integer multiple of 8 */
5630 if (e->X_op == O_constant && e->X_add_number & 0x7)
5631 return OPERAND_OUT_OF_RANGE;
5632 /* Fall through. */
5633 case IA64_OPND_SOF:
5634 case IA64_OPND_SOL:
5635 if (e->X_op == O_constant)
5636 {
5637 if ((bfd_vma) e->X_add_number <= 96)
5638 return OPERAND_MATCH;
5639 else
5640 return OPERAND_OUT_OF_RANGE;
5641 }
5642 break;
5643
5644 case IA64_OPND_IMMU62:
5645 if (e->X_op == O_constant)
5646 {
5647 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5648 return OPERAND_MATCH;
5649 else
5650 return OPERAND_OUT_OF_RANGE;
5651 }
5652 else
5653 {
5654 /* FIXME -- need 62-bit relocation type */
5655 as_bad (_("62-bit relocation not yet implemented"));
5656 }
5657 break;
5658
5659 case IA64_OPND_IMMU64:
5660 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5661 || e->X_op == O_subtract)
5662 {
5663 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5664 fix->code = BFD_RELOC_IA64_IMM64;
5665 if (e->X_op != O_subtract)
5666 {
5667 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5668 if (e->X_op == O_pseudo_fixup)
5669 e->X_op = O_symbol;
5670 }
5671
5672 fix->opnd = idesc->operands[res_index];
5673 fix->expr = *e;
5674 fix->is_pcrel = 0;
5675 ++CURR_SLOT.num_fixups;
5676 return OPERAND_MATCH;
5677 }
5678 else if (e->X_op == O_constant)
5679 return OPERAND_MATCH;
5680 break;
5681
5682 case IA64_OPND_IMMU5b:
5683 if (e->X_op == O_constant)
5684 {
5685 val = e->X_add_number;
5686 if (val >= 32 && val <= 63)
5687 return OPERAND_MATCH;
5688 else
5689 return OPERAND_OUT_OF_RANGE;
5690 }
5691 break;
5692
5693 case IA64_OPND_CCNT5:
5694 case IA64_OPND_CNT5:
5695 case IA64_OPND_CNT6:
5696 case IA64_OPND_CPOS6a:
5697 case IA64_OPND_CPOS6b:
5698 case IA64_OPND_CPOS6c:
5699 case IA64_OPND_IMMU2:
5700 case IA64_OPND_IMMU7a:
5701 case IA64_OPND_IMMU7b:
5702 case IA64_OPND_IMMU16:
5703 case IA64_OPND_IMMU19:
5704 case IA64_OPND_IMMU21:
5705 case IA64_OPND_IMMU24:
5706 case IA64_OPND_MBTYPE4:
5707 case IA64_OPND_MHTYPE8:
5708 case IA64_OPND_POS6:
5709 bits = operand_width (idesc->operands[res_index]);
5710 if (e->X_op == O_constant)
5711 {
5712 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5713 return OPERAND_MATCH;
5714 else
5715 return OPERAND_OUT_OF_RANGE;
5716 }
5717 break;
5718
5719 case IA64_OPND_IMMU9:
5720 bits = operand_width (idesc->operands[res_index]);
5721 if (e->X_op == O_constant)
5722 {
5723 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5724 {
5725 int lobits = e->X_add_number & 0x3;
5726 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5727 e->X_add_number |= (bfd_vma) 0x3;
5728 return OPERAND_MATCH;
5729 }
5730 else
5731 return OPERAND_OUT_OF_RANGE;
5732 }
5733 break;
5734
5735 case IA64_OPND_IMM44:
5736       /* The 16 least significant bits must be zero.  */
5737 if ((e->X_add_number & 0xffff) != 0)
5738 /* XXX technically, this is wrong: we should not be issuing warning
5739 messages until we're sure this instruction pattern is going to
5740 be used! */
5741 as_warn (_("lower 16 bits of mask ignored"));
5742
5743 if (e->X_op == O_constant)
5744 {
5745 if (((e->X_add_number >= 0
5746 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5747 || (e->X_add_number < 0
5748 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5749 {
5750 /* sign-extend */
5751 if (e->X_add_number >= 0
5752 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5753 {
5754 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5755 }
5756 return OPERAND_MATCH;
5757 }
5758 else
5759 return OPERAND_OUT_OF_RANGE;
5760 }
5761 break;
5762
5763 case IA64_OPND_IMM17:
5764 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5765 if (e->X_op == O_constant)
5766 {
5767 if (((e->X_add_number >= 0
5768 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5769 || (e->X_add_number < 0
5770 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5771 {
5772 /* sign-extend */
5773 if (e->X_add_number >= 0
5774 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5775 {
5776 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5777 }
5778 return OPERAND_MATCH;
5779 }
5780 else
5781 return OPERAND_OUT_OF_RANGE;
5782 }
5783 break;
5784
5785 case IA64_OPND_IMM14:
5786 case IA64_OPND_IMM22:
5787 relocatable = 1;
5788 /* Fall through. */
5789 case IA64_OPND_IMM1:
5790 case IA64_OPND_IMM8:
5791 case IA64_OPND_IMM8U4:
5792 case IA64_OPND_IMM8M1:
5793 case IA64_OPND_IMM8M1U4:
5794 case IA64_OPND_IMM8M1U8:
5795 case IA64_OPND_IMM9a:
5796 case IA64_OPND_IMM9b:
5797 bits = operand_width (idesc->operands[res_index]);
5798 if (relocatable && (e->X_op == O_symbol
5799 || e->X_op == O_subtract
5800 || e->X_op == O_pseudo_fixup))
5801 {
5802 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5803
5804 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5805 fix->code = BFD_RELOC_IA64_IMM14;
5806 else
5807 fix->code = BFD_RELOC_IA64_IMM22;
5808
5809 if (e->X_op != O_subtract)
5810 {
5811 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5812 if (e->X_op == O_pseudo_fixup)
5813 e->X_op = O_symbol;
5814 }
5815
5816 fix->opnd = idesc->operands[res_index];
5817 fix->expr = *e;
5818 fix->is_pcrel = 0;
5819 ++CURR_SLOT.num_fixups;
5820 return OPERAND_MATCH;
5821 }
5822 else if (e->X_op != O_constant
5823 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5824 return OPERAND_MISMATCH;
5825
5826 if (opnd == IA64_OPND_IMM8M1U4)
5827 {
5828 /* Zero is not valid for unsigned compares that take an adjusted
5829 constant immediate range. */
5830 if (e->X_add_number == 0)
5831 return OPERAND_OUT_OF_RANGE;
5832
5833 /* Sign-extend 32-bit unsigned numbers, so that the following range
5834 checks will work. */
5835 val = e->X_add_number;
5836 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5837 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5838
5839 /* Check for 0x100000000. This is valid because
5840 0x100000000-1 is the same as ((uint32_t) -1). */
5841 if (val == ((bfd_signed_vma) 1 << 32))
5842 return OPERAND_MATCH;
5843
5844 val = val - 1;
5845 }
5846 else if (opnd == IA64_OPND_IMM8M1U8)
5847 {
5848 /* Zero is not valid for unsigned compares that take an adjusted
5849 constant immediate range. */
5850 if (e->X_add_number == 0)
5851 return OPERAND_OUT_OF_RANGE;
5852
5853 /* Check for 0x10000000000000000. */
5854 if (e->X_op == O_big)
5855 {
5856 if (generic_bignum[0] == 0
5857 && generic_bignum[1] == 0
5858 && generic_bignum[2] == 0
5859 && generic_bignum[3] == 0
5860 && generic_bignum[4] == 1)
5861 return OPERAND_MATCH;
5862 else
5863 return OPERAND_OUT_OF_RANGE;
5864 }
5865 else
5866 val = e->X_add_number - 1;
5867 }
5868 else if (opnd == IA64_OPND_IMM8M1)
5869 val = e->X_add_number - 1;
5870 else if (opnd == IA64_OPND_IMM8U4)
5871 {
5872 /* Sign-extend 32-bit unsigned numbers, so that the following range
5873 checks will work. */
5874 val = e->X_add_number;
5875 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5876 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5877 }
5878 else
5879 val = e->X_add_number;
5880
5881 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5882 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5883 return OPERAND_MATCH;
5884 else
5885 return OPERAND_OUT_OF_RANGE;
5886
5887 case IA64_OPND_INC3:
5888 /* +/- 1, 4, 8, 16 */
5889 val = e->X_add_number;
5890 if (val < 0)
5891 val = -val;
5892 if (e->X_op == O_constant)
5893 {
5894 if ((val == 1 || val == 4 || val == 8 || val == 16))
5895 return OPERAND_MATCH;
5896 else
5897 return OPERAND_OUT_OF_RANGE;
5898 }
5899 break;
5900
5901 case IA64_OPND_TGT25:
5902 case IA64_OPND_TGT25b:
5903 case IA64_OPND_TGT25c:
5904 case IA64_OPND_TGT64:
5905 if (e->X_op == O_symbol)
5906 {
5907 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5908 if (opnd == IA64_OPND_TGT25)
5909 fix->code = BFD_RELOC_IA64_PCREL21F;
5910 else if (opnd == IA64_OPND_TGT25b)
5911 fix->code = BFD_RELOC_IA64_PCREL21M;
5912 else if (opnd == IA64_OPND_TGT25c)
5913 fix->code = BFD_RELOC_IA64_PCREL21B;
5914 else if (opnd == IA64_OPND_TGT64)
5915 fix->code = BFD_RELOC_IA64_PCREL60B;
5916 else
5917 abort ();
5918
5919 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5920 fix->opnd = idesc->operands[res_index];
5921 fix->expr = *e;
5922 fix->is_pcrel = 1;
5923 ++CURR_SLOT.num_fixups;
5924 return OPERAND_MATCH;
5925 }
5926 /* Fall through. */
5927 case IA64_OPND_TAG13:
5928 case IA64_OPND_TAG13b:
5929 switch (e->X_op)
5930 {
5931 case O_constant:
5932 return OPERAND_MATCH;
5933
5934 case O_symbol:
5935 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5936 /* There are no external relocs for TAG13/TAG13b fields, so we
5937 create a dummy reloc. This will not live past md_apply_fix. */
5938 fix->code = BFD_RELOC_UNUSED;
5939 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5940 fix->opnd = idesc->operands[res_index];
5941 fix->expr = *e;
5942 fix->is_pcrel = 1;
5943 ++CURR_SLOT.num_fixups;
5944 return OPERAND_MATCH;
5945
5946 default:
5947 break;
5948 }
5949 break;
5950
5951 case IA64_OPND_LDXMOV:
5952 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5953 fix->code = BFD_RELOC_IA64_LDXMOV;
5954 fix->opnd = idesc->operands[res_index];
5955 fix->expr = *e;
5956 fix->is_pcrel = 0;
5957 ++CURR_SLOT.num_fixups;
5958 return OPERAND_MATCH;
5959
5960 case IA64_OPND_STRD5b:
5961 if (e->X_op == O_constant)
5962 {
5963 /* 5-bit signed scaled by 64 */
5964 if ((e->X_add_number <= ( 0xf << 6 ))
5965 && (e->X_add_number >= -( 0x10 << 6 )))
5966 {
5967
5968 /* Must be a multiple of 64 */
5969 if ((e->X_add_number & 0x3f) != 0)
5970 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5971
5972 e->X_add_number &= ~ 0x3f;
5973 return OPERAND_MATCH;
5974 }
5975 else
5976 return OPERAND_OUT_OF_RANGE;
5977 }
5978 break;
5979 case IA64_OPND_CNT6a:
5980 if (e->X_op == O_constant)
5981 {
5982 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5983 if ((e->X_add_number <= 64)
5984 && (e->X_add_number > 0) )
5985 {
5986 return OPERAND_MATCH;
5987 }
5988 else
5989 return OPERAND_OUT_OF_RANGE;
5990 }
5991 break;
5992
5993 default:
5994 break;
5995 }
5996 return OPERAND_MISMATCH;
5997 }
5998
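/* Parse one operand into E.  Returns the separator character found after
   it; when MORE is nonzero, a trailing ',' or MORE separator is consumed.  */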
5999 static int
6000 parse_operand (expressionS *e, int more)
6001 {
6002 int sep = '\0';
6003
6004 memset (e, 0, sizeof (*e));
6005 e->X_op = O_absent;
6006 SKIP_WHITESPACE ();
6007 expression (e);
6008 sep = *input_line_pointer;
6009 if (more && (sep == ',' || sep == more))
6010 ++input_line_pointer;
6011 return sep;
6012 }
6013
6014 static int
6015 parse_operand_and_eval (expressionS *e, int more)
6016 {
6017 int sep = parse_operand (e, more);
6018 resolve_expression (e);
6019 return sep;
6020 }
6021
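/* Like parse_operand, but skip expression resolution for operand kinds
   that may need a relocation; resolve the expression otherwise.  */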
6022 static int
6023 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6024 {
6025 int sep = parse_operand (e, more);
6026 switch (op)
6027 {
6028 case IA64_OPND_IMM14:
6029 case IA64_OPND_IMM22:
6030 case IA64_OPND_IMMU64:
6031 case IA64_OPND_TGT25:
6032 case IA64_OPND_TGT25b:
6033 case IA64_OPND_TGT25c:
6034 case IA64_OPND_TGT64:
6035 case IA64_OPND_TAG13:
6036 case IA64_OPND_TAG13b:
6037 case IA64_OPND_LDXMOV:
6038 break;
6039 default:
6040 resolve_expression (e);
6041 break;
6042 }
6043 return sep;
6044 }
6045
6046 /* Returns the next entry in the opcode table that matches the one in
6047 IDESC, and frees the entry in IDESC. If no matching entry is
6048 found, NULL is returned instead. */
6049
6050 static struct ia64_opcode *
6051 get_next_opcode (struct ia64_opcode *idesc)
6052 {
6053 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6054 ia64_free_opcode (idesc);
6055 return next;
6056 }
6057
6058 /* Parse the operands for the opcode and return the opcode variant that
6059    matches the specified operands, or NULL if no match is possible.  */
6060
6061 static struct ia64_opcode *
6062 parse_operands (struct ia64_opcode *idesc)
6063 {
6064 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6065 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6066 int reg1, reg2;
6067 char reg_class;
6068 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6069 enum operand_match_result result;
6070 char mnemonic[129];
6071 char *first_arg = 0, *end, *saved_input_pointer;
6072 unsigned int sof;
6073
6074 gas_assert (strlen (idesc->name) <= 128);
6075
6076 strcpy (mnemonic, idesc->name);
6077 if (idesc->operands[2] == IA64_OPND_SOF
6078 || idesc->operands[1] == IA64_OPND_SOF)
6079 {
6080 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6081 can't parse the first operand until we have parsed the
6082 remaining operands of the "alloc" instruction. */
6083 SKIP_WHITESPACE ();
6084 first_arg = input_line_pointer;
6085 end = strchr (input_line_pointer, '=');
6086 if (!end)
6087 {
6088 as_bad (_("Expected separator `='"));
6089 return 0;
6090 }
6091 input_line_pointer = end + 1;
6092 ++i;
6093 ++num_outputs;
6094 }
6095
6096 for (; ; ++i)
6097 {
6098 if (i < NELEMS (CURR_SLOT.opnd))
6099 {
6100 enum ia64_opnd op = IA64_OPND_NIL;
6101 if (i < NELEMS (idesc->operands))
6102 op = idesc->operands[i];
6103 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=', op);
6104 if (CURR_SLOT.opnd[i].X_op == O_absent)
6105 break;
6106 }
6107 else
6108 {
6109 expressionS dummy;
6110
6111 sep = parse_operand (&dummy, '=');
6112 if (dummy.X_op == O_absent)
6113 break;
6114 }
6115
6116 ++num_operands;
6117
6118 if (sep != '=' && sep != ',')
6119 break;
6120
6121 if (sep == '=')
6122 {
6123 if (num_outputs > 0)
6124 as_bad (_("Duplicate equal sign (=) in instruction"));
6125 else
6126 num_outputs = i + 1;
6127 }
6128 }
6129 if (sep != '\0')
6130 {
6131 as_bad (_("Illegal operand separator `%c'"), sep);
6132 return 0;
6133 }
6134
6135 if (idesc->operands[2] == IA64_OPND_SOF
6136 || idesc->operands[1] == IA64_OPND_SOF)
6137 {
6138 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6139 Note, however, that due to that mapping operand numbers in error
6140 messages for any of the constant operands will not be correct. */
6141 know (strcmp (idesc->name, "alloc") == 0);
6142       /* The first operand hasn't been parsed/initialized yet (but
6143 num_operands intentionally doesn't account for that). */
6144 i = num_operands > 4 ? 2 : 1;
6145 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6146 ? CURR_SLOT.opnd[n].X_add_number \
6147 : 0)
6148 sof = set_regstack (FORCE_CONST(i),
6149 FORCE_CONST(i + 1),
6150 FORCE_CONST(i + 2),
6151 FORCE_CONST(i + 3));
6152 #undef FORCE_CONST
6153
6154 /* now we can parse the first arg: */
6155 saved_input_pointer = input_line_pointer;
6156 input_line_pointer = first_arg;
6157 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6158 idesc->operands[0]);
6159 if (sep != '=')
6160 --num_outputs; /* force error */
6161 input_line_pointer = saved_input_pointer;
6162
6163 CURR_SLOT.opnd[i].X_add_number = sof;
6164 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6165 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6166 CURR_SLOT.opnd[i + 1].X_add_number
6167 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6168 else
6169 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6170 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6171 }
6172
6173 highest_unmatched_operand = -4;
6174 curr_out_of_range_pos = -1;
6175 error_pos = 0;
6176 for (; idesc; idesc = get_next_opcode (idesc))
6177 {
6178 if (num_outputs != idesc->num_outputs)
6179 continue; /* mismatch in # of outputs */
6180 if (highest_unmatched_operand < 0)
6181 highest_unmatched_operand |= 1;
6182 if (num_operands > NELEMS (idesc->operands)
6183 || (num_operands < NELEMS (idesc->operands)
6184 && idesc->operands[num_operands])
6185 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6186 continue; /* mismatch in number of arguments */
6187 if (highest_unmatched_operand < 0)
6188 highest_unmatched_operand |= 2;
6189
6190 CURR_SLOT.num_fixups = 0;
6191
6192 /* Try to match all operands. If we see an out-of-range operand,
6193 then continue trying to match the rest of the operands, since if
6194 the rest match, then this idesc will give the best error message. */
6195
6196 out_of_range_pos = -1;
6197 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6198 {
6199 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6200 if (result != OPERAND_MATCH)
6201 {
6202 if (result != OPERAND_OUT_OF_RANGE)
6203 break;
6204 if (out_of_range_pos < 0)
6205 /* remember position of the first out-of-range operand: */
6206 out_of_range_pos = i;
6207 }
6208 }
6209
6210 /* If we did not match all operands, or if at least one operand was
6211 out-of-range, then this idesc does not match. Keep track of which
6212 idesc matched the most operands before failing. If we have two
6213 idescs that failed at the same position, and one had an out-of-range
6214 operand, then prefer the out-of-range operand. Thus if we have
6215 "add r0=0x1000000,r1" we get an error saying the constant is out
6216 of range instead of an error saying that the constant should have been
6217 a register. */
6218
6219 if (i != num_operands || out_of_range_pos >= 0)
6220 {
6221 if (i > highest_unmatched_operand
6222 || (i == highest_unmatched_operand
6223 && out_of_range_pos > curr_out_of_range_pos))
6224 {
6225 highest_unmatched_operand = i;
6226 if (out_of_range_pos >= 0)
6227 {
6228 expected_operand = idesc->operands[out_of_range_pos];
6229 error_pos = out_of_range_pos;
6230 }
6231 else
6232 {
6233 expected_operand = idesc->operands[i];
6234 error_pos = i;
6235 }
6236 curr_out_of_range_pos = out_of_range_pos;
6237 }
6238 continue;
6239 }
6240
6241 break;
6242 }
6243 if (!idesc)
6244 {
6245 if (expected_operand)
6246 as_bad (_("Operand %u of `%s' should be %s"),
6247 error_pos + 1, mnemonic,
6248 elf64_ia64_operands[expected_operand].desc);
6249 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6250 as_bad (_("Wrong number of output operands"));
6251 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6252 as_bad (_("Wrong number of input operands"));
6253 else
6254 as_bad (_("Operand mismatch"));
6255 return 0;
6256 }
6257
6258 /* Check that the instruction doesn't use
6259 - r0, f0, or f1 as output operands
6260 - the same predicate twice as output operands
6261 - r0 as address of a base update load or store
6262 - the same GR as output and address of a base update load
6263 - two even- or two odd-numbered FRs as output operands of a floating
6264 point parallel load.
6265 At most two (conflicting) output (or output-like) operands can exist,
6266 (floating point parallel loads have three outputs, but the base register,
6267 if updated, cannot conflict with the actual outputs). */
6268 reg2 = reg1 = -1;
6269 for (i = 0; i < num_operands; ++i)
6270 {
6271 int regno = 0;
6272
6273 reg_class = 0;
6274 switch (idesc->operands[i])
6275 {
6276 case IA64_OPND_R1:
6277 case IA64_OPND_R2:
6278 case IA64_OPND_R3:
6279 if (i < num_outputs)
6280 {
6281 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6282 reg_class = 'r';
6283 else if (reg1 < 0)
6284 reg1 = CURR_SLOT.opnd[i].X_add_number;
6285 else if (reg2 < 0)
6286 reg2 = CURR_SLOT.opnd[i].X_add_number;
6287 }
6288 break;
6289 case IA64_OPND_P1:
6290 case IA64_OPND_P2:
6291 if (i < num_outputs)
6292 {
6293 if (reg1 < 0)
6294 reg1 = CURR_SLOT.opnd[i].X_add_number;
6295 else if (reg2 < 0)
6296 reg2 = CURR_SLOT.opnd[i].X_add_number;
6297 }
6298 break;
6299 case IA64_OPND_F1:
6300 case IA64_OPND_F2:
6301 case IA64_OPND_F3:
6302 case IA64_OPND_F4:
6303 if (i < num_outputs)
6304 {
6305 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6306 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6307 {
6308 reg_class = 'f';
6309 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6310 }
6311 else if (reg1 < 0)
6312 reg1 = CURR_SLOT.opnd[i].X_add_number;
6313 else if (reg2 < 0)
6314 reg2 = CURR_SLOT.opnd[i].X_add_number;
6315 }
6316 break;
6317 case IA64_OPND_MR3:
6318 if (idesc->flags & IA64_OPCODE_POSTINC)
6319 {
6320 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6321 reg_class = 'm';
6322 else if (reg1 < 0)
6323 reg1 = CURR_SLOT.opnd[i].X_add_number;
6324 else if (reg2 < 0)
6325 reg2 = CURR_SLOT.opnd[i].X_add_number;
6326 }
6327 break;
6328 default:
6329 break;
6330 }
6331 switch (reg_class)
6332 {
6333 case 0:
6334 break;
6335 default:
6336 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6337 break;
6338 case 'm':
6339 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6340 break;
6341 }
6342 }
6343 if (reg1 == reg2)
6344 {
6345 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6346 {
6347 reg1 -= REG_GR;
6348 reg_class = 'r';
6349 }
6350 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6351 {
6352 reg1 -= REG_P;
6353 reg_class = 'p';
6354 }
6355 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6356 {
6357 reg1 -= REG_FR;
6358 reg_class = 'f';
6359 }
6360 else
6361 reg_class = 0;
6362 if (reg_class)
6363 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6364 }
6365 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6366 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6367 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6368 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6369 && ! ((reg1 ^ reg2) & 1))
6370 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6371 reg1 - REG_FR, reg2 - REG_FR);
6372 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6373 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6374 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6375 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6376 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6377 reg1 - REG_FR, reg2 - REG_FR);
6378 return idesc;
6379 }
6380
6381 static void
6382 build_insn (struct slot *slot, bfd_vma *insnp)
6383 {
6384 const struct ia64_operand *odesc, *o2desc;
6385 struct ia64_opcode *idesc = slot->idesc;
6386 bfd_vma insn;
6387 bfd_signed_vma val;
6388 const char *err;
6389 int i;
6390
6391 insn = idesc->opcode | slot->qp_regno;
6392
6393 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6394 {
6395 if (slot->opnd[i].X_op == O_register
6396 || slot->opnd[i].X_op == O_constant
6397 || slot->opnd[i].X_op == O_index)
6398 val = slot->opnd[i].X_add_number;
6399 else if (slot->opnd[i].X_op == O_big)
6400 {
6401 /* This must be the value 0x10000000000000000. */
6402 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6403 val = 0;
6404 }
6405 else
6406 val = 0;
6407
6408 switch (idesc->operands[i])
6409 {
6410 case IA64_OPND_IMMU64:
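	  /* The 64-bit immediate of a movl is split across the bundle:
	     bits 62:22 go into the L slot via *insnp below, while the
	     remaining fields (bits 6:0, 15:7, 20:16, 21 and 63) are
	     packed into the X-unit instruction word.  */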
6411 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6412 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6413 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6414 | (((val >> 63) & 0x1) << 36));
6415 continue;
6416
6417 case IA64_OPND_IMMU62:
6418 val &= 0x3fffffffffffffffULL;
6419 if (val != slot->opnd[i].X_add_number)
6420 as_warn (_("Value truncated to 62 bits"));
6421 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6422 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6423 continue;
6424
6425 case IA64_OPND_TGT64:
6426 val >>= 4;
6427 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6428 insn |= ((((val >> 59) & 0x1) << 36)
6429 | (((val >> 0) & 0xfffff) << 13));
6430 continue;
6431
6432 case IA64_OPND_AR3:
6433 val -= REG_AR;
6434 break;
6435
6436 case IA64_OPND_B1:
6437 case IA64_OPND_B2:
6438 val -= REG_BR;
6439 break;
6440
6441 case IA64_OPND_CR3:
6442 val -= REG_CR;
6443 break;
6444
6445 case IA64_OPND_DAHR3:
6446 val -= REG_DAHR;
6447 break;
6448
6449 case IA64_OPND_F1:
6450 case IA64_OPND_F2:
6451 case IA64_OPND_F3:
6452 case IA64_OPND_F4:
6453 val -= REG_FR;
6454 break;
6455
6456 case IA64_OPND_P1:
6457 case IA64_OPND_P2:
6458 val -= REG_P;
6459 break;
6460
6461 case IA64_OPND_R1:
6462 case IA64_OPND_R2:
6463 case IA64_OPND_R3:
6464 case IA64_OPND_R3_2:
6465 case IA64_OPND_CPUID_R3:
6466 case IA64_OPND_DBR_R3:
6467 case IA64_OPND_DTR_R3:
6468 case IA64_OPND_ITR_R3:
6469 case IA64_OPND_IBR_R3:
6470 case IA64_OPND_MR3:
6471 case IA64_OPND_MSR_R3:
6472 case IA64_OPND_PKR_R3:
6473 case IA64_OPND_PMC_R3:
6474 case IA64_OPND_PMD_R3:
6475 case IA64_OPND_DAHR_R3:
6476 case IA64_OPND_RR_R3:
6477 val -= REG_GR;
6478 break;
6479
6480 default:
6481 break;
6482 }
6483
6484 odesc = elf64_ia64_operands + idesc->operands[i];
6485 err = (*odesc->insert) (odesc, val, &insn);
6486 if (err)
6487 as_bad_where (slot->src_file, slot->src_line,
6488 _("Bad operand value: %s"), err);
6489 if (idesc->flags & IA64_OPCODE_PSEUDO)
6490 {
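	  /* Some pseudo-ops need an operand mirrored into a second field:
	     e.g. "fneg f1 = f3" expands to an fmerge whose F2 field must
	     equal F3, and "shl r1 = r2, n" expands to a dep.z whose len
	     field must be 64 - n.  */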
6491 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6492 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6493 {
6494 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6495 (*o2desc->insert) (o2desc, val, &insn);
6496 }
6497 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6498 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6499 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6500 {
6501 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6502 (*o2desc->insert) (o2desc, 64 - val, &insn);
6503 }
6504 }
6505 }
6506 *insnp = insn;
6507 }
6508
6509 static void
6510 emit_one_bundle (void)
6511 {
6512 int manual_bundling_off = 0, manual_bundling = 0;
6513 enum ia64_unit required_unit, insn_unit = 0;
6514 enum ia64_insn_type type[3], insn_type;
6515 unsigned int template_val, orig_template;
6516 bfd_vma insn[3] = { -1, -1, -1 };
6517 struct ia64_opcode *idesc;
6518 int end_of_insn_group = 0, user_template = -1;
6519 int n, i, j, first, curr, last_slot;
6520 bfd_vma t0 = 0, t1 = 0;
6521 struct label_fix *lfix;
6522 bool mark_label;
6523 struct insn_fix *ifix;
6524 char mnemonic[16];
6525 fixS *fix;
6526 char *f;
6527 int addr_mod;
6528
6529 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6530 know (first >= 0 && first < NUM_SLOTS);
6531 n = MIN (3, md.num_slots_in_use);
6532
6533 /* Determine template: use user_template if specified, best match
6534 otherwise: */
6535
6536 if (md.slot[first].user_template >= 0)
6537 user_template = template_val = md.slot[first].user_template;
6538 else
6539 {
6540 /* Auto select appropriate template. */
6541 memset (type, 0, sizeof (type));
6542 curr = first;
6543 for (i = 0; i < n; ++i)
6544 {
6545 if (md.slot[curr].label_fixups && i != 0)
6546 break;
6547 type[i] = md.slot[curr].idesc->type;
6548 curr = (curr + 1) % NUM_SLOTS;
6549 }
6550 template_val = best_template[type[0]][type[1]][type[2]];
6551 }
6552
6553 /* initialize instructions with appropriate nops: */
6554 for (i = 0; i < 3; ++i)
6555 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6556
6557 f = frag_more (16);
6558
6559 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6560 from the start of the frag. */
6561 addr_mod = frag_now_fix () & 15;
6562 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6563 as_bad (_("instruction address is not a multiple of 16"));
6564 frag_now->insn_addr = addr_mod;
6565 frag_now->has_code = 1;
6566
6567 /* now fill in slots with as many insns as possible: */
6568 curr = first;
6569 idesc = md.slot[curr].idesc;
6570 end_of_insn_group = 0;
6571 last_slot = -1;
6572 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6573 {
6574 /* If we have unwind records, we may need to update some now. */
6575 unw_rec_list *ptr = md.slot[curr].unwind_record;
6576 unw_rec_list *end_ptr = NULL;
6577
6578 if (ptr)
6579 {
6580 /* Find the last prologue/body record in the list for the current
6581 insn, and set the slot number for all records up to that point.
6582 This needs to be done now, because prologue/body records refer to
6583 the current point, not the point after the instruction has been
6584 issued. This matters because there may have been nops emitted
6585 meanwhile. Any non-prologue non-body record followed by a
6586 prologue/body record must also refer to the current point. */
6587 unw_rec_list *last_ptr;
6588
6589 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6590 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6591 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6592 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6593 || ptr->r.type == body)
6594 last_ptr = ptr;
6595 if (last_ptr)
6596 {
6597 /* Make last_ptr point one after the last prologue/body
6598 record. */
6599 last_ptr = last_ptr->next;
6600 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6601 ptr = ptr->next)
6602 {
6603 ptr->slot_number = (unsigned long) f + i;
6604 ptr->slot_frag = frag_now;
6605 }
6606 /* Remove the initialized records, so that we won't accidentally
6607 update them again if we insert a nop and continue. */
6608 md.slot[curr].unwind_record = last_ptr;
6609 }
6610 }
6611
6612 manual_bundling_off = md.slot[curr].manual_bundling_off;
6613 if (md.slot[curr].manual_bundling_on)
6614 {
6615 if (curr == first)
6616 manual_bundling = 1;
6617 else
6618 break; /* Need to start a new bundle. */
6619 }
6620
6621 /* If this instruction specifies a template, then it must be the first
6622 instruction of a bundle. */
6623 if (curr != first && md.slot[curr].user_template >= 0)
6624 break;
6625
6626 if (idesc->flags & IA64_OPCODE_SLOT2)
6627 {
6628 if (manual_bundling && !manual_bundling_off)
6629 {
6630 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6631 _("`%s' must be last in bundle"), idesc->name);
6632 if (i < 2)
6633 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6634 }
6635 i = 2;
6636 }
6637 if (idesc->flags & IA64_OPCODE_LAST)
6638 {
6639 int required_slot;
6640 unsigned int required_template;
6641
6642 /* If we need a stop bit after an M slot, our only choice is
6643 template 5 (M;;MI). If we need a stop bit after a B
6644 slot, our only choice is to place it at the end of the
6645 bundle, because the only available templates are MIB,
6646 MBB, BBB, MMB, and MFB. We don't handle anything other
6647 than M and B slots because these are the only kind of
6648 instructions that can have the IA64_OPCODE_LAST bit set. */
6649 required_template = template_val;
6650 switch (idesc->type)
6651 {
6652 case IA64_TYPE_M:
6653 required_slot = 0;
6654 required_template = 5;
6655 break;
6656
6657 case IA64_TYPE_B:
6658 required_slot = 2;
6659 break;
6660
6661 default:
6662 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6663 _("Internal error: don't know how to force %s to end of instruction group"),
6664 idesc->name);
6665 required_slot = i;
6666 break;
6667 }
6668 if (manual_bundling
6669 && (i > required_slot
6670 || (required_slot == 2 && !manual_bundling_off)
6671 || (user_template >= 0
6672 /* Changing from MMI to M;MI is OK. */
6673 && (template_val ^ required_template) > 1)))
6674 {
6675 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6676 _("`%s' must be last in instruction group"),
6677 idesc->name);
6678 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6679 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6680 }
6681 if (required_slot < i)
6682 /* Can't fit this instruction. */
6683 break;
6684
6685 i = required_slot;
6686 if (required_template != template_val)
6687 {
6688 /* If we switch the template, we need to reset the NOPs
6689 after slot i. The slot-types of the instructions ahead
6690 of i never change, so we don't need to worry about
6691 changing NOPs in front of this slot. */
6692 for (j = i; j < 3; ++j)
6693 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6694
6695 /* We just picked a template that includes the stop bit in the
6696 middle, so we don't need another one emitted later. */
6697 md.slot[curr].end_of_insn_group = 0;
6698 }
6699 template_val = required_template;
6700 }
6701 if (curr != first && md.slot[curr].label_fixups)
6702 {
6703 if (manual_bundling)
6704 {
6705 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6706 _("Label must be first in a bundle"));
6707 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6708 }
6709 /* This insn must go into the first slot of a bundle. */
6710 break;
6711 }
6712
6713 if (end_of_insn_group && md.num_slots_in_use >= 1)
6714 {
6715 /* We need an instruction group boundary in the middle of a
6716 bundle. See if we can switch to another template with
6717 an appropriate boundary. */
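	  /* For example, a stop needed before slot 1 of an MMI bundle can be
	     encoded by switching to the M;;MI template (5), and one needed
	     before slot 2 of an MII bundle by switching to MI;;I (1),
	     instead of breaking the bundle.  */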
6718
6719 orig_template = template_val;
6720 if (i == 1 && (user_template == 4
6721 || (user_template < 0
6722 && (ia64_templ_desc[template_val].exec_unit[0]
6723 == IA64_UNIT_M))))
6724 {
6725 template_val = 5;
6726 end_of_insn_group = 0;
6727 }
6728 else if (i == 2 && (user_template == 0
6729 || (user_template < 0
6730 && (ia64_templ_desc[template_val].exec_unit[1]
6731 == IA64_UNIT_I)))
6732 /* This test makes sure we don't switch the template if
6733 the next instruction is one that needs to be first in
6734 an instruction group. Since all those instructions are
6735 in the M group, there is no way such an instruction can
6736 fit in this bundle even if we switch the template. The
6737 reason we have to check for this is that otherwise we
6738 may end up generating "MI;;I M.." which has the deadly
6739 effect that the second M instruction is no longer the
6740 first in the group! --davidm 99/12/16 */
6741 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6742 {
6743 template_val = 1;
6744 end_of_insn_group = 0;
6745 }
6746 else if (i == 1
6747 && user_template == 0
6748 && !(idesc->flags & IA64_OPCODE_FIRST))
6749 /* Use the next slot. */
6750 continue;
6751 else if (curr != first)
6752 /* can't fit this insn */
6753 break;
6754
6755 if (template_val != orig_template)
6756 /* if we switch the template, we need to reset the NOPs
6757 after slot i. The slot-types of the instructions ahead
6758 of i never change, so we don't need to worry about
6759 changing NOPs in front of this slot. */
6760 for (j = i; j < 3; ++j)
6761 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6762 }
6763 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6764
6765 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6766 if (idesc->type == IA64_TYPE_DYN)
6767 {
6768 enum ia64_opnd opnd1, opnd2;
6769
6770 if ((strcmp (idesc->name, "nop") == 0)
6771 || (strcmp (idesc->name, "break") == 0))
6772 insn_unit = required_unit;
6773 else if (strcmp (idesc->name, "hint") == 0)
6774 {
6775 insn_unit = required_unit;
6776 if (required_unit == IA64_UNIT_B)
6777 {
6778 switch (md.hint_b)
6779 {
6780 case hint_b_ok:
6781 break;
6782 case hint_b_warning:
6783 as_warn (_("hint in B unit may be treated as nop"));
6784 break;
6785 case hint_b_error:
6786 /* When manual bundling is off and there is no
6787 user template, we choose a different unit so
6788 that hint won't go into the current slot. We
6789 will fill the current bundle with nops and
6790 try to put hint into the next bundle. */
6791 if (!manual_bundling && user_template < 0)
6792 insn_unit = IA64_UNIT_I;
6793 else
6794 as_bad (_("hint in B unit can't be used"));
6795 break;
6796 }
6797 }
6798 }
6799 else if (strcmp (idesc->name, "chk.s") == 0
6800 || strcmp (idesc->name, "mov") == 0)
6801 {
6802 insn_unit = IA64_UNIT_M;
6803 if (required_unit == IA64_UNIT_I
6804 || (required_unit == IA64_UNIT_F && template_val == 6))
6805 insn_unit = IA64_UNIT_I;
6806 }
6807 else
6808 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6809
6810 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6811 idesc->name, "?imbfxx"[insn_unit]);
6812 opnd1 = idesc->operands[0];
6813 opnd2 = idesc->operands[1];
6814 ia64_free_opcode (idesc);
6815 idesc = ia64_find_opcode (mnemonic);
6816 /* moves to/from ARs have collisions */
6817 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6818 {
6819 while (idesc != NULL
6820 && (idesc->operands[0] != opnd1
6821 || idesc->operands[1] != opnd2))
6822 idesc = get_next_opcode (idesc);
6823 }
6824 md.slot[curr].idesc = idesc;
6825 }
6826 else
6827 {
6828 insn_type = idesc->type;
6829 insn_unit = IA64_UNIT_NIL;
6830 switch (insn_type)
6831 {
6832 case IA64_TYPE_A:
6833 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6834 insn_unit = required_unit;
6835 break;
6836 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6837 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6838 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6839 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6840 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6841 default: break;
6842 }
6843 }
6844
6845 if (insn_unit != required_unit)
6846 continue; /* Try next slot. */
6847
6848 /* Now is a good time to fix up the labels for this insn. */
6849 mark_label = false;
6850 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6851 {
6852 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6853 symbol_set_frag (lfix->sym, frag_now);
6854 mark_label |= lfix->dw2_mark_labels;
6855 }
6856 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6857 {
6858 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6859 symbol_set_frag (lfix->sym, frag_now);
6860 }
6861
6862 if (debug_type == DEBUG_DWARF2
6863 || md.slot[curr].loc_directive_seen
6864 || mark_label)
6865 {
6866 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6867
6868 md.slot[curr].loc_directive_seen = 0;
6869 if (mark_label)
6870 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6871
6872 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6873 }
6874
6875 build_insn (md.slot + curr, insn + i);
6876
6877 ptr = md.slot[curr].unwind_record;
6878 if (ptr)
6879 {
6880 /* Set slot numbers for all remaining unwind records belonging to the
6881 current insn. There can not be any prologue/body unwind records
6882 here. */
6883 for (; ptr != end_ptr; ptr = ptr->next)
6884 {
6885 ptr->slot_number = (unsigned long) f + i;
6886 ptr->slot_frag = frag_now;
6887 }
6888 md.slot[curr].unwind_record = NULL;
6889 }
6890
6891 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6892 {
6893 ifix = md.slot[curr].fixup + j;
6894 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6895 &ifix->expr, ifix->is_pcrel, ifix->code);
6896 fix->tc_fix_data.opnd = ifix->opnd;
6897 fix->fx_file = md.slot[curr].src_file;
6898 fix->fx_line = md.slot[curr].src_line;
6899 }
6900
6901 end_of_insn_group = md.slot[curr].end_of_insn_group;
6902
6903 /* This adjustment to "i" must occur after the fix, otherwise the fix
6904 is assigned to the wrong slot, and the VMS linker complains. */
6905 if (required_unit == IA64_UNIT_L)
6906 {
6907 know (i == 1);
6908 /* skip one slot for long/X-unit instructions */
6909 ++i;
6910 }
6911 --md.num_slots_in_use;
6912 last_slot = i;
6913
6914 /* clear slot: */
6915 ia64_free_opcode (md.slot[curr].idesc);
6916 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6917 md.slot[curr].user_template = -1;
6918
6919 if (manual_bundling_off)
6920 {
6921 manual_bundling = 0;
6922 break;
6923 }
6924 curr = (curr + 1) % NUM_SLOTS;
6925 idesc = md.slot[curr].idesc;
6926 }
6927
6928 /* A user template was specified, but the first following instruction did
6929 not fit. This can happen with or without manual bundling. */
6930 if (md.num_slots_in_use > 0 && last_slot < 0)
6931 {
6932 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6933 _("`%s' does not fit into %s template"),
6934 idesc->name, ia64_templ_desc[template_val].name);
6935 /* Drop first insn so we don't livelock. */
6936 --md.num_slots_in_use;
6937 know (curr == first);
6938 ia64_free_opcode (md.slot[curr].idesc);
6939 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6940 md.slot[curr].user_template = -1;
6941 }
6942 else if (manual_bundling > 0)
6943 {
6944 if (md.num_slots_in_use > 0)
6945 {
6946 if (last_slot >= 2)
6947 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6948 _("`%s' does not fit into bundle"), idesc->name);
6949 else
6950 {
6951 const char *where;
6952
6953 if (template_val == 2)
6954 where = "X slot";
6955 else if (last_slot == 0)
6956 where = "slots 2 or 3";
6957 else
6958 where = "slot 3";
6959 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6960 _("`%s' can't go in %s of %s template"),
6961 idesc->name, where, ia64_templ_desc[template_val].name);
6962 }
6963 }
6964 else
6965 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6966 _("Missing '}' at end of file"));
6967 }
6968
6969 know (md.num_slots_in_use < NUM_SLOTS);
6970
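  /* Pack the 128-bit bundle: bits 0-4 hold the 5-bit template encoding
     (internal template index shifted left by one, with the end-of-insn-group
     stop bit in bit 0), slot 0 occupies bits 5-45, slot 1 bits 46-86 and
     slot 2 bits 87-127.  t0 and t1 are the low and high 64-bit halves,
     written out little-endian below.  */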
6971 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6972 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6973
6974 number_to_chars_littleendian (f + 0, t0, 8);
6975 number_to_chars_littleendian (f + 8, t1, 8);
6976 }
6977
6978 int
6979 md_parse_option (int c, const char *arg)
6980 {
6981
6982 switch (c)
6983 {
6984 /* Switches from the Intel assembler. */
6985 case 'm':
6986 if (strcmp (arg, "ilp64") == 0
6987 || strcmp (arg, "lp64") == 0
6988 || strcmp (arg, "p64") == 0)
6989 {
6990 md.flags |= EF_IA_64_ABI64;
6991 }
6992 else if (strcmp (arg, "ilp32") == 0)
6993 {
6994 md.flags &= ~EF_IA_64_ABI64;
6995 }
6996 else if (strcmp (arg, "le") == 0)
6997 {
6998 md.flags &= ~EF_IA_64_BE;
6999 default_big_endian = 0;
7000 }
7001 else if (strcmp (arg, "be") == 0)
7002 {
7003 md.flags |= EF_IA_64_BE;
7004 default_big_endian = 1;
7005 }
7006 else if (startswith (arg, "unwind-check="))
7007 {
7008 arg += 13;
7009 if (strcmp (arg, "warning") == 0)
7010 md.unwind_check = unwind_check_warning;
7011 else if (strcmp (arg, "error") == 0)
7012 md.unwind_check = unwind_check_error;
7013 else
7014 return 0;
7015 }
7016 else if (startswith (arg, "hint.b="))
7017 {
7018 arg += 7;
7019 if (strcmp (arg, "ok") == 0)
7020 md.hint_b = hint_b_ok;
7021 else if (strcmp (arg, "warning") == 0)
7022 md.hint_b = hint_b_warning;
7023 else if (strcmp (arg, "error") == 0)
7024 md.hint_b = hint_b_error;
7025 else
7026 return 0;
7027 }
7028 else if (startswith (arg, "tune="))
7029 {
7030 arg += 5;
7031 if (strcmp (arg, "itanium1") == 0)
7032 md.tune = itanium1;
7033 else if (strcmp (arg, "itanium2") == 0)
7034 md.tune = itanium2;
7035 else
7036 return 0;
7037 }
7038 else
7039 return 0;
7040 break;
7041
7042 case 'N':
7043 if (strcmp (arg, "so") == 0)
7044 {
7045 /* Suppress signon message. */
7046 }
7047 else if (strcmp (arg, "pi") == 0)
7048 {
7049 /* Reject privileged instructions. FIXME */
7050 }
7051 else if (strcmp (arg, "us") == 0)
7052 {
7053 /* Allow union of signed and unsigned range. FIXME */
7054 }
7055 else if (strcmp (arg, "close_fcalls") == 0)
7056 {
7057 /* Do not resolve global function calls. */
7058 }
7059 else
7060 return 0;
7061 break;
7062
7063 case 'C':
7064 /* temp[="prefix"] Insert temporary labels into the object file
7065 symbol table prefixed by "prefix".
7066 Default prefix is ":temp:".
7067 */
7068 break;
7069
7070 case 'a':
7071 /* indirect=<tgt> Assume unannotated indirect branches behavior
7072 according to <tgt> --
7073 exit: branch out from the current context (default)
7074 labels: all labels in context may be branch targets
7075 */
7076 if (!startswith (arg, "indirect="))
7077 return 0;
7078 break;
7079
7080 case 'x':
7081 /* -X conflicts with an ignored option, use -x instead */
7082 md.detect_dv = 1;
7083 if (!arg || strcmp (arg, "explicit") == 0)
7084 {
7085 /* set default mode to explicit */
7086 md.default_explicit_mode = 1;
7087 break;
7088 }
7089 else if (strcmp (arg, "auto") == 0)
7090 {
7091 md.default_explicit_mode = 0;
7092 }
7093 else if (strcmp (arg, "none") == 0)
7094 {
7095 md.detect_dv = 0;
7096 }
7097 else if (strcmp (arg, "debug") == 0)
7098 {
7099 md.debug_dv = 1;
7100 }
7101 else if (strcmp (arg, "debugx") == 0)
7102 {
7103 md.default_explicit_mode = 1;
7104 md.debug_dv = 1;
7105 }
7106 else if (strcmp (arg, "debugn") == 0)
7107 {
7108 md.debug_dv = 1;
7109 md.detect_dv = 0;
7110 }
7111 else
7112 {
7113 as_bad (_("Unrecognized option '-x%s'"), arg);
7114 }
7115 break;
7116
7117 case 'S':
7118 /* nops Print nops statistics. */
7119 break;
7120
7121 /* GNU specific switches for gcc. */
7122 case OPTION_MCONSTANT_GP:
7123 md.flags |= EF_IA_64_CONS_GP;
7124 break;
7125
7126 case OPTION_MAUTO_PIC:
7127 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7128 break;
7129
7130 default:
7131 return 0;
7132 }
7133
7134 return 1;
7135 }
7136
7137 void
7138 md_show_usage (FILE *stream)
7139 {
7140 fputs (_("\
7141 IA-64 options:\n\
7142 --mconstant-gp mark output file as using the constant-GP model\n\
7143 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7144 --mauto-pic mark output file as using the constant-GP model\n\
7145 without function descriptors (sets ELF header flag\n\
7146 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7147 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7148 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7149 -mtune=[itanium1|itanium2]\n\
7150 tune for a specific CPU (default -mtune=itanium2)\n\
7151 -munwind-check=[warning|error]\n\
7152 unwind directive check (default -munwind-check=warning)\n\
7153 -mhint.b=[ok|warning|error]\n\
7154 hint.b check (default -mhint.b=error)\n\
7155 -x | -xexplicit turn on dependency violation checking\n"), stream);
7156 /* Note for translators: "automagically" can be translated as "automatically" here. */
7157 fputs (_("\
7158 -xauto automagically remove dependency violations (default)\n\
7159 -xnone turn off dependency violation checking\n\
7160 -xdebug debug dependency violation checker\n\
7161 -xdebugn debug dependency violation checker but turn off\n\
7162 dependency violation checking\n\
7163 -xdebugx debug dependency violation checker and turn on\n\
7164 dependency violation checking\n"),
7165 stream);
7166 }
7167
7168 void
7169 ia64_after_parse_args (void)
7170 {
7171 if (debug_type == DEBUG_STABS)
7172 as_fatal (_("--gstabs is not supported for ia64"));
7173 }
7174
7175 /* Return true if TYPE fits in TEMPL at SLOT. */
7176
7177 static int
7178 match (int templ, int type, int slot)
7179 {
7180 enum ia64_unit unit;
7181 int result;
7182
7183 unit = ia64_templ_desc[templ].exec_unit[slot];
7184 switch (type)
7185 {
7186 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7187 case IA64_TYPE_A:
7188 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7189 break;
7190 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7191 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7192 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7193 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7194 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7195 default: result = 0; break;
7196 }
7197 return result;
7198 }
7199
7200 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7201 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7202 type M or I would fit in TEMPL at SLOT. */
7203
7204 static inline int
7205 extra_goodness (int templ, int slot)
7206 {
7207 switch (md.tune)
7208 {
7209 case itanium1:
7210 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7211 return 2;
7212 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7213 return 1;
7214 else
7215 return 0;
7216 break;
7217 case itanium2:
7218 if (match (templ, IA64_TYPE_M, slot)
7219 || match (templ, IA64_TYPE_I, slot))
7220 /* Favor M- and I-unit NOPs. We definitely want to avoid
7221 F-unit and B-unit NOPs, which may cause split-issue or
7222 less-than-optimal branch-prediction. */
7223 return 2;
7224 else
7225 return 0;
7226 break;
7227 default:
7228 abort ();
7229 return 0;
7230 }
7231 }
7232
7233 /* This function is called once, at assembler startup time. It sets
7234 up all the tables, etc. that the MD part of the assembler will need
7235 that can be determined before arguments are parsed. */
7236 void
7237 md_begin (void)
7238 {
7239 int i, j, k, t, goodness, best, ok;
7240
7241 md.auto_align = 1;
7242 md.explicit_mode = md.default_explicit_mode;
7243
7244 bfd_set_section_alignment (text_section, 4);
7245
7246 /* Make sure function pointers get initialized. */
7247 target_big_endian = -1;
7248 dot_byteorder (default_big_endian);
7249
7250 alias_hash = str_htab_create ();
7251 alias_name_hash = str_htab_create ();
7252 secalias_hash = str_htab_create ();
7253 secalias_name_hash = str_htab_create ();
7254
7255 pseudo_func[FUNC_DTP_MODULE].u.sym =
7256 symbol_new (".<dtpmod>", undefined_section,
7257 &zero_address_frag, FUNC_DTP_MODULE);
7258
7259 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7260 symbol_new (".<dtprel>", undefined_section,
7261 &zero_address_frag, FUNC_DTP_RELATIVE);
7262
7263 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7264 symbol_new (".<fptr>", undefined_section,
7265 &zero_address_frag, FUNC_FPTR_RELATIVE);
7266
7267 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7268 symbol_new (".<gprel>", undefined_section,
7269 &zero_address_frag, FUNC_GP_RELATIVE);
7270
7271 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7272 symbol_new (".<ltoff>", undefined_section,
7273 &zero_address_frag, FUNC_LT_RELATIVE);
7274
7275 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7276 symbol_new (".<ltoffx>", undefined_section,
7277 &zero_address_frag, FUNC_LT_RELATIVE_X);
7278
7279 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7280 symbol_new (".<pcrel>", undefined_section,
7281 &zero_address_frag, FUNC_PC_RELATIVE);
7282
7283 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7284 symbol_new (".<pltoff>", undefined_section,
7285 &zero_address_frag, FUNC_PLT_RELATIVE);
7286
7287 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7288 symbol_new (".<secrel>", undefined_section,
7289 &zero_address_frag, FUNC_SEC_RELATIVE);
7290
7291 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7292 symbol_new (".<segrel>", undefined_section,
7293 &zero_address_frag, FUNC_SEG_RELATIVE);
7294
7295 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7296 symbol_new (".<tprel>", undefined_section,
7297 &zero_address_frag, FUNC_TP_RELATIVE);
7298
7299 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7300 symbol_new (".<ltv>", undefined_section,
7301 &zero_address_frag, FUNC_LTV_RELATIVE);
7302
7303 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7304 symbol_new (".<ltoff.fptr>", undefined_section,
7305 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7306
7307 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7308 symbol_new (".<ltoff.dtpmod>", undefined_section,
7309 &zero_address_frag, FUNC_LT_DTP_MODULE);
7310
7311 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7312 symbol_new (".<ltoff.dptrel>", undefined_section,
7313 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7314
7315 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7316 symbol_new (".<ltoff.tprel>", undefined_section,
7317 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7318
7319 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7320 symbol_new (".<iplt>", undefined_section,
7321 &zero_address_frag, FUNC_IPLT_RELOC);
7322
7323 #ifdef TE_VMS
7324 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7325 symbol_new (".<slotcount>", undefined_section,
7326 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7327 #endif
7328
7329 if (md.tune != itanium1)
7330 {
7331 /* Convert MFI NOP bundles into MMI NOP bundles. */
7332 le_nop[0] = 0x8;
7333 le_nop_stop[0] = 0x9;
7334 }
7335
7336 /* Compute the table of best templates. We compute goodness as a
7337 base 4 value, in which each match counts for 3. Match-failures
7338 result in NOPs and we use extra_goodness() to pick the execution
7339 units that are best suited for issuing the NOP. */
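  /* For example, a template matching all three insn types scores 3+3+3,
     while one matching only slots 0 and 1 scores 3+3 plus whatever
     extra_goodness() awards for the NOP that ends up in slot 2.  */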
7340 for (i = 0; i < IA64_NUM_TYPES; ++i)
7341 for (j = 0; j < IA64_NUM_TYPES; ++j)
7342 for (k = 0; k < IA64_NUM_TYPES; ++k)
7343 {
7344 best = 0;
7345 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7346 {
7347 goodness = 0;
7348 if (match (t, i, 0))
7349 {
7350 if (match (t, j, 1))
7351 {
7352 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7353 goodness = 3 + 3 + 3;
7354 else
7355 goodness = 3 + 3 + extra_goodness (t, 2);
7356 }
7357 else if (match (t, j, 2))
7358 goodness = 3 + 3 + extra_goodness (t, 1);
7359 else
7360 {
7361 goodness = 3;
7362 goodness += extra_goodness (t, 1);
7363 goodness += extra_goodness (t, 2);
7364 }
7365 }
7366 else if (match (t, i, 1))
7367 {
7368 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7369 goodness = 3 + 3;
7370 else
7371 goodness = 3 + extra_goodness (t, 2);
7372 }
7373 else if (match (t, i, 2))
7374 goodness = 3 + extra_goodness (t, 1);
7375
7376 if (goodness > best)
7377 {
7378 best = goodness;
7379 best_template[i][j][k] = t;
7380 }
7381 }
7382 }
7383
7384 #ifdef DEBUG_TEMPLATES
7385 /* For debugging changes to the best_template calculations. We don't care
7386 about combinations with invalid instructions, so start the loops at 1. */
7387 for (i = 0; i < IA64_NUM_TYPES; ++i)
7388 for (j = 0; j < IA64_NUM_TYPES; ++j)
7389 for (k = 0; k < IA64_NUM_TYPES; ++k)
7390 {
7391 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7392 'x', 'd' };
7393 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7394 type_letter[k],
7395 ia64_templ_desc[best_template[i][j][k]].name);
7396 }
7397 #endif
7398
7399 for (i = 0; i < NUM_SLOTS; ++i)
7400 md.slot[i].user_template = -1;
7401
7402 md.pseudo_hash = str_htab_create ();
7403 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7404 if (str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7405 pseudo_opcode + i, 0) != NULL)
7406 as_fatal (_("duplicate %s"), pseudo_opcode[i].name);
7407
7408 md.reg_hash = str_htab_create ();
7409 md.dynreg_hash = str_htab_create ();
7410 md.const_hash = str_htab_create ();
7411 md.entry_hash = str_htab_create ();
7412
7413 /* general registers: */
7414 declare_register_set ("r", 128, REG_GR);
7415 declare_register ("gp", REG_GR + 1);
7416 declare_register ("sp", REG_GR + 12);
7417 declare_register ("tp", REG_GR + 13);
7418 declare_register_set ("ret", 4, REG_GR + 8);
7419
7420 /* floating point registers: */
7421 declare_register_set ("f", 128, REG_FR);
7422 declare_register_set ("farg", 8, REG_FR + 8);
7423 declare_register_set ("fret", 8, REG_FR + 8);
7424
7425 /* branch registers: */
7426 declare_register_set ("b", 8, REG_BR);
7427 declare_register ("rp", REG_BR + 0);
7428
7429 /* predicate registers: */
7430 declare_register_set ("p", 64, REG_P);
7431 declare_register ("pr", REG_PR);
7432 declare_register ("pr.rot", REG_PR_ROT);
7433
7434 /* application registers: */
7435 declare_register_set ("ar", 128, REG_AR);
7436 for (i = 0; i < NELEMS (ar); ++i)
7437 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7438
7439 /* control registers: */
7440 declare_register_set ("cr", 128, REG_CR);
7441 for (i = 0; i < NELEMS (cr); ++i)
7442 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7443
7444 /* dahr registers: */
7445 declare_register_set ("dahr", 8, REG_DAHR);
7446
7447 declare_register ("ip", REG_IP);
7448 declare_register ("cfm", REG_CFM);
7449 declare_register ("psr", REG_PSR);
7450 declare_register ("psr.l", REG_PSR_L);
7451 declare_register ("psr.um", REG_PSR_UM);
7452
7453 for (i = 0; i < NELEMS (indirect_reg); ++i)
7454 {
7455 unsigned int regnum = indirect_reg[i].regnum;
7456
7457 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7458 }
7459
7460 /* pseudo-registers used to specify unwind info: */
7461 declare_register ("psp", REG_PSP);
7462
7463 for (i = 0; i < NELEMS (const_bits); ++i)
7464 if (str_hash_insert (md.const_hash, const_bits[i].name, const_bits + i, 0))
7465 as_fatal (_("duplicate %s"), const_bits[i].name);
7466
7467 /* Set the architecture and machine depending on defaults and command line
7468 options. */
7469 if (md.flags & EF_IA_64_ABI64)
7470 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7471 else
7472 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7473
7474 if (! ok)
7475 as_warn (_("Could not set architecture and machine"));
7476
7477 /* Set the pointer size and pointer shift size depending on md.flags */
7478
7479 if (md.flags & EF_IA_64_ABI64)
7480 {
7481 md.pointer_size = 8; /* pointers are 8 bytes */
7482 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7483 }
7484 else
7485 {
7486 md.pointer_size = 4; /* pointers are 4 bytes */
7487 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7488 }
7489
7490 md.mem_offset.hint = 0;
7491 md.path = 0;
7492 md.maxpaths = 0;
7493 md.entry_labels = NULL;
7494 }
7495
7496 /* Set the default options in md. Cannot do this in md_begin because
7497 that is called after md_parse_option which is where we set the
7498 options in md based on command line options. */
7499
7500 void
7501 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7502 {
7503 md.flags = MD_FLAGS_DEFAULT;
7504 #ifndef TE_VMS
7505 /* Don't turn on dependency checking for VMS; it doesn't work there. */
7506 md.detect_dv = 1;
7507 #endif
7508 /* FIXME: We should change it to unwind_check_error someday. */
7509 md.unwind_check = unwind_check_warning;
7510 md.hint_b = hint_b_error;
7511 md.tune = itanium2;
7512 }
7513
7514 /* Return a string for the target object file format. */
7515
7516 const char *
7517 ia64_target_format (void)
7518 {
7519 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7520 {
7521 if (md.flags & EF_IA_64_BE)
7522 {
7523 if (md.flags & EF_IA_64_ABI64)
7524 #if defined(TE_AIX50)
7525 return "elf64-ia64-aix-big";
7526 #elif defined(TE_HPUX)
7527 return "elf64-ia64-hpux-big";
7528 #else
7529 return "elf64-ia64-big";
7530 #endif
7531 else
7532 #if defined(TE_AIX50)
7533 return "elf32-ia64-aix-big";
7534 #elif defined(TE_HPUX)
7535 return "elf32-ia64-hpux-big";
7536 #else
7537 return "elf32-ia64-big";
7538 #endif
7539 }
7540 else
7541 {
7542 if (md.flags & EF_IA_64_ABI64)
7543 #if defined (TE_AIX50)
7544 return "elf64-ia64-aix-little";
7545 #elif defined (TE_VMS)
7546 {
7547 md.flags |= EF_IA_64_ARCHVER_1;
7548 return "elf64-ia64-vms";
7549 }
7550 #else
7551 return "elf64-ia64-little";
7552 #endif
7553 else
7554 #ifdef TE_AIX50
7555 return "elf32-ia64-aix-little";
7556 #else
7557 return "elf32-ia64-little";
7558 #endif
7559 }
7560 }
7561 else
7562 return "unknown-format";
7563 }
7564
7565 void
7566 ia64_end_of_source (void)
7567 {
7568 /* terminate insn group upon reaching end of file: */
7569 insn_group_break (1, 0, 0);
7570
7571 /* emits slots we haven't written yet: */
7572 ia64_flush_insns ();
7573
7574 bfd_set_private_flags (stdoutput, md.flags);
7575
7576 md.mem_offset.hint = 0;
7577 }
7578
7579 void
7580 ia64_start_line (void)
7581 {
7582 static int first;
7583
7584 if (!first) {
7585 /* Make sure we don't reference input_line_pointer[-1] when that's
7586 not valid. */
7587 first = 1;
7588 return;
7589 }
7590
7591 if (md.qp.X_op == O_register)
7592 as_bad (_("qualifying predicate not followed by instruction"));
7593 md.qp.X_op = O_absent;
7594
7595 if (ignore_input ())
7596 return;
7597
7598 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7599 {
7600 if (md.detect_dv && !md.explicit_mode)
7601 {
7602 static int warned;
7603
7604 if (!warned)
7605 {
7606 warned = 1;
7607 as_warn (_("Explicit stops are ignored in auto mode"));
7608 }
7609 }
7610 else
7611 insn_group_break (1, 0, 0);
7612 }
7613 else if (input_line_pointer[-1] == '{')
7614 {
7615 if (md.manual_bundling)
7616 as_warn (_("Found '{' when manual bundling is already turned on"));
7617 else
7618 CURR_SLOT.manual_bundling_on = 1;
7619 md.manual_bundling = 1;
7620
7621 /* Bundling is only acceptable in explicit mode
7622 or when in default automatic mode. */
7623 if (md.detect_dv && !md.explicit_mode)
7624 {
7625 if (!md.mode_explicitly_set
7626 && !md.default_explicit_mode)
7627 dot_dv_mode ('E');
7628 else
7629 as_warn (_("Found '{' after explicit switch to automatic mode"));
7630 }
7631 }
7632 else if (input_line_pointer[-1] == '}')
7633 {
7634 if (!md.manual_bundling)
7635 as_warn (_("Found '}' when manual bundling is off"));
7636 else
7637 PREV_SLOT.manual_bundling_off = 1;
7638 md.manual_bundling = 0;
7639
7640 /* switch back to automatic mode, if applicable */
7641 if (md.detect_dv
7642 && md.explicit_mode
7643 && !md.mode_explicitly_set
7644 && !md.default_explicit_mode)
7645 dot_dv_mode ('A');
7646 }
7647 }
7648
7649 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7650 labels. */
7651 static int defining_tag = 0;
7652
7653 int
7654 ia64_unrecognized_line (int ch)
7655 {
7656 switch (ch)
7657 {
7658 case '(':
7659 expression_and_evaluate (&md.qp);
7660 if (*input_line_pointer++ != ')')
7661 {
7662 as_bad (_("Expected ')'"));
7663 return 0;
7664 }
7665 if (md.qp.X_op != O_register)
7666 {
7667 as_bad (_("Qualifying predicate expected"));
7668 return 0;
7669 }
7670 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7671 {
7672 as_bad (_("Predicate register expected"));
7673 return 0;
7674 }
7675 return 1;
7676
7677 case '[':
7678 {
7679 char *s;
7680 char c;
7681 symbolS *tag;
7682 int temp;
7683
7684 if (md.qp.X_op == O_register)
7685 {
7686 as_bad (_("Tag must come before qualifying predicate."));
7687 return 0;
7688 }
7689
7690 /* This implements just enough of read_a_source_file in read.c to
7691 recognize labels. */
7692 if (is_name_beginner (*input_line_pointer))
7693 {
7694 c = get_symbol_name (&s);
7695 }
7696 else if (LOCAL_LABELS_FB
7697 && ISDIGIT (*input_line_pointer))
7698 {
7699 temp = 0;
7700 while (ISDIGIT (*input_line_pointer))
7701 temp = (temp * 10) + *input_line_pointer++ - '0';
7702 fb_label_instance_inc (temp);
7703 s = fb_label_name (temp, 0);
7704 c = *input_line_pointer;
7705 }
7706 else
7707 {
7708 s = NULL;
7709 c = '\0';
7710 }
7711 if (c != ':')
7712 {
7713 /* Put ':' back for error messages' sake. */
7714 *input_line_pointer++ = ':';
7715 as_bad (_("Expected ':'"));
7716 return 0;
7717 }
7718
7719 defining_tag = 1;
7720 tag = colon (s);
7721 defining_tag = 0;
7722 /* Put ':' back for error messages' sake. */
7723 *input_line_pointer++ = ':';
7724 if (*input_line_pointer++ != ']')
7725 {
7726 as_bad (_("Expected ']'"));
7727 return 0;
7728 }
7729 if (! tag)
7730 {
7731 as_bad (_("Tag name expected"));
7732 return 0;
7733 }
7734 return 1;
7735 }
7736
7737 default:
7738 break;
7739 }
7740
7741 /* Not a valid line. */
7742 return 0;
7743 }
7744
7745 void
7746 ia64_frob_label (struct symbol *sym)
7747 {
7748 struct label_fix *fix;
7749
7750 /* Tags need special handling since they are not bundle breaks like
7751 labels. */
7752 if (defining_tag)
7753 {
7754 fix = XOBNEW (&notes, struct label_fix);
7755 fix->sym = sym;
7756 fix->next = CURR_SLOT.tag_fixups;
7757 fix->dw2_mark_labels = false;
7758 CURR_SLOT.tag_fixups = fix;
7759
7760 return;
7761 }
7762
7763 if (bfd_section_flags (now_seg) & SEC_CODE)
7764 {
7765 md.last_text_seg = now_seg;
7766 fix = XOBNEW (&notes, struct label_fix);
7767 fix->sym = sym;
7768 fix->next = CURR_SLOT.label_fixups;
7769 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7770 CURR_SLOT.label_fixups = fix;
7771
7772 /* Keep track of how many code entry points we've seen. */
7773 if (md.path == md.maxpaths)
7774 {
7775 md.maxpaths += 20;
7776 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7777 md.maxpaths);
7778 }
7779 md.entry_labels[md.path++] = S_GET_NAME (sym);
7780 }
7781 }
7782
7783 #ifdef TE_HPUX
7784 /* The HP-UX linker will give unresolved symbol errors for symbols
7785 that are declared but unused. This routine removes declared,
7786 unused symbols from an object. */
7787 int
7788 ia64_frob_symbol (struct symbol *sym)
7789 {
7790 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7791 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7792 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7793 && ! S_IS_EXTERNAL (sym)))
7794 return 1;
7795 return 0;
7796 }
7797 #endif
7798
7799 void
7800 ia64_flush_pending_output (void)
7801 {
7802 if (!md.keep_pending_output
7803 && bfd_section_flags (now_seg) & SEC_CODE)
7804 {
7805 /* ??? This causes many unnecessary stop bits to be emitted.
7806 Unfortunately, it isn't clear if it is safe to remove this. */
7807 insn_group_break (1, 0, 0);
7808 ia64_flush_insns ();
7809 }
7810 }
7811
7812 /* Do ia64-specific expression optimization. All that's done here is
7813 to transform index expressions that are either due to the indexing
7814 of rotating registers or due to the indexing of indirect register
7815 sets. */
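/* For instance (hypothetical name): with ".rotr sum[4]" in effect, "sum[2]"
   reaches this point as an O_index expression and is folded to the plain
   register two past its base, while "cpuid[r3]" becomes an O_index
   expression keyed off the corresponding indirect-register pseudo symbol.  */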
7816 int
7817 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7818 {
7819 if (op != O_index)
7820 return 0;
7821 resolve_expression (l);
7822 if (l->X_op == O_register)
7823 {
7824 unsigned num_regs = l->X_add_number >> 16;
7825
7826 resolve_expression (r);
7827 if (num_regs)
7828 {
7829 /* Left side is a .rotX-allocated register. */
7830 if (r->X_op != O_constant)
7831 {
7832 as_bad (_("Rotating register index must be a non-negative constant"));
7833 r->X_add_number = 0;
7834 }
7835 else if ((valueT) r->X_add_number >= num_regs)
7836 {
7837 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7838 r->X_add_number = 0;
7839 }
7840 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7841 return 1;
7842 }
7843 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7844 {
7845 if (r->X_op != O_register
7846 || r->X_add_number < REG_GR
7847 || r->X_add_number > REG_GR + 127)
7848 {
7849 as_bad (_("Indirect register index must be a general register"));
7850 r->X_add_number = REG_GR;
7851 }
7852 l->X_op = O_index;
7853 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7854 l->X_add_number = r->X_add_number;
7855 return 1;
7856 }
7857 }
7858 as_bad (_("Index can only be applied to rotating or indirect registers"));
7859 /* Fall back to a register whose use has as few side effects as
7860 possible, to minimize subsequent error messages. */
7861 l->X_op = O_register;
7862 l->X_add_number = REG_GR + 3;
7863 return 1;
7864 }
7865
7866 int
7867 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7868 {
7869 struct const_desc *cdesc;
7870 struct dynreg *dr = 0;
7871 unsigned int idx;
7872 struct symbol *sym;
7873 char *end;
7874
7875 if (*name == '@')
7876 {
7877 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7878
7879 /* Find what relocation pseudo-function we're dealing with. */
7880 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7881 if (pseudo_func[idx].name
7882 && pseudo_func[idx].name[0] == name[1]
7883 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7884 {
7885 pseudo_type = pseudo_func[idx].type;
7886 break;
7887 }
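      /* For example, "@gprel(sym)" matches FUNC_GP_RELATIVE above, while a
	 nested "@ltoff(@fptr(sym))" first resolves to FUNC_LT_RELATIVE and
	 is collapsed to FUNC_LT_FPTR_RELATIVE further down.  */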
7888 switch (pseudo_type)
7889 {
7890 case PSEUDO_FUNC_RELOC:
7891 end = input_line_pointer;
7892 if (*nextcharP != '(')
7893 {
7894 as_bad (_("Expected '('"));
7895 break;
7896 }
7897 /* Skip '('. */
7898 ++input_line_pointer;
7899 expression (e);
7900 if (*input_line_pointer != ')')
7901 {
7902 as_bad (_("Missing ')'"));
7903 goto done;
7904 }
7905 /* Skip ')'. */
7906 ++input_line_pointer;
7907 #ifdef TE_VMS
7908 if (idx == FUNC_SLOTCOUNT_RELOC)
7909 {
7910 /* @slotcount can accept any expression. Canonicalize. */
7911 e->X_add_symbol = make_expr_symbol (e);
7912 e->X_op = O_symbol;
7913 e->X_add_number = 0;
7914 }
7915 #endif
7916 if (e->X_op != O_symbol)
7917 {
7918 if (e->X_op != O_pseudo_fixup)
7919 {
7920 as_bad (_("Not a symbolic expression"));
7921 goto done;
7922 }
7923 if (idx != FUNC_LT_RELATIVE)
7924 {
7925 as_bad (_("Illegal combination of relocation functions"));
7926 goto done;
7927 }
7928 switch (S_GET_VALUE (e->X_op_symbol))
7929 {
7930 case FUNC_FPTR_RELATIVE:
7931 idx = FUNC_LT_FPTR_RELATIVE; break;
7932 case FUNC_DTP_MODULE:
7933 idx = FUNC_LT_DTP_MODULE; break;
7934 case FUNC_DTP_RELATIVE:
7935 idx = FUNC_LT_DTP_RELATIVE; break;
7936 case FUNC_TP_RELATIVE:
7937 idx = FUNC_LT_TP_RELATIVE; break;
7938 default:
7939 as_bad (_("Illegal combination of relocation functions"));
7940 goto done;
7941 }
7942 }
7943 /* Make sure gas doesn't get rid of local symbols that are used
7944 in relocs. */
7945 e->X_op = O_pseudo_fixup;
7946 e->X_op_symbol = pseudo_func[idx].u.sym;
7947 done:
7948 *nextcharP = *input_line_pointer;
7949 break;
7950
7951 case PSEUDO_FUNC_CONST:
7952 e->X_op = O_constant;
7953 e->X_add_number = pseudo_func[idx].u.ival;
7954 break;
7955
7956 case PSEUDO_FUNC_REG:
7957 e->X_op = O_register;
7958 e->X_add_number = pseudo_func[idx].u.ival;
7959 break;
7960
7961 default:
7962 return 0;
7963 }
7964 return 1;
7965 }
7966
7967 /* first see if NAME is a known register name: */
7968 sym = str_hash_find (md.reg_hash, name);
7969 if (sym)
7970 {
7971 e->X_op = O_register;
7972 e->X_add_number = S_GET_VALUE (sym);
7973 return 1;
7974 }
7975
7976 cdesc = str_hash_find (md.const_hash, name);
7977 if (cdesc)
7978 {
7979 e->X_op = O_constant;
7980 e->X_add_number = cdesc->value;
7981 return 1;
7982 }
7983
7984 /* check for inN, locN, or outN: */
7985 idx = 0;
7986 switch (name[0])
7987 {
7988 case 'i':
7989 if (name[1] == 'n' && ISDIGIT (name[2]))
7990 {
7991 dr = &md.in;
7992 idx = 2;
7993 }
7994 break;
7995
7996 case 'l':
7997 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7998 {
7999 dr = &md.loc;
8000 idx = 3;
8001 }
8002 break;
8003
8004 case 'o':
8005 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8006 {
8007 dr = &md.out;
8008 idx = 3;
8009 }
8010 break;
8011
8012 default:
8013 break;
8014 }
8015
8016 /* Ignore register numbers with leading zeroes, except zero itself. */
8017 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8018 {
8019 unsigned long regnum;
8020
8021 /* The name is inN, locN, or outN; parse the register number. */
8022 regnum = strtoul (name + idx, &end, 10);
8023 if (end > name + idx && *end == '\0' && regnum < 96)
8024 {
8025 if (regnum >= dr->num_regs)
8026 {
8027 if (!dr->num_regs)
8028 as_bad (_("No current frame"));
8029 else
8030 as_bad (_("Register number out of range 0..%u"),
8031 dr->num_regs - 1);
8032 regnum = 0;
8033 }
8034 e->X_op = O_register;
8035 e->X_add_number = dr->base + regnum;
8036 return 1;
8037 }
8038 }
8039
8040 end = xstrdup (name);
8041 name = ia64_canonicalize_symbol_name (end);
8042 if ((dr = str_hash_find (md.dynreg_hash, name)))
8043 {
8044 /* We've got ourselves the name of a rotating register set.
8045 Store the base register number in the low 16 bits of
8046 X_add_number and the size of the register set in the top 16
8047 bits. */
8048 e->X_op = O_register;
8049 e->X_add_number = dr->base | (dr->num_regs << 16);
8050 free (end);
8051 return 1;
8052 }
8053 free (end);
8054 return 0;
8055 }
8056
8057 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
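/* E.g. a symbol spelled like a register name, say "f8", can be written
   "f8#" in the source to force it to be treated as a symbol; that suffix is
   stripped again here.  */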
8058
8059 char *
8060 ia64_canonicalize_symbol_name (char *name)
8061 {
8062 size_t len = strlen (name), full = len;
8063
8064 while (len > 0 && name[len - 1] == '#')
8065 --len;
8066 if (len <= 0)
8067 {
8068 if (full > 0)
8069 as_bad (_("Standalone `#' is illegal"));
8070 }
8071 else if (len < full - 1)
8072 as_warn (_("Redundant `#' suffix operators"));
8073 name[len] = '\0';
8074 return name;
8075 }
8076
8077 /* Return true if idesc is a conditional branch instruction. This excludes
8078 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8079 because they always read/write resources regardless of the value of the
8080 qualifying predicate. br.ia must always use p0, and hence is always
8081 taken. Thus this function returns true for branches which can fall
8082 through, and which use no resources if they do fall through. */
8083
8084 static int
8085 is_conditional_branch (struct ia64_opcode *idesc)
8086 {
8087 /* br is a conditional branch. Everything that starts with br. except
8088 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8089 Everything that starts with brl is a conditional branch. */
8090 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8091 && (idesc->name[2] == '\0'
8092 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8093 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8094 || idesc->name[2] == 'l'
8095 /* br.cond, br.call, br.clr */
8096 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8097 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8098 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8099 }
8100
8101 /* Return whether the given opcode is a taken branch. If there's any doubt,
8102 returns zero. */
8103
8104 static int
8105 is_taken_branch (struct ia64_opcode *idesc)
8106 {
8107 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8108 || startswith (idesc->name, "br.ia"));
8109 }
8110
8111 /* Return whether the given opcode is an interruption or rfi. If there's any
8112 doubt, returns zero. */
8113
8114 static int
8115 is_interruption_or_rfi (struct ia64_opcode *idesc)
8116 {
8117 if (strcmp (idesc->name, "rfi") == 0)
8118 return 1;
8119 return 0;
8120 }
8121
8122 /* Returns the index of the given dependency in the opcode's list of chks, or
8123 -1 if there is no dependency. */
8124
8125 static int
8126 depends_on (int depind, struct ia64_opcode *idesc)
8127 {
8128 int i;
8129 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8130 for (i = 0; i < dep->nchks; i++)
8131 {
8132 if (depind == DEP (dep->chks[i]))
8133 return i;
8134 }
8135 return -1;
8136 }
8137
8138 /* Determine a set of specific resources used for a particular resource
8139 class. Returns the number of specific resources identified. For those
8140 cases which are not determinable statically, the resource returned is
8141 marked nonspecific.
8142
8143 Meanings of value in 'NOTE':
8144 1) only read/write when the register number is explicitly encoded in the
8145 insn.
8146 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8147 accesses CFM when qualifying predicate is in the rotating region.
8148 3) general register value is used to specify an indirect register; not
8149 determinable statically.
8150 4) only read the given resource when bits 7:0 of the indirect index
8151 register value do not match the register number of the resource; not
8152 determinable statically.
8153 5) all rules are implementation specific.
8154 6) only when both the index specified by the reader and the index specified
8155 by the writer have the same value in bits 63:61; not determinable
8156 statically.
8157 7) only access the specified resource when the corresponding mask bit is
8158 set
8159 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8160 only read when these insns reference FR2-31
8161 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8162 written when these insns write FR32-127
8163 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8164 instruction
8165 11) The target predicates are written independently of PR[qp], but source
8166 registers are only read if PR[qp] is true. Since the state of PR[qp]
8167 cannot statically be determined, all source registers are marked used.
8168 12) This insn only reads the specified predicate register when that
8169 register is the PR[qp].
8170 13) This reference to ld-c only applies to the GR whose value is loaded
8171 with data returned from memory, not the post-incremented address register.
8172 14) The RSE resource includes the implementation-specific RSE internal
8173 state resources. At least one (and possibly more) of these resources are
8174 read by each instruction listed in IC:rse-readers. At least one (and
8175 possibly more) of these resources are written by each insn listed in
8176 IC:rse-writers.
8177 15+16) Represents reserved instructions, which the assembler does not
8178 generate.
8179 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8180 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8181
8182 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8183 this code; there are no dependency violations based on memory access.
8184 */
8185
8186 #define MAX_SPECS 256
8187 #define DV_CHK 1
8188 #define DV_REG 0
8189
8190 static int
8191 specify_resource (const struct ia64_dependency *dep,
8192 struct ia64_opcode *idesc,
8193 /* is this a DV chk or a DV reg? */
8194 int type,
8195 /* returned specific resources */
8196 struct rsrc specs[MAX_SPECS],
8197 /* resource note for this insn's usage */
8198 int note,
8199 /* which execution path to examine */
8200 int path)
8201 {
8202 int count = 0;
8203 int i;
8204 int rsrc_write = 0;
8205 struct rsrc tmpl;
8206
8207 if (dep->mode == IA64_DV_WAW
8208 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8209 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8210 rsrc_write = 1;
8211
8212 /* template for any resources we identify */
8213 tmpl.dependency = dep;
8214 tmpl.note = note;
8215 tmpl.insn_srlz = tmpl.data_srlz = 0;
8216 tmpl.qp_regno = CURR_SLOT.qp_regno;
8217 tmpl.link_to_qp_branch = 1;
8218 tmpl.mem_offset.hint = 0;
8219 tmpl.mem_offset.offset = 0;
8220 tmpl.mem_offset.base = 0;
8221 tmpl.specific = 1;
8222 tmpl.index = -1;
8223 tmpl.cmp_type = CMP_NONE;
8224 tmpl.depind = 0;
8225 tmpl.file = NULL;
8226 tmpl.line = 0;
8227 tmpl.path = 0;
8228
8229 #define UNHANDLED \
8230 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8231 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8232 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
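/* gr_values[] (maintained elsewhere in this file) records general registers
   whose contents are known on the current execution path; when a note-3
   indirect access such as "mov ... = cpuid[rN]" uses such a register, the
   reference can be narrowed to a specific index instead of being marked
   nonspecific.  */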
8233
8234 /* we don't need to track these */
8235 if (dep->semantics == IA64_DVS_NONE)
8236 return 0;
8237
8238 switch (dep->specifier)
8239 {
8240 case IA64_RS_AR_K:
8241 if (note == 1)
8242 {
8243 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8244 {
8245 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8246 if (regno >= 0 && regno <= 7)
8247 {
8248 specs[count] = tmpl;
8249 specs[count++].index = regno;
8250 }
8251 }
8252 }
8253 else if (note == 0)
8254 {
8255 for (i = 0; i < 8; i++)
8256 {
8257 specs[count] = tmpl;
8258 specs[count++].index = i;
8259 }
8260 }
8261 else
8262 {
8263 UNHANDLED;
8264 }
8265 break;
8266
8267 case IA64_RS_AR_UNAT:
8268 /* This is a mov =AR or mov AR= instruction. */
8269 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8270 {
8271 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8272 if (regno == AR_UNAT)
8273 {
8274 specs[count++] = tmpl;
8275 }
8276 }
8277 else
8278 {
8279 /* This is a spill/fill, or other instruction that modifies the
8280 unat register. */
8281
8282 /* Unless we can determine the specific bits used, mark the whole
8283 thing; bits 8:3 of the memory address indicate the bit used in
8284 UNAT. The .mem.offset hint may be used to eliminate a small
8285 subset of conflicts. */
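	  /* Illustrative sketch: a pair of spills which the .mem.offset
	     hint marks as using distinct UNAT bits,

		.mem.offset 0, 0
		st8.spill [r2] = r3	// UNAT bit 0
		.mem.offset 8, 0
		st8.spill [r4] = r5	// UNAT bit 1

	     would not be treated as conflicting on AR[UNAT]; without the
	     hints, both stores are conservatively assumed to conflict.  */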
8286 specs[count] = tmpl;
8287 if (md.mem_offset.hint)
8288 {
8289 if (md.debug_dv)
8290 fprintf (stderr, " Using hint for spill/fill\n");
8291 /* The index isn't actually used, just set it to something
8292 approximating the bit index. */
8293 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8294 specs[count].mem_offset.hint = 1;
8295 specs[count].mem_offset.offset = md.mem_offset.offset;
8296 specs[count++].mem_offset.base = md.mem_offset.base;
8297 }
8298 else
8299 {
8300 specs[count++].specific = 0;
8301 }
8302 }
8303 break;
8304
8305 case IA64_RS_AR:
8306 if (note == 1)
8307 {
8308 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8309 {
8310 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8311 if ((regno >= 8 && regno <= 15)
8312 || (regno >= 20 && regno <= 23)
8313 || (regno >= 31 && regno <= 39)
8314 || (regno >= 41 && regno <= 47)
8315 || (regno >= 67 && regno <= 111))
8316 {
8317 specs[count] = tmpl;
8318 specs[count++].index = regno;
8319 }
8320 }
8321 }
8322 else
8323 {
8324 UNHANDLED;
8325 }
8326 break;
8327
8328 case IA64_RS_ARb:
8329 if (note == 1)
8330 {
8331 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8332 {
8333 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8334 if ((regno >= 48 && regno <= 63)
8335 || (regno >= 112 && regno <= 127))
8336 {
8337 specs[count] = tmpl;
8338 specs[count++].index = regno;
8339 }
8340 }
8341 }
8342 else if (note == 0)
8343 {
8344 for (i = 48; i < 64; i++)
8345 {
8346 specs[count] = tmpl;
8347 specs[count++].index = i;
8348 }
8349 for (i = 112; i < 128; i++)
8350 {
8351 specs[count] = tmpl;
8352 specs[count++].index = i;
8353 }
8354 }
8355 else
8356 {
8357 UNHANDLED;
8358 }
8359 break;
8360
8361 case IA64_RS_BR:
8362 if (note != 1)
8363 {
8364 UNHANDLED;
8365 }
8366 else
8367 {
8368 if (rsrc_write)
8369 {
8370 for (i = 0; i < idesc->num_outputs; i++)
8371 if (idesc->operands[i] == IA64_OPND_B1
8372 || idesc->operands[i] == IA64_OPND_B2)
8373 {
8374 specs[count] = tmpl;
8375 specs[count++].index =
8376 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8377 }
8378 }
8379 else
8380 {
8381 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8382 if (idesc->operands[i] == IA64_OPND_B1
8383 || idesc->operands[i] == IA64_OPND_B2)
8384 {
8385 specs[count] = tmpl;
8386 specs[count++].index =
8387 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8388 }
8389 }
8390 }
8391 break;
8392
8393 case IA64_RS_CPUID: /* four or more registers */
8394 if (note == 3)
8395 {
8396 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8397 {
8398 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8399 if (regno >= 0 && regno < NELEMS (gr_values)
8400 && KNOWN (regno))
8401 {
8402 specs[count] = tmpl;
8403 specs[count++].index = gr_values[regno].value & 0xFF;
8404 }
8405 else
8406 {
8407 specs[count] = tmpl;
8408 specs[count++].specific = 0;
8409 }
8410 }
8411 }
8412 else
8413 {
8414 UNHANDLED;
8415 }
8416 break;
8417
8418 case IA64_RS_DBR: /* four or more registers */
8419 if (note == 3)
8420 {
8421 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8422 {
8423 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8424 if (regno >= 0 && regno < NELEMS (gr_values)
8425 && KNOWN (regno))
8426 {
8427 specs[count] = tmpl;
8428 specs[count++].index = gr_values[regno].value & 0xFF;
8429 }
8430 else
8431 {
8432 specs[count] = tmpl;
8433 specs[count++].specific = 0;
8434 }
8435 }
8436 }
8437 else if (note == 0 && !rsrc_write)
8438 {
8439 specs[count] = tmpl;
8440 specs[count++].specific = 0;
8441 }
8442 else
8443 {
8444 UNHANDLED;
8445 }
8446 break;
8447
8448 case IA64_RS_IBR: /* four or more registers */
8449 if (note == 3)
8450 {
8451 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8452 {
8453 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8454 if (regno >= 0 && regno < NELEMS (gr_values)
8455 && KNOWN (regno))
8456 {
8457 specs[count] = tmpl;
8458 specs[count++].index = gr_values[regno].value & 0xFF;
8459 }
8460 else
8461 {
8462 specs[count] = tmpl;
8463 specs[count++].specific = 0;
8464 }
8465 }
8466 }
8467 else
8468 {
8469 UNHANDLED;
8470 }
8471 break;
8472
8473 case IA64_RS_MSR:
8474 if (note == 5)
8475 {
8476 /* These are implementation specific. Force all references to
8477 conflict with all other references. */
8478 specs[count] = tmpl;
8479 specs[count++].specific = 0;
8480 }
8481 else
8482 {
8483 UNHANDLED;
8484 }
8485 break;
8486
8487 case IA64_RS_PKR: /* 16 or more registers */
8488 if (note == 3 || note == 4)
8489 {
8490 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8491 {
8492 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8493 if (regno >= 0 && regno < NELEMS (gr_values)
8494 && KNOWN (regno))
8495 {
8496 if (note == 3)
8497 {
8498 specs[count] = tmpl;
8499 specs[count++].index = gr_values[regno].value & 0xFF;
8500 }
8501 else
8502 for (i = 0; i < NELEMS (gr_values); i++)
8503 {
8504 /* Uses all registers *except* the one in R3. */
8505 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8506 {
8507 specs[count] = tmpl;
8508 specs[count++].index = i;
8509 }
8510 }
8511 }
8512 else
8513 {
8514 specs[count] = tmpl;
8515 specs[count++].specific = 0;
8516 }
8517 }
8518 }
8519 else if (note == 0)
8520 {
8521 /* probe et al. */
8522 specs[count] = tmpl;
8523 specs[count++].specific = 0;
8524 }
8525 break;
8526
8527 case IA64_RS_PMC: /* four or more registers */
8528 if (note == 3)
8529 {
8530 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8531 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8532
8533 {
8534 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8535 ? 1 : !rsrc_write);
8536 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8537 if (regno >= 0 && regno < NELEMS (gr_values)
8538 && KNOWN (regno))
8539 {
8540 specs[count] = tmpl;
8541 specs[count++].index = gr_values[regno].value & 0xFF;
8542 }
8543 else
8544 {
8545 specs[count] = tmpl;
8546 specs[count++].specific = 0;
8547 }
8548 }
8549 }
8550 else
8551 {
8552 UNHANDLED;
8553 }
8554 break;
8555
8556 case IA64_RS_PMD: /* four or more registers */
8557 if (note == 3)
8558 {
8559 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8560 {
8561 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8562 if (regno >= 0 && regno < NELEMS (gr_values)
8563 && KNOWN (regno))
8564 {
8565 specs[count] = tmpl;
8566 specs[count++].index = gr_values[regno].value & 0xFF;
8567 }
8568 else
8569 {
8570 specs[count] = tmpl;
8571 specs[count++].specific = 0;
8572 }
8573 }
8574 }
8575 else
8576 {
8577 UNHANDLED;
8578 }
8579 break;
8580
8581 case IA64_RS_RR: /* eight registers */
8582 if (note == 6)
8583 {
8584 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8585 {
8586 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8587 if (regno >= 0 && regno < NELEMS (gr_values)
8588 && KNOWN (regno))
8589 {
8590 specs[count] = tmpl;
8591 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8592 }
8593 else
8594 {
8595 specs[count] = tmpl;
8596 specs[count++].specific = 0;
8597 }
8598 }
8599 }
8600 else if (note == 0 && !rsrc_write)
8601 {
8602 specs[count] = tmpl;
8603 specs[count++].specific = 0;
8604 }
8605 else
8606 {
8607 UNHANDLED;
8608 }
8609 break;
8610
8611 case IA64_RS_CR_IRR:
8612 if (note == 0)
8613 {
8614 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8615 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8616 if (rsrc_write
8617 && idesc->operands[1] == IA64_OPND_CR3
8618 && regno == CR_IVR)
8619 {
8620 for (i = 0; i < 4; i++)
8621 {
8622 specs[count] = tmpl;
8623 specs[count++].index = CR_IRR0 + i;
8624 }
8625 }
8626 }
8627 else if (note == 1)
8628 {
8629 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8630 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8631 && regno >= CR_IRR0
8632 && regno <= CR_IRR3)
8633 {
8634 specs[count] = tmpl;
8635 specs[count++].index = regno;
8636 }
8637 }
8638 else
8639 {
8640 UNHANDLED;
8641 }
8642 break;
8643
8644 case IA64_RS_CR_IIB:
8645 if (note != 0)
8646 {
8647 UNHANDLED;
8648 }
8649 else
8650 {
8651 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8652 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8653 && (regno == CR_IIB0 || regno == CR_IIB1))
8654 {
8655 specs[count] = tmpl;
8656 specs[count++].index = regno;
8657 }
8658 }
8659 break;
8660
8661 case IA64_RS_CR_LRR:
8662 if (note != 1)
8663 {
8664 UNHANDLED;
8665 }
8666 else
8667 {
8668 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8669 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8670 && (regno == CR_LRR0 || regno == CR_LRR1))
8671 {
8672 specs[count] = tmpl;
8673 specs[count++].index = regno;
8674 }
8675 }
8676 break;
8677
8678 case IA64_RS_CR:
8679 if (note == 1)
8680 {
8681 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8682 {
8683 specs[count] = tmpl;
8684 specs[count++].index =
8685 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8686 }
8687 }
8688 else
8689 {
8690 UNHANDLED;
8691 }
8692 break;
8693
8694 case IA64_RS_DAHR:
8695 if (note == 0)
8696 {
8697 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8698 {
8699 specs[count] = tmpl;
8700 specs[count++].index =
8701 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8702 }
8703 }
8704 else
8705 {
8706 UNHANDLED;
8707 }
8708 break;
8709
8710 case IA64_RS_FR:
8711 case IA64_RS_FRb:
8712 if (note != 1)
8713 {
8714 UNHANDLED;
8715 }
8716 else if (rsrc_write)
8717 {
8718 if (dep->specifier == IA64_RS_FRb
8719 && idesc->operands[0] == IA64_OPND_F1)
8720 {
8721 specs[count] = tmpl;
8722 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8723 }
8724 }
8725 else
8726 {
8727 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8728 {
8729 if (idesc->operands[i] == IA64_OPND_F2
8730 || idesc->operands[i] == IA64_OPND_F3
8731 || idesc->operands[i] == IA64_OPND_F4)
8732 {
8733 specs[count] = tmpl;
8734 specs[count++].index =
8735 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8736 }
8737 }
8738 }
8739 break;
8740
8741 case IA64_RS_GR:
8742 if (note == 13)
8743 {
8744 /* This reference applies only to the GR whose value is loaded with
8745 data returned from memory. */
8746 specs[count] = tmpl;
8747 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8748 }
8749 else if (note == 1)
8750 {
8751 if (rsrc_write)
8752 {
8753 for (i = 0; i < idesc->num_outputs; i++)
8754 if (idesc->operands[i] == IA64_OPND_R1
8755 || idesc->operands[i] == IA64_OPND_R2
8756 || idesc->operands[i] == IA64_OPND_R3)
8757 {
8758 specs[count] = tmpl;
8759 specs[count++].index =
8760 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8761 }
8762 if (idesc->flags & IA64_OPCODE_POSTINC)
8763 for (i = 0; i < NELEMS (idesc->operands); i++)
8764 if (idesc->operands[i] == IA64_OPND_MR3)
8765 {
8766 specs[count] = tmpl;
8767 specs[count++].index =
8768 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8769 }
8770 }
8771 else
8772 {
8773 /* Look for anything that reads a GR. */
8774 for (i = 0; i < NELEMS (idesc->operands); i++)
8775 {
8776 if (idesc->operands[i] == IA64_OPND_MR3
8777 || idesc->operands[i] == IA64_OPND_CPUID_R3
8778 || idesc->operands[i] == IA64_OPND_DBR_R3
8779 || idesc->operands[i] == IA64_OPND_IBR_R3
8780 || idesc->operands[i] == IA64_OPND_MSR_R3
8781 || idesc->operands[i] == IA64_OPND_PKR_R3
8782 || idesc->operands[i] == IA64_OPND_PMC_R3
8783 || idesc->operands[i] == IA64_OPND_PMD_R3
8784 || idesc->operands[i] == IA64_OPND_DAHR_R3
8785 || idesc->operands[i] == IA64_OPND_RR_R3
8786 || ((i >= idesc->num_outputs)
8787 && (idesc->operands[i] == IA64_OPND_R1
8788 || idesc->operands[i] == IA64_OPND_R2
8789 || idesc->operands[i] == IA64_OPND_R3
8790 /* addl source register. */
8791 || idesc->operands[i] == IA64_OPND_R3_2)))
8792 {
8793 specs[count] = tmpl;
8794 specs[count++].index =
8795 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8796 }
8797 }
8798 }
8799 }
8800 else
8801 {
8802 UNHANDLED;
8803 }
8804 break;
8805
8806 /* This is the same as IA64_RS_PRr, except that the register range is
8807 from 1 - 15, and there are no rotating register reads/writes here. */
8808 case IA64_RS_PR:
8809 if (note == 0)
8810 {
8811 for (i = 1; i < 16; i++)
8812 {
8813 specs[count] = tmpl;
8814 specs[count++].index = i;
8815 }
8816 }
8817 else if (note == 7)
8818 {
8819 valueT mask = 0;
8820 /* Mark only those registers indicated by the mask. */
8821 if (rsrc_write)
8822 {
8823 mask = CURR_SLOT.opnd[2].X_add_number;
8824 for (i = 1; i < 16; i++)
8825 if (mask & ((valueT) 1 << i))
8826 {
8827 specs[count] = tmpl;
8828 specs[count++].index = i;
8829 }
8830 }
8831 else
8832 {
8833 UNHANDLED;
8834 }
8835 }
8836 else if (note == 11) /* note 11 implies note 1 as well */
8837 {
8838 if (rsrc_write)
8839 {
8840 for (i = 0; i < idesc->num_outputs; i++)
8841 {
8842 if (idesc->operands[i] == IA64_OPND_P1
8843 || idesc->operands[i] == IA64_OPND_P2)
8844 {
8845 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8846 if (regno >= 1 && regno < 16)
8847 {
8848 specs[count] = tmpl;
8849 specs[count++].index = regno;
8850 }
8851 }
8852 }
8853 }
8854 else
8855 {
8856 UNHANDLED;
8857 }
8858 }
8859 else if (note == 12)
8860 {
8861 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8862 {
8863 specs[count] = tmpl;
8864 specs[count++].index = CURR_SLOT.qp_regno;
8865 }
8866 }
8867 else if (note == 1)
8868 {
8869 if (rsrc_write)
8870 {
8871 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8872 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8873 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8874 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8875
8876 if ((idesc->operands[0] == IA64_OPND_P1
8877 || idesc->operands[0] == IA64_OPND_P2)
8878 && p1 >= 1 && p1 < 16)
8879 {
8880 specs[count] = tmpl;
8881 specs[count].cmp_type =
8882 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8883 specs[count++].index = p1;
8884 }
8885 if ((idesc->operands[1] == IA64_OPND_P1
8886 || idesc->operands[1] == IA64_OPND_P2)
8887 && p2 >= 1 && p2 < 16)
8888 {
8889 specs[count] = tmpl;
8890 specs[count].cmp_type =
8891 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8892 specs[count++].index = p2;
8893 }
8894 }
8895 else
8896 {
8897 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8898 {
8899 specs[count] = tmpl;
8900 specs[count++].index = CURR_SLOT.qp_regno;
8901 }
8902 if (idesc->operands[1] == IA64_OPND_PR)
8903 {
8904 for (i = 1; i < 16; i++)
8905 {
8906 specs[count] = tmpl;
8907 specs[count++].index = i;
8908 }
8909 }
8910 }
8911 }
8912 else
8913 {
8914 UNHANDLED;
8915 }
8916 break;
8917
8918 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8919 simplified cases of this. */
8920 case IA64_RS_PRr:
8921 if (note == 0)
8922 {
8923 for (i = 16; i < 63; i++)
8924 {
8925 specs[count] = tmpl;
8926 specs[count++].index = i;
8927 }
8928 }
8929 else if (note == 7)
8930 {
8931 valueT mask = 0;
8932 /* Mark only those registers indicated by the mask. */
8933 if (rsrc_write
8934 && idesc->operands[0] == IA64_OPND_PR)
8935 {
8936 mask = CURR_SLOT.opnd[2].X_add_number;
8937 if (mask & ((valueT) 1 << 16))
8938 for (i = 16; i < 63; i++)
8939 {
8940 specs[count] = tmpl;
8941 specs[count++].index = i;
8942 }
8943 }
8944 else if (rsrc_write
8945 && idesc->operands[0] == IA64_OPND_PR_ROT)
8946 {
8947 for (i = 16; i < 63; i++)
8948 {
8949 specs[count] = tmpl;
8950 specs[count++].index = i;
8951 }
8952 }
8953 else
8954 {
8955 UNHANDLED;
8956 }
8957 }
8958 else if (note == 11) /* note 11 implies note 1 as well */
8959 {
8960 if (rsrc_write)
8961 {
8962 for (i = 0; i < idesc->num_outputs; i++)
8963 {
8964 if (idesc->operands[i] == IA64_OPND_P1
8965 || idesc->operands[i] == IA64_OPND_P2)
8966 {
8967 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8968 if (regno >= 16 && regno < 63)
8969 {
8970 specs[count] = tmpl;
8971 specs[count++].index = regno;
8972 }
8973 }
8974 }
8975 }
8976 else
8977 {
8978 UNHANDLED;
8979 }
8980 }
8981 else if (note == 12)
8982 {
8983 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8984 {
8985 specs[count] = tmpl;
8986 specs[count++].index = CURR_SLOT.qp_regno;
8987 }
8988 }
8989 else if (note == 1)
8990 {
8991 if (rsrc_write)
8992 {
8993 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8994 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8995 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8996 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8997
8998 if ((idesc->operands[0] == IA64_OPND_P1
8999 || idesc->operands[0] == IA64_OPND_P2)
9000 && p1 >= 16 && p1 < 63)
9001 {
9002 specs[count] = tmpl;
9003 specs[count].cmp_type =
9004 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9005 specs[count++].index = p1;
9006 }
9007 if ((idesc->operands[1] == IA64_OPND_P1
9008 || idesc->operands[1] == IA64_OPND_P2)
9009 && p2 >= 16 && p2 < 63)
9010 {
9011 specs[count] = tmpl;
9012 specs[count].cmp_type =
9013 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9014 specs[count++].index = p2;
9015 }
9016 }
9017 else
9018 {
9019 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9020 {
9021 specs[count] = tmpl;
9022 specs[count++].index = CURR_SLOT.qp_regno;
9023 }
9024 if (idesc->operands[1] == IA64_OPND_PR)
9025 {
9026 for (i = 16; i < 63; i++)
9027 {
9028 specs[count] = tmpl;
9029 specs[count++].index = i;
9030 }
9031 }
9032 }
9033 }
9034 else
9035 {
9036 UNHANDLED;
9037 }
9038 break;
9039
9040 case IA64_RS_PSR:
9041 /* Verify that the instruction is using the PSR bit indicated in
9042 dep->regindex. */
9043 if (note == 0)
9044 {
9045 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9046 {
9047 if (dep->regindex < 6)
9048 {
9049 specs[count++] = tmpl;
9050 }
9051 }
9052 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9053 {
9054 if (dep->regindex < 32
9055 || dep->regindex == 35
9056 || dep->regindex == 36
9057 || (!rsrc_write && dep->regindex == PSR_CPL))
9058 {
9059 specs[count++] = tmpl;
9060 }
9061 }
9062 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9063 {
9064 if (dep->regindex < 32
9065 || dep->regindex == 35
9066 || dep->regindex == 36
9067 || (rsrc_write && dep->regindex == PSR_CPL))
9068 {
9069 specs[count++] = tmpl;
9070 }
9071 }
9072 else
9073 {
9074 /* Several PSR bits have very specific dependencies. */
9075 switch (dep->regindex)
9076 {
9077 default:
9078 specs[count++] = tmpl;
9079 break;
9080 case PSR_IC:
9081 if (rsrc_write)
9082 {
9083 specs[count++] = tmpl;
9084 }
9085 else
9086 {
9087 /* Only certain CR accesses use PSR.ic */
9088 if (idesc->operands[0] == IA64_OPND_CR3
9089 || idesc->operands[1] == IA64_OPND_CR3)
9090 {
9091 int reg_index =
9092 ((idesc->operands[0] == IA64_OPND_CR3)
9093 ? 0 : 1);
9094 int regno =
9095 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9096
9097 switch (regno)
9098 {
9099 default:
9100 break;
9101 case CR_ITIR:
9102 case CR_IFS:
9103 case CR_IIM:
9104 case CR_IIP:
9105 case CR_IPSR:
9106 case CR_ISR:
9107 case CR_IFA:
9108 case CR_IHA:
9109 case CR_IIB0:
9110 case CR_IIB1:
9111 case CR_IIPA:
9112 specs[count++] = tmpl;
9113 break;
9114 }
9115 }
9116 }
9117 break;
9118 case PSR_CPL:
9119 if (rsrc_write)
9120 {
9121 specs[count++] = tmpl;
9122 }
9123 else
9124 {
9125 /* Only some AR accesses use cpl */
9126 if (idesc->operands[0] == IA64_OPND_AR3
9127 || idesc->operands[1] == IA64_OPND_AR3)
9128 {
9129 int reg_index =
9130 ((idesc->operands[0] == IA64_OPND_AR3)
9131 ? 0 : 1);
9132 int regno =
9133 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9134
9135 if (regno == AR_ITC
9136 || regno == AR_RUC
9137 || (reg_index == 0
9138 && (regno == AR_RSC
9139 || (regno >= AR_K0
9140 && regno <= AR_K7))))
9141 {
9142 specs[count++] = tmpl;
9143 }
9144 }
9145 else
9146 {
9147 specs[count++] = tmpl;
9148 }
9149 break;
9150 }
9151 }
9152 }
9153 }
9154 else if (note == 7)
9155 {
9156 valueT mask = 0;
9157 if (idesc->operands[0] == IA64_OPND_IMMU24)
9158 {
9159 mask = CURR_SLOT.opnd[0].X_add_number;
9160 }
9161 else
9162 {
9163 UNHANDLED;
9164 }
9165 if (mask & ((valueT) 1 << dep->regindex))
9166 {
9167 specs[count++] = tmpl;
9168 }
9169 }
9170 else if (note == 8)
9171 {
9172 int min = dep->regindex == PSR_DFL ? 2 : 32;
9173 int max = dep->regindex == PSR_DFL ? 31 : 127;
9174 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9175 for (i = 0; i < NELEMS (idesc->operands); i++)
9176 {
9177 if (idesc->operands[i] == IA64_OPND_F1
9178 || idesc->operands[i] == IA64_OPND_F2
9179 || idesc->operands[i] == IA64_OPND_F3
9180 || idesc->operands[i] == IA64_OPND_F4)
9181 {
9182 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9183 if (reg >= min && reg <= max)
9184 {
9185 specs[count++] = tmpl;
9186 }
9187 }
9188 }
9189 }
9190 else if (note == 9)
9191 {
9192 int min = dep->regindex == PSR_MFL ? 2 : 32;
9193 int max = dep->regindex == PSR_MFL ? 31 : 127;
9194 /* mfh is read on writes to FR32-127; mfl is read on writes to
9195 FR2-31 */
9196 for (i = 0; i < idesc->num_outputs; i++)
9197 {
9198 if (idesc->operands[i] == IA64_OPND_F1)
9199 {
9200 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9201 if (reg >= min && reg <= max)
9202 {
9203 specs[count++] = tmpl;
9204 }
9205 }
9206 }
9207 }
9208 else if (note == 10)
9209 {
9210 for (i = 0; i < NELEMS (idesc->operands); i++)
9211 {
9212 if (idesc->operands[i] == IA64_OPND_R1
9213 || idesc->operands[i] == IA64_OPND_R2
9214 || idesc->operands[i] == IA64_OPND_R3)
9215 {
9216 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9217 if (regno >= 16 && regno <= 31)
9218 {
9219 specs[count++] = tmpl;
9220 }
9221 }
9222 }
9223 }
9224 else
9225 {
9226 UNHANDLED;
9227 }
9228 break;
9229
9230 case IA64_RS_AR_FPSR:
9231 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9232 {
9233 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9234 if (regno == AR_FPSR)
9235 {
9236 specs[count++] = tmpl;
9237 }
9238 }
9239 else
9240 {
9241 specs[count++] = tmpl;
9242 }
9243 break;
9244
9245 case IA64_RS_ARX:
9246 /* Handle all AR[REG] resources */
9247 if (note == 0 || note == 1)
9248 {
9249 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9250 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9251 && regno == dep->regindex)
9252 {
9253 specs[count++] = tmpl;
9254 }
9255 /* other AR[REG] resources may be affected by AR accesses */
9256 else if (idesc->operands[0] == IA64_OPND_AR3)
9257 {
9258 /* AR[] writes */
9259 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9260 switch (dep->regindex)
9261 {
9262 default:
9263 break;
9264 case AR_BSP:
9265 case AR_RNAT:
9266 if (regno == AR_BSPSTORE)
9267 {
9268 specs[count++] = tmpl;
9269 }
9270 /* Fall through. */
9271 case AR_RSC:
9272 if (!rsrc_write &&
9273 (regno == AR_BSPSTORE
9274 || regno == AR_RNAT))
9275 {
9276 specs[count++] = tmpl;
9277 }
9278 break;
9279 }
9280 }
9281 else if (idesc->operands[1] == IA64_OPND_AR3)
9282 {
9283 /* AR[] reads */
9284 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9285 switch (dep->regindex)
9286 {
9287 default:
9288 break;
9289 case AR_RSC:
9290 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9291 {
9292 specs[count++] = tmpl;
9293 }
9294 break;
9295 }
9296 }
9297 else
9298 {
9299 specs[count++] = tmpl;
9300 }
9301 }
9302 else
9303 {
9304 UNHANDLED;
9305 }
9306 break;
9307
9308 case IA64_RS_CRX:
9309 /* Handle all CR[REG] resources.
9310 ??? FIXME: Rule 17 isn't really handled correctly. */
9311 if (note == 0 || note == 1 || note == 17)
9312 {
9313 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9314 {
9315 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9316 if (regno == dep->regindex)
9317 {
9318 specs[count++] = tmpl;
9319 }
9320 else if (!rsrc_write)
9321 {
9322 /* Reads from CR[IVR] affect other resources. */
9323 if (regno == CR_IVR)
9324 {
9325 if ((dep->regindex >= CR_IRR0
9326 && dep->regindex <= CR_IRR3)
9327 || dep->regindex == CR_TPR)
9328 {
9329 specs[count++] = tmpl;
9330 }
9331 }
9332 }
9333 }
9334 else
9335 {
9336 specs[count++] = tmpl;
9337 }
9338 }
9339 else
9340 {
9341 UNHANDLED;
9342 }
9343 break;
9344
9345 case IA64_RS_INSERVICE:
9346 /* look for write of EOI (67) or read of IVR (65) */
9347 if ((idesc->operands[0] == IA64_OPND_CR3
9348 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9349 || (idesc->operands[1] == IA64_OPND_CR3
9350 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9351 {
9352 specs[count++] = tmpl;
9353 }
9354 break;
9355
9356 case IA64_RS_GR0:
9357 if (note == 1)
9358 {
9359 specs[count++] = tmpl;
9360 }
9361 else
9362 {
9363 UNHANDLED;
9364 }
9365 break;
9366
9367 case IA64_RS_CFM:
9368 if (note != 2)
9369 {
9370 specs[count++] = tmpl;
9371 }
9372 else
9373 {
9374 /* Check if any of the registers accessed are in the rotating region.
9375 mov to/from pr accesses CFM only when qp_regno is in the rotating
9376 region */
9377 for (i = 0; i < NELEMS (idesc->operands); i++)
9378 {
9379 if (idesc->operands[i] == IA64_OPND_R1
9380 || idesc->operands[i] == IA64_OPND_R2
9381 || idesc->operands[i] == IA64_OPND_R3)
9382 {
9383 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9384 /* Assumes that md.rot.num_regs is always valid */
9385 if (md.rot.num_regs > 0
9386 && num > 31
9387 && num < 31 + md.rot.num_regs)
9388 {
9389 specs[count] = tmpl;
9390 specs[count++].specific = 0;
9391 }
9392 }
9393 else if (idesc->operands[i] == IA64_OPND_F1
9394 || idesc->operands[i] == IA64_OPND_F2
9395 || idesc->operands[i] == IA64_OPND_F3
9396 || idesc->operands[i] == IA64_OPND_F4)
9397 {
9398 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9399 if (num > 31)
9400 {
9401 specs[count] = tmpl;
9402 specs[count++].specific = 0;
9403 }
9404 }
9405 else if (idesc->operands[i] == IA64_OPND_P1
9406 || idesc->operands[i] == IA64_OPND_P2)
9407 {
9408 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9409 if (num > 15)
9410 {
9411 specs[count] = tmpl;
9412 specs[count++].specific = 0;
9413 }
9414 }
9415 }
9416 if (CURR_SLOT.qp_regno > 15)
9417 {
9418 specs[count] = tmpl;
9419 specs[count++].specific = 0;
9420 }
9421 }
9422 break;
9423
9424 /* This is the same as IA64_RS_PRr, except simplified to account for
9425 the fact that there is only one register. */
9426 case IA64_RS_PR63:
9427 if (note == 0)
9428 {
9429 specs[count++] = tmpl;
9430 }
9431 else if (note == 7)
9432 {
9433 valueT mask = 0;
9434 if (idesc->operands[2] == IA64_OPND_IMM17)
9435 mask = CURR_SLOT.opnd[2].X_add_number;
9436 if (mask & ((valueT) 1 << 63))
9437 specs[count++] = tmpl;
9438 }
9439 else if (note == 11)
9440 {
9441 if ((idesc->operands[0] == IA64_OPND_P1
9442 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9443 || (idesc->operands[1] == IA64_OPND_P2
9444 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9445 {
9446 specs[count++] = tmpl;
9447 }
9448 }
9449 else if (note == 12)
9450 {
9451 if (CURR_SLOT.qp_regno == 63)
9452 {
9453 specs[count++] = tmpl;
9454 }
9455 }
9456 else if (note == 1)
9457 {
9458 if (rsrc_write)
9459 {
9460 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9461 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9462 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9463 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9464
9465 if (p1 == 63
9466 && (idesc->operands[0] == IA64_OPND_P1
9467 || idesc->operands[0] == IA64_OPND_P2))
9468 {
9469 specs[count] = tmpl;
9470 specs[count++].cmp_type =
9471 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9472 }
9473 if (p2 == 63
9474 && (idesc->operands[1] == IA64_OPND_P1
9475 || idesc->operands[1] == IA64_OPND_P2))
9476 {
9477 specs[count] = tmpl;
9478 specs[count++].cmp_type =
9479 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9480 }
9481 }
9482 else
9483 {
9484 if (CURR_SLOT.qp_regno == 63)
9485 {
9486 specs[count++] = tmpl;
9487 }
9488 }
9489 }
9490 else
9491 {
9492 UNHANDLED;
9493 }
9494 break;
9495
9496 case IA64_RS_RSE:
9497 /* FIXME: we can identify some individual RSE-written resources, but RSE
9498 read resources have not yet been completely identified, so for now
9499 treat RSE as a single resource. */
9500 if (startswith (idesc->name, "mov"))
9501 {
9502 if (rsrc_write)
9503 {
9504 if (idesc->operands[0] == IA64_OPND_AR3
9505 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9506 {
9507 specs[count++] = tmpl;
9508 }
9509 }
9510 else
9511 {
9512 if (idesc->operands[0] == IA64_OPND_AR3)
9513 {
9514 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9515 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9516 {
9517 specs[count++] = tmpl;
9518 }
9519 }
9520 else if (idesc->operands[1] == IA64_OPND_AR3)
9521 {
9522 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9523 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9524 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9525 {
9526 specs[count++] = tmpl;
9527 }
9528 }
9529 }
9530 }
9531 else
9532 {
9533 specs[count++] = tmpl;
9534 }
9535 break;
9536
9537 case IA64_RS_ANY:
9538 /* FIXME -- do any of these need to be non-specific? */
9539 specs[count++] = tmpl;
9540 break;
9541
9542 default:
9543 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9544 break;
9545 }
9546
9547 return count;
9548 }
9549
9550 /* Clear branch flags on marked resources. This breaks the link between the
9551 QP of the marking instruction and a subsequent branch on the same QP. */
9552
9553 static void
9554 clear_qp_branch_flag (valueT mask)
9555 {
9556 int i;
9557 for (i = 0; i < regdepslen; i++)
9558 {
9559 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9560 if ((bit & mask) != 0)
9561 {
9562 regdeps[i].link_to_qp_branch = 0;
9563 }
9564 }
9565 }
9566
9567 /* MASK contains exactly two PRs, which are mutually exclusive. Remove
9568 any mutexes which contain one of the PRs and create new ones when
9569 needed. */
9570
9571 static int
9572 update_qp_mutex (valueT mask)
9573 {
9574 int i;
9575 int add = 0;
9576
9577 i = 0;
9578 while (i < qp_mutexeslen)
9579 {
9580 if ((qp_mutexes[i].prmask & mask) != 0)
9581 {
9582 /* If it destroys and creates the same mutex, do nothing. */
9583 if (qp_mutexes[i].prmask == mask
9584 && qp_mutexes[i].path == md.path)
9585 {
9586 i++;
9587 add = -1;
9588 }
9589 else
9590 {
9591 int keep = 0;
9592
9593 if (md.debug_dv)
9594 {
9595 fprintf (stderr, " Clearing mutex relation");
9596 print_prmask (qp_mutexes[i].prmask);
9597 fprintf (stderr, "\n");
9598 }
9599
9600 /* Deal with an old mutex containing three or more PRs only if
9601 the new mutex is on the same execution path as it.
9602
9603 FIXME: The 3+ mutex support is incomplete.
9604 dot_pred_rel () may be a better place to fix it. */
9605 if (qp_mutexes[i].path == md.path)
9606 {
9607 /* If it is a proper subset of the mutex, create a
9608 new mutex. */
9609 if (add == 0
9610 && (qp_mutexes[i].prmask & mask) == mask)
9611 add = 1;
9612
9613 qp_mutexes[i].prmask &= ~mask;
9614 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9615 {
9616 /* Keep the modified mutex if there is more
9617 than one PR left. */
9618 keep = 1;
9619 i++;
9620 }
9621 }
9622
9623 if (keep == 0)
9624 /* Remove the mutex. */
9625 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9626 }
9627 }
9628 else
9629 ++i;
9630 }
9631
9632 if (add == 1)
9633 add_qp_mutex (mask);
9634
9635 return add;
9636 }
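/* Illustrative sketch: given a mutex declared over three PRs,

	.pred.rel.mutex p6, p7, p8

   a later compare writing p6 and p7 (a proper subset, on the same path)
   removes those two PRs from the relation above, which leaves only p8 and
   so drops the old relation, and then records p6/p7 as a new mutex.  */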
9637
9638 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9639
9640 Any change to a PR clears the mutex relations which include that PR. */
9641
9642 static void
9643 clear_qp_mutex (valueT mask)
9644 {
9645 int i;
9646
9647 i = 0;
9648 while (i < qp_mutexeslen)
9649 {
9650 if ((qp_mutexes[i].prmask & mask) != 0)
9651 {
9652 if (md.debug_dv)
9653 {
9654 fprintf (stderr, " Clearing mutex relation");
9655 print_prmask (qp_mutexes[i].prmask);
9656 fprintf (stderr, "\n");
9657 }
9658 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9659 }
9660 else
9661 ++i;
9662 }
9663 }
9664
9665 /* Clear implies relations which contain PRs in the given masks.
9666 P1_MASK indicates the source of the implies relation, while P2_MASK
9667 indicates the implied PR. */
9668
9669 static void
9670 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9671 {
9672 int i;
9673
9674 i = 0;
9675 while (i < qp_implieslen)
9676 {
9677 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9678 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9679 {
9680 if (md.debug_dv)
9681 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9682 qp_implies[i].p1, qp_implies[i].p2);
9683 qp_implies[i] = qp_implies[--qp_implieslen];
9684 }
9685 else
9686 ++i;
9687 }
9688 }
9689
9690 /* Add the PRs specified to the list of implied relations. */
9691
9692 static void
9693 add_qp_imply (int p1, int p2)
9694 {
9695 valueT mask;
9696 valueT bit;
9697 int i;
9698
9699 /* p0 is not meaningful here. */
9700 if (p1 == 0 || p2 == 0)
9701 abort ();
9702
9703 if (p1 == p2)
9704 return;
9705
9706 /* If it exists already, ignore it. */
9707 for (i = 0; i < qp_implieslen; i++)
9708 {
9709 if (qp_implies[i].p1 == p1
9710 && qp_implies[i].p2 == p2
9711 && qp_implies[i].path == md.path
9712 && !qp_implies[i].p2_branched)
9713 return;
9714 }
9715
9716 if (qp_implieslen == qp_impliestotlen)
9717 {
9718 qp_impliestotlen += 20;
9719 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9720 }
9721 if (md.debug_dv)
9722 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9723 qp_implies[qp_implieslen].p1 = p1;
9724 qp_implies[qp_implieslen].p2 = p2;
9725 qp_implies[qp_implieslen].path = md.path;
9726 qp_implies[qp_implieslen++].p2_branched = 0;
9727
9728 /* Add in the implied transitive relations; for everything that p2 implies,
9729 make p1 imply that, too; for everything that implies p1, make it imply p2
9730 as well. */
9731 for (i = 0; i < qp_implieslen; i++)
9732 {
9733 if (qp_implies[i].p1 == p2)
9734 add_qp_imply (p1, qp_implies[i].p2);
9735 if (qp_implies[i].p2 == p1)
9736 add_qp_imply (qp_implies[i].p1, p2);
9737 }
9738 /* Add in mutex relations implied by this implies relation; for each mutex
9739 relation containing p2, duplicate it and replace p2 with p1. */
9740 bit = (valueT) 1 << p1;
9741 mask = (valueT) 1 << p2;
9742 for (i = 0; i < qp_mutexeslen; i++)
9743 {
9744 if (qp_mutexes[i].prmask & mask)
9745 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9746 }
9747 }
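/* Illustrative sketch: an unconditional compare under a qualifying
   predicate,

	(p3) cmp.eq.unc p1, p2 = r5, r6

   can only leave p1 or p2 set to 1 if p3 was true, so note_register_values ()
   uses this function to record that PR1 implies PR3 and PR2 implies PR3
   (p1 and p2 are also recorded as mutex).  */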
9748
9749 /* Add the PRs specified in the mask to the mutex list; this means that at
9750 most one of the PRs can be true at any time. PR0 should never be included in
9751 the mask. */
9752
9753 static void
9754 add_qp_mutex (valueT mask)
9755 {
9756 if (mask & 0x1)
9757 abort ();
9758
9759 if (qp_mutexeslen == qp_mutexestotlen)
9760 {
9761 qp_mutexestotlen += 20;
9762 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9763 }
9764 if (md.debug_dv)
9765 {
9766 fprintf (stderr, " Registering mutex on");
9767 print_prmask (mask);
9768 fprintf (stderr, "\n");
9769 }
9770 qp_mutexes[qp_mutexeslen].path = md.path;
9771 qp_mutexes[qp_mutexeslen++].prmask = mask;
9772 }
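/* Illustrative sketch: a simple compare issued with qp == p0,

	cmp.eq p6, p7 = r8, r9

   makes PR6 and PR7 mutex, so a later pair of insns predicated on p6 and
   p7 may write the same register without a reported WAW violation.  */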
9773
9774 static int
9775 has_suffix_p (const char *name, const char *suffix)
9776 {
9777 size_t namelen = strlen (name);
9778 size_t sufflen = strlen (suffix);
9779
9780 if (namelen <= sufflen)
9781 return 0;
9782 return strcmp (name + namelen - sufflen, suffix) == 0;
9783 }
9784
9785 static void
9786 clear_register_values (void)
9787 {
9788 int i;
9789 if (md.debug_dv)
9790 fprintf (stderr, " Clearing register values\n");
9791 for (i = 1; i < NELEMS (gr_values); i++)
9792 gr_values[i].known = 0;
9793 }
9794
9795 /* Keep track of register values/changes which affect DV tracking.
9796
9797 Optimization note: we should add a flag to the classes of insns that
9798 otherwise have to be identified by examining a group of strings. */
9799
9800 static void
9801 note_register_values (struct ia64_opcode *idesc)
9802 {
9803 valueT qp_changemask = 0;
9804 int i;
9805
9806 /* Invalidate values for registers being written to. */
9807 for (i = 0; i < idesc->num_outputs; i++)
9808 {
9809 if (idesc->operands[i] == IA64_OPND_R1
9810 || idesc->operands[i] == IA64_OPND_R2
9811 || idesc->operands[i] == IA64_OPND_R3)
9812 {
9813 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9814 if (regno > 0 && regno < NELEMS (gr_values))
9815 gr_values[regno].known = 0;
9816 }
9817 else if (idesc->operands[i] == IA64_OPND_R3_2)
9818 {
9819 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9820 if (regno > 0 && regno < 4)
9821 gr_values[regno].known = 0;
9822 }
9823 else if (idesc->operands[i] == IA64_OPND_P1
9824 || idesc->operands[i] == IA64_OPND_P2)
9825 {
9826 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9827 qp_changemask |= (valueT) 1 << regno;
9828 }
9829 else if (idesc->operands[i] == IA64_OPND_PR)
9830 {
9831 if (idesc->operands[2] & (valueT) 0x10000)
9832 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9833 else
9834 qp_changemask = idesc->operands[2];
9835 break;
9836 }
9837 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9838 {
9839 if (idesc->operands[1] & ((valueT) 1 << 43))
9840 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9841 else
9842 qp_changemask = idesc->operands[1];
9843 qp_changemask &= ~(valueT) 0xFFFF;
9844 break;
9845 }
9846 }
9847
9848 /* Always clear qp branch flags on any PR change. */
9849 /* FIXME there may be exceptions for certain compares. */
9850 clear_qp_branch_flag (qp_changemask);
9851
9852 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9853 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9854 {
9855 qp_changemask |= ~(valueT) 0xFFFF;
9856 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9857 {
9858 for (i = 32; i < 32 + md.rot.num_regs; i++)
9859 gr_values[i].known = 0;
9860 }
9861 clear_qp_mutex (qp_changemask);
9862 clear_qp_implies (qp_changemask, qp_changemask);
9863 }
9864 /* After a call, all register values are undefined, except those marked
9865 as "safe". */
9866 else if (startswith (idesc->name, "br.call")
9867 || startswith (idesc->name, "brl.call"))
9868 {
9869 /* FIXME keep GR values which are marked as "safe_across_calls" */
9870 clear_register_values ();
9871 clear_qp_mutex (~qp_safe_across_calls);
9872 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9873 clear_qp_branch_flag (~qp_safe_across_calls);
9874 }
9875 else if (is_interruption_or_rfi (idesc)
9876 || is_taken_branch (idesc))
9877 {
9878 clear_register_values ();
9879 clear_qp_mutex (~(valueT) 0);
9880 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9881 }
9882 /* Look for mutex and implies relations. */
9883 else if ((idesc->operands[0] == IA64_OPND_P1
9884 || idesc->operands[0] == IA64_OPND_P2)
9885 && (idesc->operands[1] == IA64_OPND_P1
9886 || idesc->operands[1] == IA64_OPND_P2))
9887 {
9888 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9889 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9890 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9891 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9892
9893 /* If both PRs are PR0, we can't really do anything. */
9894 if (p1 == 0 && p2 == 0)
9895 {
9896 if (md.debug_dv)
9897 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9898 }
9899 /* In general, clear mutexes and implies which include P1 or P2,
9900 with the following exceptions. */
9901 else if (has_suffix_p (idesc->name, ".or.andcm")
9902 || has_suffix_p (idesc->name, ".and.orcm"))
9903 {
9904 clear_qp_implies (p2mask, p1mask);
9905 }
9906 else if (has_suffix_p (idesc->name, ".andcm")
9907 || has_suffix_p (idesc->name, ".and"))
9908 {
9909 clear_qp_implies (0, p1mask | p2mask);
9910 }
9911 else if (has_suffix_p (idesc->name, ".orcm")
9912 || has_suffix_p (idesc->name, ".or"))
9913 {
9914 clear_qp_mutex (p1mask | p2mask);
9915 clear_qp_implies (p1mask | p2mask, 0);
9916 }
9917 else
9918 {
9919 int added = 0;
9920
9921 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9922
9923 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9924 if (p1 == 0 || p2 == 0)
9925 clear_qp_mutex (p1mask | p2mask);
9926 else
9927 added = update_qp_mutex (p1mask | p2mask);
9928
9929 if (CURR_SLOT.qp_regno == 0
9930 || has_suffix_p (idesc->name, ".unc"))
9931 {
9932 if (added == 0 && p1 && p2)
9933 add_qp_mutex (p1mask | p2mask);
9934 if (CURR_SLOT.qp_regno != 0)
9935 {
9936 if (p1)
9937 add_qp_imply (p1, CURR_SLOT.qp_regno);
9938 if (p2)
9939 add_qp_imply (p2, CURR_SLOT.qp_regno);
9940 }
9941 }
9942 }
9943 }
9944 /* Look for mov imm insns into GRs. */
9945 else if (idesc->operands[0] == IA64_OPND_R1
9946 && (idesc->operands[1] == IA64_OPND_IMM22
9947 || idesc->operands[1] == IA64_OPND_IMMU64)
9948 && CURR_SLOT.opnd[1].X_op == O_constant
9949 && (strcmp (idesc->name, "mov") == 0
9950 || strcmp (idesc->name, "movl") == 0))
9951 {
9952 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9953 if (regno > 0 && regno < NELEMS (gr_values))
9954 {
9955 gr_values[regno].known = 1;
9956 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9957 gr_values[regno].path = md.path;
9958 if (md.debug_dv)
9959 {
9960 fprintf (stderr, " Know gr%d = ", regno);
9961 fprintf_vma (stderr, gr_values[regno].value);
9962 fputs ("\n", stderr);
9963 }
9964 }
9965 }
9966 /* Look for dep.z imm insns. */
9967 else if (idesc->operands[0] == IA64_OPND_R1
9968 && idesc->operands[1] == IA64_OPND_IMM8
9969 && strcmp (idesc->name, "dep.z") == 0)
9970 {
9971 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9972 if (regno > 0 && regno < NELEMS (gr_values))
9973 {
9974 valueT value = CURR_SLOT.opnd[1].X_add_number;
9975
9976 if (CURR_SLOT.opnd[3].X_add_number < 64)
9977 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9978 value <<= CURR_SLOT.opnd[2].X_add_number;
9979 gr_values[regno].known = 1;
9980 gr_values[regno].value = value;
9981 gr_values[regno].path = md.path;
9982 if (md.debug_dv)
9983 {
9984 fprintf (stderr, " Know gr%d = ", regno);
9985 fprintf_vma (stderr, gr_values[regno].value);
9986 fputs ("\n", stderr);
9987 }
9988 }
9989 }
9990 else
9991 {
9992 clear_qp_mutex (qp_changemask);
9993 clear_qp_implies (qp_changemask, qp_changemask);
9994 }
9995 }
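/* Illustrative sketch: tracking a known GR value lets indirect register
   file accesses be narrowed to a specific index,

	mov r2 = 5
	;;
	mov pmc[r2] = r8	// marked as PMC[5] specifically

   whereas an unknown r2 would force a non-specific mark that conflicts
   with every PMC access.  */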
9996
9997 /* Return whether the given predicate registers are currently mutex. */
9998
9999 static int
10000 qp_mutex (int p1, int p2, int path)
10001 {
10002 int i;
10003 valueT mask;
10004
10005 if (p1 != p2)
10006 {
10007 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10008 for (i = 0; i < qp_mutexeslen; i++)
10009 {
10010 if (qp_mutexes[i].path >= path
10011 && (qp_mutexes[i].prmask & mask) == mask)
10012 return 1;
10013 }
10014 }
10015 return 0;
10016 }
10017
10018 /* Return whether the given resource is in the given insn's list of chks.
10019 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10020 conflict. */
10021
10022 static int
10023 resources_match (struct rsrc *rs,
10024 struct ia64_opcode *idesc,
10025 int note,
10026 int qp_regno,
10027 int path)
10028 {
10029 struct rsrc specs[MAX_SPECS];
10030 int count;
10031
10032 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10033 we don't need to check. One exception is note 11, which indicates that
10034 target predicates are written regardless of PR[qp]. */
10035 if (qp_mutex (rs->qp_regno, qp_regno, path)
10036 && note != 11)
10037 return 0;
10038
10039 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10040 while (count-- > 0)
10041 {
10042 /* UNAT checking is a bit more specific than for other resources. */
10043 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10044 && specs[count].mem_offset.hint
10045 && rs->mem_offset.hint)
10046 {
10047 if (rs->mem_offset.base == specs[count].mem_offset.base)
10048 {
10049 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10050 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10051 return 1;
10052 else
10053 continue;
10054 }
10055 }
10056
10057 /* Skip apparent PR write conflicts where both writes are an AND or both
10058 writes are an OR. */
10059 if (rs->dependency->specifier == IA64_RS_PR
10060 || rs->dependency->specifier == IA64_RS_PRr
10061 || rs->dependency->specifier == IA64_RS_PR63)
10062 {
10063 if (specs[count].cmp_type != CMP_NONE
10064 && specs[count].cmp_type == rs->cmp_type)
10065 {
10066 if (md.debug_dv)
10067 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10068 dv_mode[rs->dependency->mode],
10069 rs->dependency->specifier != IA64_RS_PR63 ?
10070 specs[count].index : 63);
10071 continue;
10072 }
10073 if (md.debug_dv)
10074 fprintf (stderr,
10075 " %s on parallel compare conflict %s vs %s on PR%d\n",
10076 dv_mode[rs->dependency->mode],
10077 dv_cmp_type[rs->cmp_type],
10078 dv_cmp_type[specs[count].cmp_type],
10079 rs->dependency->specifier != IA64_RS_PR63 ?
10080 specs[count].index : 63);
10081
10082 }
10083
10084 /* If either resource is not specific, conservatively assume a conflict
10085 */
10086 if (!specs[count].specific || !rs->specific)
10087 return 2;
10088 else if (specs[count].index == rs->index)
10089 return 1;
10090 }
10091
10092 return 0;
10093 }
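/* Illustrative sketch of the parallel-compare exception above: two
   or.andcm compares targeting the same predicates in one group,

	cmp.eq.or.andcm p6, p7 = r8, r9
	cmp.eq.or.andcm p6, p7 = r10, r11

   write PR6 in OR form and PR7 in ANDCM form in both insns, so the
   matching cmp_type values keep this from being reported as a WAW
   violation.  */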
10094
10095 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10096 insert a stop to create the break. Update all resource dependencies
10097 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10098 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10099 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10100 instruction. */
10101
10102 static void
10103 insn_group_break (int insert_stop, int qp_regno, int save_current)
10104 {
10105 int i;
10106
10107 if (insert_stop && md.num_slots_in_use > 0)
10108 PREV_SLOT.end_of_insn_group = 1;
10109
10110 if (md.debug_dv)
10111 {
10112 fprintf (stderr, " Insn group break%s",
10113 (insert_stop ? " (w/stop)" : ""));
10114 if (qp_regno != 0)
10115 fprintf (stderr, " effective for QP=%d", qp_regno);
10116 fprintf (stderr, "\n");
10117 }
10118
10119 i = 0;
10120 while (i < regdepslen)
10121 {
10122 const struct ia64_dependency *dep = regdeps[i].dependency;
10123
10124 if (qp_regno != 0
10125 && regdeps[i].qp_regno != qp_regno)
10126 {
10127 ++i;
10128 continue;
10129 }
10130
10131 if (save_current
10132 && CURR_SLOT.src_file == regdeps[i].file
10133 && CURR_SLOT.src_line == regdeps[i].line)
10134 {
10135 ++i;
10136 continue;
10137 }
10138
10139 /* clear dependencies which are automatically cleared by a stop, or
10140 those that have reached the appropriate state of insn serialization */
10141 if (dep->semantics == IA64_DVS_IMPLIED
10142 || dep->semantics == IA64_DVS_IMPLIEDF
10143 || regdeps[i].insn_srlz == STATE_SRLZ)
10144 {
10145 print_dependency ("Removing", i);
10146 regdeps[i] = regdeps[--regdepslen];
10147 }
10148 else
10149 {
10150 if (dep->semantics == IA64_DVS_DATA
10151 || dep->semantics == IA64_DVS_INSTR
10152 || dep->semantics == IA64_DVS_SPECIFIC)
10153 {
10154 if (regdeps[i].insn_srlz == STATE_NONE)
10155 regdeps[i].insn_srlz = STATE_STOP;
10156 if (regdeps[i].data_srlz == STATE_NONE)
10157 regdeps[i].data_srlz = STATE_STOP;
10158 }
10159 ++i;
10160 }
10161 }
10162 }
10163
10164 /* Add the given resource usage spec to the list of active dependencies. */
10165
10166 static void
10167 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10168 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10169 struct rsrc *spec,
10170 int depind,
10171 int path)
10172 {
10173 if (regdepslen == regdepstotlen)
10174 {
10175 regdepstotlen += 20;
10176 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10177 }
10178
10179 regdeps[regdepslen] = *spec;
10180 regdeps[regdepslen].depind = depind;
10181 regdeps[regdepslen].path = path;
10182 regdeps[regdepslen].file = CURR_SLOT.src_file;
10183 regdeps[regdepslen].line = CURR_SLOT.src_line;
10184
10185 print_dependency ("Adding", regdepslen);
10186
10187 ++regdepslen;
10188 }
10189
10190 static void
10191 print_dependency (const char *action, int depind)
10192 {
10193 if (md.debug_dv)
10194 {
10195 fprintf (stderr, " %s %s '%s'",
10196 action, dv_mode[(regdeps[depind].dependency)->mode],
10197 (regdeps[depind].dependency)->name);
10198 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10199 fprintf (stderr, " (%d)", regdeps[depind].index);
10200 if (regdeps[depind].mem_offset.hint)
10201 {
10202 fputs (" ", stderr);
10203 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10204 fputs ("+", stderr);
10205 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10206 }
10207 fprintf (stderr, "\n");
10208 }
10209 }
10210
10211 static void
10212 instruction_serialization (void)
10213 {
10214 int i;
10215 if (md.debug_dv)
10216 fprintf (stderr, " Instruction serialization\n");
10217 for (i = 0; i < regdepslen; i++)
10218 if (regdeps[i].insn_srlz == STATE_STOP)
10219 regdeps[i].insn_srlz = STATE_SRLZ;
10220 }
10221
10222 static void
10223 data_serialization (void)
10224 {
10225 int i = 0;
10226 if (md.debug_dv)
10227 fprintf (stderr, " Data serialization\n");
10228 while (i < regdepslen)
10229 {
10230 if (regdeps[i].data_srlz == STATE_STOP
10231 /* Note: as of 991210, all "other" dependencies are cleared by a
10232 data serialization. This might change with new tables */
10233 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10234 {
10235 print_dependency ("Removing", i);
10236 regdeps[i] = regdeps[--regdepslen];
10237 }
10238 else
10239 ++i;
10240 }
10241 }
10242
10243 /* Insert stops and serializations as needed to avoid DVs. */
10244
10245 static void
10246 remove_marked_resource (struct rsrc *rs)
10247 {
10248 switch (rs->dependency->semantics)
10249 {
10250 case IA64_DVS_SPECIFIC:
10251 if (md.debug_dv)
10252 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10253 /* Fall through. */
10254 case IA64_DVS_INSTR:
10255 if (md.debug_dv)
10256 fprintf (stderr, "Inserting instr serialization\n");
10257 if (rs->insn_srlz < STATE_STOP)
10258 insn_group_break (1, 0, 0);
10259 if (rs->insn_srlz < STATE_SRLZ)
10260 {
10261 struct slot oldslot = CURR_SLOT;
10262 /* Manually jam a srlz.i insn into the stream */
10263 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10264 CURR_SLOT.user_template = -1;
10265 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10266 instruction_serialization ();
10267 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10268 if (++md.num_slots_in_use >= NUM_SLOTS)
10269 emit_one_bundle ();
10270 CURR_SLOT = oldslot;
10271 }
10272 insn_group_break (1, 0, 0);
10273 break;
10274 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10275 "other" types of DV are eliminated
10276 by a data serialization */
10277 case IA64_DVS_DATA:
10278 if (md.debug_dv)
10279 fprintf (stderr, "Inserting data serialization\n");
10280 if (rs->data_srlz < STATE_STOP)
10281 insn_group_break (1, 0, 0);
10282 {
10283 struct slot oldslot = CURR_SLOT;
10284 /* Manually jam a srlz.d insn into the stream */
10285 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10286 CURR_SLOT.user_template = -1;
10287 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10288 data_serialization ();
10289 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10290 if (++md.num_slots_in_use >= NUM_SLOTS)
10291 emit_one_bundle ();
10292 CURR_SLOT = oldslot;
10293 }
10294 break;
10295 case IA64_DVS_IMPLIED:
10296 case IA64_DVS_IMPLIEDF:
10297 if (md.debug_dv)
10298 fprintf (stderr, "Inserting stop\n");
10299 insn_group_break (1, 0, 0);
10300 break;
10301 default:
10302 break;
10303 }
10304 }
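/* Illustrative sketch (the specific resource is immaterial): when a marked
   write is followed by a dependent read whose semantics are IA64_DVS_DATA,
   automatic mode turns

	<writer insn>
	<dependent reader>

   into roughly

	<writer insn>
	;;
	srlz.d
	<dependent reader>

   For IA64_DVS_INSTR an srlz.i plus a trailing stop is jammed in instead.  */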
10305
10306 /* Check the resources used by the given opcode against the current dependency
10307 list.
10308
10309 The check is run once for each execution path encountered. In this case,
10310 a unique execution path is the sequence of instructions following a code
10311 entry point, e.g. the following has three execution paths, one starting
10312 at L0, one at L1, and one at L2.
10313
10314 L0: nop
10315 L1: add
10316 L2: add
10317 br.ret
10318 */
10319
10320 static void
10321 check_dependencies (struct ia64_opcode *idesc)
10322 {
10323 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10324 int path;
10325 int i;
10326
10327 /* Note that the number of marked resources may change within the
10328 loop if in auto mode. */
10329 i = 0;
10330 while (i < regdepslen)
10331 {
10332 struct rsrc *rs = &regdeps[i];
10333 const struct ia64_dependency *dep = rs->dependency;
10334 int chkind;
10335 int note;
10336 int start_over = 0;
10337
10338 if (dep->semantics == IA64_DVS_NONE
10339 || (chkind = depends_on (rs->depind, idesc)) == -1)
10340 {
10341 ++i;
10342 continue;
10343 }
10344
10345 note = NOTE (opdeps->chks[chkind]);
10346
10347 /* Check this resource against each execution path seen thus far. */
10348 for (path = 0; path <= md.path; path++)
10349 {
10350 int matchtype;
10351
10352 /* If the dependency wasn't on the path being checked, ignore it. */
10353 if (rs->path < path)
10354 continue;
10355
10356 /* If the QP for this insn implies a QP which has branched, don't
10357 bother checking. Ed. NOTE: I don't think this check is terribly
10358 useful; what's the point of generating code which will only be
10359 reached if its QP is zero?
10360 This code was specifically inserted to handle the following code,
10361 based on notes from Intel's DV checking code, where p1 implies p2.
10362
10363 mov r4 = 2
10364 (p2) br.cond L
10365 (p1) mov r4 = 7
10366 */
10367 if (CURR_SLOT.qp_regno != 0)
10368 {
10369 int skip = 0;
10370 int implies;
10371 for (implies = 0; implies < qp_implieslen; implies++)
10372 {
10373 if (qp_implies[implies].path >= path
10374 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10375 && qp_implies[implies].p2_branched)
10376 {
10377 skip = 1;
10378 break;
10379 }
10380 }
10381 if (skip)
10382 continue;
10383 }
10384
10385 if ((matchtype = resources_match (rs, idesc, note,
10386 CURR_SLOT.qp_regno, path)) != 0)
10387 {
10388 char msg[1024];
10389 char pathmsg[256] = "";
10390 char indexmsg[256] = "";
10391 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10392
10393 if (path != 0)
10394 snprintf (pathmsg, sizeof (pathmsg),
10395 " when entry is at label '%s'",
10396 md.entry_labels[path - 1]);
10397 if (matchtype == 1 && rs->index >= 0)
10398 snprintf (indexmsg, sizeof (indexmsg),
10399 ", specific resource number is %d",
10400 rs->index);
10401 snprintf (msg, sizeof (msg),
10402 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10403 idesc->name,
10404 (certain ? "violates" : "may violate"),
10405 dv_mode[dep->mode], dep->name,
10406 dv_sem[dep->semantics],
10407 pathmsg, indexmsg);
10408
10409 if (md.explicit_mode)
10410 {
10411 as_warn ("%s", msg);
10412 if (path < md.path)
10413 as_warn (_("Only the first path encountering the conflict is reported"));
10414 as_warn_where (rs->file, rs->line,
10415 _("This is the location of the conflicting usage"));
10416 /* Don't bother checking other paths, to avoid duplicating
10417 the same warning */
10418 break;
10419 }
10420 else
10421 {
10422 if (md.debug_dv)
10423 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10424
10425 remove_marked_resource (rs);
10426
10427 /* since the set of dependencies has changed, start over */
10428 /* FIXME -- since we're removing dvs as we go, we
10429 probably don't really need to start over... */
10430 start_over = 1;
10431 break;
10432 }
10433 }
10434 }
10435 if (start_over)
10436 i = 0;
10437 else
10438 ++i;
10439 }
10440 }
10441
10442 /* Register new dependencies based on the given opcode. */
10443
10444 static void
10445 mark_resources (struct ia64_opcode *idesc)
10446 {
10447 int i;
10448 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10449 int add_only_qp_reads = 0;
10450
10451 /* A conditional branch only uses its resources if it is taken; if it is
10452 taken, we stop following that path, and if it is not taken, we register
10453 only its QP reads. The other branch types effectively *always* write
10454 their resources. */
10455 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10456 {
10457 add_only_qp_reads = 1;
10458 }
10459
10460 if (md.debug_dv)
10461 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10462
10463 for (i = 0; i < opdeps->nregs; i++)
10464 {
10465 const struct ia64_dependency *dep;
10466 struct rsrc specs[MAX_SPECS];
10467 int note;
10468 int path;
10469 int count;
10470
10471 dep = ia64_find_dependency (opdeps->regs[i]);
10472 note = NOTE (opdeps->regs[i]);
10473
10474 if (add_only_qp_reads
10475 && !(dep->mode == IA64_DV_WAR
10476 && (dep->specifier == IA64_RS_PR
10477 || dep->specifier == IA64_RS_PRr
10478 || dep->specifier == IA64_RS_PR63)))
10479 continue;
10480
10481 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10482
10483 while (count-- > 0)
10484 {
10485 mark_resource (idesc, dep, &specs[count],
10486 DEP (opdeps->regs[i]), md.path);
10487 }
10488
10489 /* The execution path may affect register values, which may in turn
10490 affect which indirect-access resources are accessed. */
10491 switch (dep->specifier)
10492 {
10493 default:
10494 break;
10495 case IA64_RS_CPUID:
10496 case IA64_RS_DBR:
10497 case IA64_RS_IBR:
10498 case IA64_RS_MSR:
10499 case IA64_RS_PKR:
10500 case IA64_RS_PMC:
10501 case IA64_RS_PMD:
10502 case IA64_RS_RR:
10503 for (path = 0; path < md.path; path++)
10504 {
10505 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10506 while (count-- > 0)
10507 mark_resource (idesc, dep, &specs[count],
10508 DEP (opdeps->regs[i]), path);
10509 }
10510 break;
10511 }
10512 }
10513 }
10514
10515 /* Remove dependencies when they no longer apply. */
10516
10517 static void
10518 update_dependencies (struct ia64_opcode *idesc)
10519 {
10520 int i;
10521
10522 if (strcmp (idesc->name, "srlz.i") == 0)
10523 {
10524 instruction_serialization ();
10525 }
10526 else if (strcmp (idesc->name, "srlz.d") == 0)
10527 {
10528 data_serialization ();
10529 }
10530 else if (is_interruption_or_rfi (idesc)
10531 || is_taken_branch (idesc))
10532 {
10533 /* Although technically the taken branch doesn't clear dependencies
10534 which require a srlz.[id], we don't follow the branch; the next
10535 instruction is assumed to start with a clean slate. */
10536 regdepslen = 0;
10537 md.path = 0;
10538 }
10539 else if (is_conditional_branch (idesc)
10540 && CURR_SLOT.qp_regno != 0)
10541 {
10542 int is_call = strstr (idesc->name, ".call") != NULL;
10543
10544 for (i = 0; i < qp_implieslen; i++)
10545 {
10546 /* If the conditional branch's predicate is implied by the predicate
10547 in an existing dependency, remove that dependency. */
10548 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10549 {
10550 int depind = 0;
10551 /* Note that this implied predicate takes a branch so that if
10552 a later insn generates a DV but its predicate implies this
10553 one, we can avoid the false DV warning. */
10554 qp_implies[i].p2_branched = 1;
10555 while (depind < regdepslen)
10556 {
10557 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10558 {
10559 print_dependency ("Removing", depind);
10560 regdeps[depind] = regdeps[--regdepslen];
10561 }
10562 else
10563 ++depind;
10564 }
10565 }
10566 }
10567 /* Any marked resources which have this same predicate should be
10568 cleared, provided that the QP hasn't been modified between the
10569 marking instruction and the branch. */
10570 if (is_call)
10571 {
10572 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10573 }
10574 else
10575 {
10576 i = 0;
10577 while (i < regdepslen)
10578 {
10579 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10580 && regdeps[i].link_to_qp_branch
10581 && (regdeps[i].file != CURR_SLOT.src_file
10582 || regdeps[i].line != CURR_SLOT.src_line))
10583 {
10584 /* Treat like a taken branch */
10585 print_dependency ("Removing", i);
10586 regdeps[i] = regdeps[--regdepslen];
10587 }
10588 else
10589 ++i;
10590 }
10591 }
10592 }
10593 }
10594
10595 /* Examine the current instruction for dependency violations. */
10596
10597 static int
10598 check_dv (struct ia64_opcode *idesc)
10599 {
10600 if (md.debug_dv)
10601 {
10602 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10603 idesc->name, CURR_SLOT.src_line,
10604 idesc->dependencies->nchks,
10605 idesc->dependencies->nregs);
10606 }
10607
10608 /* Look through the list of currently marked resources; if the current
10609      instruction has a dependency in its chks list that uses one of those
10610      resources, check against the specific resources used.  */
10611 check_dependencies (idesc);
10612
10613 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10614 then add them to the list of marked resources. */
10615 mark_resources (idesc);
10616
10617 /* There are several types of dependency semantics, and each has its own
10618      requirements for being cleared:
10619
10620 Instruction serialization (insns separated by interruption, rfi, or
10621 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10622
10623 Data serialization (instruction serialization, or writer + srlz.d +
10624 reader, where writer and srlz.d are in separate groups) clears
10625 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10626 always be the case).
10627
10628 Instruction group break (groups separated by stop, taken branch,
10629 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10630 */
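  /* An illustrative example: a RAW hazard on a general register falls
     under the implied/impliedF semantics above and is cleared by an
     instruction group break:

	 mov r4 = r5
	 add r6 = r4, r7	// flagged: r4 written and read in one group
	 mov r4 = r5 ;;
	 add r6 = r4, r7	// OK: the ';;' stop bit breaks the group  */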
10631 update_dependencies (idesc);
10632
10633 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10634 warning. Keep track of as many as possible that are useful. */
10635 note_register_values (idesc);
10636
10637 /* We don't need or want this anymore. */
10638 md.mem_offset.hint = 0;
10639
10640 return 0;
10641 }
10642
10643 /* Translate one line of assembly. Pseudo ops and labels do not show
10644 here. */
10645 void
10646 md_assemble (char *str)
10647 {
10648 char *saved_input_line_pointer, *temp;
10649 const char *mnemonic;
10650 const struct pseudo_opcode *pdesc;
10651 struct ia64_opcode *idesc;
10652 unsigned char qp_regno;
10653 unsigned int flags;
10654 int ch;
10655
10656 saved_input_line_pointer = input_line_pointer;
10657 input_line_pointer = str;
10658
10659 /* extract the opcode (mnemonic): */
10660
10661 ch = get_symbol_name (&temp);
10662 mnemonic = temp;
10663 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10664 if (pdesc)
10665 {
10666 (void) restore_line_pointer (ch);
10667 (*pdesc->handler) (pdesc->arg);
10668 goto done;
10669 }
10670
10671 /* Find the instruction descriptor matching the arguments. */
10672
10673 idesc = ia64_find_opcode (mnemonic);
10674 (void) restore_line_pointer (ch);
10675 if (!idesc)
10676 {
10677 as_bad (_("Unknown opcode `%s'"), mnemonic);
10678 goto done;
10679 }
10680
10681 idesc = parse_operands (idesc);
10682 if (!idesc)
10683 goto done;
10684
10685 /* Handle the dynamic ops we can handle now: */
10686 if (idesc->type == IA64_TYPE_DYN)
10687 {
10688 if (strcmp (idesc->name, "add") == 0)
10689 {
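	  /* addl takes a 22-bit immediate but only r0-r3 as its register
	     source; adds takes any GR but only a 14-bit immediate.  Pick
	     whichever form the register operand permits.  */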
10690 if (CURR_SLOT.opnd[2].X_op == O_register
10691 && CURR_SLOT.opnd[2].X_add_number < 4)
10692 mnemonic = "addl";
10693 else
10694 mnemonic = "adds";
10695 ia64_free_opcode (idesc);
10696 idesc = ia64_find_opcode (mnemonic);
10697 }
10698 else if (strcmp (idesc->name, "mov") == 0)
10699 {
10700 enum ia64_opnd opnd1, opnd2;
10701 int rop;
10702
10703 opnd1 = idesc->operands[0];
10704 opnd2 = idesc->operands[1];
10705 if (opnd1 == IA64_OPND_AR3)
10706 rop = 0;
10707 else if (opnd2 == IA64_OPND_AR3)
10708 rop = 1;
10709 else
10710 abort ();
10711 if (CURR_SLOT.opnd[rop].X_op == O_register)
10712 {
10713 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10714 mnemonic = "mov.i";
10715 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10716 mnemonic = "mov.m";
10717 else
10718 rop = -1;
10719 }
10720 else
10721 abort ();
10722 if (rop >= 0)
10723 {
10724 ia64_free_opcode (idesc);
10725 idesc = ia64_find_opcode (mnemonic);
10726 while (idesc != NULL
10727 && (idesc->operands[0] != opnd1
10728 || idesc->operands[1] != opnd2))
10729 idesc = get_next_opcode (idesc);
10730 }
10731 }
10732 }
10733 else if (strcmp (idesc->name, "mov.i") == 0
10734 || strcmp (idesc->name, "mov.m") == 0)
10735 {
10736 enum ia64_opnd opnd1, opnd2;
10737 int rop;
10738
10739 opnd1 = idesc->operands[0];
10740 opnd2 = idesc->operands[1];
10741 if (opnd1 == IA64_OPND_AR3)
10742 rop = 0;
10743 else if (opnd2 == IA64_OPND_AR3)
10744 rop = 1;
10745 else
10746 abort ();
10747 if (CURR_SLOT.opnd[rop].X_op == O_register)
10748 {
10749 char unit = 'a';
10750 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10751 unit = 'i';
10752 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10753 unit = 'm';
10754 if (unit != 'a' && unit != idesc->name [4])
10755 as_bad (_("AR %d can only be accessed by %c-unit"),
10756 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10757 TOUPPER (unit));
10758 }
10759 }
10760 else if (strcmp (idesc->name, "hint.b") == 0)
10761 {
10762 switch (md.hint_b)
10763 {
10764 case hint_b_ok:
10765 break;
10766 case hint_b_warning:
10767 as_warn (_("hint.b may be treated as nop"));
10768 break;
10769 case hint_b_error:
10770 as_bad (_("hint.b shouldn't be used"));
10771 break;
10772 }
10773 }
10774
10775 qp_regno = 0;
10776 if (md.qp.X_op == O_register)
10777 {
10778 qp_regno = md.qp.X_add_number - REG_P;
10779 md.qp.X_op = O_absent;
10780 }
10781
10782 flags = idesc->flags;
10783
10784 if ((flags & IA64_OPCODE_FIRST) != 0)
10785 {
10786 /* The alignment frag has to end with a stop bit only if the
10787 next instruction after the alignment directive has to be
10788 the first instruction in an instruction group. */
10789 if (align_frag)
10790 {
10791 while (align_frag->fr_type != rs_align_code)
10792 {
10793 align_frag = align_frag->fr_next;
10794 if (!align_frag)
10795 break;
10796 }
10797 /* align_frag can be NULL if there are directives in
10798 between. */
10799 if (align_frag && align_frag->fr_next == frag_now)
10800 align_frag->tc_frag_data = 1;
10801 }
10802
10803 insn_group_break (1, 0, 0);
10804 }
10805 align_frag = NULL;
10806
10807 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10808 {
10809 as_bad (_("`%s' cannot be predicated"), idesc->name);
10810 goto done;
10811 }
10812
10813 /* Build the instruction. */
10814 CURR_SLOT.qp_regno = qp_regno;
10815 CURR_SLOT.idesc = idesc;
10816 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10817 dwarf2_where (&CURR_SLOT.debug_line);
10818 dwarf2_consume_line_info ();
10819
10820 /* Add unwind entries, if there are any. */
10821 if (unwind.current_entry)
10822 {
10823 CURR_SLOT.unwind_record = unwind.current_entry;
10824 unwind.current_entry = NULL;
10825 }
10826 if (unwind.pending_saves)
10827 {
10828 if (unwind.pending_saves->next)
10829 {
10830 /* Attach the next pending save to the next slot so that its
10831 slot number will get set correctly. */
10832 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10833 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10834 }
10835 else
10836 unwind.pending_saves = NULL;
10837 }
10838 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10839 unwind.insn = 1;
10840
10841 /* Check for dependency violations. */
10842 if (md.detect_dv)
10843 check_dv (idesc);
10844
10845 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10846 if (++md.num_slots_in_use >= NUM_SLOTS)
10847 emit_one_bundle ();
10848
10849 if ((flags & IA64_OPCODE_LAST) != 0)
10850 insn_group_break (1, 0, 0);
10851
10852 md.last_text_seg = now_seg;
10853
10854 done:
10855 input_line_pointer = saved_input_line_pointer;
10856 }
10857
10858 /* Called when symbol NAME cannot be found in the symbol table.
10859 Should be used for dynamic valued symbols only. */
10860
10861 symbolS *
10862 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10863 {
10864 return 0;
10865 }
10866
10867 /* Called for any expression that cannot be recognized.  When the
10868 function is called, `input_line_pointer' will point to the start of
10869 the expression. */
10870
10871 void
10872 md_operand (expressionS *e)
10873 {
10874 switch (*input_line_pointer)
10875 {
10876 case '[':
10877 ++input_line_pointer;
10878 expression_and_evaluate (e);
10879 if (*input_line_pointer != ']')
10880 {
10881 as_bad (_("Closing bracket missing"));
10882 goto err;
10883 }
10884 else
10885 {
10886 if (e->X_op != O_register
10887 || e->X_add_number < REG_GR
10888 || e->X_add_number > REG_GR + 127)
10889 {
10890 as_bad (_("Index must be a general register"));
10891 e->X_add_number = REG_GR;
10892 }
10893
10894 ++input_line_pointer;
10895 e->X_op = O_index;
10896 }
10897 break;
10898
10899 default:
10900 break;
10901 }
10902 return;
10903
10904 err:
10905 ignore_rest_of_line ();
10906 }
10907
10908 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10909    a section symbol plus some offset.  For relocs involving @fptr()
10910    directives, we don't want such adjustments since we need to have the
10911 original symbol's name in the reloc. */
10912 int
10913 ia64_fix_adjustable (fixS *fix)
10914 {
10915 /* Prevent all adjustments to global symbols */
10916 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10917 return 0;
10918
10919 switch (fix->fx_r_type)
10920 {
10921 case BFD_RELOC_IA64_FPTR64I:
10922 case BFD_RELOC_IA64_FPTR32MSB:
10923 case BFD_RELOC_IA64_FPTR32LSB:
10924 case BFD_RELOC_IA64_FPTR64MSB:
10925 case BFD_RELOC_IA64_FPTR64LSB:
10926 case BFD_RELOC_IA64_LTOFF_FPTR22:
10927 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10928 return 0;
10929 default:
10930 break;
10931 }
10932
10933 return 1;
10934 }
10935
10936 int
10937 ia64_force_relocation (fixS *fix)
10938 {
10939 switch (fix->fx_r_type)
10940 {
10941 case BFD_RELOC_IA64_FPTR64I:
10942 case BFD_RELOC_IA64_FPTR32MSB:
10943 case BFD_RELOC_IA64_FPTR32LSB:
10944 case BFD_RELOC_IA64_FPTR64MSB:
10945 case BFD_RELOC_IA64_FPTR64LSB:
10946
10947 case BFD_RELOC_IA64_LTOFF22:
10948 case BFD_RELOC_IA64_LTOFF64I:
10949 case BFD_RELOC_IA64_LTOFF_FPTR22:
10950 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10951 case BFD_RELOC_IA64_PLTOFF22:
10952 case BFD_RELOC_IA64_PLTOFF64I:
10953 case BFD_RELOC_IA64_PLTOFF64MSB:
10954 case BFD_RELOC_IA64_PLTOFF64LSB:
10955
10956 case BFD_RELOC_IA64_LTOFF22X:
10957 case BFD_RELOC_IA64_LDXMOV:
10958 return 1;
10959
10960 default:
10961 break;
10962 }
10963
10964 return generic_force_reloc (fix);
10965 }
10966
10967 /* Decide from what point a pc-relative relocation is relative to,
10968 relative to the pc-relative fixup. Er, relatively speaking. */
10969 long
10970 ia64_pcrel_from_section (fixS *fix, segT sec)
10971 {
10972 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10973
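  /* IA-64 branch targets are bundle-relative: the IP of an instruction is
     the address of its 16-byte bundle, so drop the slot bits when the
     fixup lives in a code section.  */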
10974 if (bfd_section_flags (sec) & SEC_CODE)
10975 off &= ~0xfUL;
10976
10977 return off;
10978 }
10979
10980
10981 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10982 void
10983 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10984 {
10985 expressionS exp;
10986
10987 exp.X_op = O_pseudo_fixup;
10988 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10989 exp.X_add_number = 0;
10990 exp.X_add_symbol = symbol;
10991 emit_expr (&exp, size);
10992 }
10993
10994 /* This is called whenever some data item (not an instruction) needs a
10995 fixup. We pick the right reloc code depending on the byteorder
10996 currently in effect. */
10997 void
10998 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
10999 bfd_reloc_code_real_type code)
11000 {
11001 fixS *fix;
11002
11003 switch (nbytes)
11004 {
11005      /* There are no relocs for 8 and 16 bit quantities, but we allow
11006 them here since they will work fine as long as the expression
11007 is fully defined at the end of the pass over the source file. */
11008 case 1: code = BFD_RELOC_8; break;
11009 case 2: code = BFD_RELOC_16; break;
11010 case 4:
11011 if (target_big_endian)
11012 code = BFD_RELOC_IA64_DIR32MSB;
11013 else
11014 code = BFD_RELOC_IA64_DIR32LSB;
11015 break;
11016
11017 case 8:
11018 /* In 32-bit mode, data8 could mean function descriptors too. */
11019 if (exp->X_op == O_pseudo_fixup
11020 && exp->X_op_symbol
11021 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11022 && !(md.flags & EF_IA_64_ABI64))
11023 {
11024 if (target_big_endian)
11025 code = BFD_RELOC_IA64_IPLTMSB;
11026 else
11027 code = BFD_RELOC_IA64_IPLTLSB;
11028 exp->X_op = O_symbol;
11029 break;
11030 }
11031 else
11032 {
11033 if (target_big_endian)
11034 code = BFD_RELOC_IA64_DIR64MSB;
11035 else
11036 code = BFD_RELOC_IA64_DIR64LSB;
11037 break;
11038 }
11039
11040 case 16:
11041 if (exp->X_op == O_pseudo_fixup
11042 && exp->X_op_symbol
11043 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11044 {
11045 if (target_big_endian)
11046 code = BFD_RELOC_IA64_IPLTMSB;
11047 else
11048 code = BFD_RELOC_IA64_IPLTLSB;
11049 exp->X_op = O_symbol;
11050 break;
11051 }
11052 /* FALLTHRU */
11053
11054 default:
11055 as_bad (_("Unsupported fixup size %d"), nbytes);
11056 ignore_rest_of_line ();
11057 return;
11058 }
11059
11060 if (exp->X_op == O_pseudo_fixup)
11061 {
11062 exp->X_op = O_symbol;
11063 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11064 /* ??? If code unchanged, unsupported. */
11065 }
11066
11067 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11068 /* We need to store the byte order in effect in case we're going
11069      to fix an 8 or 16 bit relocation (for which there are no real
11070 relocs available). See md_apply_fix(). */
11071 fix->tc_fix_data.bigendian = target_big_endian;
11072 }
11073
11074 /* Return the actual relocation we wish to associate with the pseudo
11075 reloc described by SYM and R_TYPE. SYM should be one of the
11076 symbols in the pseudo_func array, or NULL. */
11077
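/* For example, "@gprel(sym)" applied to a 22-bit immediate field maps
   BFD_RELOC_IA64_IMM22 to BFD_RELOC_IA64_GPREL22 below.  */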
11078 static bfd_reloc_code_real_type
11079 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11080 {
11081 bfd_reloc_code_real_type newr = 0;
11082 const char *type = NULL, *suffix = "";
11083
11084 if (sym == NULL)
11085 {
11086 return r_type;
11087 }
11088
11089 switch (S_GET_VALUE (sym))
11090 {
11091 case FUNC_FPTR_RELATIVE:
11092 switch (r_type)
11093 {
11094 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11095 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11096 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11097 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11098 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11099 default: type = "FPTR"; break;
11100 }
11101 break;
11102
11103 case FUNC_GP_RELATIVE:
11104 switch (r_type)
11105 {
11106 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11107 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11108 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11109 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11110 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11111 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11112 default: type = "GPREL"; break;
11113 }
11114 break;
11115
11116 case FUNC_LT_RELATIVE:
11117 switch (r_type)
11118 {
11119 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11120 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11121 default: type = "LTOFF"; break;
11122 }
11123 break;
11124
11125 case FUNC_LT_RELATIVE_X:
11126 switch (r_type)
11127 {
11128 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11129 default: type = "LTOFF"; suffix = "X"; break;
11130 }
11131 break;
11132
11133 case FUNC_PC_RELATIVE:
11134 switch (r_type)
11135 {
11136 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11137 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11138 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11139 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11140 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11141 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11142 default: type = "PCREL"; break;
11143 }
11144 break;
11145
11146 case FUNC_PLT_RELATIVE:
11147 switch (r_type)
11148 {
11149 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11150 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11151 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11152 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11153 default: type = "PLTOFF"; break;
11154 }
11155 break;
11156
11157 case FUNC_SEC_RELATIVE:
11158 switch (r_type)
11159 {
11160 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11161 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11162 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11163 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11164 default: type = "SECREL"; break;
11165 }
11166 break;
11167
11168 case FUNC_SEG_RELATIVE:
11169 switch (r_type)
11170 {
11171 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11172 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11173 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11174 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11175 default: type = "SEGREL"; break;
11176 }
11177 break;
11178
11179 case FUNC_LTV_RELATIVE:
11180 switch (r_type)
11181 {
11182 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11183 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11184 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11185 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11186 default: type = "LTV"; break;
11187 }
11188 break;
11189
11190 case FUNC_LT_FPTR_RELATIVE:
11191 switch (r_type)
11192 {
11193 case BFD_RELOC_IA64_IMM22:
11194 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11195 case BFD_RELOC_IA64_IMM64:
11196 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11197 case BFD_RELOC_IA64_DIR32MSB:
11198 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11199 case BFD_RELOC_IA64_DIR32LSB:
11200 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11201 case BFD_RELOC_IA64_DIR64MSB:
11202 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11203 case BFD_RELOC_IA64_DIR64LSB:
11204 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11205 default:
11206 type = "LTOFF_FPTR"; break;
11207 }
11208 break;
11209
11210 case FUNC_TP_RELATIVE:
11211 switch (r_type)
11212 {
11213 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11214 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11215 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11216 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11217 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11218 default: type = "TPREL"; break;
11219 }
11220 break;
11221
11222 case FUNC_LT_TP_RELATIVE:
11223 switch (r_type)
11224 {
11225 case BFD_RELOC_IA64_IMM22:
11226 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11227 default:
11228 type = "LTOFF_TPREL"; break;
11229 }
11230 break;
11231
11232 case FUNC_DTP_MODULE:
11233 switch (r_type)
11234 {
11235 case BFD_RELOC_IA64_DIR64MSB:
11236 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11237 case BFD_RELOC_IA64_DIR64LSB:
11238 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11239 default:
11240 type = "DTPMOD"; break;
11241 }
11242 break;
11243
11244 case FUNC_LT_DTP_MODULE:
11245 switch (r_type)
11246 {
11247 case BFD_RELOC_IA64_IMM22:
11248 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11249 default:
11250 type = "LTOFF_DTPMOD"; break;
11251 }
11252 break;
11253
11254 case FUNC_DTP_RELATIVE:
11255 switch (r_type)
11256 {
11257 case BFD_RELOC_IA64_DIR32MSB:
11258 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11259 case BFD_RELOC_IA64_DIR32LSB:
11260 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11261 case BFD_RELOC_IA64_DIR64MSB:
11262 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11263 case BFD_RELOC_IA64_DIR64LSB:
11264 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11265 case BFD_RELOC_IA64_IMM14:
11266 newr = BFD_RELOC_IA64_DTPREL14; break;
11267 case BFD_RELOC_IA64_IMM22:
11268 newr = BFD_RELOC_IA64_DTPREL22; break;
11269 case BFD_RELOC_IA64_IMM64:
11270 newr = BFD_RELOC_IA64_DTPREL64I; break;
11271 default:
11272 type = "DTPREL"; break;
11273 }
11274 break;
11275
11276 case FUNC_LT_DTP_RELATIVE:
11277 switch (r_type)
11278 {
11279 case BFD_RELOC_IA64_IMM22:
11280 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11281 default:
11282 type = "LTOFF_DTPREL"; break;
11283 }
11284 break;
11285
11286 case FUNC_IPLT_RELOC:
11287 switch (r_type)
11288 {
11289 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11290 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11291 default: type = "IPLT"; break;
11292 }
11293 break;
11294
11295 #ifdef TE_VMS
11296 case FUNC_SLOTCOUNT_RELOC:
11297 return DUMMY_RELOC_IA64_SLOTCOUNT;
11298 #endif
11299
11300 default:
11301 abort ();
11302 }
11303
11304 if (newr)
11305 return newr;
11306 else
11307 {
11308 int width;
11309
11310 if (!type)
11311 abort ();
11312 switch (r_type)
11313 {
11314 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11315 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11316 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11317 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11318 case BFD_RELOC_UNUSED: width = 13; break;
11319 case BFD_RELOC_IA64_IMM14: width = 14; break;
11320 case BFD_RELOC_IA64_IMM22: width = 22; break;
11321 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11322 default: abort ();
11323 }
11324
11325 /* This should be an error, but since previously there wasn't any
11326 diagnostic here, don't make it fail because of this for now. */
11327 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11328 return r_type;
11329 }
11330 }
11331
11332 /* Here is where we generate the appropriate reloc for pseudo relocation
11333 functions. */
11334 void
11335 ia64_validate_fix (fixS *fix)
11336 {
11337 switch (fix->fx_r_type)
11338 {
11339 case BFD_RELOC_IA64_FPTR64I:
11340 case BFD_RELOC_IA64_FPTR32MSB:
11341 case BFD_RELOC_IA64_FPTR64LSB:
11342 case BFD_RELOC_IA64_LTOFF_FPTR22:
11343 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11344 if (fix->fx_offset != 0)
11345 as_bad_where (fix->fx_file, fix->fx_line,
11346 _("No addend allowed in @fptr() relocation"));
11347 break;
11348 default:
11349 break;
11350 }
11351 }
11352
11353 static void
11354 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11355 {
11356 bfd_vma insn[3], t0, t1, control_bits;
11357 const char *err;
11358 char *fixpos;
11359 long slot;
11360
11361 slot = fix->fx_where & 0x3;
11362 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11363
11364 /* Bundles are always in little-endian byte order */
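     /* A bundle is 128 bits: a 5-bit template followed by three 41-bit
	instruction slots.  t0 holds bits 0..63 and t1 bits 64..127, so
	slot 1 straddles the two words (18 bits in t0, 23 bits in t1).  */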
11365 t0 = bfd_getl64 (fixpos);
11366 t1 = bfd_getl64 (fixpos + 8);
11367 control_bits = t0 & 0x1f;
11368 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11369 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11370 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11371
11372 err = NULL;
11373 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11374 {
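      /* movl: the 64-bit immediate is spread over the L and X slots.
	 Bits 22..62 form imm41 in slot 1, while the remaining pieces fill
	 the imm7b, imm9d, imm5c, ic and i fields of slot 2.  */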
11375 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11376 insn[2] |= (((value & 0x7f) << 13)
11377 | (((value >> 7) & 0x1ff) << 27)
11378 | (((value >> 16) & 0x1f) << 22)
11379 | (((value >> 21) & 0x1) << 21)
11380 | (((value >> 63) & 0x1) << 36));
11381 }
11382 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11383 {
11384 if (value & ~0x3fffffffffffffffULL)
11385 err = _("integer operand out of range");
11386 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11387 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11388 }
11389 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11390 {
11391 value >>= 4;
11392 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11393 insn[2] |= ((((value >> 59) & 0x1) << 36)
11394 | (((value >> 0) & 0xfffff) << 13));
11395 }
11396 else
11397 err = (*odesc->insert) (odesc, value, insn + slot);
11398
11399 if (err)
11400 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11401
11402 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11403 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11404 number_to_chars_littleendian (fixpos + 0, t0, 8);
11405 number_to_chars_littleendian (fixpos + 8, t1, 8);
11406 }
11407
11408 /* Attempt to simplify or even eliminate a fixup. The return value is
11409 ignored; perhaps it was once meaningful, but now it is historical.
11410 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11411
11412 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11413 (if possible). */
11414
11415 void
11416 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11417 {
11418 char *fixpos;
11419 valueT value = *valP;
11420
11421 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11422
11423 if (fix->fx_pcrel)
11424 {
11425 switch (fix->fx_r_type)
11426 {
11427 case BFD_RELOC_IA64_PCREL21B: break;
11428 case BFD_RELOC_IA64_PCREL21BI: break;
11429 case BFD_RELOC_IA64_PCREL21F: break;
11430 case BFD_RELOC_IA64_PCREL21M: break;
11431 case BFD_RELOC_IA64_PCREL60B: break;
11432 case BFD_RELOC_IA64_PCREL22: break;
11433 case BFD_RELOC_IA64_PCREL64I: break;
11434 case BFD_RELOC_IA64_PCREL32MSB: break;
11435 case BFD_RELOC_IA64_PCREL32LSB: break;
11436 case BFD_RELOC_IA64_PCREL64MSB: break;
11437 case BFD_RELOC_IA64_PCREL64LSB: break;
11438 default:
11439 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11440 fix->fx_r_type);
11441 break;
11442 }
11443 }
11444 if (fix->fx_addsy)
11445 {
11446 switch ((unsigned) fix->fx_r_type)
11447 {
11448 case BFD_RELOC_UNUSED:
11449 /* This must be a TAG13 or TAG13b operand. There are no external
11450 relocs defined for them, so we must give an error. */
11451 as_bad_where (fix->fx_file, fix->fx_line,
11452 _("%s must have a constant value"),
11453 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11454 fix->fx_done = 1;
11455 return;
11456
11457 case BFD_RELOC_IA64_TPREL14:
11458 case BFD_RELOC_IA64_TPREL22:
11459 case BFD_RELOC_IA64_TPREL64I:
11460 case BFD_RELOC_IA64_LTOFF_TPREL22:
11461 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11462 case BFD_RELOC_IA64_DTPREL14:
11463 case BFD_RELOC_IA64_DTPREL22:
11464 case BFD_RELOC_IA64_DTPREL64I:
11465 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11466 S_SET_THREAD_LOCAL (fix->fx_addsy);
11467 break;
11468
11469 #ifdef TE_VMS
11470 case DUMMY_RELOC_IA64_SLOTCOUNT:
11471 as_bad_where (fix->fx_file, fix->fx_line,
11472 _("cannot resolve @slotcount parameter"));
11473 fix->fx_done = 1;
11474 return;
11475 #endif
11476
11477 default:
11478 break;
11479 }
11480 }
11481 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11482 {
11483 #ifdef TE_VMS
11484 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11485 {
11486	  /* For @slotcount, convert a difference of addresses to a difference
11487	     of slots.  */
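	  /* For instance, a byte difference of 0x21 is two whole bundles
	     plus one slot: (0x21 >> 4) * 3 + 1 = 7 slots.  The 0x0e and
	     0x0f remainders occur when the subtraction borrowed, i.e. when
	     the starting address pointed at a later slot than the ending
	     one within its bundle.  */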
11488 valueT v;
11489
11490 v = (value >> 4) * 3;
11491 switch (value & 0x0f)
11492 {
11493 case 0:
11494 case 1:
11495 case 2:
11496 v += value & 0x0f;
11497 break;
11498 case 0x0f:
11499 v += 2;
11500 break;
11501 case 0x0e:
11502 v += 1;
11503 break;
11504 default:
11505 as_bad (_("invalid @slotcount value"));
11506 }
11507 value = v;
11508 }
11509 #endif
11510
11511 if (fix->tc_fix_data.bigendian)
11512 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11513 else
11514 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11515 fix->fx_done = 1;
11516 }
11517 else
11518 {
11519 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11520 fix->fx_done = 1;
11521 }
11522 }
11523
11524 /* Generate the BFD reloc to be stuck in the object file from the
11525 fixup used internally in the assembler. */
11526
11527 arelent *
11528 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11529 {
11530 arelent *reloc;
11531
11532 reloc = XNEW (arelent);
11533 reloc->sym_ptr_ptr = XNEW (asymbol *);
11534 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11535 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11536 reloc->addend = fixp->fx_offset;
11537 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11538
11539 if (!reloc->howto)
11540 {
11541 as_bad_where (fixp->fx_file, fixp->fx_line,
11542 _("Cannot represent %s relocation in object file"),
11543 bfd_get_reloc_code_name (fixp->fx_r_type));
11544 free (reloc);
11545 return NULL;
11546 }
11547 return reloc;
11548 }
11549
11550 /* Turn a string in input_line_pointer into a floating point constant
11551 of type TYPE, and store the appropriate bytes in *LIT. The number
11552 of LITTLENUMS emitted is stored in *SIZE. An error message is
11553 returned, or NULL on OK. */
11554
11555 const char *
11556 md_atof (int type, char *lit, int *size)
11557 {
11558 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11559 char *t;
11560 int prec;
11561
11562 switch (type)
11563 {
11564 /* IEEE floats */
11565 case 'f':
11566 case 'F':
11567 case 's':
11568 case 'S':
11569 prec = 2;
11570 break;
11571
11572 case 'd':
11573 case 'D':
11574 case 'r':
11575 case 'R':
11576 prec = 4;
11577 break;
11578
11579 case 'x':
11580 case 'X':
11581 case 'p':
11582 case 'P':
11583 prec = 5;
11584 break;
11585
11586 default:
11587 *size = 0;
11588 return _("Unrecognized or unsupported floating point constant");
11589 }
11590 t = atof_ieee (input_line_pointer, type, words);
11591 if (t)
11592 input_line_pointer = t;
11593
11594 (*ia64_float_to_chars) (lit, words, prec);
11595
11596 if (type == 'X')
11597 {
11598      /* It is a 10-byte floating point value with 6 bytes of padding.  */
11599 memset (&lit [10], 0, 6);
11600 *size = 8 * sizeof (LITTLENUM_TYPE);
11601 }
11602 else
11603 *size = prec * sizeof (LITTLENUM_TYPE);
11604
11605 return NULL;
11606 }
11607
11608 /* Handle ia64 specific semantics of the align directive. */
11609
11610 void
11611 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11612 const char *fill ATTRIBUTE_UNUSED,
11613 int len ATTRIBUTE_UNUSED,
11614 int max ATTRIBUTE_UNUSED)
11615 {
11616 if (subseg_text_p (now_seg))
11617 ia64_flush_insns ();
11618 }
11619
11620 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11621 of an rs_align_code fragment. */
11622
11623 void
11624 ia64_handle_align (fragS *fragp)
11625 {
11626 int bytes;
11627 char *p;
11628 const unsigned char *nop_type;
11629
11630 if (fragp->fr_type != rs_align_code)
11631 return;
11632
11633 /* Check if this frag has to end with a stop bit. */
11634 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11635
11636 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11637 p = fragp->fr_literal + fragp->fr_fix;
11638
11639   /* If no padding is needed, check whether we need a stop bit.  */
11640 if (!bytes && fragp->tc_frag_data)
11641 {
11642 if (fragp->fr_fix < 16)
11643 #if 1
11644 /* FIXME: It won't work with
11645 .align 16
11646 alloc r32=ar.pfs,1,2,4,0
11647 */
11648 ;
11649 #else
11650 as_bad_where (fragp->fr_file, fragp->fr_line,
11651 _("Can't add stop bit to mark end of instruction group"));
11652 #endif
11653 else
11654 /* Bundles are always in little-endian byte order. Make sure
11655 the previous bundle has the stop bit. */
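	/* The low bit of a bundle's 5-bit template selects the variant
	   with a trailing stop, so OR-ing 1 into the first byte of the
	   previous bundle ends its instruction group.  */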
11656 *(p - 16) |= 1;
11657 }
11658
11659 /* Make sure we are on a 16-byte boundary, in case someone has been
11660 putting data into a text section. */
11661 if (bytes & 15)
11662 {
11663 int fix = bytes & 15;
11664 memset (p, 0, fix);
11665 p += fix;
11666 bytes -= fix;
11667 fragp->fr_fix += fix;
11668 }
11669
11670 /* Instruction bundles are always little-endian. */
11671 memcpy (p, nop_type, 16);
11672 fragp->fr_var = 16;
11673 }
11674
11675 static void
11676 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11677 int prec)
11678 {
11679 while (prec--)
11680 {
11681 number_to_chars_bigendian (lit, (long) (*words++),
11682 sizeof (LITTLENUM_TYPE));
11683 lit += sizeof (LITTLENUM_TYPE);
11684 }
11685 }
11686
11687 static void
11688 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11689 int prec)
11690 {
11691 while (prec--)
11692 {
11693 number_to_chars_littleendian (lit, (long) (words[prec]),
11694 sizeof (LITTLENUM_TYPE));
11695 lit += sizeof (LITTLENUM_TYPE);
11696 }
11697 }
11698
11699 void
11700 ia64_elf_section_change_hook (void)
11701 {
11702 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11703 && elf_linked_to_section (now_seg) == NULL)
11704 elf_linked_to_section (now_seg) = text_section;
11705 dot_byteorder (-1);
11706 }
11707
11708 /* Check if a label should be made global. */
11709 void
11710 ia64_check_label (symbolS *label)
11711 {
11712 if (*input_line_pointer == ':')
11713 {
11714 S_SET_EXTERNAL (label);
11715 input_line_pointer++;
11716 }
11717 }
11718
11719 /* Used to remember where .alias and .secalias directives are seen. We
11720 will rename symbol and section names when we are about to output
11721 the relocatable file. */
11722 struct alias
11723 {
11724 const char *file; /* The file where the directive is seen. */
11725 unsigned int line; /* The line number the directive is at. */
11726 const char *name; /* The original name of the symbol. */
11727 };
11728
11729 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11730 .secalias. Otherwise, it is .alias. */
11731 static void
11732 dot_alias (int section)
11733 {
11734 char *name, *alias;
11735 char delim;
11736 char *end_name;
11737 int len;
11738 struct alias *h;
11739 const char *a;
11740 htab_t ahash, nhash;
11741 const char *kind;
11742
11743 delim = get_symbol_name (&name);
11744 end_name = input_line_pointer;
11745 *end_name = delim;
11746
11747 if (name == end_name)
11748 {
11749 as_bad (_("expected symbol name"));
11750 ignore_rest_of_line ();
11751 return;
11752 }
11753
11754 SKIP_WHITESPACE_AFTER_NAME ();
11755
11756 if (*input_line_pointer != ',')
11757 {
11758 *end_name = 0;
11759 as_bad (_("expected comma after \"%s\""), name);
11760 *end_name = delim;
11761 ignore_rest_of_line ();
11762 return;
11763 }
11764
11765 input_line_pointer++;
11766 *end_name = 0;
11767 ia64_canonicalize_symbol_name (name);
11768
11769   /* We call demand_copy_C_string to check if the alias string is valid.
11770 There should be a closing `"' and no `\0' in the string. */
11771 alias = demand_copy_C_string (&len);
11772 if (alias == NULL)
11773 {
11774 ignore_rest_of_line ();
11775 return;
11776 }
11777
11778 /* Make a copy of name string. */
11779 len = strlen (name) + 1;
11780 obstack_grow (&notes, name, len);
11781 name = obstack_finish (&notes);
11782
11783 if (section)
11784 {
11785 kind = "section";
11786 ahash = secalias_hash;
11787 nhash = secalias_name_hash;
11788 }
11789 else
11790 {
11791 kind = "symbol";
11792 ahash = alias_hash;
11793 nhash = alias_name_hash;
11794 }
11795
11796 /* Check if alias has been used before. */
11797
11798 h = (struct alias *) str_hash_find (ahash, alias);
11799 if (h)
11800 {
11801 if (strcmp (h->name, name))
11802 as_bad (_("`%s' is already the alias of %s `%s'"),
11803 alias, kind, h->name);
11804 obstack_free (&notes, name);
11805 obstack_free (&notes, alias);
11806 goto out;
11807 }
11808
11809 /* Check if name already has an alias. */
11810 a = (const char *) str_hash_find (nhash, name);
11811 if (a)
11812 {
11813 if (strcmp (a, alias))
11814 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11815 obstack_free (&notes, name);
11816 obstack_free (&notes, alias);
11817 goto out;
11818 }
11819
11820 h = XNEW (struct alias);
11821 h->file = as_where (&h->line);
11822 h->name = name;
11823
11824 str_hash_insert (ahash, alias, h, 0);
11825 str_hash_insert (nhash, name, alias, 0);
11826
11827 out:
11828 demand_empty_rest_of_line ();
11829 }
11830
11831 /* Rename the original symbol to its alias.  */
11832 static int
11833 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11834 {
11835 string_tuple_t *tuple = *((string_tuple_t **) slot);
11836 struct alias *h = (struct alias *) tuple->value;
11837 symbolS *sym = symbol_find (h->name);
11838
11839 if (sym == NULL)
11840 {
11841 #ifdef TE_VMS
11842 /* Uses .alias extensively to alias CRTL functions to same with
11843 decc$ prefix. Sometimes function gets optimized away and a
11844 warning results, which should be suppressed. */
11845 if (!startswith (tuple->key, "decc$"))
11846 #endif
11847 as_warn_where (h->file, h->line,
11848 _("symbol `%s' aliased to `%s' is not used"),
11849 h->name, tuple->key);
11850 }
11851 else
11852 S_SET_NAME (sym, (char *) tuple->key);
11853
11854 return 1;
11855 }
11856
11857 /* Called from write_object_file. */
11858 void
11859 ia64_adjust_symtab (void)
11860 {
11861 htab_traverse (alias_hash, do_alias, NULL);
11862 }
11863
11864 /* Rename the original section to its alias.  */
11865 static int
11866 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11867 {
11868 string_tuple_t *tuple = *((string_tuple_t **) slot);
11869 struct alias *h = (struct alias *) tuple->value;
11870 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11871
11872 if (sec == NULL)
11873 as_warn_where (h->file, h->line,
11874 _("section `%s' aliased to `%s' is not used"),
11875 h->name, tuple->key);
11876 else
11877 sec->name = tuple->key;
11878
11879 return 1;
11880 }
11881
11882 /* Called from write_object_file. */
11883 void
11884 ia64_frob_file (void)
11885 {
11886 htab_traverse (secalias_hash, do_secalias, NULL);
11887 }
11888
11889 #ifdef TE_VMS
11890 #define NT_VMS_MHD 1
11891 #define NT_VMS_LNM 2
11892
11893 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11894 .note section. */
11895
11896 /* Manufacture a VMS-like time string. */
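/* ctime yields e.g. "Thu Feb 24 15:00:00 2005\n"; the stores below chop it
   into fields which are reassembled as "24-Feb-2005 15:00".  */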
11897 static void
11898 get_vms_time (char *Now)
11899 {
11900 char *pnt;
11901 time_t timeb;
11902
11903 time (&timeb);
11904 pnt = ctime (&timeb);
11905 pnt[3] = 0;
11906 pnt[7] = 0;
11907 pnt[10] = 0;
11908 pnt[16] = 0;
11909 pnt[24] = 0;
11910 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11911 }
11912
11913 void
11914 ia64_vms_note (void)
11915 {
11916 char *p;
11917 asection *seg = now_seg;
11918 subsegT subseg = now_subseg;
11919 asection *secp = NULL;
11920 char *bname;
11921 char buf [256];
11922 symbolS *sym;
11923
11924 /* Create the .note section. */
11925
11926 secp = subseg_new (".note", 0);
11927 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11928
11929 /* Module header note (MHD). */
11930 bname = xstrdup (lbasename (out_file_name));
11931 if ((p = strrchr (bname, '.')))
11932 *p = '\0';
11933
11934 /* VMS note header is 24 bytes long. */
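  /* Three 8-byte little-endian fields: name size (8, for "IPF/VMS"),
     descriptor size, and note type.  */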
11935 p = frag_more (8 + 8 + 8);
11936 number_to_chars_littleendian (p + 0, 8, 8);
11937 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11938 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11939
11940 p = frag_more (8);
11941 strcpy (p, "IPF/VMS");
11942
11943 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11944 get_vms_time (p);
11945 strcpy (p + 17, "24-FEB-2005 15:00");
11946 p += 17 + 17;
11947 strcpy (p, bname);
11948 p += strlen (bname) + 1;
11949 free (bname);
11950 strcpy (p, "V1.0");
11951
11952 frag_align (3, 0, 0);
11953
11954 /* Language processor name note. */
11955 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11956 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11957
11958 p = frag_more (8 + 8 + 8);
11959 number_to_chars_littleendian (p + 0, 8, 8);
11960 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11961 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11962
11963 p = frag_more (8);
11964 strcpy (p, "IPF/VMS");
11965
11966 p = frag_more (strlen (buf) + 1);
11967 strcpy (p, buf);
11968
11969 frag_align (3, 0, 0);
11970
11971 secp = subseg_new (".vms_display_name_info", 0);
11972 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11973
11974 /* This symbol should be passed on the command line and be variable
11975 according to language. */
11976 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11977 absolute_section, &zero_address_frag, 0);
11978 symbol_table_insert (sym);
11979 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11980
11981 p = frag_more (4);
11982 /* Format 3 of VMS demangler Spec. */
11983 number_to_chars_littleendian (p, 3, 4);
11984
11985 p = frag_more (4);
11986   /* Placeholder for the symbol table index of the above symbol.  */
11987 number_to_chars_littleendian (p, -1, 4);
11988
11989 frag_align (3, 0, 0);
11990
11991 /* We probably can't restore the current segment, for there likely
11992 isn't one yet... */
11993 if (seg && subseg)
11994 subseg_set (seg, subseg);
11995 }
11996
11997 #endif /* TE_VMS */