1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .alias
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
48 #include "subsegs.h"
49
50 #include "opcode/ia64.h"
51
52 #include "elf/ia64.h"
53
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
56
57 #define NUM_SLOTS 4
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
60
61 #define O_pseudo_fixup (O_max + 1)
62
63 enum special_section
64 {
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
67 SPECIAL_SECTION_SBSS,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
76 };
77
78 enum reloc_func
79 {
80 FUNC_FPTR_RELATIVE,
81 FUNC_GP_RELATIVE,
82 FUNC_LT_RELATIVE,
83 FUNC_PC_RELATIVE,
84 FUNC_PLT_RELATIVE,
85 FUNC_SEC_RELATIVE,
86 FUNC_SEG_RELATIVE,
87 FUNC_LTV_RELATIVE,
88 FUNC_LT_FPTR_RELATIVE,
89 FUNC_IPLT_RELOC,
90 };
91
92 enum reg_symbol
93 {
94 REG_GR = 0,
95 REG_FR = (REG_GR + 128),
96 REG_AR = (REG_FR + 128),
97 REG_CR = (REG_AR + 128),
98 REG_P = (REG_CR + 128),
99 REG_BR = (REG_P + 64),
100 REG_IP = (REG_BR + 8),
101 REG_CFM,
102 REG_PR,
103 REG_PR_ROT,
104 REG_PSR,
105 REG_PSR_L,
106 REG_PSR_UM,
107 /* The following are pseudo-registers for use by gas only. */
108 IND_CPUID,
109 IND_DBR,
110 IND_DTR,
111 IND_ITR,
112 IND_IBR,
113 IND_MEM,
114 IND_MSR,
115 IND_PKR,
116 IND_PMC,
117 IND_PMD,
118 IND_RR,
119 /* The following pseudo-registers are used for unwind directives only: */
120 REG_PSP,
121 REG_PRIUNAT,
122 REG_NUM
123 };
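/* Illustrative sketch (added, not part of the original source): the enum
   above packs every architectural register into one flat number space, so
   a symbolic register fits in a single int.  For example:

       r5  -> REG_GR + 5              ( = 5 )
       f32 -> REG_FR + 32             ( = 128 + 32 )
       p63 -> REG_P  + 63             ( = 4*128 + 63 )
       b0  -> REG_BR + 0

   Given such an encoded value N, the class and index are recovered with
   plain comparisons, e.g. (N >= REG_FR && N < REG_AR) tests for a
   floating-point register and (N - REG_FR) yields its index.  */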
124
125 enum dynreg_type
126 {
127 DYNREG_GR = 0, /* dynamic general purpose register */
128 DYNREG_FR, /* dynamic floating point register */
129 DYNREG_PR, /* dynamic predicate register */
130 DYNREG_NUM_TYPES
131 };
132
133 enum operand_match_result
134 {
135 OPERAND_MATCH,
136 OPERAND_OUT_OF_RANGE,
137 OPERAND_MISMATCH
138 };
139
140 /* On the ia64, we can't know the address of a text label until the
141 instructions are packed into a bundle. To handle this, we keep
142 track of the list of labels that appear in front of each
143 instruction. */
144 struct label_fix
145 {
146 struct label_fix *next;
147 struct symbol *sym;
148 };
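/* Illustrative example (added, not in the original): given assembly such as

       .L1:
       .L2:    add r8 = r9, r10

   both .L1 and .L2 are queued on the slot's label_fixups chain and are
   only bound to an address once the instruction has actually been placed
   into a bundle slot (cf. ia64_flush_insns further down).  */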
149
150 extern int target_big_endian;
151
152 /* Characters which always start a comment. */
153 const char comment_chars[] = "";
154
155 /* Characters which start a comment at the beginning of a line. */
156 const char line_comment_chars[] = "#";
157
158 /* Characters which may be used to separate multiple commands on a
159 single line. */
160 const char line_separator_chars[] = ";";
161
162 /* Characters which are used to indicate an exponent in a floating
163 point number. */
164 const char EXP_CHARS[] = "eE";
165
166 /* Characters which mean that a number is a floating point constant,
167 as in 0d1.0. */
168 const char FLT_CHARS[] = "rRsSfFdDxXpP";
169
170 /* ia64-specific option processing: */
171
172 const char *md_shortopts = "m:N:x::";
173
174 struct option md_longopts[] =
175 {
176 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
177 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
178 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
179 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
180 };
181
182 size_t md_longopts_size = sizeof (md_longopts);
183
184 static struct
185 {
186 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
187 struct hash_control *reg_hash; /* register name hash table */
188 struct hash_control *dynreg_hash; /* dynamic register hash table */
189 struct hash_control *const_hash; /* constant hash table */
190 struct hash_control *entry_hash; /* code entry hint hash table */
191
192 symbolS *regsym[REG_NUM];
193
194 /* If X_op is not O_absent, the register name for the instruction's
195 qualifying predicate. If NULL, p0 is assumed for instructions
196 that are predicable. */
197 expressionS qp;
198
199 unsigned int
200 manual_bundling : 1,
201 debug_dv: 1,
202 detect_dv: 1,
203 explicit_mode : 1, /* which mode we're in */
204 default_explicit_mode : 1, /* which mode is the default */
205 mode_explicitly_set : 1, /* was the current mode explicitly set? */
206 auto_align : 1,
207 keep_pending_output : 1;
208
209 /* Each bundle consists of up to three instructions. We keep
210 track of four most recent instructions so we can correctly set
211 the end_of_insn_group for the last instruction in a bundle. */
212 int curr_slot;
213 int num_slots_in_use;
214 struct slot
215 {
216 unsigned int
217 end_of_insn_group : 1,
218 manual_bundling_on : 1,
219 manual_bundling_off : 1;
220 signed char user_template; /* user-selected template, if any */
221 unsigned char qp_regno; /* qualifying predicate */
222 /* This duplicates a good fraction of "struct fix" but we
223 can't use a "struct fix" instead since we can't call
224 fix_new_exp() until we know the address of the instruction. */
225 int num_fixups;
226 struct insn_fix
227 {
228 bfd_reloc_code_real_type code;
229 enum ia64_opnd opnd; /* type of operand in need of fix */
230 unsigned int is_pcrel : 1; /* is operand pc-relative? */
231 expressionS expr; /* the value to be inserted */
232 }
233 fixup[2]; /* at most two fixups per insn */
234 struct ia64_opcode *idesc;
235 struct label_fix *label_fixups;
236 struct label_fix *tag_fixups;
237 struct unw_rec_list *unwind_record; /* Unwind directive. */
238 expressionS opnd[6];
239 char *src_file;
240 unsigned int src_line;
241 struct dwarf2_line_info debug_line;
242 }
243 slot[NUM_SLOTS];
244
245 segT last_text_seg;
246
247 struct dynreg
248 {
249 struct dynreg *next; /* next dynamic register */
250 const char *name;
251 unsigned short base; /* the base register number */
252 unsigned short num_regs; /* # of registers in this set */
253 }
254 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
255
256 flagword flags; /* ELF-header flags */
257
258 struct mem_offset {
259 unsigned hint:1; /* is this hint currently valid? */
260 bfd_vma offset; /* mem.offset offset */
261 bfd_vma base; /* mem.offset base */
262 } mem_offset;
263
264 int path; /* number of alt. entry points seen */
265 const char **entry_labels; /* labels of all alternate paths in
266 the current DV-checking block. */
267 int maxpaths; /* size currently allocated for
268 entry_labels */
269 /* Support for hardware errata workarounds. */
270
271 /* Record data about the last three insn groups. */
272 struct group
273 {
274 /* B-step workaround.
275 For each predicate register, this is set if the corresponding insn
276 group conditionally sets this register with one of the affected
277 instructions. */
278 int p_reg_set[64];
279 /* B-step workaround.
280 For each general register, this is set if the corresponding insn
281 a) is conditional on one of the predicate registers for which
282 P_REG_SET is 1 in the corresponding entry of the previous group,
283 b) sets this general register with one of the affected
284 instructions. */
285 int g_reg_set_conditionally[128];
286 } last_groups[3];
287 int group_idx;
288
289 int pointer_size; /* size in bytes of a pointer */
290 int pointer_size_shift; /* shift size of a pointer for alignment */
291 }
292 md;
293
294 /* application registers: */
295
296 #define AR_K0 0
297 #define AR_K7 7
298 #define AR_RSC 16
299 #define AR_BSP 17
300 #define AR_BSPSTORE 18
301 #define AR_RNAT 19
302 #define AR_UNAT 36
303 #define AR_FPSR 40
304 #define AR_ITC 44
305 #define AR_PFS 64
306 #define AR_LC 65
307
308 static const struct
309 {
310 const char *name;
311 int regnum;
312 }
313 ar[] =
314 {
315 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
316 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
317 {"ar.rsc", 16}, {"ar.bsp", 17},
318 {"ar.bspstore", 18}, {"ar.rnat", 19},
319 {"ar.fcr", 21}, {"ar.eflag", 24},
320 {"ar.csd", 25}, {"ar.ssd", 26},
321 {"ar.cflg", 27}, {"ar.fsr", 28},
322 {"ar.fir", 29}, {"ar.fdr", 30},
323 {"ar.ccv", 32}, {"ar.unat", 36},
324 {"ar.fpsr", 40}, {"ar.itc", 44},
325 {"ar.pfs", 64}, {"ar.lc", 65},
326 {"ar.ec", 66},
327 };
328
329 #define CR_IPSR 16
330 #define CR_ISR 17
331 #define CR_IIP 19
332 #define CR_IFA 20
333 #define CR_ITIR 21
334 #define CR_IIPA 22
335 #define CR_IFS 23
336 #define CR_IIM 24
337 #define CR_IHA 25
338 #define CR_IVR 65
339 #define CR_TPR 66
340 #define CR_EOI 67
341 #define CR_IRR0 68
342 #define CR_IRR3 71
343 #define CR_LRR0 80
344 #define CR_LRR1 81
345
346 /* control registers: */
347 static const struct
348 {
349 const char *name;
350 int regnum;
351 }
352 cr[] =
353 {
354 {"cr.dcr", 0},
355 {"cr.itm", 1},
356 {"cr.iva", 2},
357 {"cr.pta", 8},
358 {"cr.gpta", 9},
359 {"cr.ipsr", 16},
360 {"cr.isr", 17},
361 {"cr.iip", 19},
362 {"cr.ifa", 20},
363 {"cr.itir", 21},
364 {"cr.iipa", 22},
365 {"cr.ifs", 23},
366 {"cr.iim", 24},
367 {"cr.iha", 25},
368 {"cr.lid", 64},
369 {"cr.ivr", 65},
370 {"cr.tpr", 66},
371 {"cr.eoi", 67},
372 {"cr.irr0", 68},
373 {"cr.irr1", 69},
374 {"cr.irr2", 70},
375 {"cr.irr3", 71},
376 {"cr.itv", 72},
377 {"cr.pmv", 73},
378 {"cr.cmcv", 74},
379 {"cr.lrr0", 80},
380 {"cr.lrr1", 81}
381 };
382
383 #define PSR_MFL 4
384 #define PSR_IC 13
385 #define PSR_DFL 18
386 #define PSR_CPL 32
387
388 static const struct const_desc
389 {
390 const char *name;
391 valueT value;
392 }
393 const_bits[] =
394 {
395 /* PSR constant masks: */
396
397 /* 0: reserved */
398 {"psr.be", ((valueT) 1) << 1},
399 {"psr.up", ((valueT) 1) << 2},
400 {"psr.ac", ((valueT) 1) << 3},
401 {"psr.mfl", ((valueT) 1) << 4},
402 {"psr.mfh", ((valueT) 1) << 5},
403 /* 6-12: reserved */
404 {"psr.ic", ((valueT) 1) << 13},
405 {"psr.i", ((valueT) 1) << 14},
406 {"psr.pk", ((valueT) 1) << 15},
407 /* 16: reserved */
408 {"psr.dt", ((valueT) 1) << 17},
409 {"psr.dfl", ((valueT) 1) << 18},
410 {"psr.dfh", ((valueT) 1) << 19},
411 {"psr.sp", ((valueT) 1) << 20},
412 {"psr.pp", ((valueT) 1) << 21},
413 {"psr.di", ((valueT) 1) << 22},
414 {"psr.si", ((valueT) 1) << 23},
415 {"psr.db", ((valueT) 1) << 24},
416 {"psr.lp", ((valueT) 1) << 25},
417 {"psr.tb", ((valueT) 1) << 26},
418 {"psr.rt", ((valueT) 1) << 27},
419 /* 28-31: reserved */
420 /* 32-33: cpl (current privilege level) */
421 {"psr.is", ((valueT) 1) << 34},
422 {"psr.mc", ((valueT) 1) << 35},
423 {"psr.it", ((valueT) 1) << 36},
424 {"psr.id", ((valueT) 1) << 37},
425 {"psr.da", ((valueT) 1) << 38},
426 {"psr.dd", ((valueT) 1) << 39},
427 {"psr.ss", ((valueT) 1) << 40},
428 /* 41-42: ri (restart instruction) */
429 {"psr.ed", ((valueT) 1) << 43},
430 {"psr.bn", ((valueT) 1) << 44},
431 };
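/* Usage sketch (added for illustration): these masks let PSR bits be
   named symbolically in immediate operands, e.g.

       rsm psr.i | psr.ic      // clear PSR.i and PSR.ic
       ssm psr.i               // set PSR.i

   with the assembler substituting the bit values recorded in the table
   above.  */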
432
433 /* indirect register-sets/memory: */
434
435 static const struct
436 {
437 const char *name;
438 int regnum;
439 }
440 indirect_reg[] =
441 {
442 { "CPUID", IND_CPUID },
443 { "cpuid", IND_CPUID },
444 { "dbr", IND_DBR },
445 { "dtr", IND_DTR },
446 { "itr", IND_ITR },
447 { "ibr", IND_IBR },
448 { "msr", IND_MSR },
449 { "pkr", IND_PKR },
450 { "pmc", IND_PMC },
451 { "pmd", IND_PMD },
452 { "rr", IND_RR },
453 };
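/* Usage sketch (added for illustration): these names appear as the
   register-file part of indirect register accesses in the source, e.g.

       mov r16 = pmc[r3]       // read a performance monitor counter
       mov rr[r4] = r17        // write a region register

   The operand parser maps "pmc"/"rr" through this table onto the
   IND_PMC/IND_RR pseudo-registers defined above.  */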
454
455 /* Pseudo functions used to indicate relocation types (these functions
456 start with an at sign (@)). */
457 static struct
458 {
459 const char *name;
460 enum pseudo_type
461 {
462 PSEUDO_FUNC_NONE,
463 PSEUDO_FUNC_RELOC,
464 PSEUDO_FUNC_CONST,
465 PSEUDO_FUNC_REG,
466 PSEUDO_FUNC_FLOAT
467 }
468 type;
469 union
470 {
471 unsigned long ival;
472 symbolS *sym;
473 }
474 u;
475 }
476 pseudo_func[] =
477 {
478 /* reloc pseudo functions (these must come first!): */
479 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
480 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
481 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
482 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
483 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
484 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
485 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
486 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
487 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
488 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
489
490 /* mbtype4 constants: */
491 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
492 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
493 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
494 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
495 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
496
497 /* fclass constants: */
498 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
499 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
500 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
501 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
502 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
503 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
504 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
505 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
506 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
507
508 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
509
510 /* unwind-related constants: */
511 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
512 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
513 { "nt", PSEUDO_FUNC_CONST, { 2 } },
514
515 /* unwind-related registers: */
516 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
517 };
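/* Usage sketch (added, not authoritative): the reloc pseudo functions are
   written with a leading '@' in the assembly source and produce the
   corresponding relocations, e.g.

       addl r14 = @gprel(foo), gp      // gp-relative offset of foo
       addl r15 = @ltoff(bar), gp      // offset of bar's literal-table entry

   ("foo"/"bar" are placeholder symbols.)  The constant and register
   entries (e.g. @nat, @priunat) simply expand to the value or register
   recorded in the table above.  */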
518
519 /* 41-bit nop opcodes (one per unit): */
520 static const bfd_vma nop[IA64_NUM_UNITS] =
521 {
522 0x0000000000LL, /* NIL => break 0 */
523 0x0008000000LL, /* I-unit nop */
524 0x0008000000LL, /* M-unit nop */
525 0x4000000000LL, /* B-unit nop */
526 0x0008000000LL, /* F-unit nop */
527 0x0008000000LL, /* L-"unit" nop */
528 0x0008000000LL, /* X-unit nop */
529 };
530
531 /* Can't be `const' as it's passed to input routines (which have the
532 habit of setting temporary sentinels). */
533 static char special_section_name[][20] =
534 {
535 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
536 {".IA_64.unwind"}, {".IA_64.unwind_info"},
537 {".init_array"}, {".fini_array"}
538 };
539
540 static char *special_linkonce_name[] =
541 {
542 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
543 };
544
545 /* The best template for a particular sequence of up to three
546 instructions: */
547 #define N IA64_NUM_TYPES
548 static unsigned char best_template[N][N][N];
549 #undef N
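/* Added illustrative note: best_template is indexed by the IA64_TYPE_*
   instruction types from opcode/ia64.h for up to three consecutive
   instructions, roughly

       unsigned char t = best_template[IA64_TYPE_M][IA64_TYPE_I][IA64_TYPE_I];

   yielding the preferred bundle template for that sequence; it is
   initialized elsewhere in this file by scoring the legal templates.  */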
550
551 /* Resource dependencies currently in effect */
552 static struct rsrc {
553 int depind; /* dependency index */
554 const struct ia64_dependency *dependency; /* actual dependency */
555 unsigned specific:1, /* is this a specific bit/regno? */
556 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
557 int index; /* specific regno/bit within dependency */
558 int note; /* optional qualifying note (0 if none) */
559 #define STATE_NONE 0
560 #define STATE_STOP 1
561 #define STATE_SRLZ 2
562 int insn_srlz; /* current insn serialization state */
563 int data_srlz; /* current data serialization state */
564 int qp_regno; /* qualifying predicate for this usage */
565 char *file; /* what file marked this dependency */
566 unsigned int line; /* what line marked this dependency */
567 struct mem_offset mem_offset; /* optional memory offset hint */
568 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
569 int path; /* corresponding code entry index */
570 } *regdeps = NULL;
571 static int regdepslen = 0;
572 static int regdepstotlen = 0;
573 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
574 static const char *dv_sem[] = { "none", "implied", "impliedf",
575 "data", "instr", "specific", "stop", "other" };
576 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
577
578 /* Current state of PR mutexation */
579 static struct qpmutex {
580 valueT prmask;
581 int path;
582 } *qp_mutexes = NULL; /* QP mutex bitmasks */
583 static int qp_mutexeslen = 0;
584 static int qp_mutexestotlen = 0;
585 static valueT qp_safe_across_calls = 0;
586
587 /* Current state of PR implications */
588 static struct qp_imply {
589 unsigned p1:6;
590 unsigned p2:6;
591 unsigned p2_branched:1;
592 int path;
593 } *qp_implies = NULL;
594 static int qp_implieslen = 0;
595 static int qp_impliestotlen = 0;
596
597 /* Keep track of static GR values so that indirect register usage can
598 sometimes be tracked. */
599 static struct gr {
600 unsigned known:1;
601 int path;
602 valueT value;
603 } gr_values[128] = {{ 1, 0, 0 }};
604
605 /* These are the routines required to output the various types of
606 unwind records. */
607
608 /* A slot_number is a frag address plus the slot index (0-2). We use the
609 frag address here so that if there is a section switch in the middle of
610 a function, then instructions emitted to a different section are not
611 counted. Since there may be more than one frag for a function, this
612 means we also need to keep track of which frag this address belongs to
613 so we can compute inter-frag distances. This also nicely solves the
614 problem with nops emitted for align directives, which can't easily be
615 counted, but can easily be derived from frag sizes. */
616
617 typedef struct unw_rec_list {
618 unwind_record r;
619 unsigned long slot_number;
620 fragS *slot_frag;
621 struct unw_rec_list *next;
622 } unw_rec_list;
623
624 #define SLOT_NUM_NOT_SET (unsigned)-1
625
626 static struct
627 {
628 unsigned long next_slot_number;
629 fragS *next_slot_frag;
630
631 /* Maintain a list of unwind entries for the current function. */
632 unw_rec_list *list;
633 unw_rec_list *tail;
634
635 /* Any unwind entries that should be attached to the current slot
636 that an insn is being constructed for. */
637 unw_rec_list *current_entry;
638
639 /* These are used to create the unwind table entry for this function. */
640 symbolS *proc_start;
641 symbolS *proc_end;
642 symbolS *info; /* pointer to unwind info */
643 symbolS *personality_routine;
644 segT saved_text_seg;
645 subsegT saved_text_subseg;
646 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
647
648 /* TRUE if processing unwind directives in a prologue region. */
649 int prologue;
650 int prologue_mask;
651 unsigned int prologue_count; /* number of .prologues seen so far */
652 } unwind;
653
654 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
655
656 /* Forward declarations: */
657 static int ar_is_in_integer_unit PARAMS ((int regnum));
658 static void set_section PARAMS ((char *name));
659 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
660 unsigned int, unsigned int));
661 static void dot_radix PARAMS ((int));
662 static void dot_special_section PARAMS ((int));
663 static void dot_proc PARAMS ((int));
664 static void dot_fframe PARAMS ((int));
665 static void dot_vframe PARAMS ((int));
666 static void dot_vframesp PARAMS ((int));
667 static void dot_vframepsp PARAMS ((int));
668 static void dot_save PARAMS ((int));
669 static void dot_restore PARAMS ((int));
670 static void dot_restorereg PARAMS ((int));
671 static void dot_restorereg_p PARAMS ((int));
672 static void dot_handlerdata PARAMS ((int));
673 static void dot_unwentry PARAMS ((int));
674 static void dot_altrp PARAMS ((int));
675 static void dot_savemem PARAMS ((int));
676 static void dot_saveg PARAMS ((int));
677 static void dot_savef PARAMS ((int));
678 static void dot_saveb PARAMS ((int));
679 static void dot_savegf PARAMS ((int));
680 static void dot_spill PARAMS ((int));
681 static void dot_spillreg PARAMS ((int));
682 static void dot_spillmem PARAMS ((int));
683 static void dot_spillreg_p PARAMS ((int));
684 static void dot_spillmem_p PARAMS ((int));
685 static void dot_label_state PARAMS ((int));
686 static void dot_copy_state PARAMS ((int));
687 static void dot_unwabi PARAMS ((int));
688 static void dot_personality PARAMS ((int));
689 static void dot_body PARAMS ((int));
690 static void dot_prologue PARAMS ((int));
691 static void dot_endp PARAMS ((int));
692 static void dot_template PARAMS ((int));
693 static void dot_regstk PARAMS ((int));
694 static void dot_rot PARAMS ((int));
695 static void dot_byteorder PARAMS ((int));
696 static void dot_psr PARAMS ((int));
697 static void dot_alias PARAMS ((int));
698 static void dot_ln PARAMS ((int));
699 static char *parse_section_name PARAMS ((void));
700 static void dot_xdata PARAMS ((int));
701 static void stmt_float_cons PARAMS ((int));
702 static void stmt_cons_ua PARAMS ((int));
703 static void dot_xfloat_cons PARAMS ((int));
704 static void dot_xstringer PARAMS ((int));
705 static void dot_xdata_ua PARAMS ((int));
706 static void dot_xfloat_cons_ua PARAMS ((int));
707 static void print_prmask PARAMS ((valueT mask));
708 static void dot_pred_rel PARAMS ((int));
709 static void dot_reg_val PARAMS ((int));
710 static void dot_dv_mode PARAMS ((int));
711 static void dot_entry PARAMS ((int));
712 static void dot_mem_offset PARAMS ((int));
713 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
714 static symbolS *declare_register PARAMS ((const char *name, int regnum));
715 static void declare_register_set PARAMS ((const char *, int, int));
716 static unsigned int operand_width PARAMS ((enum ia64_opnd));
717 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
718 int index,
719 expressionS *e));
720 static int parse_operand PARAMS ((expressionS *e));
721 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
722 static void build_insn PARAMS ((struct slot *, bfd_vma *));
723 static void emit_one_bundle PARAMS ((void));
724 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
725 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
726 bfd_reloc_code_real_type r_type));
727 static void insn_group_break PARAMS ((int, int, int));
728 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
729 struct rsrc *, int depind, int path));
730 static void add_qp_mutex PARAMS((valueT mask));
731 static void add_qp_imply PARAMS((int p1, int p2));
732 static void clear_qp_branch_flag PARAMS((valueT mask));
733 static void clear_qp_mutex PARAMS((valueT mask));
734 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
735 static void clear_register_values PARAMS ((void));
736 static void print_dependency PARAMS ((const char *action, int depind));
737 static void instruction_serialization PARAMS ((void));
738 static void data_serialization PARAMS ((void));
739 static void remove_marked_resource PARAMS ((struct rsrc *));
740 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
741 static int is_taken_branch PARAMS ((struct ia64_opcode *));
742 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
743 static int depends_on PARAMS ((int, struct ia64_opcode *));
744 static int specify_resource PARAMS ((const struct ia64_dependency *,
745 struct ia64_opcode *, int, struct rsrc [], int, int));
746 static int check_dv PARAMS((struct ia64_opcode *idesc));
747 static void check_dependencies PARAMS((struct ia64_opcode *));
748 static void mark_resources PARAMS((struct ia64_opcode *));
749 static void update_dependencies PARAMS((struct ia64_opcode *));
750 static void note_register_values PARAMS((struct ia64_opcode *));
751 static int qp_mutex PARAMS ((int, int, int));
752 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
753 static void output_vbyte_mem PARAMS ((int, char *, char *));
754 static void count_output PARAMS ((int, char *, char *));
755 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
756 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
757 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
758 static void output_P1_format PARAMS ((vbyte_func, int));
759 static void output_P2_format PARAMS ((vbyte_func, int, int));
760 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
761 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
762 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
763 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
764 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
765 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
766 static void output_P9_format PARAMS ((vbyte_func, int, int));
767 static void output_P10_format PARAMS ((vbyte_func, int, int));
768 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
769 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
770 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
771 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
772 static char format_ab_reg PARAMS ((int, int));
773 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
774 unsigned long));
775 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
776 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
777 unsigned long));
778 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
779 static void free_list_records PARAMS ((unw_rec_list *));
780 static unw_rec_list *output_prologue PARAMS ((void));
781 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
782 static unw_rec_list *output_body PARAMS ((void));
783 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
784 static unw_rec_list *output_mem_stack_v PARAMS ((void));
785 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
786 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
787 static unw_rec_list *output_rp_when PARAMS ((void));
788 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
789 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
790 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
791 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
792 static unw_rec_list *output_pfs_when PARAMS ((void));
793 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
794 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
795 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
796 static unw_rec_list *output_preds_when PARAMS ((void));
797 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
798 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
799 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
800 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
801 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
802 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
803 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
804 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
805 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
806 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
807 static unw_rec_list *output_unat_when PARAMS ((void));
808 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
809 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
810 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
811 static unw_rec_list *output_lc_when PARAMS ((void));
812 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
813 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
814 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
815 static unw_rec_list *output_fpsr_when PARAMS ((void));
816 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
817 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
818 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
819 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
820 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
821 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
822 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
823 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
824 static unw_rec_list *output_bsp_when PARAMS ((void));
825 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
826 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
827 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
828 static unw_rec_list *output_bspstore_when PARAMS ((void));
829 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
830 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
831 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
832 static unw_rec_list *output_rnat_when PARAMS ((void));
833 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
834 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
835 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
836 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
837 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
838 static unw_rec_list *output_label_state PARAMS ((unsigned long));
839 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
840 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
841 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
842 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
843 unsigned int));
844 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
845 unsigned int));
846 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
847 unsigned int));
848 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
849 unsigned int, unsigned int));
850 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
851 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
852 static int calc_record_size PARAMS ((unw_rec_list *));
853 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
854 static int count_bits PARAMS ((unsigned long));
855 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
856 unsigned long, fragS *));
857 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
858 static void fixup_unw_records PARAMS ((unw_rec_list *));
859 static int output_unw_records PARAMS ((unw_rec_list *, void **));
860 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
861 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
862 static int generate_unwind_image PARAMS ((const char *));
863
864 /* Build the unwind section name by appending the (possibly stripped)
865 text section NAME to the unwind PREFIX. The resulting string
866 pointer is assigned to RESULT. The string is allocated on the
867 stack, so this must be a macro... */
868 #define make_unw_section_name(special, text_name, result) \
869 do { \
870 char *_prefix = special_section_name[special]; \
871 char *_suffix = text_name; \
872 size_t _prefix_len, _suffix_len; \
873 char *_result; \
874 if (strncmp (text_name, ".gnu.linkonce.t.", \
875 sizeof (".gnu.linkonce.t.") - 1) == 0) \
876 { \
877 _prefix = special_linkonce_name[special - SPECIAL_SECTION_UNWIND]; \
878 _suffix += sizeof (".gnu.linkonce.t.") - 1; \
879 } \
880 _prefix_len = strlen (_prefix), _suffix_len = strlen (_suffix); \
881 _result = alloca (_prefix_len + _suffix_len + 1); \
882 memcpy (_result, _prefix, _prefix_len); \
883 memcpy (_result + _prefix_len, _suffix, _suffix_len); \
884 _result[_prefix_len + _suffix_len] = '\0'; \
885 result = _result; \
886 } \
887 while (0)
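/* Worked example (added, not part of the original): for a linkonce text
   section the ".gnu.linkonce.t." part is stripped and the linkonce unwind
   prefix substituted, so

       make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO,
                              ".gnu.linkonce.t.foo", name);

   leaves name pointing at ".gnu.linkonce.ia64unwi.foo", while an ordinary
   section such as ".text.foo" would yield ".IA_64.unwind_info.text.foo"
   ("foo" is a placeholder).  */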
888
889 /* Determine if application register REGNUM resides in the integer
890 unit (as opposed to the memory unit). */
891 static int
892 ar_is_in_integer_unit (reg)
893 int reg;
894 {
895 reg -= REG_AR;
896
897 return (reg == 64 /* pfs */
898 || reg == 65 /* lc */
899 || reg == 66 /* ec */
900 /* ??? ias accepts and puts these in the integer unit. */
901 || (reg >= 112 && reg <= 127));
902 }
903
904 /* Switch to section NAME and create section if necessary. It's
905 rather ugly that we have to manipulate input_line_pointer but I
906 don't see any other way to accomplish the same thing without
907 changing obj-elf.c (which may be the Right Thing, in the end). */
908 static void
909 set_section (name)
910 char *name;
911 {
912 char *saved_input_line_pointer;
913
914 saved_input_line_pointer = input_line_pointer;
915 input_line_pointer = name;
916 obj_elf_section (0);
917 input_line_pointer = saved_input_line_pointer;
918 }
919
920 /* Map 's' to SHF_IA_64_SHORT. */
921
922 int
923 ia64_elf_section_letter (letter, ptr_msg)
924 int letter;
925 char **ptr_msg;
926 {
927 if (letter == 's')
928 return SHF_IA_64_SHORT;
929
930 *ptr_msg = _("Bad .section directive: want a,s,w,x,M,S in string");
931 return 0;
932 }
933
934 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
935
936 flagword
937 ia64_elf_section_flags (flags, attr, type)
938 flagword flags;
939 int attr, type ATTRIBUTE_UNUSED;
940 {
941 if (attr & SHF_IA_64_SHORT)
942 flags |= SEC_SMALL_DATA;
943 return flags;
944 }
945
946 int
947 ia64_elf_section_type (str, len)
948 const char *str;
949 size_t len;
950 {
951 len = sizeof (ELF_STRING_ia64_unwind_info) - 1;
952 if (strncmp (str, ELF_STRING_ia64_unwind_info, len) == 0)
953 return SHT_PROGBITS;
954
955 len = sizeof (ELF_STRING_ia64_unwind_info_once) - 1;
956 if (strncmp (str, ELF_STRING_ia64_unwind_info_once, len) == 0)
957 return SHT_PROGBITS;
958
959 len = sizeof (ELF_STRING_ia64_unwind) - 1;
960 if (strncmp (str, ELF_STRING_ia64_unwind, len) == 0)
961 return SHT_IA_64_UNWIND;
962
963 len = sizeof (ELF_STRING_ia64_unwind_once) - 1;
964 if (strncmp (str, ELF_STRING_ia64_unwind_once, len) == 0)
965 return SHT_IA_64_UNWIND;
966
967 return -1;
968 }
969
970 static unsigned int
971 set_regstack (ins, locs, outs, rots)
972 unsigned int ins, locs, outs, rots;
973 {
974 /* Size of frame. */
975 unsigned int sof;
976
977 sof = ins + locs + outs;
978 if (sof > 96)
979 {
980 as_bad ("Size of frame exceeds maximum of 96 registers");
981 return 0;
982 }
983 if (rots > sof)
984 {
985 as_warn ("Size of rotating registers exceeds frame size");
986 return 0;
987 }
988 md.in.base = REG_GR + 32;
989 md.loc.base = md.in.base + ins;
990 md.out.base = md.loc.base + locs;
991
992 md.in.num_regs = ins;
993 md.loc.num_regs = locs;
994 md.out.num_regs = outs;
995 md.rot.num_regs = rots;
996 return sof;
997 }
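/* Worked example (added for illustration): a ".regstk 2, 3, 4, 0"
   directive ends up calling set_regstack (2, 3, 4, 0), giving

       sof = 9
       in  -> r32..r33   (base = REG_GR + 32, num_regs = 2)
       loc -> r34..r36   (base = REG_GR + 34, num_regs = 3)
       out -> r37..r40   (base = REG_GR + 37, num_regs = 4)

   presumably so that operands written as in0, loc1, out2, etc. can be
   resolved against these bases via the dynreg machinery above.  */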
998
999 void
1000 ia64_flush_insns ()
1001 {
1002 struct label_fix *lfix;
1003 segT saved_seg;
1004 subsegT saved_subseg;
1005 unw_rec_list *ptr;
1006
1007 if (!md.last_text_seg)
1008 return;
1009
1010 saved_seg = now_seg;
1011 saved_subseg = now_subseg;
1012
1013 subseg_set (md.last_text_seg, 0);
1014
1015 while (md.num_slots_in_use > 0)
1016 emit_one_bundle (); /* force out queued instructions */
1017
1018 /* In case there are labels following the last instruction, resolve
1019 those now: */
1020 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1021 {
1022 S_SET_VALUE (lfix->sym, frag_now_fix ());
1023 symbol_set_frag (lfix->sym, frag_now);
1024 }
1025 CURR_SLOT.label_fixups = 0;
1026 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1027 {
1028 S_SET_VALUE (lfix->sym, frag_now_fix ());
1029 symbol_set_frag (lfix->sym, frag_now);
1030 }
1031 CURR_SLOT.tag_fixups = 0;
1032
1033 /* In case there are unwind directives following the last instruction,
1034 resolve those now. We only handle body and prologue directives here.
1035 Give an error for others. */
1036 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1037 {
1038 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
1039 || ptr->r.type == body)
1040 {
1041 ptr->slot_number = (unsigned long) frag_more (0);
1042 ptr->slot_frag = frag_now;
1043 }
1044 else
1045 as_bad (_("Unwind directive not followed by an instruction."));
1046 }
1047 unwind.current_entry = NULL;
1048
1049 subseg_set (saved_seg, saved_subseg);
1050
1051 if (md.qp.X_op == O_register)
1052 as_bad ("qualifying predicate not followed by instruction");
1053 }
1054
1055 void
1056 ia64_do_align (nbytes)
1057 int nbytes;
1058 {
1059 char *saved_input_line_pointer = input_line_pointer;
1060
1061 input_line_pointer = "";
1062 s_align_bytes (nbytes);
1063 input_line_pointer = saved_input_line_pointer;
1064 }
1065
1066 void
1067 ia64_cons_align (nbytes)
1068 int nbytes;
1069 {
1070 if (md.auto_align)
1071 {
1072 char *saved_input_line_pointer = input_line_pointer;
1073 input_line_pointer = "";
1074 s_align_bytes (nbytes);
1075 input_line_pointer = saved_input_line_pointer;
1076 }
1077 }
1078
1079 /* Output COUNT bytes to a memory location. */
1080 static unsigned char *vbyte_mem_ptr = NULL;
1081
1082 void
1083 output_vbyte_mem (count, ptr, comment)
1084 int count;
1085 char *ptr;
1086 char *comment ATTRIBUTE_UNUSED;
1087 {
1088 int x;
1089 if (vbyte_mem_ptr == NULL)
1090 abort ();
1091
1092 if (count == 0)
1093 return;
1094 for (x = 0; x < count; x++)
1095 *(vbyte_mem_ptr++) = ptr[x];
1096 }
1097
1098 /* Count the number of bytes required for records. */
1099 static int vbyte_count = 0;
1100 void
1101 count_output (count, ptr, comment)
1102 int count;
1103 char *ptr ATTRIBUTE_UNUSED;
1104 char *comment ATTRIBUTE_UNUSED;
1105 {
1106 vbyte_count += count;
1107 }
1108
1109 static void
1110 output_R1_format (f, rtype, rlen)
1111 vbyte_func f;
1112 unw_record_type rtype;
1113 int rlen;
1114 {
1115 int r = 0;
1116 char byte;
1117 if (rlen > 0x1f)
1118 {
1119 output_R3_format (f, rtype, rlen);
1120 return;
1121 }
1122
1123 if (rtype == body)
1124 r = 1;
1125 else if (rtype != prologue)
1126 as_bad ("record type is not valid");
1127
1128 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1129 (*f) (1, &byte, NULL);
1130 }
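/* Added worked example: an R1 record packs the region type and length
   into a single byte, UNW_R1 | (r << 5) | rlen, so a body region of
   length 5 is emitted as the byte (UNW_R1 | 0x25) and a prologue region
   of length 3 as (UNW_R1 | 0x03); regions longer than 0x1f fall through
   to the long-form R3 record (output_R3_format below).  */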
1131
1132 static void
1133 output_R2_format (f, mask, grsave, rlen)
1134 vbyte_func f;
1135 int mask, grsave;
1136 unsigned long rlen;
1137 {
1138 char bytes[20];
1139 int count = 2;
1140 mask = (mask & 0x0f);
1141 grsave = (grsave & 0x7f);
1142
1143 bytes[0] = (UNW_R2 | (mask >> 1));
1144 bytes[1] = (((mask & 0x01) << 7) | grsave);
1145 count += output_leb128 (bytes + 2, rlen, 0);
1146 (*f) (count, bytes, NULL);
1147 }
1148
1149 static void
1150 output_R3_format (f, rtype, rlen)
1151 vbyte_func f;
1152 unw_record_type rtype;
1153 unsigned long rlen;
1154 {
1155 int r = 0, count;
1156 char bytes[20];
1157 if (rlen <= 0x1f)
1158 {
1159 output_R1_format (f, rtype, rlen);
1160 return;
1161 }
1162
1163 if (rtype == body)
1164 r = 1;
1165 else if (rtype != prologue)
1166 as_bad ("record type is not valid");
1167 bytes[0] = (UNW_R3 | r);
1168 count = output_leb128 (bytes + 1, rlen, 0);
1169 (*f) (count + 1, bytes, NULL);
1170 }
1171
1172 static void
1173 output_P1_format (f, brmask)
1174 vbyte_func f;
1175 int brmask;
1176 {
1177 char byte;
1178 byte = UNW_P1 | (brmask & 0x1f);
1179 (*f) (1, &byte, NULL);
1180 }
1181
1182 static void
1183 output_P2_format (f, brmask, gr)
1184 vbyte_func f;
1185 int brmask;
1186 int gr;
1187 {
1188 char bytes[2];
1189 brmask = (brmask & 0x1f);
1190 bytes[0] = UNW_P2 | (brmask >> 1);
1191 bytes[1] = (((brmask & 1) << 7) | gr);
1192 (*f) (2, bytes, NULL);
1193 }
1194
1195 static void
1196 output_P3_format (f, rtype, reg)
1197 vbyte_func f;
1198 unw_record_type rtype;
1199 int reg;
1200 {
1201 char bytes[2];
1202 int r = 0;
1203 reg = (reg & 0x7f);
1204 switch (rtype)
1205 {
1206 case psp_gr:
1207 r = 0;
1208 break;
1209 case rp_gr:
1210 r = 1;
1211 break;
1212 case pfs_gr:
1213 r = 2;
1214 break;
1215 case preds_gr:
1216 r = 3;
1217 break;
1218 case unat_gr:
1219 r = 4;
1220 break;
1221 case lc_gr:
1222 r = 5;
1223 break;
1224 case rp_br:
1225 r = 6;
1226 break;
1227 case rnat_gr:
1228 r = 7;
1229 break;
1230 case bsp_gr:
1231 r = 8;
1232 break;
1233 case bspstore_gr:
1234 r = 9;
1235 break;
1236 case fpsr_gr:
1237 r = 10;
1238 break;
1239 case priunat_gr:
1240 r = 11;
1241 break;
1242 default:
1243 as_bad ("Invalid record type for P3 format.");
1244 }
1245 bytes[0] = (UNW_P3 | (r >> 1));
1246 bytes[1] = (((r & 1) << 7) | reg);
1247 (*f) (2, bytes, NULL);
1248 }
1249
1250 static void
1251 output_P4_format (f, imask, imask_size)
1252 vbyte_func f;
1253 unsigned char *imask;
1254 unsigned long imask_size;
1255 {
1256 imask[0] = UNW_P4;
1257 (*f) (imask_size, imask, NULL);
1258 }
1259
1260 static void
1261 output_P5_format (f, grmask, frmask)
1262 vbyte_func f;
1263 int grmask;
1264 unsigned long frmask;
1265 {
1266 char bytes[4];
1267 grmask = (grmask & 0x0f);
1268
1269 bytes[0] = UNW_P5;
1270 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1271 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1272 bytes[3] = (frmask & 0x000000ff);
1273 (*f) (4, bytes, NULL);
1274 }
1275
1276 static void
1277 output_P6_format (f, rtype, rmask)
1278 vbyte_func f;
1279 unw_record_type rtype;
1280 int rmask;
1281 {
1282 char byte;
1283 int r = 0;
1284
1285 if (rtype == gr_mem)
1286 r = 1;
1287 else if (rtype != fr_mem)
1288 as_bad ("Invalid record type for format P6");
1289 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1290 (*f) (1, &byte, NULL);
1291 }
1292
1293 static void
1294 output_P7_format (f, rtype, w1, w2)
1295 vbyte_func f;
1296 unw_record_type rtype;
1297 unsigned long w1;
1298 unsigned long w2;
1299 {
1300 char bytes[20];
1301 int count = 1;
1302 int r = 0;
1303 count += output_leb128 (bytes + 1, w1, 0);
1304 switch (rtype)
1305 {
1306 case mem_stack_f:
1307 r = 0;
1308 count += output_leb128 (bytes + count, w2 >> 4, 0);
1309 break;
1310 case mem_stack_v:
1311 r = 1;
1312 break;
1313 case spill_base:
1314 r = 2;
1315 break;
1316 case psp_sprel:
1317 r = 3;
1318 break;
1319 case rp_when:
1320 r = 4;
1321 break;
1322 case rp_psprel:
1323 r = 5;
1324 break;
1325 case pfs_when:
1326 r = 6;
1327 break;
1328 case pfs_psprel:
1329 r = 7;
1330 break;
1331 case preds_when:
1332 r = 8;
1333 break;
1334 case preds_psprel:
1335 r = 9;
1336 break;
1337 case lc_when:
1338 r = 10;
1339 break;
1340 case lc_psprel:
1341 r = 11;
1342 break;
1343 case unat_when:
1344 r = 12;
1345 break;
1346 case unat_psprel:
1347 r = 13;
1348 break;
1349 case fpsr_when:
1350 r = 14;
1351 break;
1352 case fpsr_psprel:
1353 r = 15;
1354 break;
1355 default:
1356 break;
1357 }
1358 bytes[0] = (UNW_P7 | r);
1359 (*f) (count, bytes, NULL);
1360 }
1361
1362 static void
1363 output_P8_format (f, rtype, t)
1364 vbyte_func f;
1365 unw_record_type rtype;
1366 unsigned long t;
1367 {
1368 char bytes[20];
1369 int r = 0;
1370 int count = 2;
1371 bytes[0] = UNW_P8;
1372 switch (rtype)
1373 {
1374 case rp_sprel:
1375 r = 1;
1376 break;
1377 case pfs_sprel:
1378 r = 2;
1379 break;
1380 case preds_sprel:
1381 r = 3;
1382 break;
1383 case lc_sprel:
1384 r = 4;
1385 break;
1386 case unat_sprel:
1387 r = 5;
1388 break;
1389 case fpsr_sprel:
1390 r = 6;
1391 break;
1392 case bsp_when:
1393 r = 7;
1394 break;
1395 case bsp_psprel:
1396 r = 8;
1397 break;
1398 case bsp_sprel:
1399 r = 9;
1400 break;
1401 case bspstore_when:
1402 r = 10;
1403 break;
1404 case bspstore_psprel:
1405 r = 11;
1406 break;
1407 case bspstore_sprel:
1408 r = 12;
1409 break;
1410 case rnat_when:
1411 r = 13;
1412 break;
1413 case rnat_psprel:
1414 r = 14;
1415 break;
1416 case rnat_sprel:
1417 r = 15;
1418 break;
1419 case priunat_when_gr:
1420 r = 16;
1421 break;
1422 case priunat_psprel:
1423 r = 17;
1424 break;
1425 case priunat_sprel:
1426 r = 18;
1427 break;
1428 case priunat_when_mem:
1429 r = 19;
1430 break;
1431 default:
1432 break;
1433 }
1434 bytes[1] = r;
1435 count += output_leb128 (bytes + 2, t, 0);
1436 (*f) (count, bytes, NULL);
1437 }
1438
1439 static void
1440 output_P9_format (f, grmask, gr)
1441 vbyte_func f;
1442 int grmask;
1443 int gr;
1444 {
1445 char bytes[3];
1446 bytes[0] = UNW_P9;
1447 bytes[1] = (grmask & 0x0f);
1448 bytes[2] = (gr & 0x7f);
1449 (*f) (3, bytes, NULL);
1450 }
1451
1452 static void
1453 output_P10_format (f, abi, context)
1454 vbyte_func f;
1455 int abi;
1456 int context;
1457 {
1458 char bytes[3];
1459 bytes[0] = UNW_P10;
1460 bytes[1] = (abi & 0xff);
1461 bytes[2] = (context & 0xff);
1462 (*f) (3, bytes, NULL);
1463 }
1464
1465 static void
1466 output_B1_format (f, rtype, label)
1467 vbyte_func f;
1468 unw_record_type rtype;
1469 unsigned long label;
1470 {
1471 char byte;
1472 int r = 0;
1473 if (label > 0x1f)
1474 {
1475 output_B4_format (f, rtype, label);
1476 return;
1477 }
1478 if (rtype == copy_state)
1479 r = 1;
1480 else if (rtype != label_state)
1481 as_bad ("Invalid record type for format B1");
1482
1483 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1484 (*f) (1, &byte, NULL);
1485 }
1486
1487 static void
1488 output_B2_format (f, ecount, t)
1489 vbyte_func f;
1490 unsigned long ecount;
1491 unsigned long t;
1492 {
1493 char bytes[20];
1494 int count = 1;
1495 if (ecount > 0x1f)
1496 {
1497 output_B3_format (f, ecount, t);
1498 return;
1499 }
1500 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1501 count += output_leb128 (bytes + 1, t, 0);
1502 (*f) (count, bytes, NULL);
1503 }
1504
1505 static void
1506 output_B3_format (f, ecount, t)
1507 vbyte_func f;
1508 unsigned long ecount;
1509 unsigned long t;
1510 {
1511 char bytes[20];
1512 int count = 1;
1513 if (ecount <= 0x1f)
1514 {
1515 output_B2_format (f, ecount, t);
1516 return;
1517 }
1518 bytes[0] = UNW_B3;
1519 count += output_leb128 (bytes + 1, t, 0);
1520 count += output_leb128 (bytes + count, ecount, 0);
1521 (*f) (count, bytes, NULL);
1522 }
1523
1524 static void
1525 output_B4_format (f, rtype, label)
1526 vbyte_func f;
1527 unw_record_type rtype;
1528 unsigned long label;
1529 {
1530 char bytes[20];
1531 int r = 0;
1532 int count = 1;
1533 if (label <= 0x1f)
1534 {
1535 output_B1_format (f, rtype, label);
1536 return;
1537 }
1538
1539 if (rtype == copy_state)
1540 r = 1;
1541 else if (rtype != label_state)
1542 as_bad ("Invalid record type for format B4");
1543
1544 bytes[0] = (UNW_B4 | (r << 3));
1545 count += output_leb128 (bytes + 1, label, 0);
1546 (*f) (count, bytes, NULL);
1547 }
1548
1549 static char
1550 format_ab_reg (ab, reg)
1551 int ab;
1552 int reg;
1553 {
1554 int ret;
1555 ab = (ab & 3);
1556 reg = (reg & 0x1f);
1557 ret = (ab << 5) | reg;
1558 return ret;
1559 }
1560
1561 static void
1562 output_X1_format (f, rtype, ab, reg, t, w1)
1563 vbyte_func f;
1564 unw_record_type rtype;
1565 int ab, reg;
1566 unsigned long t;
1567 unsigned long w1;
1568 {
1569 char bytes[20];
1570 int r = 0;
1571 int count = 2;
1572 bytes[0] = UNW_X1;
1573
1574 if (rtype == spill_sprel)
1575 r = 1;
1576 else if (rtype != spill_psprel)
1577 as_bad ("Invalid record type for format X1");
1578 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1579 count += output_leb128 (bytes + 2, t, 0);
1580 count += output_leb128 (bytes + count, w1, 0);
1581 (*f) (count, bytes, NULL);
1582 }
1583
1584 static void
1585 output_X2_format (f, ab, reg, x, y, treg, t)
1586 vbyte_func f;
1587 int ab, reg;
1588 int x, y, treg;
1589 unsigned long t;
1590 {
1591 char bytes[20];
1592 int count = 3;
1593 bytes[0] = UNW_X2;
1594 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1595 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1596 count += output_leb128 (bytes + 3, t, 0);
1597 (*f) (count, bytes, NULL);
1598 }
1599
1600 static void
1601 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1602 vbyte_func f;
1603 unw_record_type rtype;
1604 int qp;
1605 int ab, reg;
1606 unsigned long t;
1607 unsigned long w1;
1608 {
1609 char bytes[20];
1610 int r = 0;
1611 int count = 3;
1612 bytes[0] = UNW_X3;
1613
1614 if (rtype == spill_sprel_p)
1615 r = 1;
1616 else if (rtype != spill_psprel_p)
1617 as_bad ("Invalid record type for format X3");
1618 bytes[1] = ((r << 7) | (qp & 0x3f));
1619 bytes[2] = format_ab_reg (ab, reg);
1620 count += output_leb128 (bytes + 3, t, 0);
1621 count += output_leb128 (bytes + count, w1, 0);
1622 (*f) (count, bytes, NULL);
1623 }
1624
1625 static void
1626 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1627 vbyte_func f;
1628 int qp;
1629 int ab, reg;
1630 int x, y, treg;
1631 unsigned long t;
1632 {
1633 char bytes[20];
1634 int count = 4;
1635 bytes[0] = UNW_X4;
1636 bytes[1] = (qp & 0x3f);
1637 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1638 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1639 count += output_leb128 (bytes + 4, t, 0);
1640 (*f) (count, bytes, NULL);
1641 }
1642
1643 /* This function allocates a record list structure and initializes its fields. */
1644
1645 static unw_rec_list *
1646 alloc_record (unw_record_type t)
1647 {
1648 unw_rec_list *ptr;
1649 ptr = xmalloc (sizeof (*ptr));
1650 ptr->next = NULL;
1651 ptr->slot_number = SLOT_NUM_NOT_SET;
1652 ptr->r.type = t;
1653 return ptr;
1654 }
1655
1656 /* This function frees an entire list of record structures. */
1657
1658 void
1659 free_list_records (unw_rec_list *first)
1660 {
1661 unw_rec_list *ptr;
1662 for (ptr = first; ptr != NULL;)
1663 {
1664 unw_rec_list *tmp = ptr;
1665
1666 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1667 && tmp->r.record.r.mask.i)
1668 free (tmp->r.record.r.mask.i);
1669
1670 ptr = ptr->next;
1671 free (tmp);
1672 }
1673 }
1674
1675 static unw_rec_list *
1676 output_prologue ()
1677 {
1678 unw_rec_list *ptr = alloc_record (prologue);
1679 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1680 return ptr;
1681 }
1682
1683 static unw_rec_list *
1684 output_prologue_gr (saved_mask, reg)
1685 unsigned int saved_mask;
1686 unsigned int reg;
1687 {
1688 unw_rec_list *ptr = alloc_record (prologue_gr);
1689 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1690 ptr->r.record.r.grmask = saved_mask;
1691 ptr->r.record.r.grsave = reg;
1692 return ptr;
1693 }
1694
1695 static unw_rec_list *
1696 output_body ()
1697 {
1698 unw_rec_list *ptr = alloc_record (body);
1699 return ptr;
1700 }
1701
1702 static unw_rec_list *
1703 output_mem_stack_f (size)
1704 unsigned int size;
1705 {
1706 unw_rec_list *ptr = alloc_record (mem_stack_f);
1707 ptr->r.record.p.size = size;
1708 return ptr;
1709 }
1710
1711 static unw_rec_list *
1712 output_mem_stack_v ()
1713 {
1714 unw_rec_list *ptr = alloc_record (mem_stack_v);
1715 return ptr;
1716 }
1717
1718 static unw_rec_list *
1719 output_psp_gr (gr)
1720 unsigned int gr;
1721 {
1722 unw_rec_list *ptr = alloc_record (psp_gr);
1723 ptr->r.record.p.gr = gr;
1724 return ptr;
1725 }
1726
1727 static unw_rec_list *
1728 output_psp_sprel (offset)
1729 unsigned int offset;
1730 {
1731 unw_rec_list *ptr = alloc_record (psp_sprel);
1732 ptr->r.record.p.spoff = offset / 4;
1733 return ptr;
1734 }
1735
1736 static unw_rec_list *
1737 output_rp_when ()
1738 {
1739 unw_rec_list *ptr = alloc_record (rp_when);
1740 return ptr;
1741 }
1742
1743 static unw_rec_list *
1744 output_rp_gr (gr)
1745 unsigned int gr;
1746 {
1747 unw_rec_list *ptr = alloc_record (rp_gr);
1748 ptr->r.record.p.gr = gr;
1749 return ptr;
1750 }
1751
1752 static unw_rec_list *
1753 output_rp_br (br)
1754 unsigned int br;
1755 {
1756 unw_rec_list *ptr = alloc_record (rp_br);
1757 ptr->r.record.p.br = br;
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_rp_psprel (offset)
1763 unsigned int offset;
1764 {
1765 unw_rec_list *ptr = alloc_record (rp_psprel);
1766 ptr->r.record.p.pspoff = offset / 4;
1767 return ptr;
1768 }
1769
1770 static unw_rec_list *
1771 output_rp_sprel (offset)
1772 unsigned int offset;
1773 {
1774 unw_rec_list *ptr = alloc_record (rp_sprel);
1775 ptr->r.record.p.spoff = offset / 4;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_pfs_when ()
1781 {
1782 unw_rec_list *ptr = alloc_record (pfs_when);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_pfs_gr (gr)
1788 unsigned int gr;
1789 {
1790 unw_rec_list *ptr = alloc_record (pfs_gr);
1791 ptr->r.record.p.gr = gr;
1792 return ptr;
1793 }
1794
1795 static unw_rec_list *
1796 output_pfs_psprel (offset)
1797 unsigned int offset;
1798 {
1799 unw_rec_list *ptr = alloc_record (pfs_psprel);
1800 ptr->r.record.p.pspoff = offset / 4;
1801 return ptr;
1802 }
1803
1804 static unw_rec_list *
1805 output_pfs_sprel (offset)
1806 unsigned int offset;
1807 {
1808 unw_rec_list *ptr = alloc_record (pfs_sprel);
1809 ptr->r.record.p.spoff = offset / 4;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_preds_when ()
1815 {
1816 unw_rec_list *ptr = alloc_record (preds_when);
1817 return ptr;
1818 }
1819
1820 static unw_rec_list *
1821 output_preds_gr (gr)
1822 unsigned int gr;
1823 {
1824 unw_rec_list *ptr = alloc_record (preds_gr);
1825 ptr->r.record.p.gr = gr;
1826 return ptr;
1827 }
1828
1829 static unw_rec_list *
1830 output_preds_psprel (offset)
1831 unsigned int offset;
1832 {
1833 unw_rec_list *ptr = alloc_record (preds_psprel);
1834 ptr->r.record.p.pspoff = offset / 4;
1835 return ptr;
1836 }
1837
1838 static unw_rec_list *
1839 output_preds_sprel (offset)
1840 unsigned int offset;
1841 {
1842 unw_rec_list *ptr = alloc_record (preds_sprel);
1843 ptr->r.record.p.spoff = offset / 4;
1844 return ptr;
1845 }
1846
1847 static unw_rec_list *
1848 output_fr_mem (mask)
1849 unsigned int mask;
1850 {
1851 unw_rec_list *ptr = alloc_record (fr_mem);
1852 ptr->r.record.p.rmask = mask;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_frgr_mem (gr_mask, fr_mask)
1858 unsigned int gr_mask;
1859 unsigned int fr_mask;
1860 {
1861 unw_rec_list *ptr = alloc_record (frgr_mem);
1862 ptr->r.record.p.grmask = gr_mask;
1863 ptr->r.record.p.frmask = fr_mask;
1864 return ptr;
1865 }
1866
1867 static unw_rec_list *
1868 output_gr_gr (mask, reg)
1869 unsigned int mask;
1870 unsigned int reg;
1871 {
1872 unw_rec_list *ptr = alloc_record (gr_gr);
1873 ptr->r.record.p.grmask = mask;
1874 ptr->r.record.p.gr = reg;
1875 return ptr;
1876 }
1877
1878 static unw_rec_list *
1879 output_gr_mem (mask)
1880 unsigned int mask;
1881 {
1882 unw_rec_list *ptr = alloc_record (gr_mem);
1883 ptr->r.record.p.rmask = mask;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_br_mem (unsigned int mask)
1889 {
1890 unw_rec_list *ptr = alloc_record (br_mem);
1891 ptr->r.record.p.brmask = mask;
1892 return ptr;
1893 }
1894
1895 static unw_rec_list *
1896 output_br_gr (save_mask, reg)
1897 unsigned int save_mask;
1898 unsigned int reg;
1899 {
1900 unw_rec_list *ptr = alloc_record (br_gr);
1901 ptr->r.record.p.brmask = save_mask;
1902 ptr->r.record.p.gr = reg;
1903 return ptr;
1904 }
1905
1906 static unw_rec_list *
1907 output_spill_base (offset)
1908 unsigned int offset;
1909 {
1910 unw_rec_list *ptr = alloc_record (spill_base);
1911 ptr->r.record.p.pspoff = offset / 4;
1912 return ptr;
1913 }
1914
1915 static unw_rec_list *
1916 output_unat_when ()
1917 {
1918 unw_rec_list *ptr = alloc_record (unat_when);
1919 return ptr;
1920 }
1921
1922 static unw_rec_list *
1923 output_unat_gr (gr)
1924 unsigned int gr;
1925 {
1926 unw_rec_list *ptr = alloc_record (unat_gr);
1927 ptr->r.record.p.gr = gr;
1928 return ptr;
1929 }
1930
1931 static unw_rec_list *
1932 output_unat_psprel (offset)
1933 unsigned int offset;
1934 {
1935 unw_rec_list *ptr = alloc_record (unat_psprel);
1936 ptr->r.record.p.pspoff = offset / 4;
1937 return ptr;
1938 }
1939
1940 static unw_rec_list *
1941 output_unat_sprel (offset)
1942 unsigned int offset;
1943 {
1944 unw_rec_list *ptr = alloc_record (unat_sprel);
1945 ptr->r.record.p.spoff = offset / 4;
1946 return ptr;
1947 }
1948
1949 static unw_rec_list *
1950 output_lc_when ()
1951 {
1952 unw_rec_list *ptr = alloc_record (lc_when);
1953 return ptr;
1954 }
1955
1956 static unw_rec_list *
1957 output_lc_gr (gr)
1958 unsigned int gr;
1959 {
1960 unw_rec_list *ptr = alloc_record (lc_gr);
1961 ptr->r.record.p.gr = gr;
1962 return ptr;
1963 }
1964
1965 static unw_rec_list *
1966 output_lc_psprel (offset)
1967 unsigned int offset;
1968 {
1969 unw_rec_list *ptr = alloc_record (lc_psprel);
1970 ptr->r.record.p.pspoff = offset / 4;
1971 return ptr;
1972 }
1973
1974 static unw_rec_list *
1975 output_lc_sprel (offset)
1976 unsigned int offset;
1977 {
1978 unw_rec_list *ptr = alloc_record (lc_sprel);
1979 ptr->r.record.p.spoff = offset / 4;
1980 return ptr;
1981 }
1982
1983 static unw_rec_list *
1984 output_fpsr_when ()
1985 {
1986 unw_rec_list *ptr = alloc_record (fpsr_when);
1987 return ptr;
1988 }
1989
1990 static unw_rec_list *
1991 output_fpsr_gr (gr)
1992 unsigned int gr;
1993 {
1994 unw_rec_list *ptr = alloc_record (fpsr_gr);
1995 ptr->r.record.p.gr = gr;
1996 return ptr;
1997 }
1998
1999 static unw_rec_list *
2000 output_fpsr_psprel (offset)
2001 unsigned int offset;
2002 {
2003 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2004 ptr->r.record.p.pspoff = offset / 4;
2005 return ptr;
2006 }
2007
2008 static unw_rec_list *
2009 output_fpsr_sprel (offset)
2010 unsigned int offset;
2011 {
2012 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2013 ptr->r.record.p.spoff = offset / 4;
2014 return ptr;
2015 }
2016
2017 static unw_rec_list *
2018 output_priunat_when_gr ()
2019 {
2020 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2021 return ptr;
2022 }
2023
2024 static unw_rec_list *
2025 output_priunat_when_mem ()
2026 {
2027 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2028 return ptr;
2029 }
2030
2031 static unw_rec_list *
2032 output_priunat_gr (gr)
2033 unsigned int gr;
2034 {
2035 unw_rec_list *ptr = alloc_record (priunat_gr);
2036 ptr->r.record.p.gr = gr;
2037 return ptr;
2038 }
2039
2040 static unw_rec_list *
2041 output_priunat_psprel (offset)
2042 unsigned int offset;
2043 {
2044 unw_rec_list *ptr = alloc_record (priunat_psprel);
2045 ptr->r.record.p.pspoff = offset / 4;
2046 return ptr;
2047 }
2048
2049 static unw_rec_list *
2050 output_priunat_sprel (offset)
2051 unsigned int offset;
2052 {
2053 unw_rec_list *ptr = alloc_record (priunat_sprel);
2054 ptr->r.record.p.spoff = offset / 4;
2055 return ptr;
2056 }
2057
2058 static unw_rec_list *
2059 output_bsp_when ()
2060 {
2061 unw_rec_list *ptr = alloc_record (bsp_when);
2062 return ptr;
2063 }
2064
2065 static unw_rec_list *
2066 output_bsp_gr (gr)
2067 unsigned int gr;
2068 {
2069 unw_rec_list *ptr = alloc_record (bsp_gr);
2070 ptr->r.record.p.gr = gr;
2071 return ptr;
2072 }
2073
2074 static unw_rec_list *
2075 output_bsp_psprel (offset)
2076 unsigned int offset;
2077 {
2078 unw_rec_list *ptr = alloc_record (bsp_psprel);
2079 ptr->r.record.p.pspoff = offset / 4;
2080 return ptr;
2081 }
2082
2083 static unw_rec_list *
2084 output_bsp_sprel (offset)
2085 unsigned int offset;
2086 {
2087 unw_rec_list *ptr = alloc_record (bsp_sprel);
2088 ptr->r.record.p.spoff = offset / 4;
2089 return ptr;
2090 }
2091
2092 static unw_rec_list *
2093 output_bspstore_when ()
2094 {
2095 unw_rec_list *ptr = alloc_record (bspstore_when);
2096 return ptr;
2097 }
2098
2099 static unw_rec_list *
2100 output_bspstore_gr (gr)
2101 unsigned int gr;
2102 {
2103 unw_rec_list *ptr = alloc_record (bspstore_gr);
2104 ptr->r.record.p.gr = gr;
2105 return ptr;
2106 }
2107
2108 static unw_rec_list *
2109 output_bspstore_psprel (offset)
2110 unsigned int offset;
2111 {
2112 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2113 ptr->r.record.p.pspoff = offset / 4;
2114 return ptr;
2115 }
2116
2117 static unw_rec_list *
2118 output_bspstore_sprel (offset)
2119 unsigned int offset;
2120 {
2121 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2122 ptr->r.record.p.spoff = offset / 4;
2123 return ptr;
2124 }
2125
2126 static unw_rec_list *
2127 output_rnat_when ()
2128 {
2129 unw_rec_list *ptr = alloc_record (rnat_when);
2130 return ptr;
2131 }
2132
2133 static unw_rec_list *
2134 output_rnat_gr (gr)
2135 unsigned int gr;
2136 {
2137 unw_rec_list *ptr = alloc_record (rnat_gr);
2138 ptr->r.record.p.gr = gr;
2139 return ptr;
2140 }
2141
2142 static unw_rec_list *
2143 output_rnat_psprel (offset)
2144 unsigned int offset;
2145 {
2146 unw_rec_list *ptr = alloc_record (rnat_psprel);
2147 ptr->r.record.p.pspoff = offset / 4;
2148 return ptr;
2149 }
2150
2151 static unw_rec_list *
2152 output_rnat_sprel (offset)
2153 unsigned int offset;
2154 {
2155 unw_rec_list *ptr = alloc_record (rnat_sprel);
2156 ptr->r.record.p.spoff = offset / 4;
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_unwabi (abi, context)
2162 unsigned long abi;
2163 unsigned long context;
2164 {
2165 unw_rec_list *ptr = alloc_record (unwabi);
2166 ptr->r.record.p.abi = abi;
2167 ptr->r.record.p.context = context;
2168 return ptr;
2169 }
2170
2171 static unw_rec_list *
2172 output_epilogue (unsigned long ecount)
2173 {
2174 unw_rec_list *ptr = alloc_record (epilogue);
2175 ptr->r.record.b.ecount = ecount;
2176 return ptr;
2177 }
2178
2179 static unw_rec_list *
2180 output_label_state (unsigned long label)
2181 {
2182 unw_rec_list *ptr = alloc_record (label_state);
2183 ptr->r.record.b.label = label;
2184 return ptr;
2185 }
2186
2187 static unw_rec_list *
2188 output_copy_state (unsigned long label)
2189 {
2190 unw_rec_list *ptr = alloc_record (copy_state);
2191 ptr->r.record.b.label = label;
2192 return ptr;
2193 }
2194
2195 static unw_rec_list *
2196 output_spill_psprel (ab, reg, offset)
2197 unsigned int ab;
2198 unsigned int reg;
2199 unsigned int offset;
2200 {
2201 unw_rec_list *ptr = alloc_record (spill_psprel);
2202 ptr->r.record.x.ab = ab;
2203 ptr->r.record.x.reg = reg;
2204 ptr->r.record.x.pspoff = offset / 4;
2205 return ptr;
2206 }
2207
2208 static unw_rec_list *
2209 output_spill_sprel (ab, reg, offset)
2210 unsigned int ab;
2211 unsigned int reg;
2212 unsigned int offset;
2213 {
2214 unw_rec_list *ptr = alloc_record (spill_sprel);
2215 ptr->r.record.x.ab = ab;
2216 ptr->r.record.x.reg = reg;
2217 ptr->r.record.x.spoff = offset / 4;
2218 return ptr;
2219 }
2220
2221 static unw_rec_list *
2222 output_spill_psprel_p (ab, reg, offset, predicate)
2223 unsigned int ab;
2224 unsigned int reg;
2225 unsigned int offset;
2226 unsigned int predicate;
2227 {
2228 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2229 ptr->r.record.x.ab = ab;
2230 ptr->r.record.x.reg = reg;
2231 ptr->r.record.x.pspoff = offset / 4;
2232 ptr->r.record.x.qp = predicate;
2233 return ptr;
2234 }
2235
2236 static unw_rec_list *
2237 output_spill_sprel_p (ab, reg, offset, predicate)
2238 unsigned int ab;
2239 unsigned int reg;
2240 unsigned int offset;
2241 unsigned int predicate;
2242 {
2243 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2244 ptr->r.record.x.ab = ab;
2245 ptr->r.record.x.reg = reg;
2246 ptr->r.record.x.spoff = offset / 4;
2247 ptr->r.record.x.qp = predicate;
2248 return ptr;
2249 }
2250
2251 static unw_rec_list *
2252 output_spill_reg (ab, reg, targ_reg, xy)
2253 unsigned int ab;
2254 unsigned int reg;
2255 unsigned int targ_reg;
2256 unsigned int xy;
2257 {
2258 unw_rec_list *ptr = alloc_record (spill_reg);
2259 ptr->r.record.x.ab = ab;
2260 ptr->r.record.x.reg = reg;
2261 ptr->r.record.x.treg = targ_reg;
2262 ptr->r.record.x.xy = xy;
2263 return ptr;
2264 }
2265
2266 static unw_rec_list *
2267 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2268 unsigned int ab;
2269 unsigned int reg;
2270 unsigned int targ_reg;
2271 unsigned int xy;
2272 unsigned int predicate;
2273 {
2274 unw_rec_list *ptr = alloc_record (spill_reg_p);
2275 ptr->r.record.x.ab = ab;
2276 ptr->r.record.x.reg = reg;
2277 ptr->r.record.x.treg = targ_reg;
2278 ptr->r.record.x.xy = xy;
2279 ptr->r.record.x.qp = predicate;
2280 return ptr;
2281 }
2282
2283 /* Given a unw_rec_list record, process it into the correct descriptor
2284 format using the specified output function. */
2285
2286 static void
2287 process_one_record (ptr, f)
2288 unw_rec_list *ptr;
2289 vbyte_func f;
2290 {
2291 unsigned long fr_mask, gr_mask;
2292
2293 switch (ptr->r.type)
2294 {
2295 case gr_mem:
2296 case fr_mem:
2297 case br_mem:
2298 case frgr_mem:
2299 /* These are taken care of by prologue/prologue_gr. */
2300 break;
2301
2302 case prologue_gr:
2303 case prologue:
2304 if (ptr->r.type == prologue_gr)
2305 output_R2_format (f, ptr->r.record.r.grmask,
2306 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2307 else
2308 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2309
2310 /* Output descriptor(s) for union of register spills (if any). */
2311 gr_mask = ptr->r.record.r.mask.gr_mem;
2312 fr_mask = ptr->r.record.r.mask.fr_mem;
2313 if (fr_mask)
2314 {
2315 if ((fr_mask & ~0xfUL) == 0)
2316 output_P6_format (f, fr_mem, fr_mask);
2317 else
2318 {
2319 output_P5_format (f, gr_mask, fr_mask);
2320 gr_mask = 0;
2321 }
2322 }
2323 if (gr_mask)
2324 output_P6_format (f, gr_mem, gr_mask);
2325 if (ptr->r.record.r.mask.br_mem)
2326 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2327
2328 /* output imask descriptor if necessary: */
2329 if (ptr->r.record.r.mask.i)
2330 output_P4_format (f, ptr->r.record.r.mask.i,
2331 ptr->r.record.r.imask_size);
2332 break;
2333
2334 case body:
2335 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2336 break;
2337 case mem_stack_f:
2338 case mem_stack_v:
2339 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2340 ptr->r.record.p.size);
2341 break;
2342 case psp_gr:
2343 case rp_gr:
2344 case pfs_gr:
2345 case preds_gr:
2346 case unat_gr:
2347 case lc_gr:
2348 case fpsr_gr:
2349 case priunat_gr:
2350 case bsp_gr:
2351 case bspstore_gr:
2352 case rnat_gr:
2353 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2354 break;
2355 case rp_br:
2356 output_P3_format (f, rp_br, ptr->r.record.p.br);
2357 break;
2358 case psp_sprel:
2359 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2360 break;
2361 case rp_when:
2362 case pfs_when:
2363 case preds_when:
2364 case unat_when:
2365 case lc_when:
2366 case fpsr_when:
2367 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2368 break;
2369 case rp_psprel:
2370 case pfs_psprel:
2371 case preds_psprel:
2372 case unat_psprel:
2373 case lc_psprel:
2374 case fpsr_psprel:
2375 case spill_base:
2376 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2377 break;
2378 case rp_sprel:
2379 case pfs_sprel:
2380 case preds_sprel:
2381 case unat_sprel:
2382 case lc_sprel:
2383 case fpsr_sprel:
2384 case priunat_sprel:
2385 case bsp_sprel:
2386 case bspstore_sprel:
2387 case rnat_sprel:
2388 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2389 break;
2390 case gr_gr:
2391 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2392 break;
2393 case br_gr:
2394 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2395 break;
2396 case spill_mask:
2397 as_bad ("spill_mask record unimplemented.");
2398 break;
2399 case priunat_when_gr:
2400 case priunat_when_mem:
2401 case bsp_when:
2402 case bspstore_when:
2403 case rnat_when:
2404 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2405 break;
2406 case priunat_psprel:
2407 case bsp_psprel:
2408 case bspstore_psprel:
2409 case rnat_psprel:
2410 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2411 break;
2412 case unwabi:
2413 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2414 break;
2415 case epilogue:
2416 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2417 break;
2418 case label_state:
2419 case copy_state:
2420 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2421 break;
2422 case spill_psprel:
2423 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2424 ptr->r.record.x.reg, ptr->r.record.x.t,
2425 ptr->r.record.x.pspoff);
2426 break;
2427 case spill_sprel:
2428 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2429 ptr->r.record.x.reg, ptr->r.record.x.t,
2430 ptr->r.record.x.spoff);
2431 break;
2432 case spill_reg:
2433 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2434 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2435 ptr->r.record.x.treg, ptr->r.record.x.t);
2436 break;
2437 case spill_psprel_p:
2438 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2439 ptr->r.record.x.ab, ptr->r.record.x.reg,
2440 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2441 break;
2442 case spill_sprel_p:
2443 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2444 ptr->r.record.x.ab, ptr->r.record.x.reg,
2445 ptr->r.record.x.t, ptr->r.record.x.spoff);
2446 break;
2447 case spill_reg_p:
2448 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2449 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2450 ptr->r.record.x.xy, ptr->r.record.x.treg,
2451 ptr->r.record.x.t);
2452 break;
2453 default:
2454 as_bad ("record_type_not_valid");
2455 break;
2456 }
2457 }
2458
2459 /* Given a unw_rec_list list, process all the records with
2460 the specified function. */
2461 static void
2462 process_unw_records (list, f)
2463 unw_rec_list *list;
2464 vbyte_func f;
2465 {
2466 unw_rec_list *ptr;
2467 for (ptr = list; ptr; ptr = ptr->next)
2468 process_one_record (ptr, f);
2469 }
2470
2471 /* Determine the size of a record list in bytes. */
2472 static int
2473 calc_record_size (list)
2474 unw_rec_list *list;
2475 {
2476 vbyte_count = 0;
2477 process_unw_records (list, count_output);
2478 return vbyte_count;
2479 }
2480
2481 /* Update IMASK bitmask to reflect the fact that one or more registers
2482 of type TYPE are saved starting at instruction with index T. If N
2483 bits are set in REGMASK, it is assumed that instructions T through
2484 T+N-1 save these registers.
2485
2486 TYPE values:
2487 0: no save
2488 1: instruction saves next fp reg
2489 2: instruction saves next general reg
2490 3: instruction saves next branch reg */
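/* A worked example of the encoding used below: for a region in which three
   registers are saved starting at slot t = 5, the first save lands in imask
   byte t/4 + 1 = 2 at bit position 2 * (3 - 5 % 4) = 4, i.e. that byte
   receives (type & 0x3) << 4; the saves in slots 6 and 7 follow at bit
   positions 2 and 0 of the same byte.  */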
2491 static void
2492 set_imask (region, regmask, t, type)
2493 unw_rec_list *region;
2494 unsigned long regmask;
2495 unsigned long t;
2496 unsigned int type;
2497 {
2498 unsigned char *imask;
2499 unsigned long imask_size;
2500 unsigned int i;
2501 int pos;
2502
2503 imask = region->r.record.r.mask.i;
2504 imask_size = region->r.record.r.imask_size;
2505 if (!imask)
2506 {
2507 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2508 imask = xmalloc (imask_size);
2509 memset (imask, 0, imask_size);
2510
2511 region->r.record.r.imask_size = imask_size;
2512 region->r.record.r.mask.i = imask;
2513 }
2514
2515 i = (t / 4) + 1;
2516 pos = 2 * (3 - t % 4);
2517 while (regmask)
2518 {
2519 if (i >= imask_size)
2520 {
2521 as_bad ("Ignoring attempt to spill beyond end of region");
2522 return;
2523 }
2524
2525 imask[i] |= (type & 0x3) << pos;
2526
2527 regmask &= (regmask - 1);
2528 pos -= 2;
2529 if (pos < 0)
2530 {
2531 pos = 6;	/* Continue with the top 2-bit field of the next byte.  */
2532 ++i;
2533 }
2534 }
2535 }
2536
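/* Return the number of bits set in MASK.  Each iteration of the loop below
   clears the lowest set bit (mask &= mask - 1), so the loop runs exactly
   once per set bit.  */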
2537 static int
2538 count_bits (unsigned long mask)
2539 {
2540 int n = 0;
2541
2542 while (mask)
2543 {
2544 mask &= mask - 1;
2545 ++n;
2546 }
2547 return n;
2548 }
2549
2550 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2551 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2552 containing FIRST_ADDR. */
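/* A worked example, assuming both addresses fall in the same frag: IA-64
   bundles are 16 bytes wide and hold three instruction slots, and the
   addresses used here carry the slot number in their low bits.  So for a
   SLOT_ADDR two bundles (0x20 bytes) and one slot beyond FIRST_ADDR, the
   arithmetic below yields 3 * 2 + (1 - 0) = 7 slots.  */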
2553
2554 unsigned long
2555 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2556 unsigned long slot_addr;
2557 fragS *slot_frag;
2558 unsigned long first_addr;
2559 fragS *first_frag;
2560 {
2561 unsigned long index = 0;
2562
2563 /* First time we are called, the initial address and frag are invalid. */
2564 if (first_addr == 0)
2565 return 0;
2566
2567 /* If the two addresses are in different frags, then we need to add in
2568 the remaining size of this frag, and then the entire size of intermediate
2569 frags. */
2570 while (slot_frag != first_frag)
2571 {
2572 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2573
2574 /* Add in the full size of the frag converted to instruction slots. */
2575 index += 3 * (first_frag->fr_fix >> 4);
2576 /* Subtract away the initial part before first_addr. */
2577 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2578 + ((first_addr & 0x3) - (start_addr & 0x3)));
2579
2580 /* Move to the beginning of the next frag. */
2581 first_frag = first_frag->fr_next;
2582 first_addr = (unsigned long) &first_frag->fr_literal;
2583 }
2584
2585 /* Add in the used part of the last frag. */
2586 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2587 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2588 return index;
2589 }
2590
2591 /* Optimize unwind record directives. */
2592
2593 static unw_rec_list *
2594 optimize_unw_records (list)
2595 unw_rec_list *list;
2596 {
2597 if (!list)
2598 return NULL;
2599
2600 /* If the only unwind record is ".prologue" or ".prologue" followed
2601 by ".body", then we can optimize the unwind directives away. */
2602 if (list->r.type == prologue
2603 && (list->next == NULL
2604 || (list->next->r.type == body && list->next->next == NULL)))
2605 return NULL;
2606
2607 return list;
2608 }
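
/* In other words, a function annotated only with

       .proc   foo
     foo:
       .prologue
       .body
       ...
       .endp   foo

   carries no unwind information worth emitting, so the whole list is dropped
   and, unless an unwind entry is forced (e.g. by .personality or
   .handlerdata), no unwind image is generated for it.  */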
2609
2610 /* Given a complete record list, process any records which have
2611 unresolved fields (i.e., length counts for a prologue). After
2612 this has been run, all necessary information should be available
2613 within each record to generate an image. */
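/* For example, if a .prologue record is tagged with a given slot and the
   next region record (a .body or another .prologue) starts four slots
   later, the loop below sets that prologue's rlen to 4; "when" records
   inside the region then receive a time value t equal to their slot index
   relative to the start of the region.  */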
2614
2615 static void
2616 fixup_unw_records (list)
2617 unw_rec_list *list;
2618 {
2619 unw_rec_list *ptr, *region = 0;
2620 unsigned long first_addr = 0, rlen = 0, t;
2621 fragS *first_frag = 0;
2622
2623 for (ptr = list; ptr; ptr = ptr->next)
2624 {
2625 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2626 as_bad (" Insn slot not set in unwind record.");
2627 t = slot_index (ptr->slot_number, ptr->slot_frag,
2628 first_addr, first_frag);
2629 switch (ptr->r.type)
2630 {
2631 case prologue:
2632 case prologue_gr:
2633 case body:
2634 {
2635 unw_rec_list *last;
2636 int size, dir_len = 0;
2637 unsigned long last_addr;
2638 fragS *last_frag;
2639
2640 first_addr = ptr->slot_number;
2641 first_frag = ptr->slot_frag;
2642 ptr->slot_number = 0;
2643 /* Find either the next body/prologue start, or the end of
2644 the list, and determine the size of the region. */
2645 last_addr = unwind.next_slot_number;
2646 last_frag = unwind.next_slot_frag;
2647 for (last = ptr->next; last != NULL; last = last->next)
2648 if (last->r.type == prologue || last->r.type == prologue_gr
2649 || last->r.type == body)
2650 {
2651 last_addr = last->slot_number;
2652 last_frag = last->slot_frag;
2653 break;
2654 }
2655 else if (!last->next)
2656 {
2657 /* In the absence of an explicit .body directive,
2658 the prologue ends after the last instruction
2659 covered by an unwind directive. */
2660 if (ptr->r.type != body)
2661 {
2662 last_addr = last->slot_number;
2663 last_frag = last->slot_frag;
2664 switch (last->r.type)
2665 {
2666 case frgr_mem:
2667 dir_len = (count_bits (last->r.record.p.frmask)
2668 + count_bits (last->r.record.p.grmask));
2669 break;
2670 case fr_mem:
2671 case gr_mem:
2672 dir_len += count_bits (last->r.record.p.rmask);
2673 break;
2674 case br_mem:
2675 case br_gr:
2676 dir_len += count_bits (last->r.record.p.brmask);
2677 break;
2678 case gr_gr:
2679 dir_len += count_bits (last->r.record.p.grmask);
2680 break;
2681 default:
2682 dir_len = 1;
2683 break;
2684 }
2685 }
2686 break;
2687 }
2688 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2689 + dir_len);
2690 rlen = ptr->r.record.r.rlen = size;
2691 region = ptr;
2692 break;
2693 }
2694 case epilogue:
2695 ptr->r.record.b.t = rlen - 1 - t;
2696 break;
2697
2698 case mem_stack_f:
2699 case mem_stack_v:
2700 case rp_when:
2701 case pfs_when:
2702 case preds_when:
2703 case unat_when:
2704 case lc_when:
2705 case fpsr_when:
2706 case priunat_when_gr:
2707 case priunat_when_mem:
2708 case bsp_when:
2709 case bspstore_when:
2710 case rnat_when:
2711 ptr->r.record.p.t = t;
2712 break;
2713
2714 case spill_reg:
2715 case spill_sprel:
2716 case spill_psprel:
2717 case spill_reg_p:
2718 case spill_sprel_p:
2719 case spill_psprel_p:
2720 ptr->r.record.x.t = t;
2721 break;
2722
2723 case frgr_mem:
2724 if (!region)
2725 {
2726 as_bad ("frgr_mem record before region record!\n");
2727 return;
2728 }
2729 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2730 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2731 set_imask (region, ptr->r.record.p.frmask, t, 1);
2732 set_imask (region, ptr->r.record.p.grmask, t, 2);
2733 break;
2734 case fr_mem:
2735 if (!region)
2736 {
2737 as_bad ("fr_mem record before region record!\n");
2738 return;
2739 }
2740 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2741 set_imask (region, ptr->r.record.p.rmask, t, 1);
2742 break;
2743 case gr_mem:
2744 if (!region)
2745 {
2746 as_bad ("gr_mem record before region record!\n");
2747 return;
2748 }
2749 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2750 set_imask (region, ptr->r.record.p.rmask, t, 2);
2751 break;
2752 case br_mem:
2753 if (!region)
2754 {
2755 as_bad ("br_mem record before region record!\n");
2756 return;
2757 }
2758 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2759 set_imask (region, ptr->r.record.p.brmask, t, 3);
2760 break;
2761
2762 case gr_gr:
2763 if (!region)
2764 {
2765 as_bad ("gr_gr record before region record!\n");
2766 return;
2767 }
2768 set_imask (region, ptr->r.record.p.grmask, t, 2);
2769 break;
2770 case br_gr:
2771 if (!region)
2772 {
2773 as_bad ("br_gr record before region record!\n");
2774 return;
2775 }
2776 set_imask (region, ptr->r.record.p.brmask, t, 3);
2777 break;
2778
2779 default:
2780 break;
2781 }
2782 }
2783 }
2784
2785 /* Helper routine for output_unw_records. Emits the header for the unwind
2786 info. */
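/* A sketch of the 8-byte header word assembled below: version 1 in bits 48
   and up, the U and E handler flags (3 << 32) only when a personality
   routine is present, and the descriptor-area length in pointer-sized words
   in the low bits.  E.g. for LP64 (pointer_size == 8) and a 24-byte
   descriptor area needing no padding, the word is (1 << 48) | 3, i.e.
   0x0001000000000003.  */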
2787
2788 static int
2789 setup_unwind_header (int size, unsigned char **mem)
2790 {
2791 int x, extra = 0;
2792
2793 /* Pad to pointer-size boundary. */
2794 x = size % md.pointer_size;
2795 if (x != 0)
2796 extra = md.pointer_size - x;
2797
2798 /* Add 8 for the header + a pointer for the
2799 personality offset. */
2800 *mem = xmalloc (size + extra + 8 + md.pointer_size);
2801
2802 /* Clear the padding area and personality. */
2803 memset (*mem + 8 + size, 0 , extra + md.pointer_size);
2804 /* Initialize the header area. */
2805
2806 md_number_to_chars (*mem, (((bfd_vma) 1 << 48) /* version */
2807 | (unwind.personality_routine
2808 ? ((bfd_vma) 3 << 32) /* U & E handler flags */
2809 : 0)
2810 | ((size + extra) / md.pointer_size)), /* length */
2811 8);
2812
2813 return extra;
2814 }
2815
2816 /* Generate an unwind image from a record list. Returns the number of
2817 bytes in the resulting image. The memory image itself is returned
2818 in the 'ptr' parameter. */
2819 static int
2820 output_unw_records (list, ptr)
2821 unw_rec_list *list;
2822 void **ptr;
2823 {
2824 int size, extra;
2825 unsigned char *mem;
2826
2827 *ptr = NULL;
2828
2829 list = optimize_unw_records (list);
2830 fixup_unw_records (list);
2831 size = calc_record_size (list);
2832
2833 if (size > 0 || unwind.force_unwind_entry)
2834 {
2835 unwind.force_unwind_entry = 0;
2836 extra = setup_unwind_header (size, &mem);
2837
2838 vbyte_mem_ptr = mem + 8;
2839 process_unw_records (list, output_vbyte_mem);
2840
2841 *ptr = mem;
2842
2843 size += extra + 8 + md.pointer_size;
2844 }
2845 return size;
2846 }
2847
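/* Convert a register expression into the (ab, reg) pair used by the
   X-format spill/restore descriptors: ab 0 covers the preserved general
   registers r4-r7, ab 1 the preserved floating-point registers f2-f5 and
   f16-f31, ab 2 the preserved branch registers b1-b5, and ab 3 the special
   registers enumerated in the switch below.  Returns non-zero on success,
   zero if the expression is not a valid preserved register.  */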
2848 static int
2849 convert_expr_to_ab_reg (e, ab, regp)
2850 expressionS *e;
2851 unsigned int *ab;
2852 unsigned int *regp;
2853 {
2854 unsigned int reg;
2855
2856 if (e->X_op != O_register)
2857 return 0;
2858
2859 reg = e->X_add_number;
2860 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2861 {
2862 *ab = 0;
2863 *regp = reg - REG_GR;
2864 }
2865 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2866 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2867 {
2868 *ab = 1;
2869 *regp = reg - REG_FR;
2870 }
2871 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2872 {
2873 *ab = 2;
2874 *regp = reg - REG_BR;
2875 }
2876 else
2877 {
2878 *ab = 3;
2879 switch (reg)
2880 {
2881 case REG_PR: *regp = 0; break;
2882 case REG_PSP: *regp = 1; break;
2883 case REG_PRIUNAT: *regp = 2; break;
2884 case REG_BR + 0: *regp = 3; break;
2885 case REG_AR + AR_BSP: *regp = 4; break;
2886 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2887 case REG_AR + AR_RNAT: *regp = 6; break;
2888 case REG_AR + AR_UNAT: *regp = 7; break;
2889 case REG_AR + AR_FPSR: *regp = 8; break;
2890 case REG_AR + AR_PFS: *regp = 9; break;
2891 case REG_AR + AR_LC: *regp = 10; break;
2892
2893 default:
2894 return 0;
2895 }
2896 }
2897 return 1;
2898 }
2899
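/* Convert a register expression into the (xy, reg) pair used as the target
   of a spill_reg record: xy 0 selects a general register, 1 a floating-point
   register, 2 a branch register.  Returns non-zero on success, zero on
   failure.  */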
2900 static int
2901 convert_expr_to_xy_reg (e, xy, regp)
2902 expressionS *e;
2903 unsigned int *xy;
2904 unsigned int *regp;
2905 {
2906 unsigned int reg;
2907
2908 if (e->X_op != O_register)
2909 return 0;
2910
2911 reg = e->X_add_number;
2912
2913 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
2914 {
2915 *xy = 0;
2916 *regp = reg - REG_GR;
2917 }
2918 else if (reg >= REG_FR && reg <= (REG_FR + 127))
2919 {
2920 *xy = 1;
2921 *regp = reg - REG_FR;
2922 }
2923 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2924 {
2925 *xy = 2;
2926 *regp = reg - REG_BR;
2927 }
2928 else
2929 return 0;	/* Callers test for zero on failure (cf. convert_expr_to_ab_reg).  */
2930 return 1;
2931 }
2932
2933 static void
2934 dot_radix (dummy)
2935 int dummy ATTRIBUTE_UNUSED;
2936 {
2937 int radix;
2938
2939 SKIP_WHITESPACE ();
2940 radix = *input_line_pointer++;
2941
2942 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
2943 {
2944 as_bad ("Radix `%c' unsupported", *input_line_pointer);
2945 ignore_rest_of_line ();
2946 return;
2947 }
2948 }
2949
2950 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
2951 static void
2952 dot_special_section (which)
2953 int which;
2954 {
2955 set_section ((char *) special_section_name[which]);
2956 }
2957
2958 static void
2959 add_unwind_entry (ptr)
2960 unw_rec_list *ptr;
2961 {
2962 if (unwind.tail)
2963 unwind.tail->next = ptr;
2964 else
2965 unwind.list = ptr;
2966 unwind.tail = ptr;
2967
2968 /* The current entry can in fact be a chain of unwind entries. */
2969 if (unwind.current_entry == NULL)
2970 unwind.current_entry = ptr;
2971 }
2972
2973 static void
2974 dot_fframe (dummy)
2975 int dummy ATTRIBUTE_UNUSED;
2976 {
2977 expressionS e;
2978
2979 parse_operand (&e);
2980
2981 if (e.X_op != O_constant)
2982 as_bad ("Operand to .fframe must be a constant");
2983 else
2984 add_unwind_entry (output_mem_stack_f (e.X_add_number));
2985 }
2986
2987 static void
2988 dot_vframe (dummy)
2989 int dummy ATTRIBUTE_UNUSED;
2990 {
2991 expressionS e;
2992 unsigned reg;
2993
2994 parse_operand (&e);
2995 reg = e.X_add_number - REG_GR;
2996 if (e.X_op == O_register && reg < 128)
2997 {
2998 add_unwind_entry (output_mem_stack_v ());
2999 if (! (unwind.prologue_mask & 2))
3000 add_unwind_entry (output_psp_gr (reg));
3001 }
3002 else
3003 as_bad ("First operand to .vframe must be a general register");
3004 }
3005
3006 static void
3007 dot_vframesp (dummy)
3008 int dummy ATTRIBUTE_UNUSED;
3009 {
3010 expressionS e;
3011
3012 parse_operand (&e);
3013 if (e.X_op == O_constant)
3014 {
3015 add_unwind_entry (output_mem_stack_v ());
3016 add_unwind_entry (output_psp_sprel (e.X_add_number));
3017 }
3018 else
3019 as_bad ("First operand to .vframesp must be a general register");
3020 }
3021
3022 static void
3023 dot_vframepsp (dummy)
3024 int dummy ATTRIBUTE_UNUSED;
3025 {
3026 expressionS e;
3027
3028 parse_operand (&e);
3029 if (e.X_op == O_constant)
3030 {
3031 add_unwind_entry (output_mem_stack_v ());
3032 add_unwind_entry (output_psp_sprel (e.X_add_number));
3033 }
3034 else
3035 as_bad ("First operand to .vframepsp must be a general register");
3036 }
3037
3038 static void
3039 dot_save (dummy)
3040 int dummy ATTRIBUTE_UNUSED;
3041 {
3042 expressionS e1, e2;
3043 int sep;
3044 int reg1, reg2;
3045
3046 sep = parse_operand (&e1);
3047 if (sep != ',')
3048 as_bad ("No second operand to .save");
3049 sep = parse_operand (&e2);
3050
3051 reg1 = e1.X_add_number;
3052 reg2 = e2.X_add_number - REG_GR;
3053
3054 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3055 if (e1.X_op == O_register)
3056 {
3057 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3058 {
3059 switch (reg1)
3060 {
3061 case REG_AR + AR_BSP:
3062 add_unwind_entry (output_bsp_when ());
3063 add_unwind_entry (output_bsp_gr (reg2));
3064 break;
3065 case REG_AR + AR_BSPSTORE:
3066 add_unwind_entry (output_bspstore_when ());
3067 add_unwind_entry (output_bspstore_gr (reg2));
3068 break;
3069 case REG_AR + AR_RNAT:
3070 add_unwind_entry (output_rnat_when ());
3071 add_unwind_entry (output_rnat_gr (reg2));
3072 break;
3073 case REG_AR + AR_UNAT:
3074 add_unwind_entry (output_unat_when ());
3075 add_unwind_entry (output_unat_gr (reg2));
3076 break;
3077 case REG_AR + AR_FPSR:
3078 add_unwind_entry (output_fpsr_when ());
3079 add_unwind_entry (output_fpsr_gr (reg2));
3080 break;
3081 case REG_AR + AR_PFS:
3082 add_unwind_entry (output_pfs_when ());
3083 if (! (unwind.prologue_mask & 4))
3084 add_unwind_entry (output_pfs_gr (reg2));
3085 break;
3086 case REG_AR + AR_LC:
3087 add_unwind_entry (output_lc_when ());
3088 add_unwind_entry (output_lc_gr (reg2));
3089 break;
3090 case REG_BR:
3091 add_unwind_entry (output_rp_when ());
3092 if (! (unwind.prologue_mask & 8))
3093 add_unwind_entry (output_rp_gr (reg2));
3094 break;
3095 case REG_PR:
3096 add_unwind_entry (output_preds_when ());
3097 if (! (unwind.prologue_mask & 1))
3098 add_unwind_entry (output_preds_gr (reg2));
3099 break;
3100 case REG_PRIUNAT:
3101 add_unwind_entry (output_priunat_when_gr ());
3102 add_unwind_entry (output_priunat_gr (reg2));
3103 break;
3104 default:
3105 as_bad ("First operand not a valid register");
3106 }
3107 }
3108 else
3109 as_bad (" Second operand not a valid register");
3110 }
3111 else
3112 as_bad ("First operand not a register");
3113 }
3114
3115 static void
3116 dot_restore (dummy)
3117 int dummy ATTRIBUTE_UNUSED;
3118 {
3119 expressionS e1, e2;
3120 unsigned long ecount; /* # of _additional_ regions to pop */
3121 int sep;
3122
3123 sep = parse_operand (&e1);
3124 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3125 {
3126 as_bad ("First operand to .restore must be stack pointer (sp)");
3127 return;
3128 }
3129
3130 if (sep == ',')
3131 {
3132 parse_operand (&e2);
3133 if (e2.X_op != O_constant || e2.X_add_number < 0)
3134 {
3135 as_bad ("Second operand to .restore must be a constant >= 0");
3136 return;
3137 }
3138 ecount = e2.X_add_number;
3139 }
3140 else
3141 ecount = unwind.prologue_count - 1;
3142 add_unwind_entry (output_epilogue (ecount));
3143
3144 if (ecount < unwind.prologue_count)
3145 unwind.prologue_count -= ecount + 1;
3146 else
3147 unwind.prologue_count = 0;
3148 }
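
/* For illustration: ".restore sp, 0" pops just the innermost prologue
   region, ".restore sp, 2" pops it together with two enclosing regions, and
   plain ".restore sp" defaults ecount to prologue_count - 1, i.e. pops every
   outstanding region.  */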
3149
3150 static void
3151 dot_restorereg (dummy)
3152 int dummy ATTRIBUTE_UNUSED;
3153 {
3154 unsigned int ab, reg;
3155 expressionS e;
3156
3157 parse_operand (&e);
3158
3159 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3160 {
3161 as_bad ("First operand to .restorereg must be a preserved register");
3162 return;
3163 }
3164 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3165 }
3166
3167 static void
3168 dot_restorereg_p (dummy)
3169 int dummy ATTRIBUTE_UNUSED;
3170 {
3171 unsigned int qp, ab, reg;
3172 expressionS e1, e2;
3173 int sep;
3174
3175 sep = parse_operand (&e1);
3176 if (sep != ',')
3177 {
3178 as_bad ("No second operand to .restorereg.p");
3179 return;
3180 }
3181
3182 parse_operand (&e2);
3183
3184 qp = e1.X_add_number - REG_P;
3185 if (e1.X_op != O_register || qp > 63)
3186 {
3187 as_bad ("First operand to .restorereg.p must be a predicate");
3188 return;
3189 }
3190
3191 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3192 {
3193 as_bad ("Second operand to .restorereg.p must be a preserved register");
3194 return;
3195 }
3196 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3197 }
3198
3199 static int
3200 generate_unwind_image (text_name)
3201 const char *text_name;
3202 {
3203 int size;
3204 unsigned char *unw_rec;
3205
3206 /* Force out pending instructions, to make sure all unwind records have
3207 a valid slot_number field. */
3208 ia64_flush_insns ();
3209
3210 /* Generate the unwind record. */
3211 size = output_unw_records (unwind.list, (void **) &unw_rec);
3212 if (size % md.pointer_size != 0)
3213 as_bad ("Unwind record is not a multiple of %d bytes.", md.pointer_size);
3214
3215 /* If there are unwind records, switch sections, and output the info. */
3216 if (size != 0)
3217 {
3218 unsigned char *where;
3219 char *sec_name;
3220 expressionS exp;
3221
3222 make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO, text_name, sec_name);
3223 set_section (sec_name);
3224 bfd_set_section_flags (stdoutput, now_seg,
3225 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3226
3227 /* Make sure the section has 4 byte alignment for ILP32 and
3228 8 byte alignment for LP64. */
3229 frag_align (md.pointer_size_shift, 0, 0);
3230 record_alignment (now_seg, md.pointer_size_shift);
3231
3232 /* Set expression which points to start of unwind descriptor area. */
3233 unwind.info = expr_build_dot ();
3234
3235 where = (unsigned char *) frag_more (size);
3236
3237 /* Issue a label for this address, and keep track of it to put it
3238 in the unwind section. */
3239
3240 /* Copy the information from the unwind record into this section. The
3241 data is already in the correct byte order. */
3242 memcpy (where, unw_rec, size);
3243
3244 /* Add the personality address to the image. */
3245 if (unwind.personality_routine != 0)
3246 {
3247 exp.X_op = O_symbol;
3248 exp.X_add_symbol = unwind.personality_routine;
3249 exp.X_add_number = 0;
3250 fix_new_exp (frag_now, frag_now_fix () - 8, 8,
3251 &exp, 0, BFD_RELOC_IA64_LTOFF_FPTR64LSB);
3252 unwind.personality_routine = 0;
3253 }
3254 }
3255
3256 free_list_records (unwind.list);
3257 unwind.list = unwind.tail = unwind.current_entry = NULL;
3258
3259 return size;
3260 }
3261
3262 static void
3263 dot_handlerdata (dummy)
3264 int dummy ATTRIBUTE_UNUSED;
3265 {
3266 const char *text_name = segment_name (now_seg);
3267
3268 /* If the text section is exactly ".text", use an empty suffix so the
3269 default unwind-info section name is used. */
3270 if (strcmp (text_name, ".text") == 0)
3271 text_name = "";
3272
3273 unwind.force_unwind_entry = 1;
3274
3275 /* Remember which segment we're in so we can switch back after .endp */
3276 unwind.saved_text_seg = now_seg;
3277 unwind.saved_text_subseg = now_subseg;
3278
3279 /* Generate unwind info into unwind-info section and then leave that
3280 section as the currently active one so dataXX directives go into
3281 the language specific data area of the unwind info block. */
3282 generate_unwind_image (text_name);
3283 demand_empty_rest_of_line ();
3284 }
3285
3286 static void
3287 dot_unwentry (dummy)
3288 int dummy ATTRIBUTE_UNUSED;
3289 {
3290 unwind.force_unwind_entry = 1;
3291 demand_empty_rest_of_line ();
3292 }
3293
3294 static void
3295 dot_altrp (dummy)
3296 int dummy ATTRIBUTE_UNUSED;
3297 {
3298 expressionS e;
3299 unsigned reg;
3300
3301 parse_operand (&e);
3302 reg = e.X_add_number - REG_BR;
3303 if (e.X_op == O_register && reg < 8)
3304 add_unwind_entry (output_rp_br (reg));
3305 else
3306 as_bad ("First operand not a valid branch register");
3307 }
3308
3309 static void
3310 dot_savemem (psprel)
3311 int psprel;
3312 {
3313 expressionS e1, e2;
3314 int sep;
3315 int reg1, val;
3316
3317 sep = parse_operand (&e1);
3318 if (sep != ',')
3319 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3320 sep = parse_operand (&e2);
3321
3322 reg1 = e1.X_add_number;
3323 val = e2.X_add_number;
3324
3325 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3326 if (e1.X_op == O_register)
3327 {
3328 if (e2.X_op == O_constant)
3329 {
3330 switch (reg1)
3331 {
3332 case REG_AR + AR_BSP:
3333 add_unwind_entry (output_bsp_when ());
3334 add_unwind_entry ((psprel
3335 ? output_bsp_psprel
3336 : output_bsp_sprel) (val));
3337 break;
3338 case REG_AR + AR_BSPSTORE:
3339 add_unwind_entry (output_bspstore_when ());
3340 add_unwind_entry ((psprel
3341 ? output_bspstore_psprel
3342 : output_bspstore_sprel) (val));
3343 break;
3344 case REG_AR + AR_RNAT:
3345 add_unwind_entry (output_rnat_when ());
3346 add_unwind_entry ((psprel
3347 ? output_rnat_psprel
3348 : output_rnat_sprel) (val));
3349 break;
3350 case REG_AR + AR_UNAT:
3351 add_unwind_entry (output_unat_when ());
3352 add_unwind_entry ((psprel
3353 ? output_unat_psprel
3354 : output_unat_sprel) (val));
3355 break;
3356 case REG_AR + AR_FPSR:
3357 add_unwind_entry (output_fpsr_when ());
3358 add_unwind_entry ((psprel
3359 ? output_fpsr_psprel
3360 : output_fpsr_sprel) (val));
3361 break;
3362 case REG_AR + AR_PFS:
3363 add_unwind_entry (output_pfs_when ());
3364 add_unwind_entry ((psprel
3365 ? output_pfs_psprel
3366 : output_pfs_sprel) (val));
3367 break;
3368 case REG_AR + AR_LC:
3369 add_unwind_entry (output_lc_when ());
3370 add_unwind_entry ((psprel
3371 ? output_lc_psprel
3372 : output_lc_sprel) (val));
3373 break;
3374 case REG_BR:
3375 add_unwind_entry (output_rp_when ());
3376 add_unwind_entry ((psprel
3377 ? output_rp_psprel
3378 : output_rp_sprel) (val));
3379 break;
3380 case REG_PR:
3381 add_unwind_entry (output_preds_when ());
3382 add_unwind_entry ((psprel
3383 ? output_preds_psprel
3384 : output_preds_sprel) (val));
3385 break;
3386 case REG_PRIUNAT:
3387 add_unwind_entry (output_priunat_when_mem ());
3388 add_unwind_entry ((psprel
3389 ? output_priunat_psprel
3390 : output_priunat_sprel) (val));
3391 break;
3392 default:
3393 as_bad ("First operand not a valid register");
3394 }
3395 }
3396 else
3397 as_bad (" Second operand not a valid constant");
3398 }
3399 else
3400 as_bad ("First operand not a register");
3401 }
3402
3403 static void
3404 dot_saveg (dummy)
3405 int dummy ATTRIBUTE_UNUSED;
3406 {
3407 expressionS e1, e2;
3408 int sep;
3409 sep = parse_operand (&e1);
3410 if (sep == ',')
3411 parse_operand (&e2);
3412
3413 if (e1.X_op != O_constant)
3414 as_bad ("First operand to .save.g must be a constant.");
3415 else
3416 {
3417 int grmask = e1.X_add_number;
3418 if (sep != ',')
3419 add_unwind_entry (output_gr_mem (grmask));
3420 else
3421 {
3422 int reg = e2.X_add_number - REG_GR;
3423 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3424 add_unwind_entry (output_gr_gr (grmask, reg));
3425 else
3426 as_bad ("Second operand is an invalid register.");
3427 }
3428 }
3429 }
3430
3431 static void
3432 dot_savef (dummy)
3433 int dummy ATTRIBUTE_UNUSED;
3434 {
3435 expressionS e1;
3436 int sep;
3437 sep = parse_operand (&e1);
3438
3439 if (e1.X_op != O_constant)
3440 as_bad ("Operand to .save.f must be a constant.");
3441 else
3442 add_unwind_entry (output_fr_mem (e1.X_add_number));
3443 }
3444
3445 static void
3446 dot_saveb (dummy)
3447 int dummy ATTRIBUTE_UNUSED;
3448 {
3449 expressionS e1, e2;
3450 unsigned int reg;
3451 unsigned char sep;
3452 int brmask;
3453
3454 sep = parse_operand (&e1);
3455 if (e1.X_op != O_constant)
3456 {
3457 as_bad ("First operand to .save.b must be a constant.");
3458 return;
3459 }
3460 brmask = e1.X_add_number;
3461
3462 if (sep == ',')
3463 {
3464 sep = parse_operand (&e2);
3465 reg = e2.X_add_number - REG_GR;
3466 if (e2.X_op != O_register || reg > 127)
3467 {
3468 as_bad ("Second operand to .save.b must be a general register.");
3469 return;
3470 }
3471 add_unwind_entry (output_br_gr (brmask, reg));
3472 }
3473 else
3474 add_unwind_entry (output_br_mem (brmask));
3475
3476 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3477 ignore_rest_of_line ();
3478 }
3479
3480 static void
3481 dot_savegf (dummy)
3482 int dummy ATTRIBUTE_UNUSED;
3483 {
3484 expressionS e1, e2;
3485 int sep;
3486 sep = parse_operand (&e1);
3487 if (sep == ',')
3488 parse_operand (&e2);
3489
3490 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3491 as_bad ("Both operands of .save.gf must be constants.");
3492 else
3493 {
3494 int grmask = e1.X_add_number;
3495 int frmask = e2.X_add_number;
3496 add_unwind_entry (output_frgr_mem (grmask, frmask));
3497 }
3498 }
3499
3500 static void
3501 dot_spill (dummy)
3502 int dummy ATTRIBUTE_UNUSED;
3503 {
3504 expressionS e;
3505 unsigned char sep;
3506
3507 sep = parse_operand (&e);
3508 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3509 ignore_rest_of_line ();
3510
3511 if (e.X_op != O_constant)
3512 as_bad ("Operand to .spill must be a constant");
3513 else
3514 add_unwind_entry (output_spill_base (e.X_add_number));
3515 }
3516
3517 static void
3518 dot_spillreg (dummy)
3519 int dummy ATTRIBUTE_UNUSED;
3520 {
3521 int sep, ab, xy, reg, treg;
3522 expressionS e1, e2;
3523
3524 sep = parse_operand (&e1);
3525 if (sep != ',')
3526 {
3527 as_bad ("No second operand to .spillreg");
3528 return;
3529 }
3530
3531 parse_operand (&e2);
3532
3533 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3534 {
3535 as_bad ("First operand to .spillreg must be a preserved register");
3536 return;
3537 }
3538
3539 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3540 {
3541 as_bad ("Second operand to .spillreg must be a register");
3542 return;
3543 }
3544
3545 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3546 }
3547
3548 static void
3549 dot_spillmem (psprel)
3550 int psprel;
3551 {
3552 expressionS e1, e2;
3553 int sep, ab, reg;
3554
3555 sep = parse_operand (&e1);
3556 if (sep != ',')
3557 {
3558 as_bad ("Second operand missing");
3559 return;
3560 }
3561
3562 parse_operand (&e2);
3563
3564 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3565 {
3566 as_bad ("First operand to .spill%s must be a preserved register",
3567 psprel ? "psp" : "sp");
3568 return;
3569 }
3570
3571 if (e2.X_op != O_constant)
3572 {
3573 as_bad ("Second operand to .spill%s must be a constant",
3574 psprel ? "psp" : "sp");
3575 return;
3576 }
3577
3578 if (psprel)
3579 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3580 else
3581 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3582 }
3583
3584 static void
3585 dot_spillreg_p (dummy)
3586 int dummy ATTRIBUTE_UNUSED;
3587 {
3588 int sep, ab, xy, reg, treg;
3589 expressionS e1, e2, e3;
3590 unsigned int qp;
3591
3592 sep = parse_operand (&e1);
3593 if (sep != ',')
3594 {
3595 as_bad ("No second and third operand to .spillreg.p");
3596 return;
3597 }
3598
3599 sep = parse_operand (&e2);
3600 if (sep != ',')
3601 {
3602 as_bad ("No third operand to .spillreg.p");
3603 return;
3604 }
3605
3606 parse_operand (&e3);
3607
3608 qp = e1.X_add_number - REG_P;
3609
3610 if (e1.X_op != O_register || qp > 63)
3611 {
3612 as_bad ("First operand to .spillreg.p must be a predicate");
3613 return;
3614 }
3615
3616 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3617 {
3618 as_bad ("Second operand to .spillreg.p must be a preserved register");
3619 return;
3620 }
3621
3622 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3623 {
3624 as_bad ("Third operand to .spillreg.p must be a register");
3625 return;
3626 }
3627
3628 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3629 }
3630
3631 static void
3632 dot_spillmem_p (psprel)
3633 int psprel;
3634 {
3635 expressionS e1, e2, e3;
3636 int sep, ab, reg;
3637 unsigned int qp;
3638
3639 sep = parse_operand (&e1);
3640 if (sep != ',')
3641 {
3642 as_bad ("Second operand missing");
3643 return;
3644 }
3645
3646 parse_operand (&e2);
3647 if (sep != ',')
3648 {
3649 as_bad ("Second operand missing");
3650 return;
3651 }
3652
3653 parse_operand (&e3);
3654
3655 qp = e1.X_add_number - REG_P;
3656 if (e1.X_op != O_register || qp > 63)
3657 {
3658 as_bad ("First operand to .spill%s_p must be a predicate",
3659 psprel ? "psp" : "sp");
3660 return;
3661 }
3662
3663 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3664 {
3665 as_bad ("Second operand to .spill%s_p must be a preserved register",
3666 psprel ? "psp" : "sp");
3667 return;
3668 }
3669
3670 if (e3.X_op != O_constant)
3671 {
3672 as_bad ("Third operand to .spill%s_p must be a constant",
3673 psprel ? "psp" : "sp");
3674 return;
3675 }
3676
3677 if (psprel)
3678 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3679 else
3680 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3681 }
3682
3683 static void
3684 dot_label_state (dummy)
3685 int dummy ATTRIBUTE_UNUSED;
3686 {
3687 expressionS e;
3688
3689 parse_operand (&e);
3690 if (e.X_op != O_constant)
3691 {
3692 as_bad ("Operand to .label_state must be a constant");
3693 return;
3694 }
3695 add_unwind_entry (output_label_state (e.X_add_number));
3696 }
3697
3698 static void
3699 dot_copy_state (dummy)
3700 int dummy ATTRIBUTE_UNUSED;
3701 {
3702 expressionS e;
3703
3704 parse_operand (&e);
3705 if (e.X_op != O_constant)
3706 {
3707 as_bad ("Operand to .copy_state must be a constant");
3708 return;
3709 }
3710 add_unwind_entry (output_copy_state (e.X_add_number));
3711 }
3712
3713 static void
3714 dot_unwabi (dummy)
3715 int dummy ATTRIBUTE_UNUSED;
3716 {
3717 expressionS e1, e2;
3718 unsigned char sep;
3719
3720 sep = parse_operand (&e1);
3721 if (sep != ',')
3722 {
3723 as_bad ("Second operand to .unwabi missing");
3724 return;
3725 }
3726 sep = parse_operand (&e2);
3727 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3728 ignore_rest_of_line ();
3729
3730 if (e1.X_op != O_constant)
3731 {
3732 as_bad ("First operand to .unwabi must be a constant");
3733 return;
3734 }
3735
3736 if (e2.X_op != O_constant)
3737 {
3738 as_bad ("Second operand to .unwabi must be a constant");
3739 return;
3740 }
3741
3742 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3743 }
3744
3745 static void
3746 dot_personality (dummy)
3747 int dummy ATTRIBUTE_UNUSED;
3748 {
3749 char *name, *p, c;
3750 SKIP_WHITESPACE ();
3751 name = input_line_pointer;
3752 c = get_symbol_end ();
3753 p = input_line_pointer;
3754 unwind.personality_routine = symbol_find_or_make (name);
3755 unwind.force_unwind_entry = 1;
3756 *p = c;
3757 SKIP_WHITESPACE ();
3758 demand_empty_rest_of_line ();
3759 }
3760
3761 static void
3762 dot_proc (dummy)
3763 int dummy ATTRIBUTE_UNUSED;
3764 {
3765 char *name, *p, c;
3766 symbolS *sym;
3767
3768 unwind.proc_start = expr_build_dot ();
3769 /* Parse names of main and alternate entry points and mark them as
3770 function symbols: */
3771 while (1)
3772 {
3773 SKIP_WHITESPACE ();
3774 name = input_line_pointer;
3775 c = get_symbol_end ();
3776 p = input_line_pointer;
3777 sym = symbol_find_or_make (name);
3778 if (unwind.proc_start == 0)
3779 {
3780 unwind.proc_start = sym;
3781 }
3782 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3783 *p = c;
3784 SKIP_WHITESPACE ();
3785 if (*input_line_pointer != ',')
3786 break;
3787 ++input_line_pointer;
3788 }
3789 demand_empty_rest_of_line ();
3790 ia64_do_align (16);
3791
3792 unwind.prologue_count = 0;
3793 unwind.list = unwind.tail = unwind.current_entry = NULL;
3794 unwind.personality_routine = 0;
3795 }
3796
3797 static void
3798 dot_body (dummy)
3799 int dummy ATTRIBUTE_UNUSED;
3800 {
3801 unwind.prologue = 0;
3802 unwind.prologue_mask = 0;
3803
3804 add_unwind_entry (output_body ());
3805 demand_empty_rest_of_line ();
3806 }
3807
3808 static void
3809 dot_prologue (dummy)
3810 int dummy ATTRIBUTE_UNUSED;
3811 {
3812 unsigned char sep;
3813 int mask = 0, grsave = 0;
3814
3815 if (!is_it_end_of_statement ())
3816 {
3817 expressionS e1, e2;
3818 sep = parse_operand (&e1);
3819 if (sep != ',')
3820 as_bad ("No second operand to .prologue");
3821 sep = parse_operand (&e2);
3822 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3823 ignore_rest_of_line ();
3824
3825 if (e1.X_op == O_constant)
3826 {
3827 mask = e1.X_add_number;
3828
3829 if (e2.X_op == O_constant)
3830 grsave = e2.X_add_number;
3831 else if (e2.X_op == O_register
3832 && (grsave = e2.X_add_number - REG_GR) < 128)
3833 ;
3834 else
3835 as_bad ("Second operand not a constant or general register");
3836
3837 add_unwind_entry (output_prologue_gr (mask, grsave));
3838 }
3839 else
3840 as_bad ("First operand not a constant");
3841 }
3842 else
3843 add_unwind_entry (output_prologue ());
3844
3845 unwind.prologue = 1;
3846 unwind.prologue_mask = mask;
3847 ++unwind.prologue_count;
3848 }
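
/* For example, ".prologue 0xc, 32" emits a prologue_gr region header stating
   that rp (mask bit 0x8) and ar.pfs (mask bit 0x4) are saved in consecutive
   general registers starting at r32; subsequent .save directives for those
   registers then emit only the "when" records, since dot_save above skips
   the GR record when the corresponding prologue_mask bit is already set.  */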
3849
3850 static void
3851 dot_endp (dummy)
3852 int dummy ATTRIBUTE_UNUSED;
3853 {
3854 expressionS e;
3855 unsigned char *ptr;
3856 int bytes_per_address;
3857 long where;
3858 segT saved_seg;
3859 subsegT saved_subseg;
3860 const char *sec_name, *text_name;
3861 char *name, *p, c;
3862 symbolS *sym;
3863
3864 if (unwind.saved_text_seg)
3865 {
3866 saved_seg = unwind.saved_text_seg;
3867 saved_subseg = unwind.saved_text_subseg;
3868 unwind.saved_text_seg = NULL;
3869 }
3870 else
3871 {
3872 saved_seg = now_seg;
3873 saved_subseg = now_subseg;
3874 }
3875
3876 /*
3877 Use a slightly ugly scheme to derive the unwind section names from
3878 the text section name:
3879
3880 text sect.            unwind table sect.
3881 name:                 name:                        comments:
3882 --------------------  ---------------------------  --------------------------------
3883 .text                 .IA_64.unwind
3884 .text.foo             .IA_64.unwind.text.foo
3885 .foo                  .IA_64.unwind.foo
3886 .gnu.linkonce.t.foo
3887                       .gnu.linkonce.ia64unw.foo
3888 _info                 .IA_64.unwind_info           gas issues error message (ditto)
3889 _infoFOO              .IA_64.unwind_infoFOO        gas issues error message (ditto)
3890
3891 This mapping is done so that:
3892
3893 (a) An object file with unwind info only in .text will use
3894 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3895 This follows the letter of the ABI and also ensures backwards
3896 compatibility with older toolchains.
3897
3898 (b) An object file with unwind info in multiple text sections
3899 will use separate unwind sections for each text section.
3900 This allows us to properly set the "sh_info" and "sh_link"
3901 fields in SHT_IA_64_UNWIND as required by the ABI and also
3902 lets GNU ld support programs with multiple segments
3903 containing unwind info (as might be the case for certain
3904 embedded applications).
3905
3906 (c) An error is issued if there would be a name clash.
3907 */
3908 text_name = segment_name (saved_seg);
3909 if (strncmp (text_name, "_info", 5) == 0)
3910 {
3911 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3912 text_name);
3913 ignore_rest_of_line ();
3914 return;
3915 }
3916 if (strcmp (text_name, ".text") == 0)
3917 text_name = "";
3918
3919 insn_group_break (1, 0, 0);
3920
3921 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
3922 if (!unwind.info)
3923 generate_unwind_image (text_name);
3924
3925 if (unwind.info || unwind.force_unwind_entry)
3926 {
3927 subseg_set (md.last_text_seg, 0);
3928 unwind.proc_end = expr_build_dot ();
3929
3930 make_unw_section_name (SPECIAL_SECTION_UNWIND, text_name, sec_name);
3931 set_section ((char *) sec_name);
3932 bfd_set_section_flags (stdoutput, now_seg,
3933 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3934
3935 /* Make sure that section has 4 byte alignment for ILP32 and
3936 8 byte alignment for LP64. */
3937 record_alignment (now_seg, md.pointer_size_shift);
3938
3939 /* Need space for 3 pointers for procedure start, procedure end,
3940 and unwind info. */
3941 ptr = frag_more (3 * md.pointer_size);
3942 where = frag_now_fix () - (3 * md.pointer_size);
3943 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
3944
3945 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
3946 e.X_op = O_pseudo_fixup;
3947 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3948 e.X_add_number = 0;
3949 e.X_add_symbol = unwind.proc_start;
3950 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
3951
3952 e.X_op = O_pseudo_fixup;
3953 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3954 e.X_add_number = 0;
3955 e.X_add_symbol = unwind.proc_end;
3956 ia64_cons_fix_new (frag_now, where + bytes_per_address,
3957 bytes_per_address, &e);
3958
3959 if (unwind.info)
3960 {
3961 e.X_op = O_pseudo_fixup;
3962 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3963 e.X_add_number = 0;
3964 e.X_add_symbol = unwind.info;
3965 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
3966 bytes_per_address, &e);
3967 }
3968 else
3969 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
3970 bytes_per_address);
3971
3972 }
3973 subseg_set (saved_seg, saved_subseg);
3974
3975 /* Parse names of main and alternate entry points and set symbol sizes. */
3976 while (1)
3977 {
3978 SKIP_WHITESPACE ();
3979 name = input_line_pointer;
3980 c = get_symbol_end ();
3981 p = input_line_pointer;
3982 sym = symbol_find (name);
3983 if (sym && unwind.proc_start
3984 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
3985 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
3986 {
3987 fragS *fr = symbol_get_frag (unwind.proc_start);
3988 fragS *frag = symbol_get_frag (sym);
3989
3990 /* Check whether the function label is at or beyond last
3991 .proc directive. */
3992 while (fr && fr != frag)
3993 fr = fr->fr_next;
3994 if (fr)
3995 {
3996 if (frag == frag_now && SEG_NORMAL (now_seg))
3997 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
3998 else
3999 {
4000 symbol_get_obj (sym)->size =
4001 (expressionS *) xmalloc (sizeof (expressionS));
4002 symbol_get_obj (sym)->size->X_op = O_subtract;
4003 symbol_get_obj (sym)->size->X_add_symbol
4004 = symbol_new (FAKE_LABEL_NAME, now_seg,
4005 frag_now_fix (), frag_now);
4006 symbol_get_obj (sym)->size->X_op_symbol = sym;
4007 symbol_get_obj (sym)->size->X_add_number = 0;
4008 }
4009 }
4010 }
4011 *p = c;
4012 SKIP_WHITESPACE ();
4013 if (*input_line_pointer != ',')
4014 break;
4015 ++input_line_pointer;
4016 }
4017 demand_empty_rest_of_line ();
4018 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4019 }
4020
4021 static void
4022 dot_template (template)
4023 int template;
4024 {
4025 CURR_SLOT.user_template = template;
4026 }
4027
4028 static void
4029 dot_regstk (dummy)
4030 int dummy ATTRIBUTE_UNUSED;
4031 {
4032 int ins, locs, outs, rots;
4033
4034 if (is_it_end_of_statement ())
4035 ins = locs = outs = rots = 0;
4036 else
4037 {
4038 ins = get_absolute_expression ();
4039 if (*input_line_pointer++ != ',')
4040 goto err;
4041 locs = get_absolute_expression ();
4042 if (*input_line_pointer++ != ',')
4043 goto err;
4044 outs = get_absolute_expression ();
4045 if (*input_line_pointer++ != ',')
4046 goto err;
4047 rots = get_absolute_expression ();
4048 }
4049 set_regstack (ins, locs, outs, rots);
4050 return;
4051
4052 err:
4053 as_bad ("Comma expected");
4054 ignore_rest_of_line ();
4055 }
4056
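/* Handle the rotating-register naming directives (registered elsewhere in
   this file's pseudo-op table as .rotr, .rotf and .rotp).  For illustration,

       .rotr in[4], out[4]

   names eight rotating general registers starting at r32 (in[0] = r32,
   out[0] = r36).  Names from a previous declaration of the same class are
   removed first, and the total may not exceed the limits checked below.  */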
4057 static void
4058 dot_rot (type)
4059 int type;
4060 {
4061 unsigned num_regs, num_alloced = 0;
4062 struct dynreg **drpp, *dr;
4063 int ch, base_reg = 0;
4064 char *name, *start;
4065 size_t len;
4066
4067 switch (type)
4068 {
4069 case DYNREG_GR: base_reg = REG_GR + 32; break;
4070 case DYNREG_FR: base_reg = REG_FR + 32; break;
4071 case DYNREG_PR: base_reg = REG_P + 16; break;
4072 default: break;
4073 }
4074
4075 /* First, remove existing names from hash table. */
4076 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4077 {
4078 hash_delete (md.dynreg_hash, dr->name);
4079 dr->num_regs = 0;
4080 }
4081
4082 drpp = &md.dynreg[type];
4083 while (1)
4084 {
4085 start = input_line_pointer;
4086 ch = get_symbol_end ();
4087 *input_line_pointer = ch;
4088 len = (input_line_pointer - start);
4089
4090 SKIP_WHITESPACE ();
4091 if (*input_line_pointer != '[')
4092 {
4093 as_bad ("Expected '['");
4094 goto err;
4095 }
4096 ++input_line_pointer; /* skip '[' */
4097
4098 num_regs = get_absolute_expression ();
4099
4100 if (*input_line_pointer++ != ']')
4101 {
4102 as_bad ("Expected ']'");
4103 goto err;
4104 }
4105 SKIP_WHITESPACE ();
4106
4107 num_alloced += num_regs;
4108 switch (type)
4109 {
4110 case DYNREG_GR:
4111 if (num_alloced > md.rot.num_regs)
4112 {
4113 as_bad ("Used more than the declared %d rotating registers",
4114 md.rot.num_regs);
4115 goto err;
4116 }
4117 break;
4118 case DYNREG_FR:
4119 if (num_alloced > 96)
4120 {
4121 as_bad ("Used more than the available 96 rotating registers");
4122 goto err;
4123 }
4124 break;
4125 case DYNREG_PR:
4126 if (num_alloced > 48)
4127 {
4128 as_bad ("Used more than the available 48 rotating registers");
4129 goto err;
4130 }
4131 break;
4132
4133 default:
4134 break;
4135 }
4136
4137 name = obstack_alloc (&notes, len + 1);
4138 memcpy (name, start, len);
4139 name[len] = '\0';
4140
4141 if (!*drpp)
4142 {
4143 *drpp = obstack_alloc (&notes, sizeof (*dr));
4144 memset (*drpp, 0, sizeof (*dr));
4145 }
4146
4147 dr = *drpp;
4148 dr->name = name;
4149 dr->num_regs = num_regs;
4150 dr->base = base_reg;
4151 drpp = &dr->next;
4152 base_reg += num_regs;
4153
4154 if (hash_insert (md.dynreg_hash, name, dr))
4155 {
4156 as_bad ("Attempt to redefine register set `%s'", name);
4157 goto err;
4158 }
4159
4160 if (*input_line_pointer != ',')
4161 break;
4162 ++input_line_pointer; /* skip comma */
4163 SKIP_WHITESPACE ();
4164 }
4165 demand_empty_rest_of_line ();
4166 return;
4167
4168 err:
4169 ignore_rest_of_line ();
4170 }
4171
4172 static void
4173 dot_byteorder (byteorder)
4174 int byteorder;
4175 {
4176 target_big_endian = byteorder;
4177 }
4178
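/* .psr option [, option ...]
   Record the intended byte order (lsb/msb) and ABI width (abi32/abi64)
   in the ELF header flags of the output file.  For example:

	.psr abi64, lsb  */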
4179 static void
4180 dot_psr (dummy)
4181 int dummy ATTRIBUTE_UNUSED;
4182 {
4183 char *option;
4184 int ch;
4185
4186 while (1)
4187 {
4188 option = input_line_pointer;
4189 ch = get_symbol_end ();
4190 if (strcmp (option, "lsb") == 0)
4191 md.flags &= ~EF_IA_64_BE;
4192 else if (strcmp (option, "msb") == 0)
4193 md.flags |= EF_IA_64_BE;
4194 else if (strcmp (option, "abi32") == 0)
4195 md.flags &= ~EF_IA_64_ABI64;
4196 else if (strcmp (option, "abi64") == 0)
4197 md.flags |= EF_IA_64_ABI64;
4198 else
4199 as_bad ("Unknown psr option `%s'", option);
4200 *input_line_pointer = ch;
4201
4202 SKIP_WHITESPACE ();
4203 if (*input_line_pointer != ',')
4204 break;
4205
4206 ++input_line_pointer;
4207 SKIP_WHITESPACE ();
4208 }
4209 demand_empty_rest_of_line ();
4210 }
4211
4212 static void
4213 dot_alias (dummy)
4214 int dummy ATTRIBUTE_UNUSED;
4215 {
4216 as_bad (".alias not implemented yet");
4217 }
4218
4219 static void
4220 dot_ln (dummy)
4221 int dummy ATTRIBUTE_UNUSED;
4222 {
4223 new_logical_line (0, get_absolute_expression ());
4224 demand_empty_rest_of_line ();
4225 }
4226
4227 static char *
4228 parse_section_name ()
4229 {
4230 char *name;
4231 int len;
4232
4233 SKIP_WHITESPACE ();
4234 if (*input_line_pointer != '"')
4235 {
4236 as_bad ("Missing section name");
4237 ignore_rest_of_line ();
4238 return 0;
4239 }
4240 name = demand_copy_C_string (&len);
4241 if (!name)
4242 {
4243 ignore_rest_of_line ();
4244 return 0;
4245 }
4246 SKIP_WHITESPACE ();
4247 if (*input_line_pointer != ',')
4248 {
4249 as_bad ("Comma expected after section name");
4250 ignore_rest_of_line ();
4251 return 0;
4252 }
4253 ++input_line_pointer; /* skip comma */
4254 return name;
4255 }
4256
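/* .xdata1/.xdata2/.xdata4/.xdata8 "section", value [, value ...]
   Emit constants of the given size into the named (quoted) section,
   then switch back to the previous section.  Illustrative usage
   (section name and values are arbitrary):

	.xdata4 ".my_notes", 1, 2, 3  */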
4257 static void
4258 dot_xdata (size)
4259 int size;
4260 {
4261 char *name = parse_section_name ();
4262 if (!name)
4263 return;
4264
4265 md.keep_pending_output = 1;
4266 set_section (name);
4267 cons (size);
4268 obj_elf_previous (0);
4269 md.keep_pending_output = 0;
4270 }
4271
4272 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4273
4274 static void
4275 stmt_float_cons (kind)
4276 int kind;
4277 {
4278 size_t size;
4279
4280 switch (kind)
4281 {
4282 case 'd': size = 8; break;
4283 case 'x': size = 10; break;
4284
4285 case 'f':
4286 default:
4287 size = 4;
4288 break;
4289 }
4290 ia64_do_align (size);
4291 float_cons (kind);
4292 }
4293
4294 static void
4295 stmt_cons_ua (size)
4296 int size;
4297 {
4298 int saved_auto_align = md.auto_align;
4299
4300 md.auto_align = 0;
4301 cons (size);
4302 md.auto_align = saved_auto_align;
4303 }
4304
4305 static void
4306 dot_xfloat_cons (kind)
4307 int kind;
4308 {
4309 char *name = parse_section_name ();
4310 if (!name)
4311 return;
4312
4313 md.keep_pending_output = 1;
4314 set_section (name);
4315 stmt_float_cons (kind);
4316 obj_elf_previous (0);
4317 md.keep_pending_output = 0;
4318 }
4319
4320 static void
4321 dot_xstringer (zero)
4322 int zero;
4323 {
4324 char *name = parse_section_name ();
4325 if (!name)
4326 return;
4327
4328 md.keep_pending_output = 1;
4329 set_section (name);
4330 stringer (zero);
4331 obj_elf_previous (0);
4332 md.keep_pending_output = 0;
4333 }
4334
4335 static void
4336 dot_xdata_ua (size)
4337 int size;
4338 {
4339 int saved_auto_align = md.auto_align;
4340 char *name = parse_section_name ();
4341 if (!name)
4342 return;
4343
4344 md.keep_pending_output = 1;
4345 set_section (name);
4346 md.auto_align = 0;
4347 cons (size);
4348 md.auto_align = saved_auto_align;
4349 obj_elf_previous (0);
4350 md.keep_pending_output = 0;
4351 }
4352
4353 static void
4354 dot_xfloat_cons_ua (kind)
4355 int kind;
4356 {
4357 int saved_auto_align = md.auto_align;
4358 char *name = parse_section_name ();
4359 if (!name)
4360 return;
4361
4362 md.keep_pending_output = 1;
4363 set_section (name);
4364 md.auto_align = 0;
4365 stmt_float_cons (kind);
4366 md.auto_align = saved_auto_align;
4367 obj_elf_previous (0);
4368 md.keep_pending_output = 0;
4369 }
4370
4371 /* .reg.val <regname>,value */
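/* For example (register and value chosen arbitrarily):

	.reg.val r14, 0x1000

   records that r14 is known to contain 0x1000 at this point, for use by
   the dependency-violation checker.  */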
4372
4373 static void
4374 dot_reg_val (dummy)
4375 int dummy ATTRIBUTE_UNUSED;
4376 {
4377 expressionS reg;
4378
4379 expression (&reg);
4380 if (reg.X_op != O_register)
4381 {
4382 as_bad (_("Register name expected"));
4383 ignore_rest_of_line ();
4384 }
4385 else if (*input_line_pointer++ != ',')
4386 {
4387 as_bad (_("Comma expected"));
4388 ignore_rest_of_line ();
4389 }
4390 else
4391 {
4392 valueT value = get_absolute_expression ();
4393 int regno = reg.X_add_number;
4394 if (regno < REG_GR || regno >= REG_GR + 128)
4395 as_warn (_("Register value annotation ignored"));
4396 else
4397 {
4398 gr_values[regno - REG_GR].known = 1;
4399 gr_values[regno - REG_GR].value = value;
4400 gr_values[regno - REG_GR].path = md.path;
4401 }
4402 }
4403 demand_empty_rest_of_line ();
4404 }
4405
4406 /* select dv checking mode
4407 .auto
4408 .explicit
4409 .default
4410
4411 A stop is inserted when changing modes
4412 */
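/* For example, a region that is scheduled by hand can be bracketed with

	.explicit
	...
	.default

   while the surrounding code keeps the default checking mode.  */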
4413
4414 static void
4415 dot_dv_mode (type)
4416 int type;
4417 {
4418 if (md.manual_bundling)
4419 as_warn (_("Directive invalid within a bundle"));
4420
4421 if (type == 'E' || type == 'A')
4422 md.mode_explicitly_set = 0;
4423 else
4424 md.mode_explicitly_set = 1;
4425
4426 md.detect_dv = 1;
4427 switch (type)
4428 {
4429 case 'A':
4430 case 'a':
4431 if (md.explicit_mode)
4432 insn_group_break (1, 0, 0);
4433 md.explicit_mode = 0;
4434 break;
4435 case 'E':
4436 case 'e':
4437 if (!md.explicit_mode)
4438 insn_group_break (1, 0, 0);
4439 md.explicit_mode = 1;
4440 break;
4441 default:
4442 case 'd':
4443 if (md.explicit_mode != md.default_explicit_mode)
4444 insn_group_break (1, 0, 0);
4445 md.explicit_mode = md.default_explicit_mode;
4446 md.mode_explicitly_set = 0;
4447 break;
4448 }
4449 }
4450
4451 static void
4452 print_prmask (mask)
4453 valueT mask;
4454 {
4455 int regno;
4456 char *comma = "";
4457 for (regno = 0; regno < 64; regno++)
4458 {
4459 if (mask & ((valueT) 1 << regno))
4460 {
4461 fprintf (stderr, "%s p%d", comma, regno);
4462 comma = ",";
4463 }
4464 }
4465 }
4466
4467 /*
4468 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4469 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4470 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4471 .pred.safe_across_calls p1 [, p2 [,...]]
4472 */
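/* Illustrative uses (predicate numbers are arbitrary):

	.pred.rel "mutex", p6, p7
	.pred.rel.imply p1, p2
	.pred.safe_across_calls p1-p5, p16

   For clear/imply/mutex, the quoted-string form and the suffixed form
   are equivalent.  */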
4473
4474 static void
4475 dot_pred_rel (type)
4476 int type;
4477 {
4478 valueT mask = 0;
4479 int count = 0;
4480 int p1 = -1, p2 = -1;
4481
4482 if (type == 0)
4483 {
4484 if (*input_line_pointer != '"')
4485 {
4486 as_bad (_("Missing predicate relation type"));
4487 ignore_rest_of_line ();
4488 return;
4489 }
4490 else
4491 {
4492 int len;
4493 char *form = demand_copy_C_string (&len);
4494 if (strcmp (form, "mutex") == 0)
4495 type = 'm';
4496 else if (strcmp (form, "clear") == 0)
4497 type = 'c';
4498 else if (strcmp (form, "imply") == 0)
4499 type = 'i';
4500 else
4501 {
4502 as_bad (_("Unrecognized predicate relation type"));
4503 ignore_rest_of_line ();
4504 return;
4505 }
4506 }
4507 if (*input_line_pointer == ',')
4508 ++input_line_pointer;
4509 SKIP_WHITESPACE ();
4510 }
4511
4512 SKIP_WHITESPACE ();
4513 while (1)
4514 {
4515 valueT bit = 1;
4516 int regno;
4517
4518 if (TOUPPER (*input_line_pointer) != 'P'
4519 || (regno = atoi (++input_line_pointer)) < 0
4520 || regno > 63)
4521 {
4522 as_bad (_("Predicate register expected"));
4523 ignore_rest_of_line ();
4524 return;
4525 }
4526 while (ISDIGIT (*input_line_pointer))
4527 ++input_line_pointer;
4528 if (p1 == -1)
4529 p1 = regno;
4530 else if (p2 == -1)
4531 p2 = regno;
4532 bit <<= regno;
4533 if (mask & bit)
4534 as_warn (_("Duplicate predicate register ignored"));
4535 mask |= bit;
4536 count++;
4537 /* See if it's a range. */
4538 if (*input_line_pointer == '-')
4539 {
4540 valueT stop = 1;
4541 ++input_line_pointer;
4542
4543 if (TOUPPER (*input_line_pointer) != 'P'
4544 || (regno = atoi (++input_line_pointer)) < 0
4545 || regno > 63)
4546 {
4547 as_bad (_("Predicate register expected"));
4548 ignore_rest_of_line ();
4549 return;
4550 }
4551 while (ISDIGIT (*input_line_pointer))
4552 ++input_line_pointer;
4553 stop <<= regno;
4554 if (bit >= stop)
4555 {
4556 as_bad (_("Bad register range"));
4557 ignore_rest_of_line ();
4558 return;
4559 }
4560 while (bit < stop)
4561 {
4562 bit <<= 1;
4563 mask |= bit;
4564 count++;
4565 }
4566 SKIP_WHITESPACE ();
4567 }
4568 if (*input_line_pointer != ',')
4569 break;
4570 ++input_line_pointer;
4571 SKIP_WHITESPACE ();
4572 }
4573
4574 switch (type)
4575 {
4576 case 'c':
4577 if (count == 0)
4578 mask = ~(valueT) 0;
4579 clear_qp_mutex (mask);
4580 clear_qp_implies (mask, (valueT) 0);
4581 break;
4582 case 'i':
4583 if (count != 2 || p1 == -1 || p2 == -1)
4584 as_bad (_("Predicate source and target required"));
4585 else if (p1 == 0 || p2 == 0)
4586 as_bad (_("Use of p0 is not valid in this context"));
4587 else
4588 add_qp_imply (p1, p2);
4589 break;
4590 case 'm':
4591 if (count < 2)
4592 {
4593 as_bad (_("At least two PR arguments expected"));
4594 break;
4595 }
4596 else if (mask & 1)
4597 {
4598 as_bad (_("Use of p0 is not valid in this context"));
4599 break;
4600 }
4601 add_qp_mutex (mask);
4602 break;
4603 case 's':
4604 /* note that we don't override any existing relations */
4605 if (count == 0)
4606 {
4607 as_bad (_("At least one PR argument expected"));
4608 break;
4609 }
4610 if (md.debug_dv)
4611 {
4612 fprintf (stderr, "Safe across calls: ");
4613 print_prmask (mask);
4614 fprintf (stderr, "\n");
4615 }
4616 qp_safe_across_calls = mask;
4617 break;
4618 }
4619 demand_empty_rest_of_line ();
4620 }
4621
4622 /* .entry label [, label [, ...]]
4623 Hint to DV code that the given labels are to be considered entry points.
4624 Otherwise, only global labels are considered entry points. */
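/* For example (label names are arbitrary):

	.entry handle_error, fast_path  */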
4625
4626 static void
4627 dot_entry (dummy)
4628 int dummy ATTRIBUTE_UNUSED;
4629 {
4630 const char *err;
4631 char *name;
4632 int c;
4633 symbolS *symbolP;
4634
4635 do
4636 {
4637 name = input_line_pointer;
4638 c = get_symbol_end ();
4639 symbolP = symbol_find_or_make (name);
4640
4641 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4642 if (err)
4643 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4644 name, err);
4645
4646 *input_line_pointer = c;
4647 SKIP_WHITESPACE ();
4648 c = *input_line_pointer;
4649 if (c == ',')
4650 {
4651 input_line_pointer++;
4652 SKIP_WHITESPACE ();
4653 if (*input_line_pointer == '\n')
4654 c = '\n';
4655 }
4656 }
4657 while (c == ',');
4658
4659 demand_empty_rest_of_line ();
4660 }
4661
4662 /* .mem.offset offset, base
4663 "base" is used to distinguish between offsets from different bases. */
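/* Illustrative usage (offsets and registers are arbitrary), telling the
   dependency checker that two stores access distinct locations:

	.mem.offset 0, 0
	st8 [r2] = r3
	.mem.offset 8, 0
	st8 [r4] = r5  */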
4664
4665 static void
4666 dot_mem_offset (dummy)
4667 int dummy ATTRIBUTE_UNUSED;
4668 {
4669 md.mem_offset.hint = 1;
4670 md.mem_offset.offset = get_absolute_expression ();
4671 if (*input_line_pointer != ',')
4672 {
4673 as_bad (_("Comma expected"));
4674 ignore_rest_of_line ();
4675 return;
4676 }
4677 ++input_line_pointer;
4678 md.mem_offset.base = get_absolute_expression ();
4679 demand_empty_rest_of_line ();
4680 }
4681
4682 /* ia64-specific pseudo-ops: */
4683 const pseudo_typeS md_pseudo_table[] =
4684 {
4685 { "radix", dot_radix, 0 },
4686 { "lcomm", s_lcomm_bytes, 1 },
4687 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4688 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4689 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4690 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4691 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4692 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4693 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4694 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
4695 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
4696 { "proc", dot_proc, 0 },
4697 { "body", dot_body, 0 },
4698 { "prologue", dot_prologue, 0 },
4699 { "endp", dot_endp, 0 },
4700 { "file", dwarf2_directive_file, 0 },
4701 { "loc", dwarf2_directive_loc, 0 },
4702
4703 { "fframe", dot_fframe, 0 },
4704 { "vframe", dot_vframe, 0 },
4705 { "vframesp", dot_vframesp, 0 },
4706 { "vframepsp", dot_vframepsp, 0 },
4707 { "save", dot_save, 0 },
4708 { "restore", dot_restore, 0 },
4709 { "restorereg", dot_restorereg, 0 },
4710 { "restorereg.p", dot_restorereg_p, 0 },
4711 { "handlerdata", dot_handlerdata, 0 },
4712 { "unwentry", dot_unwentry, 0 },
4713 { "altrp", dot_altrp, 0 },
4714 { "savesp", dot_savemem, 0 },
4715 { "savepsp", dot_savemem, 1 },
4716 { "save.g", dot_saveg, 0 },
4717 { "save.f", dot_savef, 0 },
4718 { "save.b", dot_saveb, 0 },
4719 { "save.gf", dot_savegf, 0 },
4720 { "spill", dot_spill, 0 },
4721 { "spillreg", dot_spillreg, 0 },
4722 { "spillsp", dot_spillmem, 0 },
4723 { "spillpsp", dot_spillmem, 1 },
4724 { "spillreg.p", dot_spillreg_p, 0 },
4725 { "spillsp.p", dot_spillmem_p, 0 },
4726 { "spillpsp.p", dot_spillmem_p, 1 },
4727 { "label_state", dot_label_state, 0 },
4728 { "copy_state", dot_copy_state, 0 },
4729 { "unwabi", dot_unwabi, 0 },
4730 { "personality", dot_personality, 0 },
4731 #if 0
4732 { "estate", dot_estate, 0 },
4733 #endif
4734 { "mii", dot_template, 0x0 },
4735 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4736 { "mlx", dot_template, 0x2 },
4737 { "mmi", dot_template, 0x4 },
4738 { "mfi", dot_template, 0x6 },
4739 { "mmf", dot_template, 0x7 },
4740 { "mib", dot_template, 0x8 },
4741 { "mbb", dot_template, 0x9 },
4742 { "bbb", dot_template, 0xb },
4743 { "mmb", dot_template, 0xc },
4744 { "mfb", dot_template, 0xe },
4745 #if 0
4746 { "lb", dot_scope, 0 },
4747 { "le", dot_scope, 1 },
4748 #endif
4749 { "align", s_align_bytes, 0 },
4750 { "regstk", dot_regstk, 0 },
4751 { "rotr", dot_rot, DYNREG_GR },
4752 { "rotf", dot_rot, DYNREG_FR },
4753 { "rotp", dot_rot, DYNREG_PR },
4754 { "lsb", dot_byteorder, 0 },
4755 { "msb", dot_byteorder, 1 },
4756 { "psr", dot_psr, 0 },
4757 { "alias", dot_alias, 0 },
4758 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4759
4760 { "xdata1", dot_xdata, 1 },
4761 { "xdata2", dot_xdata, 2 },
4762 { "xdata4", dot_xdata, 4 },
4763 { "xdata8", dot_xdata, 8 },
4764 { "xreal4", dot_xfloat_cons, 'f' },
4765 { "xreal8", dot_xfloat_cons, 'd' },
4766 { "xreal10", dot_xfloat_cons, 'x' },
4767 { "xstring", dot_xstringer, 0 },
4768 { "xstringz", dot_xstringer, 1 },
4769
4770 /* unaligned versions: */
4771 { "xdata2.ua", dot_xdata_ua, 2 },
4772 { "xdata4.ua", dot_xdata_ua, 4 },
4773 { "xdata8.ua", dot_xdata_ua, 8 },
4774 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4775 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4776 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4777
4778 /* annotations/DV checking support */
4779 { "entry", dot_entry, 0 },
4780 { "mem.offset", dot_mem_offset, 0 },
4781 { "pred.rel", dot_pred_rel, 0 },
4782 { "pred.rel.clear", dot_pred_rel, 'c' },
4783 { "pred.rel.imply", dot_pred_rel, 'i' },
4784 { "pred.rel.mutex", dot_pred_rel, 'm' },
4785 { "pred.safe_across_calls", dot_pred_rel, 's' },
4786 { "reg.val", dot_reg_val, 0 },
4787 { "auto", dot_dv_mode, 'a' },
4788 { "explicit", dot_dv_mode, 'e' },
4789 { "default", dot_dv_mode, 'd' },
4790
4791 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
4792 IA-64 aligns data allocation pseudo-ops by default, so we have to
4793 tell it that these ones are supposed to be unaligned. Long term,
4794 should rewrite so that only IA-64 specific data allocation pseudo-ops
4795 are aligned by default. */
4796 {"2byte", stmt_cons_ua, 2},
4797 {"4byte", stmt_cons_ua, 4},
4798 {"8byte", stmt_cons_ua, 8},
4799
4800 { NULL, 0, 0 }
4801 };
4802
4803 static const struct pseudo_opcode
4804 {
4805 const char *name;
4806 void (*handler) (int);
4807 int arg;
4808 }
4809 pseudo_opcode[] =
4810 {
4811 /* these are more like pseudo-ops, but don't start with a dot */
4812 { "data1", cons, 1 },
4813 { "data2", cons, 2 },
4814 { "data4", cons, 4 },
4815 { "data8", cons, 8 },
4816 { "data16", cons, 16 },
4817 { "real4", stmt_float_cons, 'f' },
4818 { "real8", stmt_float_cons, 'd' },
4819 { "real10", stmt_float_cons, 'x' },
4820 { "string", stringer, 0 },
4821 { "stringz", stringer, 1 },
4822
4823 /* unaligned versions: */
4824 { "data2.ua", stmt_cons_ua, 2 },
4825 { "data4.ua", stmt_cons_ua, 4 },
4826 { "data8.ua", stmt_cons_ua, 8 },
4827 { "data16.ua", stmt_cons_ua, 16 },
4828 { "real4.ua", float_cons, 'f' },
4829 { "real8.ua", float_cons, 'd' },
4830 { "real10.ua", float_cons, 'x' },
4831 };
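/* For example, "data8 0x12" emits an aligned 8-byte constant, while
   "data8.ua 0x12" emits the same constant without automatic alignment.
   (The value is arbitrary.)  */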
4832
4833 /* Declare a register by creating a symbol for it and entering it in
4834 the symbol table. */
4835
4836 static symbolS *
4837 declare_register (name, regnum)
4838 const char *name;
4839 int regnum;
4840 {
4841 const char *err;
4842 symbolS *sym;
4843
4844 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
4845
4846 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
4847 if (err)
4848 as_fatal ("Inserting \"%s\" into register table failed: %s",
4849 name, err);
4850
4851 return sym;
4852 }
4853
4854 static void
4855 declare_register_set (prefix, num_regs, base_regnum)
4856 const char *prefix;
4857 int num_regs;
4858 int base_regnum;
4859 {
4860 char name[8];
4861 int i;
4862
4863 for (i = 0; i < num_regs; ++i)
4864 {
4865 sprintf (name, "%s%u", prefix, i);
4866 declare_register (name, base_regnum + i);
4867 }
4868 }
4869
4870 static unsigned int
4871 operand_width (opnd)
4872 enum ia64_opnd opnd;
4873 {
4874 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
4875 unsigned int bits = 0;
4876 int i;
4877
4878 bits = 0;
4879 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
4880 bits += odesc->field[i].bits;
4881
4882 return bits;
4883 }
4884
4885 static enum operand_match_result
4886 operand_match (idesc, index, e)
4887 const struct ia64_opcode *idesc;
4888 int index;
4889 expressionS *e;
4890 {
4891 enum ia64_opnd opnd = idesc->operands[index];
4892 int bits, relocatable = 0;
4893 struct insn_fix *fix;
4894 bfd_signed_vma val;
4895
4896 switch (opnd)
4897 {
4898 /* constants: */
4899
4900 case IA64_OPND_AR_CCV:
4901 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
4902 return OPERAND_MATCH;
4903 break;
4904
4905 case IA64_OPND_AR_PFS:
4906 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
4907 return OPERAND_MATCH;
4908 break;
4909
4910 case IA64_OPND_GR0:
4911 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
4912 return OPERAND_MATCH;
4913 break;
4914
4915 case IA64_OPND_IP:
4916 if (e->X_op == O_register && e->X_add_number == REG_IP)
4917 return OPERAND_MATCH;
4918 break;
4919
4920 case IA64_OPND_PR:
4921 if (e->X_op == O_register && e->X_add_number == REG_PR)
4922 return OPERAND_MATCH;
4923 break;
4924
4925 case IA64_OPND_PR_ROT:
4926 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
4927 return OPERAND_MATCH;
4928 break;
4929
4930 case IA64_OPND_PSR:
4931 if (e->X_op == O_register && e->X_add_number == REG_PSR)
4932 return OPERAND_MATCH;
4933 break;
4934
4935 case IA64_OPND_PSR_L:
4936 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
4937 return OPERAND_MATCH;
4938 break;
4939
4940 case IA64_OPND_PSR_UM:
4941 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
4942 return OPERAND_MATCH;
4943 break;
4944
4945 case IA64_OPND_C1:
4946 if (e->X_op == O_constant)
4947 {
4948 if (e->X_add_number == 1)
4949 return OPERAND_MATCH;
4950 else
4951 return OPERAND_OUT_OF_RANGE;
4952 }
4953 break;
4954
4955 case IA64_OPND_C8:
4956 if (e->X_op == O_constant)
4957 {
4958 if (e->X_add_number == 8)
4959 return OPERAND_MATCH;
4960 else
4961 return OPERAND_OUT_OF_RANGE;
4962 }
4963 break;
4964
4965 case IA64_OPND_C16:
4966 if (e->X_op == O_constant)
4967 {
4968 if (e->X_add_number == 16)
4969 return OPERAND_MATCH;
4970 else
4971 return OPERAND_OUT_OF_RANGE;
4972 }
4973 break;
4974
4975 /* register operands: */
4976
4977 case IA64_OPND_AR3:
4978 if (e->X_op == O_register && e->X_add_number >= REG_AR
4979 && e->X_add_number < REG_AR + 128)
4980 return OPERAND_MATCH;
4981 break;
4982
4983 case IA64_OPND_B1:
4984 case IA64_OPND_B2:
4985 if (e->X_op == O_register && e->X_add_number >= REG_BR
4986 && e->X_add_number < REG_BR + 8)
4987 return OPERAND_MATCH;
4988 break;
4989
4990 case IA64_OPND_CR3:
4991 if (e->X_op == O_register && e->X_add_number >= REG_CR
4992 && e->X_add_number < REG_CR + 128)
4993 return OPERAND_MATCH;
4994 break;
4995
4996 case IA64_OPND_F1:
4997 case IA64_OPND_F2:
4998 case IA64_OPND_F3:
4999 case IA64_OPND_F4:
5000 if (e->X_op == O_register && e->X_add_number >= REG_FR
5001 && e->X_add_number < REG_FR + 128)
5002 return OPERAND_MATCH;
5003 break;
5004
5005 case IA64_OPND_P1:
5006 case IA64_OPND_P2:
5007 if (e->X_op == O_register && e->X_add_number >= REG_P
5008 && e->X_add_number < REG_P + 64)
5009 return OPERAND_MATCH;
5010 break;
5011
5012 case IA64_OPND_R1:
5013 case IA64_OPND_R2:
5014 case IA64_OPND_R3:
5015 if (e->X_op == O_register && e->X_add_number >= REG_GR
5016 && e->X_add_number < REG_GR + 128)
5017 return OPERAND_MATCH;
5018 break;
5019
5020 case IA64_OPND_R3_2:
5021 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5022 {
5023 if (e->X_add_number < REG_GR + 4)
5024 return OPERAND_MATCH;
5025 else if (e->X_add_number < REG_GR + 128)
5026 return OPERAND_OUT_OF_RANGE;
5027 }
5028 break;
5029
5030 /* indirect operands: */
5031 case IA64_OPND_CPUID_R3:
5032 case IA64_OPND_DBR_R3:
5033 case IA64_OPND_DTR_R3:
5034 case IA64_OPND_ITR_R3:
5035 case IA64_OPND_IBR_R3:
5036 case IA64_OPND_MSR_R3:
5037 case IA64_OPND_PKR_R3:
5038 case IA64_OPND_PMC_R3:
5039 case IA64_OPND_PMD_R3:
5040 case IA64_OPND_RR_R3:
5041 if (e->X_op == O_index && e->X_op_symbol
5042 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5043 == opnd - IA64_OPND_CPUID_R3))
5044 return OPERAND_MATCH;
5045 break;
5046
5047 case IA64_OPND_MR3:
5048 if (e->X_op == O_index && !e->X_op_symbol)
5049 return OPERAND_MATCH;
5050 break;
5051
5052 /* immediate operands: */
5053 case IA64_OPND_CNT2a:
5054 case IA64_OPND_LEN4:
5055 case IA64_OPND_LEN6:
5056 bits = operand_width (idesc->operands[index]);
5057 if (e->X_op == O_constant)
5058 {
5059 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5060 return OPERAND_MATCH;
5061 else
5062 return OPERAND_OUT_OF_RANGE;
5063 }
5064 break;
5065
5066 case IA64_OPND_CNT2b:
5067 if (e->X_op == O_constant)
5068 {
5069 if ((bfd_vma) (e->X_add_number - 1) < 3)
5070 return OPERAND_MATCH;
5071 else
5072 return OPERAND_OUT_OF_RANGE;
5073 }
5074 break;
5075
5076 case IA64_OPND_CNT2c:
5077 val = e->X_add_number;
5078 if (e->X_op == O_constant)
5079 {
5080 if ((val == 0 || val == 7 || val == 15 || val == 16))
5081 return OPERAND_MATCH;
5082 else
5083 return OPERAND_OUT_OF_RANGE;
5084 }
5085 break;
5086
5087 case IA64_OPND_SOR:
5088 /* SOR must be an integer multiple of 8 */
5089 if (e->X_op == O_constant && e->X_add_number & 0x7)
5090 return OPERAND_OUT_OF_RANGE;
5091 case IA64_OPND_SOF:
5092 case IA64_OPND_SOL:
5093 if (e->X_op == O_constant)
5094 {
5095 if ((bfd_vma) e->X_add_number <= 96)
5096 return OPERAND_MATCH;
5097 else
5098 return OPERAND_OUT_OF_RANGE;
5099 }
5100 break;
5101
5102 case IA64_OPND_IMMU62:
5103 if (e->X_op == O_constant)
5104 {
5105 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5106 return OPERAND_MATCH;
5107 else
5108 return OPERAND_OUT_OF_RANGE;
5109 }
5110 else
5111 {
5112 /* FIXME -- need 62-bit relocation type */
5113 as_bad (_("62-bit relocation not yet implemented"));
5114 }
5115 break;
5116
5117 case IA64_OPND_IMMU64:
5118 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5119 || e->X_op == O_subtract)
5120 {
5121 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5122 fix->code = BFD_RELOC_IA64_IMM64;
5123 if (e->X_op != O_subtract)
5124 {
5125 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5126 if (e->X_op == O_pseudo_fixup)
5127 e->X_op = O_symbol;
5128 }
5129
5130 fix->opnd = idesc->operands[index];
5131 fix->expr = *e;
5132 fix->is_pcrel = 0;
5133 ++CURR_SLOT.num_fixups;
5134 return OPERAND_MATCH;
5135 }
5136 else if (e->X_op == O_constant)
5137 return OPERAND_MATCH;
5138 break;
5139
5140 case IA64_OPND_CCNT5:
5141 case IA64_OPND_CNT5:
5142 case IA64_OPND_CNT6:
5143 case IA64_OPND_CPOS6a:
5144 case IA64_OPND_CPOS6b:
5145 case IA64_OPND_CPOS6c:
5146 case IA64_OPND_IMMU2:
5147 case IA64_OPND_IMMU7a:
5148 case IA64_OPND_IMMU7b:
5149 case IA64_OPND_IMMU21:
5150 case IA64_OPND_IMMU24:
5151 case IA64_OPND_MBTYPE4:
5152 case IA64_OPND_MHTYPE8:
5153 case IA64_OPND_POS6:
5154 bits = operand_width (idesc->operands[index]);
5155 if (e->X_op == O_constant)
5156 {
5157 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5158 return OPERAND_MATCH;
5159 else
5160 return OPERAND_OUT_OF_RANGE;
5161 }
5162 break;
5163
5164 case IA64_OPND_IMMU9:
5165 bits = operand_width (idesc->operands[index]);
5166 if (e->X_op == O_constant)
5167 {
5168 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5169 {
5170 int lobits = e->X_add_number & 0x3;
5171 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5172 e->X_add_number |= (bfd_vma) 0x3;
5173 return OPERAND_MATCH;
5174 }
5175 else
5176 return OPERAND_OUT_OF_RANGE;
5177 }
5178 break;
5179
5180 case IA64_OPND_IMM44:
5181 /* least significant 16 bits must be zero */
5182 if ((e->X_add_number & 0xffff) != 0)
5183 /* XXX technically, this is wrong: we should not be issuing warning
5184 messages until we're sure this instruction pattern is going to
5185 be used! */
5186 as_warn (_("lower 16 bits of mask ignored"));
5187
5188 if (e->X_op == O_constant)
5189 {
5190 if (((e->X_add_number >= 0
5191 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5192 || (e->X_add_number < 0
5193 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5194 {
5195 /* sign-extend */
5196 if (e->X_add_number >= 0
5197 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5198 {
5199 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5200 }
5201 return OPERAND_MATCH;
5202 }
5203 else
5204 return OPERAND_OUT_OF_RANGE;
5205 }
5206 break;
5207
5208 case IA64_OPND_IMM17:
5209 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5210 if (e->X_op == O_constant)
5211 {
5212 if (((e->X_add_number >= 0
5213 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5214 || (e->X_add_number < 0
5215 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5216 {
5217 /* sign-extend */
5218 if (e->X_add_number >= 0
5219 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5220 {
5221 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5222 }
5223 return OPERAND_MATCH;
5224 }
5225 else
5226 return OPERAND_OUT_OF_RANGE;
5227 }
5228 break;
5229
5230 case IA64_OPND_IMM14:
5231 case IA64_OPND_IMM22:
5232 relocatable = 1;
5233 case IA64_OPND_IMM1:
5234 case IA64_OPND_IMM8:
5235 case IA64_OPND_IMM8U4:
5236 case IA64_OPND_IMM8M1:
5237 case IA64_OPND_IMM8M1U4:
5238 case IA64_OPND_IMM8M1U8:
5239 case IA64_OPND_IMM9a:
5240 case IA64_OPND_IMM9b:
5241 bits = operand_width (idesc->operands[index]);
5242 if (relocatable && (e->X_op == O_symbol
5243 || e->X_op == O_subtract
5244 || e->X_op == O_pseudo_fixup))
5245 {
5246 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5247
5248 if (idesc->operands[index] == IA64_OPND_IMM14)
5249 fix->code = BFD_RELOC_IA64_IMM14;
5250 else
5251 fix->code = BFD_RELOC_IA64_IMM22;
5252
5253 if (e->X_op != O_subtract)
5254 {
5255 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5256 if (e->X_op == O_pseudo_fixup)
5257 e->X_op = O_symbol;
5258 }
5259
5260 fix->opnd = idesc->operands[index];
5261 fix->expr = *e;
5262 fix->is_pcrel = 0;
5263 ++CURR_SLOT.num_fixups;
5264 return OPERAND_MATCH;
5265 }
5266 else if (e->X_op != O_constant
5267 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5268 return OPERAND_MISMATCH;
5269
5270 if (opnd == IA64_OPND_IMM8M1U4)
5271 {
5272 /* Zero is not valid for unsigned compares that take an adjusted
5273 constant immediate range. */
5274 if (e->X_add_number == 0)
5275 return OPERAND_OUT_OF_RANGE;
5276
5277 /* Sign-extend 32-bit unsigned numbers, so that the following range
5278 checks will work. */
5279 val = e->X_add_number;
5280 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5281 && ((val & ((bfd_vma) 1 << 31)) != 0))
5282 val = ((val << 32) >> 32);
5283
5284 /* Check for 0x100000000. This is valid because
5285 0x100000000-1 is the same as ((uint32_t) -1). */
5286 if (val == ((bfd_signed_vma) 1 << 32))
5287 return OPERAND_MATCH;
5288
5289 val = val - 1;
5290 }
5291 else if (opnd == IA64_OPND_IMM8M1U8)
5292 {
5293 /* Zero is not valid for unsigned compares that take an adjusted
5294 constant immediate range. */
5295 if (e->X_add_number == 0)
5296 return OPERAND_OUT_OF_RANGE;
5297
5298 /* Check for 0x10000000000000000. */
5299 if (e->X_op == O_big)
5300 {
5301 if (generic_bignum[0] == 0
5302 && generic_bignum[1] == 0
5303 && generic_bignum[2] == 0
5304 && generic_bignum[3] == 0
5305 && generic_bignum[4] == 1)
5306 return OPERAND_MATCH;
5307 else
5308 return OPERAND_OUT_OF_RANGE;
5309 }
5310 else
5311 val = e->X_add_number - 1;
5312 }
5313 else if (opnd == IA64_OPND_IMM8M1)
5314 val = e->X_add_number - 1;
5315 else if (opnd == IA64_OPND_IMM8U4)
5316 {
5317 /* Sign-extend 32-bit unsigned numbers, so that the following range
5318 checks will work. */
5319 val = e->X_add_number;
5320 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5321 && ((val & ((bfd_vma) 1 << 31)) != 0))
5322 val = ((val << 32) >> 32);
5323 }
5324 else
5325 val = e->X_add_number;
5326
5327 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5328 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5329 return OPERAND_MATCH;
5330 else
5331 return OPERAND_OUT_OF_RANGE;
5332
5333 case IA64_OPND_INC3:
5334 /* +/- 1, 4, 8, 16 */
5335 val = e->X_add_number;
5336 if (val < 0)
5337 val = -val;
5338 if (e->X_op == O_constant)
5339 {
5340 if ((val == 1 || val == 4 || val == 8 || val == 16))
5341 return OPERAND_MATCH;
5342 else
5343 return OPERAND_OUT_OF_RANGE;
5344 }
5345 break;
5346
5347 case IA64_OPND_TGT25:
5348 case IA64_OPND_TGT25b:
5349 case IA64_OPND_TGT25c:
5350 case IA64_OPND_TGT64:
5351 if (e->X_op == O_symbol)
5352 {
5353 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5354 if (opnd == IA64_OPND_TGT25)
5355 fix->code = BFD_RELOC_IA64_PCREL21F;
5356 else if (opnd == IA64_OPND_TGT25b)
5357 fix->code = BFD_RELOC_IA64_PCREL21M;
5358 else if (opnd == IA64_OPND_TGT25c)
5359 fix->code = BFD_RELOC_IA64_PCREL21B;
5360 else if (opnd == IA64_OPND_TGT64)
5361 fix->code = BFD_RELOC_IA64_PCREL60B;
5362 else
5363 abort ();
5364
5365 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5366 fix->opnd = idesc->operands[index];
5367 fix->expr = *e;
5368 fix->is_pcrel = 1;
5369 ++CURR_SLOT.num_fixups;
5370 return OPERAND_MATCH;
5371 }
5372 case IA64_OPND_TAG13:
5373 case IA64_OPND_TAG13b:
5374 switch (e->X_op)
5375 {
5376 case O_constant:
5377 return OPERAND_MATCH;
5378
5379 case O_symbol:
5380 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5381 /* There are no external relocs for TAG13/TAG13b fields, so we
5382 create a dummy reloc. This will not live past md_apply_fix3. */
5383 fix->code = BFD_RELOC_UNUSED;
5384 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5385 fix->opnd = idesc->operands[index];
5386 fix->expr = *e;
5387 fix->is_pcrel = 1;
5388 ++CURR_SLOT.num_fixups;
5389 return OPERAND_MATCH;
5390
5391 default:
5392 break;
5393 }
5394 break;
5395
5396 default:
5397 break;
5398 }
5399 return OPERAND_MISMATCH;
5400 }
5401
5402 static int
5403 parse_operand (e)
5404 expressionS *e;
5405 {
5406 int sep = '\0';
5407
5408 memset (e, 0, sizeof (*e));
5409 e->X_op = O_absent;
5410 SKIP_WHITESPACE ();
5411 if (*input_line_pointer != '}')
5412 expression (e);
5413 sep = *input_line_pointer++;
5414
5415 if (sep == '}')
5416 {
5417 if (!md.manual_bundling)
5418 as_warn ("Found '}' when manual bundling is off");
5419 else
5420 CURR_SLOT.manual_bundling_off = 1;
5421 md.manual_bundling = 0;
5422 sep = '\0';
5423 }
5424 return sep;
5425 }
5426
5427 /* Returns the next entry in the opcode table that matches the one in
5428 IDESC, and frees the entry in IDESC. If no matching entry is
5429 found, NULL is returned instead. */
5430
5431 static struct ia64_opcode *
5432 get_next_opcode (struct ia64_opcode *idesc)
5433 {
5434 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5435 ia64_free_opcode (idesc);
5436 return next;
5437 }
5438
5439 /* Parse the operands for the opcode and find the opcode variant that
5440 matches the specified operands, or NULL if no match is possible. */
5441
5442 static struct ia64_opcode *
5443 parse_operands (idesc)
5444 struct ia64_opcode *idesc;
5445 {
5446 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5447 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5448 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5449 enum operand_match_result result;
5450 char mnemonic[129];
5451 char *first_arg = 0, *end, *saved_input_pointer;
5452 unsigned int sof;
5453
5454 assert (strlen (idesc->name) <= 128);
5455
5456 strcpy (mnemonic, idesc->name);
5457 if (idesc->operands[2] == IA64_OPND_SOF)
5458 {
5459 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5460 can't parse the first operand until we have parsed the
5461 remaining operands of the "alloc" instruction. */
5462 SKIP_WHITESPACE ();
5463 first_arg = input_line_pointer;
5464 end = strchr (input_line_pointer, '=');
5465 if (!end)
5466 {
5467 as_bad ("Expected separator `='");
5468 return 0;
5469 }
5470 input_line_pointer = end + 1;
5471 ++i;
5472 ++num_outputs;
5473 }
5474
5475 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5476 {
5477 sep = parse_operand (CURR_SLOT.opnd + i);
5478 if (CURR_SLOT.opnd[i].X_op == O_absent)
5479 break;
5480
5481 ++num_operands;
5482
5483 if (sep != '=' && sep != ',')
5484 break;
5485
5486 if (sep == '=')
5487 {
5488 if (num_outputs > 0)
5489 as_bad ("Duplicate equal sign (=) in instruction");
5490 else
5491 num_outputs = i + 1;
5492 }
5493 }
5494 if (sep != '\0')
5495 {
5496 as_bad ("Illegal operand separator `%c'", sep);
5497 return 0;
5498 }
5499
5500 if (idesc->operands[2] == IA64_OPND_SOF)
5501 {
5502 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5503 know (strcmp (idesc->name, "alloc") == 0);
5504 if (num_operands == 5 /* first_arg not included in this count! */
5505 && CURR_SLOT.opnd[2].X_op == O_constant
5506 && CURR_SLOT.opnd[3].X_op == O_constant
5507 && CURR_SLOT.opnd[4].X_op == O_constant
5508 && CURR_SLOT.opnd[5].X_op == O_constant)
5509 {
5510 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5511 CURR_SLOT.opnd[3].X_add_number,
5512 CURR_SLOT.opnd[4].X_add_number,
5513 CURR_SLOT.opnd[5].X_add_number);
5514
5515 /* now we can parse the first arg: */
5516 saved_input_pointer = input_line_pointer;
5517 input_line_pointer = first_arg;
5518 sep = parse_operand (CURR_SLOT.opnd + 0);
5519 if (sep != '=')
5520 --num_outputs; /* force error */
5521 input_line_pointer = saved_input_pointer;
5522
5523 CURR_SLOT.opnd[2].X_add_number = sof;
5524 CURR_SLOT.opnd[3].X_add_number
5525 = sof - CURR_SLOT.opnd[4].X_add_number;
5526 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5527 }
5528 }
5529
5530 highest_unmatched_operand = 0;
5531 curr_out_of_range_pos = -1;
5532 error_pos = 0;
5533 expected_operand = idesc->operands[0];
5534 for (; idesc; idesc = get_next_opcode (idesc))
5535 {
5536 if (num_outputs != idesc->num_outputs)
5537 continue; /* mismatch in # of outputs */
5538
5539 CURR_SLOT.num_fixups = 0;
5540
5541 /* Try to match all operands. If we see an out-of-range operand,
5542 then continue trying to match the rest of the operands, since if
5543 the rest match, then this idesc will give the best error message. */
5544
5545 out_of_range_pos = -1;
5546 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5547 {
5548 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5549 if (result != OPERAND_MATCH)
5550 {
5551 if (result != OPERAND_OUT_OF_RANGE)
5552 break;
5553 if (out_of_range_pos < 0)
5554 /* remember position of the first out-of-range operand: */
5555 out_of_range_pos = i;
5556 }
5557 }
5558
5559 /* If we did not match all operands, or if at least one operand was
5560 out-of-range, then this idesc does not match. Keep track of which
5561 idesc matched the most operands before failing. If we have two
5562 idescs that failed at the same position, and one had an out-of-range
5563 operand, then prefer the out-of-range operand. Thus if we have
5564 "add r0=0x1000000,r1" we get an error saying the constant is out
5565 of range instead of an error saying that the constant should have been
5566 a register. */
5567
5568 if (i != num_operands || out_of_range_pos >= 0)
5569 {
5570 if (i > highest_unmatched_operand
5571 || (i == highest_unmatched_operand
5572 && out_of_range_pos > curr_out_of_range_pos))
5573 {
5574 highest_unmatched_operand = i;
5575 if (out_of_range_pos >= 0)
5576 {
5577 expected_operand = idesc->operands[out_of_range_pos];
5578 error_pos = out_of_range_pos;
5579 }
5580 else
5581 {
5582 expected_operand = idesc->operands[i];
5583 error_pos = i;
5584 }
5585 curr_out_of_range_pos = out_of_range_pos;
5586 }
5587 continue;
5588 }
5589
5590 if (num_operands < NELEMS (idesc->operands)
5591 && idesc->operands[num_operands])
5592 continue; /* mismatch in number of arguments */
5593
5594 break;
5595 }
5596 if (!idesc)
5597 {
5598 if (expected_operand)
5599 as_bad ("Operand %u of `%s' should be %s",
5600 error_pos + 1, mnemonic,
5601 elf64_ia64_operands[expected_operand].desc);
5602 else
5603 as_bad ("Operand mismatch");
5604 return 0;
5605 }
5606 return idesc;
5607 }
5608
5609 /* Keep track of state necessary to determine whether a NOP is necessary
5610 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5611 detect a case where additional NOPs may be necessary. */
5612 static int
5613 errata_nop_necessary_p (slot, insn_unit)
5614 struct slot *slot;
5615 enum ia64_unit insn_unit;
5616 {
5617 int i;
5618 struct group *this_group = md.last_groups + md.group_idx;
5619 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5620 struct ia64_opcode *idesc = slot->idesc;
5621
5622 /* Test whether this could be the first insn in a problematic sequence. */
5623 if (insn_unit == IA64_UNIT_F)
5624 {
5625 for (i = 0; i < idesc->num_outputs; i++)
5626 if (idesc->operands[i] == IA64_OPND_P1
5627 || idesc->operands[i] == IA64_OPND_P2)
5628 {
5629 int regno = slot->opnd[i].X_add_number - REG_P;
5630 /* Ignore invalid operands; they generate errors elsewhere. */
5631 if (regno >= 64)
5632 return 0;
5633 this_group->p_reg_set[regno] = 1;
5634 }
5635 }
5636
5637 /* Test whether this could be the second insn in a problematic sequence. */
5638 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5639 && prev_group->p_reg_set[slot->qp_regno])
5640 {
5641 for (i = 0; i < idesc->num_outputs; i++)
5642 if (idesc->operands[i] == IA64_OPND_R1
5643 || idesc->operands[i] == IA64_OPND_R2
5644 || idesc->operands[i] == IA64_OPND_R3)
5645 {
5646 int regno = slot->opnd[i].X_add_number - REG_GR;
5647 /* Ignore invalid operands; they generate errors elsewhere. */
5648 if (regno >= 128)
5649 return 0;
5650 if (strncmp (idesc->name, "add", 3) != 0
5651 && strncmp (idesc->name, "sub", 3) != 0
5652 && strncmp (idesc->name, "shladd", 6) != 0
5653 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5654 this_group->g_reg_set_conditionally[regno] = 1;
5655 }
5656 }
5657
5658 /* Test whether this could be the third insn in a problematic sequence. */
5659 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5660 {
5661 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe, ptr, ptc. */
5662 idesc->operands[i] == IA64_OPND_R3
5663 /* For mov indirect. */
5664 || idesc->operands[i] == IA64_OPND_RR_R3
5665 || idesc->operands[i] == IA64_OPND_DBR_R3
5666 || idesc->operands[i] == IA64_OPND_IBR_R3
5667 || idesc->operands[i] == IA64_OPND_PKR_R3
5668 || idesc->operands[i] == IA64_OPND_PMC_R3
5669 || idesc->operands[i] == IA64_OPND_PMD_R3
5670 || idesc->operands[i] == IA64_OPND_MSR_R3
5671 || idesc->operands[i] == IA64_OPND_CPUID_R3
5672 /* For itr. */
5673 || idesc->operands[i] == IA64_OPND_ITR_R3
5674 || idesc->operands[i] == IA64_OPND_DTR_R3
5675 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5676 || idesc->operands[i] == IA64_OPND_MR3)
5677 {
5678 int regno = slot->opnd[i].X_add_number - REG_GR;
5679 /* Ignore invalid operands; they generate errors elsewhere. */
5680 if (regno >= 128)
5681 return 0;
5682 if (idesc->operands[i] == IA64_OPND_R3)
5683 {
5684 if (strcmp (idesc->name, "fc") != 0
5685 && strcmp (idesc->name, "tak") != 0
5686 && strcmp (idesc->name, "thash") != 0
5687 && strcmp (idesc->name, "tpa") != 0
5688 && strcmp (idesc->name, "ttag") != 0
5689 && strncmp (idesc->name, "ptr", 3) != 0
5690 && strncmp (idesc->name, "ptc", 3) != 0
5691 && strncmp (idesc->name, "probe", 5) != 0)
5692 return 0;
5693 }
5694 if (prev_group->g_reg_set_conditionally[regno])
5695 return 1;
5696 }
5697 }
5698 return 0;
5699 }
5700
5701 static void
5702 build_insn (slot, insnp)
5703 struct slot *slot;
5704 bfd_vma *insnp;
5705 {
5706 const struct ia64_operand *odesc, *o2desc;
5707 struct ia64_opcode *idesc = slot->idesc;
5708 bfd_signed_vma insn, val;
5709 const char *err;
5710 int i;
5711
5712 insn = idesc->opcode | slot->qp_regno;
5713
5714 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5715 {
5716 if (slot->opnd[i].X_op == O_register
5717 || slot->opnd[i].X_op == O_constant
5718 || slot->opnd[i].X_op == O_index)
5719 val = slot->opnd[i].X_add_number;
5720 else if (slot->opnd[i].X_op == O_big)
5721 {
5722 /* This must be the value 0x10000000000000000. */
5723 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5724 val = 0;
5725 }
5726 else
5727 val = 0;
5728
5729 switch (idesc->operands[i])
5730 {
5731 case IA64_OPND_IMMU64:
5732 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5733 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5734 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5735 | (((val >> 63) & 0x1) << 36));
5736 continue;
5737
5738 case IA64_OPND_IMMU62:
5739 val &= 0x3fffffffffffffffULL;
5740 if (val != slot->opnd[i].X_add_number)
5741 as_warn (_("Value truncated to 62 bits"));
5742 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5743 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5744 continue;
5745
5746 case IA64_OPND_TGT64:
5747 val >>= 4;
5748 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5749 insn |= ((((val >> 59) & 0x1) << 36)
5750 | (((val >> 0) & 0xfffff) << 13));
5751 continue;
5752
5753 case IA64_OPND_AR3:
5754 val -= REG_AR;
5755 break;
5756
5757 case IA64_OPND_B1:
5758 case IA64_OPND_B2:
5759 val -= REG_BR;
5760 break;
5761
5762 case IA64_OPND_CR3:
5763 val -= REG_CR;
5764 break;
5765
5766 case IA64_OPND_F1:
5767 case IA64_OPND_F2:
5768 case IA64_OPND_F3:
5769 case IA64_OPND_F4:
5770 val -= REG_FR;
5771 break;
5772
5773 case IA64_OPND_P1:
5774 case IA64_OPND_P2:
5775 val -= REG_P;
5776 break;
5777
5778 case IA64_OPND_R1:
5779 case IA64_OPND_R2:
5780 case IA64_OPND_R3:
5781 case IA64_OPND_R3_2:
5782 case IA64_OPND_CPUID_R3:
5783 case IA64_OPND_DBR_R3:
5784 case IA64_OPND_DTR_R3:
5785 case IA64_OPND_ITR_R3:
5786 case IA64_OPND_IBR_R3:
5787 case IA64_OPND_MR3:
5788 case IA64_OPND_MSR_R3:
5789 case IA64_OPND_PKR_R3:
5790 case IA64_OPND_PMC_R3:
5791 case IA64_OPND_PMD_R3:
5792 case IA64_OPND_RR_R3:
5793 val -= REG_GR;
5794 break;
5795
5796 default:
5797 break;
5798 }
5799
5800 odesc = elf64_ia64_operands + idesc->operands[i];
5801 err = (*odesc->insert) (odesc, val, &insn);
5802 if (err)
5803 as_bad_where (slot->src_file, slot->src_line,
5804 "Bad operand value: %s", err);
5805 if (idesc->flags & IA64_OPCODE_PSEUDO)
5806 {
5807 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
5808 && odesc == elf64_ia64_operands + IA64_OPND_F3)
5809 {
5810 o2desc = elf64_ia64_operands + IA64_OPND_F2;
5811 (*o2desc->insert) (o2desc, val, &insn);
5812 }
5813 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
5814 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
5815 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
5816 {
5817 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
5818 (*o2desc->insert) (o2desc, 64 - val, &insn);
5819 }
5820 }
5821 }
5822 *insnp = insn;
5823 }
5824
5825 static void
5826 emit_one_bundle ()
5827 {
5828 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
5829 unsigned int manual_bundling = 0;
5830 enum ia64_unit required_unit, insn_unit = 0;
5831 enum ia64_insn_type type[3], insn_type;
5832 unsigned int template, orig_template;
5833 bfd_vma insn[3] = { -1, -1, -1 };
5834 struct ia64_opcode *idesc;
5835 int end_of_insn_group = 0, user_template = -1;
5836 int n, i, j, first, curr;
5837 unw_rec_list *ptr;
5838 bfd_vma t0 = 0, t1 = 0;
5839 struct label_fix *lfix;
5840 struct insn_fix *ifix;
5841 char mnemonic[16];
5842 fixS *fix;
5843 char *f;
5844
5845 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
5846 know (first >= 0 && first < NUM_SLOTS);
5847 n = MIN (3, md.num_slots_in_use);
5848
5849 /* Determine template: use user_template if specified, best match
5850 otherwise: */
5851
5852 if (md.slot[first].user_template >= 0)
5853 user_template = template = md.slot[first].user_template;
5854 else
5855 {
5856 /* Auto select appropriate template. */
5857 memset (type, 0, sizeof (type));
5858 curr = first;
5859 for (i = 0; i < n; ++i)
5860 {
5861 if (md.slot[curr].label_fixups && i != 0)
5862 break;
5863 type[i] = md.slot[curr].idesc->type;
5864 curr = (curr + 1) % NUM_SLOTS;
5865 }
5866 template = best_template[type[0]][type[1]][type[2]];
5867 }
5868
5869 /* initialize instructions with appropriate nops: */
5870 for (i = 0; i < 3; ++i)
5871 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
5872
5873 f = frag_more (16);
5874
5875 /* now fill in slots with as many insns as possible: */
5876 curr = first;
5877 idesc = md.slot[curr].idesc;
5878 end_of_insn_group = 0;
5879 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
5880 {
5881 /* Set the slot number for prologue/body records now as those
5882 refer to the current point, not the point after the
5883 instruction has been issued: */
5884 /* Don't try to delete prologue/body records here, as that will cause
5885 them to also be deleted from the master list of unwind records. */
5886 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5887 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
5888 || ptr->r.type == body)
5889 {
5890 ptr->slot_number = (unsigned long) f + i;
5891 ptr->slot_frag = frag_now;
5892 }
5893
5894 if (idesc->flags & IA64_OPCODE_SLOT2)
5895 {
5896 if (manual_bundling && i != 2)
5897 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5898 "`%s' must be last in bundle", idesc->name);
5899 else
5900 i = 2;
5901 }
5902 if (idesc->flags & IA64_OPCODE_LAST)
5903 {
5904 int required_slot;
5905 unsigned int required_template;
5906
5907 /* If we need a stop bit after an M slot, our only choice is
5908 template 5 (M;;MI). If we need a stop bit after a B
5909 slot, our only choice is to place it at the end of the
5910 bundle, because the only available templates are MIB,
5911 MBB, BBB, MMB, and MFB. We don't handle anything other
5912 than M and B slots because these are the only kind of
5913 instructions that can have the IA64_OPCODE_LAST bit set. */
5914 required_template = template;
5915 switch (idesc->type)
5916 {
5917 case IA64_TYPE_M:
5918 required_slot = 0;
5919 required_template = 5;
5920 break;
5921
5922 case IA64_TYPE_B:
5923 required_slot = 2;
5924 break;
5925
5926 default:
5927 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5928 "Internal error: don't know how to force %s to end "
5929 "of instruction group", idesc->name);
5930 required_slot = i;
5931 break;
5932 }
5933 if (manual_bundling && i != required_slot)
5934 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5935 "`%s' must be last in instruction group",
5936 idesc->name);
5937 if (required_slot < i)
5938 /* Can't fit this instruction. */
5939 break;
5940
5941 i = required_slot;
5942 if (required_template != template)
5943 {
5944 /* If we switch the template, we need to reset the NOPs
5945 after slot i. The slot-types of the instructions ahead
5946 of i never change, so we don't need to worry about
5947 changing NOPs in front of this slot. */
5948 for (j = i; j < 3; ++j)
5949 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
5950 }
5951 template = required_template;
5952 }
5953 if (curr != first && md.slot[curr].label_fixups)
5954 {
5955 if (manual_bundling_on)
5956 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5957 "Label must be first in a bundle");
5958 /* This insn must go into the first slot of a bundle. */
5959 break;
5960 }
5961
5962 manual_bundling_on = md.slot[curr].manual_bundling_on;
5963 manual_bundling_off = md.slot[curr].manual_bundling_off;
5964
5965 if (manual_bundling_on)
5966 {
5967 if (curr == first)
5968 manual_bundling = 1;
5969 else
5970 break; /* need to start a new bundle */
5971 }
5972
5973 if (end_of_insn_group && md.num_slots_in_use >= 1)
5974 {
5975 /* We need an instruction group boundary in the middle of a
5976 bundle. See if we can switch to another template with
5977 an appropriate boundary. */
5978
5979 orig_template = template;
5980 if (i == 1 && (user_template == 4
5981 || (user_template < 0
5982 && (ia64_templ_desc[template].exec_unit[0]
5983 == IA64_UNIT_M))))
5984 {
5985 template = 5;
5986 end_of_insn_group = 0;
5987 }
5988 else if (i == 2 && (user_template == 0
5989 || (user_template < 0
5990 && (ia64_templ_desc[template].exec_unit[1]
5991 == IA64_UNIT_I)))
5992 /* This test makes sure we don't switch the template if
5993 the next instruction is one that needs to be first in
5994 an instruction group. Since all those instructions are
5995 in the M group, there is no way such an instruction can
5996 fit in this bundle even if we switch the template. The
5997 reason we have to check for this is that otherwise we
5998 may end up generating "MI;;I M.." which has the deadly
5999 effect that the second M instruction is no longer the
6000 first in the bundle! --davidm 99/12/16 */
6001 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6002 {
6003 template = 1;
6004 end_of_insn_group = 0;
6005 }
6006 else if (curr != first)
6007 /* can't fit this insn */
6008 break;
6009
6010 if (template != orig_template)
6011 /* if we switch the template, we need to reset the NOPs
6012 after slot i. The slot-types of the instructions ahead
6013 of i never change, so we don't need to worry about
6014 changing NOPs in front of this slot. */
6015 for (j = i; j < 3; ++j)
6016 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6017 }
6018 required_unit = ia64_templ_desc[template].exec_unit[i];
6019
6020 /* resolve dynamic opcodes such as "break" and "nop": */
6021 if (idesc->type == IA64_TYPE_DYN)
6022 {
6023 if ((strcmp (idesc->name, "nop") == 0)
6024 || (strcmp (idesc->name, "break") == 0))
6025 insn_unit = required_unit;
6026 else if (strcmp (idesc->name, "chk.s") == 0)
6027 {
6028 insn_unit = IA64_UNIT_M;
6029 if (required_unit == IA64_UNIT_I)
6030 insn_unit = IA64_UNIT_I;
6031 }
6032 else
6033 as_fatal ("emit_one_bundle: unexpected dynamic op");
6034
6035 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
6036 ia64_free_opcode (idesc);
6037 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
6038 #if 0
6039 know (!idesc->next); /* no resolved dynamic ops have collisions */
6040 #endif
6041 }
6042 else
6043 {
6044 insn_type = idesc->type;
6045 insn_unit = IA64_UNIT_NIL;
6046 switch (insn_type)
6047 {
6048 case IA64_TYPE_A:
6049 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6050 insn_unit = required_unit;
6051 break;
6052 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6053 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6054 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6055 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6056 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6057 default: break;
6058 }
6059 }
6060
6061 if (insn_unit != required_unit)
6062 {
6063 if (required_unit == IA64_UNIT_L
6064 && insn_unit == IA64_UNIT_I
6065 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6066 {
6067 /* we got ourselves an MLX template but the current
6068 instruction isn't an X-unit, or an I-unit instruction
6069 that can go into the X slot of an MLX template. Duh. */
6070 if (md.num_slots_in_use >= NUM_SLOTS)
6071 {
6072 as_bad_where (md.slot[curr].src_file,
6073 md.slot[curr].src_line,
6074 "`%s' can't go in X slot of "
6075 "MLX template", idesc->name);
6076 /* drop this insn so we don't livelock: */
6077 --md.num_slots_in_use;
6078 }
6079 break;
6080 }
6081 continue; /* try next slot */
6082 }
6083
6084 {
6085 bfd_vma addr;
6086
6087 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6088 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6089 }
6090
6091 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6092 as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
6093
6094 build_insn (md.slot + curr, insn + i);
6095
6096 /* Set slot counts for non prologue/body unwind records. */
6097 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6098 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
6099 && ptr->r.type != body)
6100 {
6101 ptr->slot_number = (unsigned long) f + i;
6102 ptr->slot_frag = frag_now;
6103 }
6104 md.slot[curr].unwind_record = NULL;
6105
6106 if (required_unit == IA64_UNIT_L)
6107 {
6108 know (i == 1);
6109 /* skip one slot for long/X-unit instructions */
6110 ++i;
6111 }
6112 --md.num_slots_in_use;
6113
6114 /* now is a good time to fix up the labels for this insn: */
6115 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6116 {
6117 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6118 symbol_set_frag (lfix->sym, frag_now);
6119 }
6120 /* and fix up the tags also. */
6121 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6122 {
6123 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6124 symbol_set_frag (lfix->sym, frag_now);
6125 }
6126
6127 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6128 {
6129 ifix = md.slot[curr].fixup + j;
6130 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6131 &ifix->expr, ifix->is_pcrel, ifix->code);
6132 fix->tc_fix_data.opnd = ifix->opnd;
6133 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6134 fix->fx_file = md.slot[curr].src_file;
6135 fix->fx_line = md.slot[curr].src_line;
6136 }
6137
6138 end_of_insn_group = md.slot[curr].end_of_insn_group;
6139
6140 if (end_of_insn_group)
6141 {
6142 md.group_idx = (md.group_idx + 1) % 3;
6143 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6144 }
6145
6146 /* clear slot: */
6147 ia64_free_opcode (md.slot[curr].idesc);
6148 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6149 md.slot[curr].user_template = -1;
6150
6151 if (manual_bundling_off)
6152 {
6153 manual_bundling = 0;
6154 break;
6155 }
6156 curr = (curr + 1) % NUM_SLOTS;
6157 idesc = md.slot[curr].idesc;
6158 }
6159 if (manual_bundling)
6160 {
6161 if (md.num_slots_in_use > 0)
6162 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6163 "`%s' does not fit into %s template",
6164 idesc->name, ia64_templ_desc[template].name);
6165 else
6166 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6167 "Missing '}' at end of file");
6168 }
6169 know (md.num_slots_in_use < NUM_SLOTS);
6170
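/* Pack the 128-bit bundle: bit 0 is the stop bit, bits 1-4 the template,
   and the three 41-bit instruction slots occupy bits 5-45, 46-86, and
   87-127.  The bundle is written out as two little-endian 64-bit words.  */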
6171 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6172 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6173
6174 number_to_chars_littleendian (f + 0, t0, 8);
6175 number_to_chars_littleendian (f + 8, t1, 8);
6176
6177 unwind.next_slot_number = (unsigned long) f + 16;
6178 unwind.next_slot_frag = frag_now;
6179 }
6180
6181 int
6182 md_parse_option (c, arg)
6183 int c;
6184 char *arg;
6185 {
6186
6187 switch (c)
6188 {
6189 /* Switches from the Intel assembler. */
6190 case 'm':
6191 if (strcmp (arg, "ilp64") == 0
6192 || strcmp (arg, "lp64") == 0
6193 || strcmp (arg, "p64") == 0)
6194 {
6195 md.flags |= EF_IA_64_ABI64;
6196 }
6197 else if (strcmp (arg, "ilp32") == 0)
6198 {
6199 md.flags &= ~EF_IA_64_ABI64;
6200 }
6201 else if (strcmp (arg, "le") == 0)
6202 {
6203 md.flags &= ~EF_IA_64_BE;
6204 }
6205 else if (strcmp (arg, "be") == 0)
6206 {
6207 md.flags |= EF_IA_64_BE;
6208 }
6209 else
6210 return 0;
6211 break;
6212
6213 case 'N':
6214 if (strcmp (arg, "so") == 0)
6215 {
6216 /* Suppress signon message. */
6217 }
6218 else if (strcmp (arg, "pi") == 0)
6219 {
6220 /* Reject privileged instructions. FIXME */
6221 }
6222 else if (strcmp (arg, "us") == 0)
6223 {
6224 /* Allow union of signed and unsigned range. FIXME */
6225 }
6226 else if (strcmp (arg, "close_fcalls") == 0)
6227 {
6228 /* Do not resolve global function calls. */
6229 }
6230 else
6231 return 0;
6232 break;
6233
6234 case 'C':
6235 /* temp[="prefix"] Insert temporary labels into the object file
6236 symbol table prefixed by "prefix".
6237 Default prefix is ":temp:".
6238 */
6239 break;
6240
6241 case 'a':
6242 /* indirect=<tgt> Assume that unannotated indirect branches behave
6243 according to <tgt> --
6244 exit: branch out from the current context (default)
6245 labels: all labels in context may be branch targets
6246 */
6247 if (strncmp (arg, "indirect=", 9) != 0)
6248 return 0;
6249 break;
6250
6251 case 'x':
6252 /* -X conflicts with an ignored option, use -x instead */
6253 md.detect_dv = 1;
6254 if (!arg || strcmp (arg, "explicit") == 0)
6255 {
6256 /* set default mode to explicit */
6257 md.default_explicit_mode = 1;
6258 break;
6259 }
6260 else if (strcmp (arg, "auto") == 0)
6261 {
6262 md.default_explicit_mode = 0;
6263 }
6264 else if (strcmp (arg, "debug") == 0)
6265 {
6266 md.debug_dv = 1;
6267 }
6268 else if (strcmp (arg, "debugx") == 0)
6269 {
6270 md.default_explicit_mode = 1;
6271 md.debug_dv = 1;
6272 }
6273 else
6274 {
6275 as_bad (_("Unrecognized option '-x%s'"), arg);
6276 }
6277 break;
6278
6279 case 'S':
6280 /* nops Print nops statistics. */
6281 break;
6282
6283 /* GNU specific switches for gcc. */
6284 case OPTION_MCONSTANT_GP:
6285 md.flags |= EF_IA_64_CONS_GP;
6286 break;
6287
6288 case OPTION_MAUTO_PIC:
6289 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6290 break;
6291
6292 default:
6293 return 0;
6294 }
6295
6296 return 1;
6297 }
6298
6299 void
6300 md_show_usage (stream)
6301 FILE *stream;
6302 {
6303 fputs (_("\
6304 IA-64 options:\n\
6305 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6306 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6307 -x | -xexplicit turn on dependency violation checking (default)\n\
6308 -xauto automagically remove dependency violations\n\
6309 -xdebug debug dependency violation checker\n"),
6310 stream);
6311 }
6312
6313 void
6314 ia64_after_parse_args ()
6315 {
6316 if (debug_type == DEBUG_STABS)
6317 as_fatal (_("--gstabs is not supported for ia64"));
6318 }
6319
6320 /* Return true if TYPE fits in TEMPL at SLOT. */
6321
6322 static int
6323 match (int templ, int type, int slot)
6324 {
6325 enum ia64_unit unit;
6326 int result;
6327
6328 unit = ia64_templ_desc[templ].exec_unit[slot];
6329 switch (type)
6330 {
6331 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6332 case IA64_TYPE_A:
6333 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6334 break;
6335 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6336 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6337 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6338 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6339 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6340 default: result = 0; break;
6341 }
6342 return result;
6343 }
6344
6345 /* Add a bit of extra goodness if a nop of type F or B would fit
6346 in TEMPL at SLOT. */
6347
6348 static inline int
6349 extra_goodness (int templ, int slot)
6350 {
6351 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6352 return 2;
6353 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6354 return 1;
6355 return 0;
6356 }
6357
6358 /* This function is called once, at assembler startup time. It sets
6359 up all the tables, etc. that the MD part of the assembler will need
6360 that can be determined before arguments are parsed. */
6361 void
6362 md_begin ()
6363 {
6364 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6365 const char *err;
6366 char name[8];
6367
6368 md.auto_align = 1;
6369 md.explicit_mode = md.default_explicit_mode;
6370
6371 bfd_set_section_alignment (stdoutput, text_section, 4);
6372
6373 target_big_endian = TARGET_BYTES_BIG_ENDIAN;
6374 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6375 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6376 &zero_address_frag);
6377
6378 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6379 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6380 &zero_address_frag);
6381
6382 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6383 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6384 &zero_address_frag);
6385
6386 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6387 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6388 &zero_address_frag);
6389
6390 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6391 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6392 &zero_address_frag);
6393
6394 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6395 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6396 &zero_address_frag);
6397
6398 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6399 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6400 &zero_address_frag);
6401
6402 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6403 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6404 &zero_address_frag);
6405
6406 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6407 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6408 &zero_address_frag);
6409
6410 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6411 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6412 &zero_address_frag);
6413
6414 /* Compute the table of best templates. We compute goodness as a
6415 base 4 value, in which each match counts for 3, each F counts
6416 for 2, each B counts for 1. This should maximize the number of
6417 F and B nops in the chosen bundles, which is good because these
6418 pipelines are least likely to be overcommitted. */
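/* For example, a template whose units match all three insn types scores
   3 + 3 + 3; one matching only slots 0 and 1 scores 3 + 3, plus 1 more if
   a B-unit nop would fit in slot 2.  */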
6419 for (i = 0; i < IA64_NUM_TYPES; ++i)
6420 for (j = 0; j < IA64_NUM_TYPES; ++j)
6421 for (k = 0; k < IA64_NUM_TYPES; ++k)
6422 {
6423 best = 0;
6424 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6425 {
6426 goodness = 0;
6427 if (match (t, i, 0))
6428 {
6429 if (match (t, j, 1))
6430 {
6431 if (match (t, k, 2))
6432 goodness = 3 + 3 + 3;
6433 else
6434 goodness = 3 + 3 + extra_goodness (t, 2);
6435 }
6436 else if (match (t, j, 2))
6437 goodness = 3 + 3 + extra_goodness (t, 1);
6438 else
6439 {
6440 goodness = 3;
6441 goodness += extra_goodness (t, 1);
6442 goodness += extra_goodness (t, 2);
6443 }
6444 }
6445 else if (match (t, i, 1))
6446 {
6447 if (match (t, j, 2))
6448 goodness = 3 + 3;
6449 else
6450 goodness = 3 + extra_goodness (t, 2);
6451 }
6452 else if (match (t, i, 2))
6453 goodness = 3 + extra_goodness (t, 1);
6454
6455 if (goodness > best)
6456 {
6457 best = goodness;
6458 best_template[i][j][k] = t;
6459 }
6460 }
6461 }
6462
6463 for (i = 0; i < NUM_SLOTS; ++i)
6464 md.slot[i].user_template = -1;
6465
6466 md.pseudo_hash = hash_new ();
6467 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6468 {
6469 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6470 (void *) (pseudo_opcode + i));
6471 if (err)
6472 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6473 pseudo_opcode[i].name, err);
6474 }
6475
6476 md.reg_hash = hash_new ();
6477 md.dynreg_hash = hash_new ();
6478 md.const_hash = hash_new ();
6479 md.entry_hash = hash_new ();
6480
6481 /* general registers: */
6482
6483 total = 128;
6484 for (i = 0; i < total; ++i)
6485 {
6486 sprintf (name, "r%d", i - REG_GR);
6487 md.regsym[i] = declare_register (name, i);
6488 }
6489
6490 /* floating point registers: */
6491 total += 128;
6492 for (; i < total; ++i)
6493 {
6494 sprintf (name, "f%d", i - REG_FR);
6495 md.regsym[i] = declare_register (name, i);
6496 }
6497
6498 /* application registers: */
6499 total += 128;
6500 ar_base = i;
6501 for (; i < total; ++i)
6502 {
6503 sprintf (name, "ar%d", i - REG_AR);
6504 md.regsym[i] = declare_register (name, i);
6505 }
6506
6507 /* control registers: */
6508 total += 128;
6509 cr_base = i;
6510 for (; i < total; ++i)
6511 {
6512 sprintf (name, "cr%d", i - REG_CR);
6513 md.regsym[i] = declare_register (name, i);
6514 }
6515
6516 /* predicate registers: */
6517 total += 64;
6518 for (; i < total; ++i)
6519 {
6520 sprintf (name, "p%d", i - REG_P);
6521 md.regsym[i] = declare_register (name, i);
6522 }
6523
6524 /* branch registers: */
6525 total += 8;
6526 for (; i < total; ++i)
6527 {
6528 sprintf (name, "b%d", i - REG_BR);
6529 md.regsym[i] = declare_register (name, i);
6530 }
6531
6532 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6533 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6534 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6535 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6536 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6537 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6538 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6539
6540 for (i = 0; i < NELEMS (indirect_reg); ++i)
6541 {
6542 regnum = indirect_reg[i].regnum;
6543 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6544 }
6545
6546 /* define synonyms for application registers: */
6547 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6548 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6549 REG_AR + ar[i - REG_AR].regnum);
6550
6551 /* define synonyms for control registers: */
6552 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6553 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6554 REG_CR + cr[i - REG_CR].regnum);
6555
6556 declare_register ("gp", REG_GR + 1);
6557 declare_register ("sp", REG_GR + 12);
6558 declare_register ("rp", REG_BR + 0);
6559
6560 /* pseudo-registers used to specify unwind info: */
6561 declare_register ("psp", REG_PSP);
6562
6563 declare_register_set ("ret", 4, REG_GR + 8);
6564 declare_register_set ("farg", 8, REG_FR + 8);
6565 declare_register_set ("fret", 8, REG_FR + 8);
6566
6567 for (i = 0; i < NELEMS (const_bits); ++i)
6568 {
6569 err = hash_insert (md.const_hash, const_bits[i].name,
6570 (PTR) (const_bits + i));
6571 if (err)
6572 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6573 const_bits[i].name, err);
6574 }
6575
6576 /* Set the architecture and machine depending on defaults and command line
6577 options. */
6578 if (md.flags & EF_IA_64_ABI64)
6579 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6580 else
6581 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6582
6583 if (! ok)
6584 as_warn (_("Could not set architecture and machine"));
6585
6586 /* Set the pointer size and pointer shift size depending on md.flags */
6587
6588 if (md.flags & EF_IA_64_ABI64)
6589 {
6590 md.pointer_size = 8; /* pointers are 8 bytes */
6591 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
6592 }
6593 else
6594 {
6595 md.pointer_size = 4; /* pointers are 4 bytes */
6596 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
6597 }
6598
6599 md.mem_offset.hint = 0;
6600 md.path = 0;
6601 md.maxpaths = 0;
6602 md.entry_labels = NULL;
6603 }
6604
6605 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6606 because that is called after md_parse_option which is where we do the
6607 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6608 default endianness. */
6609
6610 void
6611 ia64_init (argc, argv)
6612 int argc ATTRIBUTE_UNUSED;
6613 char **argv ATTRIBUTE_UNUSED;
6614 {
6615 md.flags = EF_IA_64_ABI64;
6616 if (TARGET_BYTES_BIG_ENDIAN)
6617 md.flags |= EF_IA_64_BE;
6618 }
6619
6620 /* Return a string for the target object file format. */
6621
6622 const char *
6623 ia64_target_format ()
6624 {
6625 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6626 {
6627 if (md.flags & EF_IA_64_BE)
6628 {
6629 if (md.flags & EF_IA_64_ABI64)
6630 #ifdef TE_AIX50
6631 return "elf64-ia64-aix-big";
6632 #else
6633 return "elf64-ia64-big";
6634 #endif
6635 else
6636 #ifdef TE_AIX50
6637 return "elf32-ia64-aix-big";
6638 #else
6639 return "elf32-ia64-big";
6640 #endif
6641 }
6642 else
6643 {
6644 if (md.flags & EF_IA_64_ABI64)
6645 #ifdef TE_AIX50
6646 return "elf64-ia64-aix-little";
6647 #else
6648 return "elf64-ia64-little";
6649 #endif
6650 else
6651 #ifdef TE_AIX50
6652 return "elf32-ia64-aix-little";
6653 #else
6654 return "elf32-ia64-little";
6655 #endif
6656 }
6657 }
6658 else
6659 return "unknown-format";
6660 }
6661
6662 void
6663 ia64_end_of_source ()
6664 {
6665 /* terminate insn group upon reaching end of file: */
6666 insn_group_break (1, 0, 0);
6667
6668 /* emits slots we haven't written yet: */
6669 ia64_flush_insns ();
6670
6671 bfd_set_private_flags (stdoutput, md.flags);
6672
6673 md.mem_offset.hint = 0;
6674 }
6675
6676 void
6677 ia64_start_line ()
6678 {
6679 if (md.qp.X_op == O_register)
6680 as_bad ("qualifying predicate not followed by instruction");
6681 md.qp.X_op = O_absent;
6682
6683 if (ignore_input ())
6684 return;
6685
6686 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6687 {
6688 if (md.detect_dv && !md.explicit_mode)
6689 as_warn (_("Explicit stops are ignored in auto mode"));
6690 else
6691 insn_group_break (1, 0, 0);
6692 }
6693 }
6694
6695 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
6696 labels. */
6697 static int defining_tag = 0;
6698
6699 int
6700 ia64_unrecognized_line (ch)
6701 int ch;
6702 {
6703 switch (ch)
6704 {
6705 case '(':
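/* A qualifying predicate, e.g. the "(p7)" prefix in "(p7) br.cond target"
   (where "target" is just an example label).  */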
6706 expression (&md.qp);
6707 if (*input_line_pointer++ != ')')
6708 {
6709 as_bad ("Expected ')'");
6710 return 0;
6711 }
6712 if (md.qp.X_op != O_register)
6713 {
6714 as_bad ("Qualifying predicate expected");
6715 return 0;
6716 }
6717 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6718 {
6719 as_bad ("Predicate register expected");
6720 return 0;
6721 }
6722 return 1;
6723
6724 case '{':
6725 if (md.manual_bundling)
6726 as_warn ("Found '{' when manual bundling is already turned on");
6727 else
6728 CURR_SLOT.manual_bundling_on = 1;
6729 md.manual_bundling = 1;
6730
6731 /* Bundling is only acceptable in explicit mode
6732 or when in default automatic mode. */
6733 if (md.detect_dv && !md.explicit_mode)
6734 {
6735 if (!md.mode_explicitly_set
6736 && !md.default_explicit_mode)
6737 dot_dv_mode ('E');
6738 else
6739 as_warn (_("Found '{' after explicit switch to automatic mode"));
6740 }
6741 return 1;
6742
6743 case '}':
6744 if (!md.manual_bundling)
6745 as_warn ("Found '}' when manual bundling is off");
6746 else
6747 PREV_SLOT.manual_bundling_off = 1;
6748 md.manual_bundling = 0;
6749
6750 /* switch back to automatic mode, if applicable */
6751 if (md.detect_dv
6752 && md.explicit_mode
6753 && !md.mode_explicitly_set
6754 && !md.default_explicit_mode)
6755 dot_dv_mode ('A');
6756
6757 /* Allow '{' to follow on the same line. We also allow ";;", but that
6758 happens automatically because ';' is an end of line marker. */
6759 SKIP_WHITESPACE ();
6760 if (input_line_pointer[0] == '{')
6761 {
6762 input_line_pointer++;
6763 return ia64_unrecognized_line ('{');
6764 }
6765
6766 demand_empty_rest_of_line ();
6767 return 1;
6768
6769 case '[':
6770 {
6771 char *s;
6772 char c;
6773 symbolS *tag;
6774 int temp;
6775
6776 if (md.qp.X_op == O_register)
6777 {
6778 as_bad ("Tag must come before qualifying predicate.");
6779 return 0;
6780 }
6781
6782 /* This implements just enough of read_a_source_file in read.c to
6783 recognize labels. */
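/* A tag has the form "[t1:]" immediately preceding an instruction;
   "t1" here is only an example name.  */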
6784 if (is_name_beginner (*input_line_pointer))
6785 {
6786 s = input_line_pointer;
6787 c = get_symbol_end ();
6788 }
6789 else if (LOCAL_LABELS_FB
6790 && ISDIGIT (*input_line_pointer))
6791 {
6792 temp = 0;
6793 while (ISDIGIT (*input_line_pointer))
6794 temp = (temp * 10) + *input_line_pointer++ - '0';
6795 fb_label_instance_inc (temp);
6796 s = fb_label_name (temp, 0);
6797 c = *input_line_pointer;
6798 }
6799 else
6800 {
6801 s = NULL;
6802 c = '\0';
6803 }
6804 if (c != ':')
6805 {
6806 /* Put ':' back for error messages' sake. */
6807 *input_line_pointer++ = ':';
6808 as_bad ("Expected ':'");
6809 return 0;
6810 }
6811
6812 defining_tag = 1;
6813 tag = colon (s);
6814 defining_tag = 0;
6815 /* Put ':' back for error messages' sake. */
6816 *input_line_pointer++ = ':';
6817 if (*input_line_pointer++ != ']')
6818 {
6819 as_bad ("Expected ']'");
6820 return 0;
6821 }
6822 if (! tag)
6823 {
6824 as_bad ("Tag name expected");
6825 return 0;
6826 }
6827 return 1;
6828 }
6829
6830 default:
6831 break;
6832 }
6833
6834 /* Not a valid line. */
6835 return 0;
6836 }
6837
6838 void
6839 ia64_frob_label (sym)
6840 struct symbol *sym;
6841 {
6842 struct label_fix *fix;
6843
6844 /* Tags need special handling since they are not bundle breaks like
6845 labels. */
6846 if (defining_tag)
6847 {
6848 fix = obstack_alloc (&notes, sizeof (*fix));
6849 fix->sym = sym;
6850 fix->next = CURR_SLOT.tag_fixups;
6851 CURR_SLOT.tag_fixups = fix;
6852
6853 return;
6854 }
6855
6856 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6857 {
6858 md.last_text_seg = now_seg;
6859 fix = obstack_alloc (&notes, sizeof (*fix));
6860 fix->sym = sym;
6861 fix->next = CURR_SLOT.label_fixups;
6862 CURR_SLOT.label_fixups = fix;
6863
6864 /* Keep track of how many code entry points we've seen. */
6865 if (md.path == md.maxpaths)
6866 {
6867 md.maxpaths += 20;
6868 md.entry_labels = (const char **)
6869 xrealloc ((void *) md.entry_labels,
6870 md.maxpaths * sizeof (char *));
6871 }
6872 md.entry_labels[md.path++] = S_GET_NAME (sym);
6873 }
6874 }
6875
6876 void
6877 ia64_flush_pending_output ()
6878 {
6879 if (!md.keep_pending_output
6880 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6881 {
6882 /* ??? This causes many unnecessary stop bits to be emitted.
6883 Unfortunately, it isn't clear if it is safe to remove this. */
6884 insn_group_break (1, 0, 0);
6885 ia64_flush_insns ();
6886 }
6887 }
6888
6889 /* Do ia64-specific expression optimization. All that's done here is
6890 to transform index expressions that are either due to the indexing
6891 of rotating registers or due to the indexing of indirect register
6892 sets. */
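/* For example, an index into a rotating register set declared with ".rotr",
   or an indirect register access such as "dbr[r2]".  */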
6893 int
6894 ia64_optimize_expr (l, op, r)
6895 expressionS *l;
6896 operatorT op;
6897 expressionS *r;
6898 {
6899 unsigned num_regs;
6900
6901 if (op == O_index)
6902 {
6903 if (l->X_op == O_register && r->X_op == O_constant)
6904 {
6905 num_regs = (l->X_add_number >> 16);
6906 if ((unsigned) r->X_add_number >= num_regs)
6907 {
6908 if (!num_regs)
6909 as_bad ("No current frame");
6910 else
6911 as_bad ("Index out of range 0..%u", num_regs - 1);
6912 r->X_add_number = 0;
6913 }
6914 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
6915 return 1;
6916 }
6917 else if (l->X_op == O_register && r->X_op == O_register)
6918 {
6919 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
6920 || l->X_add_number == IND_MEM)
6921 {
6922 as_bad ("Indirect register set name expected");
6923 l->X_add_number = IND_CPUID;
6924 }
6925 l->X_op = O_index;
6926 l->X_op_symbol = md.regsym[l->X_add_number];
6927 l->X_add_number = r->X_add_number;
6928 return 1;
6929 }
6930 }
6931 return 0;
6932 }
6933
6934 int
6935 ia64_parse_name (name, e)
6936 char *name;
6937 expressionS *e;
6938 {
6939 struct const_desc *cdesc;
6940 struct dynreg *dr = 0;
6941 unsigned int regnum;
6942 struct symbol *sym;
6943 char *end;
6944
6945 /* first see if NAME is a known register name: */
6946 sym = hash_find (md.reg_hash, name);
6947 if (sym)
6948 {
6949 e->X_op = O_register;
6950 e->X_add_number = S_GET_VALUE (sym);
6951 return 1;
6952 }
6953
6954 cdesc = hash_find (md.const_hash, name);
6955 if (cdesc)
6956 {
6957 e->X_op = O_constant;
6958 e->X_add_number = cdesc->value;
6959 return 1;
6960 }
6961
6962 /* check for inN, locN, or outN: */
6963 switch (name[0])
6964 {
6965 case 'i':
6966 if (name[1] == 'n' && ISDIGIT (name[2]))
6967 {
6968 dr = &md.in;
6969 name += 2;
6970 }
6971 break;
6972
6973 case 'l':
6974 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
6975 {
6976 dr = &md.loc;
6977 name += 3;
6978 }
6979 break;
6980
6981 case 'o':
6982 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
6983 {
6984 dr = &md.out;
6985 name += 3;
6986 }
6987 break;
6988
6989 default:
6990 break;
6991 }
6992
6993 if (dr)
6994 {
6995 /* The name is inN, locN, or outN; parse the register number. */
6996 regnum = strtoul (name, &end, 10);
6997 if (end > name && *end == '\0')
6998 {
6999 if ((unsigned) regnum >= dr->num_regs)
7000 {
7001 if (!dr->num_regs)
7002 as_bad ("No current frame");
7003 else
7004 as_bad ("Register number out of range 0..%u",
7005 dr->num_regs - 1);
7006 regnum = 0;
7007 }
7008 e->X_op = O_register;
7009 e->X_add_number = dr->base + regnum;
7010 return 1;
7011 }
7012 }
7013
7014 if ((dr = hash_find (md.dynreg_hash, name)))
7015 {
7016 /* We've got ourselves the name of a rotating register set.
7017 Store the base register number in the low 16 bits of
7018 X_add_number and the size of the register set in the top 16
7019 bits. */
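/* E.g., an 8-register rotating set based at r32 would be encoded as
   32 | (8 << 16).  */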
7020 e->X_op = O_register;
7021 e->X_add_number = dr->base | (dr->num_regs << 16);
7022 return 1;
7023 }
7024 return 0;
7025 }
7026
7027 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7028
7029 char *
7030 ia64_canonicalize_symbol_name (name)
7031 char *name;
7032 {
7033 size_t len = strlen (name);
7034 if (len > 1 && name[len - 1] == '#')
7035 name[len - 1] = '\0';
7036 return name;
7037 }
7038
7039 /* Return true if idesc is a conditional branch instruction. This excludes
7040 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7041 because they always read/write resources regardless of the value of the
7042 qualifying predicate. br.ia must always use p0, and hence is always
7043 taken. Thus this function returns true for branches which can fall
7044 through, and which use no resources if they do fall through. */
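/* Thus "br", "br.cond", "br.call", "br.clr", and "brl.cond" count as
   conditional branches here, while "br.ia", "br.cloop", and "br.wtop"
   do not.  */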
7045
7046 static int
7047 is_conditional_branch (idesc)
7048 struct ia64_opcode *idesc;
7049 {
7050 /* br is a conditional branch. Everything that starts with br. except
7051 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7052 Everything that starts with brl is a conditional branch. */
7053 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7054 && (idesc->name[2] == '\0'
7055 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7056 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7057 || idesc->name[2] == 'l'
7058 /* br.cond, br.call, br.clr */
7059 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7060 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7061 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7062 }
7063
7064 /* Return whether the given opcode is a taken branch. If there's any doubt,
7065 returns zero. */
7066
7067 static int
7068 is_taken_branch (idesc)
7069 struct ia64_opcode *idesc;
7070 {
7071 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7072 || strncmp (idesc->name, "br.ia", 5) == 0);
7073 }
7074
7075 /* Return whether the given opcode is an interruption or rfi. If there's any
7076 doubt, returns zero. */
7077
7078 static int
7079 is_interruption_or_rfi (idesc)
7080 struct ia64_opcode *idesc;
7081 {
7082 if (strcmp (idesc->name, "rfi") == 0)
7083 return 1;
7084 return 0;
7085 }
7086
7087 /* Returns the index of the given dependency in the opcode's list of chks, or
7088 -1 if there is no dependency. */
7089
7090 static int
7091 depends_on (depind, idesc)
7092 int depind;
7093 struct ia64_opcode *idesc;
7094 {
7095 int i;
7096 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7097 for (i = 0; i < dep->nchks; i++)
7098 {
7099 if (depind == DEP (dep->chks[i]))
7100 return i;
7101 }
7102 return -1;
7103 }
7104
7105 /* Determine a set of specific resources used for a particular resource
7106 class. Returns the number of specific resources identified. For those
7107 cases which are not determinable statically, the resource returned is
7108 marked nonspecific.
7109
7110 Meanings of value in 'NOTE':
7111 1) only read/write when the register number is explicitly encoded in the
7112 insn.
7113 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7114 accesses CFM when qualifying predicate is in the rotating region.
7115 3) general register value is used to specify an indirect register; not
7116 determinable statically.
7117 4) only read the given resource when bits 7:0 of the indirect index
7118 register value do not match the register number of the resource; not
7119 determinable statically.
7120 5) all rules are implementation specific.
7121 6) only when both the index specified by the reader and the index specified
7122 by the writer have the same value in bits 63:61; not determinable
7123 statically.
7124 7) only access the specified resource when the corresponding mask bit is
7125 set
7126 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7127 only read when these insns reference FR2-31
7128 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7129 written when these insns write FR32-127
7130 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7131 instruction
7132 11) The target predicates are written independently of PR[qp], but source
7133 registers are only read if PR[qp] is true. Since the state of PR[qp]
7134 cannot statically be determined, all source registers are marked used.
7135 12) This insn only reads the specified predicate register when that
7136 register is the PR[qp].
7137 13) This reference to ld-c only applies to the GR whose value is loaded
7138 with data returned from memory, not the post-incremented address register.
7139 14) The RSE resource includes the implementation-specific RSE internal
7140 state resources. At least one (and possibly more) of these resources are
7141 read by each instruction listed in IC:rse-readers. At least one (and
7142 possibly more) of these resources are written by each insn listed in
7143 IC:rse-writers.
7144 15+16) Represents reserved instructions, which the assembler does not
7145 generate.
7146
7147 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7148 this code; there are no dependency violations based on memory access.
7149 */
7150
7151 #define MAX_SPECS 256
7152 #define DV_CHK 1
7153 #define DV_REG 0
7154
7155 static int
7156 specify_resource (dep, idesc, type, specs, note, path)
7157 const struct ia64_dependency *dep;
7158 struct ia64_opcode *idesc;
7159 int type; /* is this a DV chk or a DV reg? */
7160 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7161 int note; /* resource note for this insn's usage */
7162 int path; /* which execution path to examine */
7163 {
7164 int count = 0;
7165 int i;
7166 int rsrc_write = 0;
7167 struct rsrc tmpl;
7168
7169 if (dep->mode == IA64_DV_WAW
7170 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7171 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7172 rsrc_write = 1;
7173
7174 /* template for any resources we identify */
7175 tmpl.dependency = dep;
7176 tmpl.note = note;
7177 tmpl.insn_srlz = tmpl.data_srlz = 0;
7178 tmpl.qp_regno = CURR_SLOT.qp_regno;
7179 tmpl.link_to_qp_branch = 1;
7180 tmpl.mem_offset.hint = 0;
7181 tmpl.specific = 1;
7182 tmpl.index = 0;
7183 tmpl.cmp_type = CMP_NONE;
7184
7185 #define UNHANDLED \
7186 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7187 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7188 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
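/* KNOWN (R) is true when the DV checker has recorded a literal value for
   general register R that is valid on the execution path being examined.  */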
7189
7190 /* we don't need to track these */
7191 if (dep->semantics == IA64_DVS_NONE)
7192 return 0;
7193
7194 switch (dep->specifier)
7195 {
7196 case IA64_RS_AR_K:
7197 if (note == 1)
7198 {
7199 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7200 {
7201 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7202 if (regno >= 0 && regno <= 7)
7203 {
7204 specs[count] = tmpl;
7205 specs[count++].index = regno;
7206 }
7207 }
7208 }
7209 else if (note == 0)
7210 {
7211 for (i = 0; i < 8; i++)
7212 {
7213 specs[count] = tmpl;
7214 specs[count++].index = i;
7215 }
7216 }
7217 else
7218 {
7219 UNHANDLED;
7220 }
7221 break;
7222
7223 case IA64_RS_AR_UNAT:
7224 /* This is a mov =AR or mov AR= instruction. */
7225 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7226 {
7227 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7228 if (regno == AR_UNAT)
7229 {
7230 specs[count++] = tmpl;
7231 }
7232 }
7233 else
7234 {
7235 /* This is a spill/fill, or other instruction that modifies the
7236 unat register. */
7237
7238 /* Unless we can determine the specific bits used, mark the whole
7239 thing; bits 8:3 of the memory address indicate the bit used in
7240 UNAT. The .mem.offset hint may be used to eliminate a small
7241 subset of conflicts. */
7242 specs[count] = tmpl;
7243 if (md.mem_offset.hint)
7244 {
7245 if (md.debug_dv)
7246 fprintf (stderr, " Using hint for spill/fill\n");
7247 /* The index isn't actually used, just set it to something
7248 approximating the bit index. */
7249 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7250 specs[count].mem_offset.hint = 1;
7251 specs[count].mem_offset.offset = md.mem_offset.offset;
7252 specs[count++].mem_offset.base = md.mem_offset.base;
7253 }
7254 else
7255 {
7256 specs[count++].specific = 0;
7257 }
7258 }
7259 break;
7260
7261 case IA64_RS_AR:
7262 if (note == 1)
7263 {
7264 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7265 {
7266 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7267 if ((regno >= 8 && regno <= 15)
7268 || (regno >= 20 && regno <= 23)
7269 || (regno >= 31 && regno <= 39)
7270 || (regno >= 41 && regno <= 47)
7271 || (regno >= 67 && regno <= 111))
7272 {
7273 specs[count] = tmpl;
7274 specs[count++].index = regno;
7275 }
7276 }
7277 }
7278 else
7279 {
7280 UNHANDLED;
7281 }
7282 break;
7283
7284 case IA64_RS_ARb:
7285 if (note == 1)
7286 {
7287 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7288 {
7289 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7290 if ((regno >= 48 && regno <= 63)
7291 || (regno >= 112 && regno <= 127))
7292 {
7293 specs[count] = tmpl;
7294 specs[count++].index = regno;
7295 }
7296 }
7297 }
7298 else if (note == 0)
7299 {
7300 for (i = 48; i < 64; i++)
7301 {
7302 specs[count] = tmpl;
7303 specs[count++].index = i;
7304 }
7305 for (i = 112; i < 128; i++)
7306 {
7307 specs[count] = tmpl;
7308 specs[count++].index = i;
7309 }
7310 }
7311 else
7312 {
7313 UNHANDLED;
7314 }
7315 break;
7316
7317 case IA64_RS_BR:
7318 if (note != 1)
7319 {
7320 UNHANDLED;
7321 }
7322 else
7323 {
7324 if (rsrc_write)
7325 {
7326 for (i = 0; i < idesc->num_outputs; i++)
7327 if (idesc->operands[i] == IA64_OPND_B1
7328 || idesc->operands[i] == IA64_OPND_B2)
7329 {
7330 specs[count] = tmpl;
7331 specs[count++].index =
7332 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7333 }
7334 }
7335 else
7336 {
7337 for (i = idesc->num_outputs;i < NELEMS (idesc->operands); i++)
7338 if (idesc->operands[i] == IA64_OPND_B1
7339 || idesc->operands[i] == IA64_OPND_B2)
7340 {
7341 specs[count] = tmpl;
7342 specs[count++].index =
7343 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7344 }
7345 }
7346 }
7347 break;
7348
7349 case IA64_RS_CPUID: /* four or more registers */
7350 if (note == 3)
7351 {
7352 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7353 {
7354 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7355 if (regno >= 0 && regno < NELEMS (gr_values)
7356 && KNOWN (regno))
7357 {
7358 specs[count] = tmpl;
7359 specs[count++].index = gr_values[regno].value & 0xFF;
7360 }
7361 else
7362 {
7363 specs[count] = tmpl;
7364 specs[count++].specific = 0;
7365 }
7366 }
7367 }
7368 else
7369 {
7370 UNHANDLED;
7371 }
7372 break;
7373
7374 case IA64_RS_DBR: /* four or more registers */
7375 if (note == 3)
7376 {
7377 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7378 {
7379 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7380 if (regno >= 0 && regno < NELEMS (gr_values)
7381 && KNOWN (regno))
7382 {
7383 specs[count] = tmpl;
7384 specs[count++].index = gr_values[regno].value & 0xFF;
7385 }
7386 else
7387 {
7388 specs[count] = tmpl;
7389 specs[count++].specific = 0;
7390 }
7391 }
7392 }
7393 else if (note == 0 && !rsrc_write)
7394 {
7395 specs[count] = tmpl;
7396 specs[count++].specific = 0;
7397 }
7398 else
7399 {
7400 UNHANDLED;
7401 }
7402 break;
7403
7404 case IA64_RS_IBR: /* four or more registers */
7405 if (note == 3)
7406 {
7407 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7408 {
7409 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7410 if (regno >= 0 && regno < NELEMS (gr_values)
7411 && KNOWN (regno))
7412 {
7413 specs[count] = tmpl;
7414 specs[count++].index = gr_values[regno].value & 0xFF;
7415 }
7416 else
7417 {
7418 specs[count] = tmpl;
7419 specs[count++].specific = 0;
7420 }
7421 }
7422 }
7423 else
7424 {
7425 UNHANDLED;
7426 }
7427 break;
7428
7429 case IA64_RS_MSR:
7430 if (note == 5)
7431 {
7432 /* These are implementation specific. Force all references to
7433 conflict with all other references. */
7434 specs[count] = tmpl;
7435 specs[count++].specific = 0;
7436 }
7437 else
7438 {
7439 UNHANDLED;
7440 }
7441 break;
7442
7443 case IA64_RS_PKR: /* 16 or more registers */
7444 if (note == 3 || note == 4)
7445 {
7446 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7447 {
7448 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7449 if (regno >= 0 && regno < NELEMS (gr_values)
7450 && KNOWN (regno))
7451 {
7452 if (note == 3)
7453 {
7454 specs[count] = tmpl;
7455 specs[count++].index = gr_values[regno].value & 0xFF;
7456 }
7457 else
7458 for (i = 0; i < NELEMS (gr_values); i++)
7459 {
7460 /* Uses all registers *except* the one in R3. */
7461 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7462 {
7463 specs[count] = tmpl;
7464 specs[count++].index = i;
7465 }
7466 }
7467 }
7468 else
7469 {
7470 specs[count] = tmpl;
7471 specs[count++].specific = 0;
7472 }
7473 }
7474 }
7475 else if (note == 0)
7476 {
7477 /* probe et al. */
7478 specs[count] = tmpl;
7479 specs[count++].specific = 0;
7480 }
7481 break;
7482
7483 case IA64_RS_PMC: /* four or more registers */
7484 if (note == 3)
7485 {
7486 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7487 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7488
7489 {
7490 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7491 ? 1 : !rsrc_write);
7492 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7493 if (regno >= 0 && regno < NELEMS (gr_values)
7494 && KNOWN (regno))
7495 {
7496 specs[count] = tmpl;
7497 specs[count++].index = gr_values[regno].value & 0xFF;
7498 }
7499 else
7500 {
7501 specs[count] = tmpl;
7502 specs[count++].specific = 0;
7503 }
7504 }
7505 }
7506 else
7507 {
7508 UNHANDLED;
7509 }
7510 break;
7511
7512 case IA64_RS_PMD: /* four or more registers */
7513 if (note == 3)
7514 {
7515 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7516 {
7517 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7518 if (regno >= 0 && regno < NELEMS (gr_values)
7519 && KNOWN (regno))
7520 {
7521 specs[count] = tmpl;
7522 specs[count++].index = gr_values[regno].value & 0xFF;
7523 }
7524 else
7525 {
7526 specs[count] = tmpl;
7527 specs[count++].specific = 0;
7528 }
7529 }
7530 }
7531 else
7532 {
7533 UNHANDLED;
7534 }
7535 break;
7536
7537 case IA64_RS_RR: /* eight registers */
7538 if (note == 6)
7539 {
7540 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7541 {
7542 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7543 if (regno >= 0 && regno < NELEMS (gr_values)
7544 && KNOWN (regno))
7545 {
7546 specs[count] = tmpl;
7547 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7548 }
7549 else
7550 {
7551 specs[count] = tmpl;
7552 specs[count++].specific = 0;
7553 }
7554 }
7555 }
7556 else if (note == 0 && !rsrc_write)
7557 {
7558 specs[count] = tmpl;
7559 specs[count++].specific = 0;
7560 }
7561 else
7562 {
7563 UNHANDLED;
7564 }
7565 break;
7566
7567 case IA64_RS_CR_IRR:
7568 if (note == 0)
7569 {
7570 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7571 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7572 if (rsrc_write
7573 && idesc->operands[1] == IA64_OPND_CR3
7574 && regno == CR_IVR)
7575 {
7576 for (i = 0; i < 4; i++)
7577 {
7578 specs[count] = tmpl;
7579 specs[count++].index = CR_IRR0 + i;
7580 }
7581 }
7582 }
7583 else if (note == 1)
7584 {
7585 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7586 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7587 && regno >= CR_IRR0
7588 && regno <= CR_IRR3)
7589 {
7590 specs[count] = tmpl;
7591 specs[count++].index = regno;
7592 }
7593 }
7594 else
7595 {
7596 UNHANDLED;
7597 }
7598 break;
7599
7600 case IA64_RS_CR_LRR:
7601 if (note != 1)
7602 {
7603 UNHANDLED;
7604 }
7605 else
7606 {
7607 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7608 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7609 && (regno == CR_LRR0 || regno == CR_LRR1))
7610 {
7611 specs[count] = tmpl;
7612 specs[count++].index = regno;
7613 }
7614 }
7615 break;
7616
7617 case IA64_RS_CR:
7618 if (note == 1)
7619 {
7620 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7621 {
7622 specs[count] = tmpl;
7623 specs[count++].index =
7624 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7625 }
7626 }
7627 else
7628 {
7629 UNHANDLED;
7630 }
7631 break;
7632
7633 case IA64_RS_FR:
7634 case IA64_RS_FRb:
7635 if (note != 1)
7636 {
7637 UNHANDLED;
7638 }
7639 else if (rsrc_write)
7640 {
7641 if (dep->specifier == IA64_RS_FRb
7642 && idesc->operands[0] == IA64_OPND_F1)
7643 {
7644 specs[count] = tmpl;
7645 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7646 }
7647 }
7648 else
7649 {
7650 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7651 {
7652 if (idesc->operands[i] == IA64_OPND_F2
7653 || idesc->operands[i] == IA64_OPND_F3
7654 || idesc->operands[i] == IA64_OPND_F4)
7655 {
7656 specs[count] = tmpl;
7657 specs[count++].index =
7658 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7659 }
7660 }
7661 }
7662 break;
7663
7664 case IA64_RS_GR:
7665 if (note == 13)
7666 {
7667 /* This reference applies only to the GR whose value is loaded with
7668 data returned from memory. */
7669 specs[count] = tmpl;
7670 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7671 }
7672 else if (note == 1)
7673 {
7674 if (rsrc_write)
7675 {
7676 for (i = 0; i < idesc->num_outputs; i++)
7677 if (idesc->operands[i] == IA64_OPND_R1
7678 || idesc->operands[i] == IA64_OPND_R2
7679 || idesc->operands[i] == IA64_OPND_R3)
7680 {
7681 specs[count] = tmpl;
7682 specs[count++].index =
7683 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7684 }
7685 if (idesc->flags & IA64_OPCODE_POSTINC)
7686 for (i = 0; i < NELEMS (idesc->operands); i++)
7687 if (idesc->operands[i] == IA64_OPND_MR3)
7688 {
7689 specs[count] = tmpl;
7690 specs[count++].index =
7691 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7692 }
7693 }
7694 else
7695 {
7696 /* Look for anything that reads a GR. */
7697 for (i = 0; i < NELEMS (idesc->operands); i++)
7698 {
7699 if (idesc->operands[i] == IA64_OPND_MR3
7700 || idesc->operands[i] == IA64_OPND_CPUID_R3
7701 || idesc->operands[i] == IA64_OPND_DBR_R3
7702 || idesc->operands[i] == IA64_OPND_IBR_R3
7703 || idesc->operands[i] == IA64_OPND_MSR_R3
7704 || idesc->operands[i] == IA64_OPND_PKR_R3
7705 || idesc->operands[i] == IA64_OPND_PMC_R3
7706 || idesc->operands[i] == IA64_OPND_PMD_R3
7707 || idesc->operands[i] == IA64_OPND_RR_R3
7708 || ((i >= idesc->num_outputs)
7709 && (idesc->operands[i] == IA64_OPND_R1
7710 || idesc->operands[i] == IA64_OPND_R2
7711 || idesc->operands[i] == IA64_OPND_R3
7712 /* addl source register. */
7713 || idesc->operands[i] == IA64_OPND_R3_2)))
7714 {
7715 specs[count] = tmpl;
7716 specs[count++].index =
7717 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7718 }
7719 }
7720 }
7721 }
7722 else
7723 {
7724 UNHANDLED;
7725 }
7726 break;
7727
7728 /* This is the same as IA64_RS_PRr, except that the register range is
7729 from 1 - 15, and there are no rotating register reads/writes here. */
7730 case IA64_RS_PR:
7731 if (note == 0)
7732 {
7733 for (i = 1; i < 16; i++)
7734 {
7735 specs[count] = tmpl;
7736 specs[count++].index = i;
7737 }
7738 }
7739 else if (note == 7)
7740 {
7741 valueT mask = 0;
7742 /* Mark only those registers indicated by the mask. */
7743 if (rsrc_write)
7744 {
7745 mask = CURR_SLOT.opnd[2].X_add_number;
7746 for (i = 1; i < 16; i++)
7747 if (mask & ((valueT) 1 << i))
7748 {
7749 specs[count] = tmpl;
7750 specs[count++].index = i;
7751 }
7752 }
7753 else
7754 {
7755 UNHANDLED;
7756 }
7757 }
7758 else if (note == 11) /* note 11 implies note 1 as well */
7759 {
7760 if (rsrc_write)
7761 {
7762 for (i = 0; i < idesc->num_outputs; i++)
7763 {
7764 if (idesc->operands[i] == IA64_OPND_P1
7765 || idesc->operands[i] == IA64_OPND_P2)
7766 {
7767 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7768 if (regno >= 1 && regno < 16)
7769 {
7770 specs[count] = tmpl;
7771 specs[count++].index = regno;
7772 }
7773 }
7774 }
7775 }
7776 else
7777 {
7778 UNHANDLED;
7779 }
7780 }
7781 else if (note == 12)
7782 {
7783 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7784 {
7785 specs[count] = tmpl;
7786 specs[count++].index = CURR_SLOT.qp_regno;
7787 }
7788 }
7789 else if (note == 1)
7790 {
7791 if (rsrc_write)
7792 {
7793 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7794 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7795 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7796 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7797
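/* For "or.andcm" the first target predicate is written with an OR-type
   compare and the second with an AND-type; "and.orcm" is the reverse.
   Record the compare type so the DV checker can recognize legitimate
   parallel compares.  */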
7798 if ((idesc->operands[0] == IA64_OPND_P1
7799 || idesc->operands[0] == IA64_OPND_P2)
7800 && p1 >= 1 && p1 < 16)
7801 {
7802 specs[count] = tmpl;
7803 specs[count].cmp_type =
7804 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7805 specs[count++].index = p1;
7806 }
7807 if ((idesc->operands[1] == IA64_OPND_P1
7808 || idesc->operands[1] == IA64_OPND_P2)
7809 && p2 >= 1 && p2 < 16)
7810 {
7811 specs[count] = tmpl;
7812 specs[count].cmp_type =
7813 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7814 specs[count++].index = p2;
7815 }
7816 }
7817 else
7818 {
7819 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7820 {
7821 specs[count] = tmpl;
7822 specs[count++].index = CURR_SLOT.qp_regno;
7823 }
7824 if (idesc->operands[1] == IA64_OPND_PR)
7825 {
7826 for (i = 1; i < 16; i++)
7827 {
7828 specs[count] = tmpl;
7829 specs[count++].index = i;
7830 }
7831 }
7832 }
7833 }
7834 else
7835 {
7836 UNHANDLED;
7837 }
7838 break;
7839
7840 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
7841 simplified cases of this. */
7842 case IA64_RS_PRr:
7843 if (note == 0)
7844 {
7845 for (i = 16; i < 63; i++)
7846 {
7847 specs[count] = tmpl;
7848 specs[count++].index = i;
7849 }
7850 }
7851 else if (note == 7)
7852 {
7853 valueT mask = 0;
7854 /* Mark only those registers indicated by the mask. */
7855 if (rsrc_write
7856 && idesc->operands[0] == IA64_OPND_PR)
7857 {
7858 mask = CURR_SLOT.opnd[2].X_add_number;
7859 if (mask & ((valueT) 1<<16))
7860 for (i = 16; i < 63; i++)
7861 {
7862 specs[count] = tmpl;
7863 specs[count++].index = i;
7864 }
7865 }
7866 else if (rsrc_write
7867 && idesc->operands[0] == IA64_OPND_PR_ROT)
7868 {
7869 for (i = 16; i < 63; i++)
7870 {
7871 specs[count] = tmpl;
7872 specs[count++].index = i;
7873 }
7874 }
7875 else
7876 {
7877 UNHANDLED;
7878 }
7879 }
7880 else if (note == 11) /* note 11 implies note 1 as well */
7881 {
7882 if (rsrc_write)
7883 {
7884 for (i = 0; i < idesc->num_outputs; i++)
7885 {
7886 if (idesc->operands[i] == IA64_OPND_P1
7887 || idesc->operands[i] == IA64_OPND_P2)
7888 {
7889 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7890 if (regno >= 16 && regno < 63)
7891 {
7892 specs[count] = tmpl;
7893 specs[count++].index = regno;
7894 }
7895 }
7896 }
7897 }
7898 else
7899 {
7900 UNHANDLED;
7901 }
7902 }
7903 else if (note == 12)
7904 {
7905 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7906 {
7907 specs[count] = tmpl;
7908 specs[count++].index = CURR_SLOT.qp_regno;
7909 }
7910 }
7911 else if (note == 1)
7912 {
7913 if (rsrc_write)
7914 {
7915 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7916 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7917 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7918 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7919
7920 if ((idesc->operands[0] == IA64_OPND_P1
7921 || idesc->operands[0] == IA64_OPND_P2)
7922 && p1 >= 16 && p1 < 63)
7923 {
7924 specs[count] = tmpl;
7925 specs[count].cmp_type =
7926 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7927 specs[count++].index = p1;
7928 }
7929 if ((idesc->operands[1] == IA64_OPND_P1
7930 || idesc->operands[1] == IA64_OPND_P2)
7931 && p2 >= 16 && p2 < 63)
7932 {
7933 specs[count] = tmpl;
7934 specs[count].cmp_type =
7935 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7936 specs[count++].index = p2;
7937 }
7938 }
7939 else
7940 {
7941 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7942 {
7943 specs[count] = tmpl;
7944 specs[count++].index = CURR_SLOT.qp_regno;
7945 }
7946 if (idesc->operands[1] == IA64_OPND_PR)
7947 {
7948 for (i = 16; i < 63; i++)
7949 {
7950 specs[count] = tmpl;
7951 specs[count++].index = i;
7952 }
7953 }
7954 }
7955 }
7956 else
7957 {
7958 UNHANDLED;
7959 }
7960 break;
7961
7962 case IA64_RS_PSR:
7963 /* Verify that the instruction is using the PSR bit indicated in
7964 dep->regindex. */
7965 if (note == 0)
7966 {
7967 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
7968 {
7969 if (dep->regindex < 6)
7970 {
7971 specs[count++] = tmpl;
7972 }
7973 }
7974 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
7975 {
7976 if (dep->regindex < 32
7977 || dep->regindex == 35
7978 || dep->regindex == 36
7979 || (!rsrc_write && dep->regindex == PSR_CPL))
7980 {
7981 specs[count++] = tmpl;
7982 }
7983 }
7984 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
7985 {
7986 if (dep->regindex < 32
7987 || dep->regindex == 35
7988 || dep->regindex == 36
7989 || (rsrc_write && dep->regindex == PSR_CPL))
7990 {
7991 specs[count++] = tmpl;
7992 }
7993 }
7994 else
7995 {
7996 /* Several PSR bits have very specific dependencies. */
7997 switch (dep->regindex)
7998 {
7999 default:
8000 specs[count++] = tmpl;
8001 break;
8002 case PSR_IC:
8003 if (rsrc_write)
8004 {
8005 specs[count++] = tmpl;
8006 }
8007 else
8008 {
8009 /* Only certain CR accesses use PSR.ic */
8010 if (idesc->operands[0] == IA64_OPND_CR3
8011 || idesc->operands[1] == IA64_OPND_CR3)
8012 {
8013 int index =
8014 ((idesc->operands[0] == IA64_OPND_CR3)
8015 ? 0 : 1);
8016 int regno =
8017 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8018
8019 switch (regno)
8020 {
8021 default:
8022 break;
8023 case CR_ITIR:
8024 case CR_IFS:
8025 case CR_IIM:
8026 case CR_IIP:
8027 case CR_IPSR:
8028 case CR_ISR:
8029 case CR_IFA:
8030 case CR_IHA:
8031 case CR_IIPA:
8032 specs[count++] = tmpl;
8033 break;
8034 }
8035 }
8036 }
8037 break;
8038 case PSR_CPL:
8039 if (rsrc_write)
8040 {
8041 specs[count++] = tmpl;
8042 }
8043 else
8044 {
8045 /* Only some AR accesses use cpl */
8046 if (idesc->operands[0] == IA64_OPND_AR3
8047 || idesc->operands[1] == IA64_OPND_AR3)
8048 {
8049 int index =
8050 ((idesc->operands[0] == IA64_OPND_AR3)
8051 ? 0 : 1);
8052 int regno =
8053 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8054
8055 if (regno == AR_ITC
8056 || (index == 0
8057 && (regno == AR_ITC
8058 || regno == AR_RSC
8059 || (regno >= AR_K0
8060 && regno <= AR_K7))))
8061 {
8062 specs[count++] = tmpl;
8063 }
8064 }
8065 else
8066 {
8067 specs[count++] = tmpl;
8068 }
8069 break;
8070 }
8071 }
8072 }
8073 }
8074 else if (note == 7)
8075 {
8076 valueT mask = 0;
8077 if (idesc->operands[0] == IA64_OPND_IMMU24)
8078 {
8079 mask = CURR_SLOT.opnd[0].X_add_number;
8080 }
8081 else
8082 {
8083 UNHANDLED;
8084 }
8085 if (mask & ((valueT) 1 << dep->regindex))
8086 {
8087 specs[count++] = tmpl;
8088 }
8089 }
8090 else if (note == 8)
8091 {
8092 int min = dep->regindex == PSR_DFL ? 2 : 32;
8093 int max = dep->regindex == PSR_DFL ? 31 : 127;
8094 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8095 for (i = 0; i < NELEMS (idesc->operands); i++)
8096 {
8097 if (idesc->operands[i] == IA64_OPND_F1
8098 || idesc->operands[i] == IA64_OPND_F2
8099 || idesc->operands[i] == IA64_OPND_F3
8100 || idesc->operands[i] == IA64_OPND_F4)
8101 {
8102 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8103 if (reg >= min && reg <= max)
8104 {
8105 specs[count++] = tmpl;
8106 }
8107 }
8108 }
8109 }
8110 else if (note == 9)
8111 {
8112 int min = dep->regindex == PSR_MFL ? 2 : 32;
8113 int max = dep->regindex == PSR_MFL ? 31 : 127;
8114 /* mfh is read on writes to FR32-127; mfl is read on writes to
8115 FR2-31 */
8116 for (i = 0; i < idesc->num_outputs; i++)
8117 {
8118 if (idesc->operands[i] == IA64_OPND_F1)
8119 {
8120 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8121 if (reg >= min && reg <= max)
8122 {
8123 specs[count++] = tmpl;
8124 }
8125 }
8126 }
8127 }
8128 else if (note == 10)
8129 {
8130 for (i = 0; i < NELEMS (idesc->operands); i++)
8131 {
8132 if (idesc->operands[i] == IA64_OPND_R1
8133 || idesc->operands[i] == IA64_OPND_R2
8134 || idesc->operands[i] == IA64_OPND_R3)
8135 {
8136 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8137 if (regno >= 16 && regno <= 31)
8138 {
8139 specs[count++] = tmpl;
8140 }
8141 }
8142 }
8143 }
8144 else
8145 {
8146 UNHANDLED;
8147 }
8148 break;
8149
8150 case IA64_RS_AR_FPSR:
8151 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8152 {
8153 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8154 if (regno == AR_FPSR)
8155 {
8156 specs[count++] = tmpl;
8157 }
8158 }
8159 else
8160 {
8161 specs[count++] = tmpl;
8162 }
8163 break;
8164
8165 case IA64_RS_ARX:
8166 /* Handle all AR[REG] resources */
8167 if (note == 0 || note == 1)
8168 {
8169 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8170 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8171 && regno == dep->regindex)
8172 {
8173 specs[count++] = tmpl;
8174 }
8175 /* other AR[REG] resources may be affected by AR accesses */
8176 else if (idesc->operands[0] == IA64_OPND_AR3)
8177 {
8178 /* AR[] writes */
8179 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8180 switch (dep->regindex)
8181 {
8182 default:
8183 break;
8184 case AR_BSP:
8185 case AR_RNAT:
8186 if (regno == AR_BSPSTORE)
8187 {
8188 specs[count++] = tmpl;
8189 }
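/* Note: there is no break here; control falls through to the AR_RSC
   check below.  */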
8190 case AR_RSC:
8191 if (!rsrc_write &&
8192 (regno == AR_BSPSTORE
8193 || regno == AR_RNAT))
8194 {
8195 specs[count++] = tmpl;
8196 }
8197 break;
8198 }
8199 }
8200 else if (idesc->operands[1] == IA64_OPND_AR3)
8201 {
8202 /* AR[] reads */
8203 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8204 switch (dep->regindex)
8205 {
8206 default:
8207 break;
8208 case AR_RSC:
8209 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8210 {
8211 specs[count++] = tmpl;
8212 }
8213 break;
8214 }
8215 }
8216 else
8217 {
8218 specs[count++] = tmpl;
8219 }
8220 }
8221 else
8222 {
8223 UNHANDLED;
8224 }
8225 break;
8226
8227 case IA64_RS_CRX:
8228 /* Handle all CR[REG] resources */
8229 if (note == 0 || note == 1)
8230 {
8231 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8232 {
8233 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8234 if (regno == dep->regindex)
8235 {
8236 specs[count++] = tmpl;
8237 }
8238 else if (!rsrc_write)
8239 {
8240 /* Reads from CR[IVR] affect other resources. */
8241 if (regno == CR_IVR)
8242 {
8243 if ((dep->regindex >= CR_IRR0
8244 && dep->regindex <= CR_IRR3)
8245 || dep->regindex == CR_TPR)
8246 {
8247 specs[count++] = tmpl;
8248 }
8249 }
8250 }
8251 }
8252 else
8253 {
8254 specs[count++] = tmpl;
8255 }
8256 }
8257 else
8258 {
8259 UNHANDLED;
8260 }
8261 break;
8262
8263 case IA64_RS_INSERVICE:
8264 /* look for write of EOI (67) or read of IVR (65) */
8265 if ((idesc->operands[0] == IA64_OPND_CR3
8266 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8267 || (idesc->operands[1] == IA64_OPND_CR3
8268 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8269 {
8270 specs[count++] = tmpl;
8271 }
8272 break;
8273
8274 case IA64_RS_GR0:
8275 if (note == 1)
8276 {
8277 specs[count++] = tmpl;
8278 }
8279 else
8280 {
8281 UNHANDLED;
8282 }
8283 break;
8284
8285 case IA64_RS_CFM:
8286 if (note != 2)
8287 {
8288 specs[count++] = tmpl;
8289 }
8290 else
8291 {
8292 /* Check if any of the registers accessed are in the rotating region.
8293 mov to/from pr accesses CFM only when qp_regno is in the rotating
8294 region */
8295 for (i = 0; i < NELEMS (idesc->operands); i++)
8296 {
8297 if (idesc->operands[i] == IA64_OPND_R1
8298 || idesc->operands[i] == IA64_OPND_R2
8299 || idesc->operands[i] == IA64_OPND_R3)
8300 {
8301 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8302 /* Assumes that md.rot.num_regs is always valid */
8303 if (md.rot.num_regs > 0
8304 && num > 31
8305 && num < 31 + md.rot.num_regs)
8306 {
8307 specs[count] = tmpl;
8308 specs[count++].specific = 0;
8309 }
8310 }
8311 else if (idesc->operands[i] == IA64_OPND_F1
8312 || idesc->operands[i] == IA64_OPND_F2
8313 || idesc->operands[i] == IA64_OPND_F3
8314 || idesc->operands[i] == IA64_OPND_F4)
8315 {
8316 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8317 if (num > 31)
8318 {
8319 specs[count] = tmpl;
8320 specs[count++].specific = 0;
8321 }
8322 }
8323 else if (idesc->operands[i] == IA64_OPND_P1
8324 || idesc->operands[i] == IA64_OPND_P2)
8325 {
8326 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8327 if (num > 15)
8328 {
8329 specs[count] = tmpl;
8330 specs[count++].specific = 0;
8331 }
8332 }
8333 }
8334 if (CURR_SLOT.qp_regno > 15)
8335 {
8336 specs[count] = tmpl;
8337 specs[count++].specific = 0;
8338 }
8339 }
8340 break;
8341
8342 /* This is the same as IA64_RS_PRr, except simplified to account for
8343 the fact that there is only one register. */
8344 case IA64_RS_PR63:
8345 if (note == 0)
8346 {
8347 specs[count++] = tmpl;
8348 }
8349 else if (note == 7)
8350 {
8351 valueT mask = 0;
8352 if (idesc->operands[2] == IA64_OPND_IMM17)
8353 mask = CURR_SLOT.opnd[2].X_add_number;
8354 if (mask & ((valueT) 1 << 63))
8355 specs[count++] = tmpl;
8356 }
8357 else if (note == 11)
8358 {
8359 if ((idesc->operands[0] == IA64_OPND_P1
8360 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8361 || (idesc->operands[1] == IA64_OPND_P2
8362 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8363 {
8364 specs[count++] = tmpl;
8365 }
8366 }
8367 else if (note == 12)
8368 {
8369 if (CURR_SLOT.qp_regno == 63)
8370 {
8371 specs[count++] = tmpl;
8372 }
8373 }
8374 else if (note == 1)
8375 {
8376 if (rsrc_write)
8377 {
8378 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8379 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8380 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8381 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8382
8383 if (p1 == 63
8384 && (idesc->operands[0] == IA64_OPND_P1
8385 || idesc->operands[0] == IA64_OPND_P2))
8386 {
8387 specs[count] = tmpl;
8388 specs[count++].cmp_type =
8389 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8390 }
8391 if (p2 == 63
8392 && (idesc->operands[1] == IA64_OPND_P1
8393 || idesc->operands[1] == IA64_OPND_P2))
8394 {
8395 specs[count] = tmpl;
8396 specs[count++].cmp_type =
8397 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8398 }
8399 }
8400 else
8401 {
8402 if (CURR_SLOT.qp_regno == 63)
8403 {
8404 specs[count++] = tmpl;
8405 }
8406 }
8407 }
8408 else
8409 {
8410 UNHANDLED;
8411 }
8412 break;
8413
8414 case IA64_RS_RSE:
8415 /* FIXME we can identify some individual RSE written resources, but RSE
8416 read resources have not yet been completely identified, so for now
8417 treat RSE as a single resource */
8418 if (strncmp (idesc->name, "mov", 3) == 0)
8419 {
8420 if (rsrc_write)
8421 {
8422 if (idesc->operands[0] == IA64_OPND_AR3
8423 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8424 {
8425 specs[count] = tmpl;
8426 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8427 }
8428 }
8429 else
8430 {
8431 if (idesc->operands[0] == IA64_OPND_AR3)
8432 {
8433 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8434 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8435 {
8436 specs[count++] = tmpl;
8437 }
8438 }
8439 else if (idesc->operands[1] == IA64_OPND_AR3)
8440 {
8441 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8442 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8443 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8444 {
8445 specs[count++] = tmpl;
8446 }
8447 }
8448 }
8449 }
8450 else
8451 {
8452 specs[count++] = tmpl;
8453 }
8454 break;
8455
8456 case IA64_RS_ANY:
8457 /* FIXME -- do any of these need to be non-specific? */
8458 specs[count++] = tmpl;
8459 break;
8460
8461 default:
8462 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8463 break;
8464 }
8465
8466 return count;
8467 }
8468
8469 /* Clear branch flags on marked resources. This breaks the link between the
8470 QP of the marking instruction and a subsequent branch on the same QP. */
8471
8472 static void
8473 clear_qp_branch_flag (mask)
8474 valueT mask;
8475 {
8476 int i;
8477 for (i = 0; i < regdepslen; i++)
8478 {
8479 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8480 if ((bit & mask) != 0)
8481 {
8482 regdeps[i].link_to_qp_branch = 0;
8483 }
8484 }
8485 }
8486
8487 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8488
 8489    Any change to a PR clears the mutex relations which include that PR. */
8490
8491 static void
8492 clear_qp_mutex (mask)
8493 valueT mask;
8494 {
8495 int i;
8496
8497 i = 0;
8498 while (i < qp_mutexeslen)
8499 {
8500 if ((qp_mutexes[i].prmask & mask) != 0)
8501 {
8502 if (md.debug_dv)
8503 {
8504 fprintf (stderr, " Clearing mutex relation");
8505 print_prmask (qp_mutexes[i].prmask);
8506 fprintf (stderr, "\n");
8507 }
8508 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8509 }
8510 else
8511 ++i;
8512 }
8513 }
8514
8515 /* Clear implies relations which contain PRs in the given masks.
8516 P1_MASK indicates the source of the implies relation, while P2_MASK
8517 indicates the implied PR. */
8518
8519 static void
8520 clear_qp_implies (p1_mask, p2_mask)
8521 valueT p1_mask;
8522 valueT p2_mask;
8523 {
8524 int i;
8525
8526 i = 0;
8527 while (i < qp_implieslen)
8528 {
8529 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8530 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8531 {
8532 if (md.debug_dv)
8533 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8534 qp_implies[i].p1, qp_implies[i].p2);
8535 qp_implies[i] = qp_implies[--qp_implieslen];
8536 }
8537 else
8538 ++i;
8539 }
8540 }
8541
8542 /* Add the PRs specified to the list of implied relations. */
8543
8544 static void
8545 add_qp_imply (p1, p2)
8546 int p1, p2;
8547 {
8548 valueT mask;
8549 valueT bit;
8550 int i;
8551
8552 /* p0 is not meaningful here. */
8553 if (p1 == 0 || p2 == 0)
8554 abort ();
8555
8556 if (p1 == p2)
8557 return;
8558
8559 /* If it exists already, ignore it. */
8560 for (i = 0; i < qp_implieslen; i++)
8561 {
8562 if (qp_implies[i].p1 == p1
8563 && qp_implies[i].p2 == p2
8564 && qp_implies[i].path == md.path
8565 && !qp_implies[i].p2_branched)
8566 return;
8567 }
8568
8569 if (qp_implieslen == qp_impliestotlen)
8570 {
8571 qp_impliestotlen += 20;
8572 qp_implies = (struct qp_imply *)
8573 xrealloc ((void *) qp_implies,
8574 qp_impliestotlen * sizeof (struct qp_imply));
8575 }
8576 if (md.debug_dv)
8577 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8578 qp_implies[qp_implieslen].p1 = p1;
8579 qp_implies[qp_implieslen].p2 = p2;
8580 qp_implies[qp_implieslen].path = md.path;
8581 qp_implies[qp_implieslen++].p2_branched = 0;
8582
8583 /* Add in the implied transitive relations; for everything that p2 implies,
8584 make p1 imply that, too; for everything that implies p1, make it imply p2
8585 as well. */
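  /* Illustrative sketch: if "p3 implies p4" is already recorded and we are
     adding "p2 implies p3", the loop below also records "p2 implies p4".  */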
8586 for (i = 0; i < qp_implieslen; i++)
8587 {
8588 if (qp_implies[i].p1 == p2)
8589 add_qp_imply (p1, qp_implies[i].p2);
8590 if (qp_implies[i].p2 == p1)
8591 add_qp_imply (qp_implies[i].p1, p2);
8592 }
8593 /* Add in mutex relations implied by this implies relation; for each mutex
8594 relation containing p2, duplicate it and replace p2 with p1. */
8595 bit = (valueT) 1 << p1;
8596 mask = (valueT) 1 << p2;
8597 for (i = 0; i < qp_mutexeslen; i++)
8598 {
8599 if (qp_mutexes[i].prmask & mask)
8600 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
8601 }
8602 }
8603
8604 /* Add the PRs specified in the mask to the mutex list; this means that only
8605 one of the PRs can be true at any time. PR0 should never be included in
8606 the mask. */
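/* For example (illustrative only): after an unconditional
   "cmp.eq p6, p7 = r1, r2", p6 and p7 cannot both be true, so the caller
   would pass a mask with bits 6 and 7 set.  */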
8607
8608 static void
8609 add_qp_mutex (mask)
8610 valueT mask;
8611 {
8612 if (mask & 0x1)
8613 abort ();
8614
8615 if (qp_mutexeslen == qp_mutexestotlen)
8616 {
8617 qp_mutexestotlen += 20;
8618 qp_mutexes = (struct qpmutex *)
8619 xrealloc ((void *) qp_mutexes,
8620 qp_mutexestotlen * sizeof (struct qpmutex));
8621 }
8622 if (md.debug_dv)
8623 {
8624 fprintf (stderr, " Registering mutex on");
8625 print_prmask (mask);
8626 fprintf (stderr, "\n");
8627 }
8628 qp_mutexes[qp_mutexeslen].path = md.path;
8629 qp_mutexes[qp_mutexeslen++].prmask = mask;
8630 }
8631
8632 static void
8633 clear_register_values ()
8634 {
8635 int i;
8636 if (md.debug_dv)
8637 fprintf (stderr, " Clearing register values\n");
8638 for (i = 1; i < NELEMS (gr_values); i++)
8639 gr_values[i].known = 0;
8640 }
8641
8642 /* Keep track of register values/changes which affect DV tracking.
8643
 8644    Optimization note: we should add a flag to classes of insns where otherwise we
8645 have to examine a group of strings to identify them. */
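/* Illustrative sketch of the intent: once "mov r4 = 2" has been seen, a
   later "mov r5 = pmc[r4]" can be treated as touching the specific resource
   PMC[2] rather than a non-specific PMC, avoiding some false DV warnings.  */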
8646
8647 static void
8648 note_register_values (idesc)
8649 struct ia64_opcode *idesc;
8650 {
8651 valueT qp_changemask = 0;
8652 int i;
8653
8654 /* Invalidate values for registers being written to. */
8655 for (i = 0; i < idesc->num_outputs; i++)
8656 {
8657 if (idesc->operands[i] == IA64_OPND_R1
8658 || idesc->operands[i] == IA64_OPND_R2
8659 || idesc->operands[i] == IA64_OPND_R3)
8660 {
8661 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8662 if (regno > 0 && regno < NELEMS (gr_values))
8663 gr_values[regno].known = 0;
8664 }
8665 else if (idesc->operands[i] == IA64_OPND_R3_2)
8666 {
8667 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8668 if (regno > 0 && regno < 4)
8669 gr_values[regno].known = 0;
8670 }
8671 else if (idesc->operands[i] == IA64_OPND_P1
8672 || idesc->operands[i] == IA64_OPND_P2)
8673 {
8674 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8675 qp_changemask |= (valueT) 1 << regno;
8676 }
8677 else if (idesc->operands[i] == IA64_OPND_PR)
8678 {
8679 if (idesc->operands[2] & (valueT) 0x10000)
8680 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
8681 else
8682 qp_changemask = idesc->operands[2];
8683 break;
8684 }
8685 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8686 {
8687 if (idesc->operands[1] & ((valueT) 1 << 43))
8688 qp_changemask = ~(valueT) 0xFFFFFFFFFFF | idesc->operands[1];
8689 else
8690 qp_changemask = idesc->operands[1];
8691 qp_changemask &= ~(valueT) 0xFFFF;
8692 break;
8693 }
8694 }
8695
8696 /* Always clear qp branch flags on any PR change. */
8697 /* FIXME there may be exceptions for certain compares. */
8698 clear_qp_branch_flag (qp_changemask);
8699
8700 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8701 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8702 {
8703 qp_changemask |= ~(valueT) 0xFFFF;
8704 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8705 {
8706 for (i = 32; i < 32 + md.rot.num_regs; i++)
8707 gr_values[i].known = 0;
8708 }
8709 clear_qp_mutex (qp_changemask);
8710 clear_qp_implies (qp_changemask, qp_changemask);
8711 }
8712 /* After a call, all register values are undefined, except those marked
8713 as "safe". */
8714 else if (strncmp (idesc->name, "br.call", 6) == 0
8715 || strncmp (idesc->name, "brl.call", 7) == 0)
8716 {
8717 /* FIXME keep GR values which are marked as "safe_across_calls" */
8718 clear_register_values ();
8719 clear_qp_mutex (~qp_safe_across_calls);
8720 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8721 clear_qp_branch_flag (~qp_safe_across_calls);
8722 }
8723 else if (is_interruption_or_rfi (idesc)
8724 || is_taken_branch (idesc))
8725 {
8726 clear_register_values ();
8727 clear_qp_mutex (~(valueT) 0);
8728 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8729 }
8730 /* Look for mutex and implies relations. */
8731 else if ((idesc->operands[0] == IA64_OPND_P1
8732 || idesc->operands[0] == IA64_OPND_P2)
8733 && (idesc->operands[1] == IA64_OPND_P1
8734 || idesc->operands[1] == IA64_OPND_P2))
8735 {
8736 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8737 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8738 valueT p1mask = (valueT) 1 << p1;
8739 valueT p2mask = (valueT) 1 << p2;
8740
8741 /* If one of the PRs is PR0, we can't really do anything. */
8742 if (p1 == 0 || p2 == 0)
8743 {
8744 if (md.debug_dv)
8745 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8746 }
8747 /* In general, clear mutexes and implies which include P1 or P2,
8748 with the following exceptions. */
8749 else if (strstr (idesc->name, ".or.andcm") != NULL)
8750 {
8751 add_qp_mutex (p1mask | p2mask);
8752 clear_qp_implies (p2mask, p1mask);
8753 }
8754 else if (strstr (idesc->name, ".and.orcm") != NULL)
8755 {
8756 add_qp_mutex (p1mask | p2mask);
8757 clear_qp_implies (p1mask, p2mask);
8758 }
8759 else if (strstr (idesc->name, ".and") != NULL)
8760 {
8761 clear_qp_implies (0, p1mask | p2mask);
8762 }
8763 else if (strstr (idesc->name, ".or") != NULL)
8764 {
8765 clear_qp_mutex (p1mask | p2mask);
8766 clear_qp_implies (p1mask | p2mask, 0);
8767 }
8768 else
8769 {
8770 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
8771 if (strstr (idesc->name, ".unc") != NULL)
8772 {
8773 add_qp_mutex (p1mask | p2mask);
8774 if (CURR_SLOT.qp_regno != 0)
8775 {
8776 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
8777 CURR_SLOT.qp_regno);
8778 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
8779 CURR_SLOT.qp_regno);
8780 }
8781 }
8782 else if (CURR_SLOT.qp_regno == 0)
8783 {
8784 add_qp_mutex (p1mask | p2mask);
8785 }
8786 else
8787 {
8788 clear_qp_mutex (p1mask | p2mask);
8789 }
8790 }
8791 }
8792 /* Look for mov imm insns into GRs. */
8793 else if (idesc->operands[0] == IA64_OPND_R1
8794 && (idesc->operands[1] == IA64_OPND_IMM22
8795 || idesc->operands[1] == IA64_OPND_IMMU64)
8796 && (strcmp (idesc->name, "mov") == 0
8797 || strcmp (idesc->name, "movl") == 0))
8798 {
8799 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8800 if (regno > 0 && regno < NELEMS (gr_values))
8801 {
8802 gr_values[regno].known = 1;
8803 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
8804 gr_values[regno].path = md.path;
8805 if (md.debug_dv)
8806 {
8807 fprintf (stderr, " Know gr%d = ", regno);
8808 fprintf_vma (stderr, gr_values[regno].value);
8809 fputs ("\n", stderr);
8810 }
8811 }
8812 }
8813 else
8814 {
8815 clear_qp_mutex (qp_changemask);
8816 clear_qp_implies (qp_changemask, qp_changemask);
8817 }
8818 }
8819
8820 /* Return whether the given predicate registers are currently mutex. */
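/* For example, a resource written under (p6) and later read under (p7)
   cannot conflict if p6 and p7 are known to be mutex on the path in
   question, since at most one of the two predicates can be true.  */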
8821
8822 static int
8823 qp_mutex (p1, p2, path)
8824 int p1;
8825 int p2;
8826 int path;
8827 {
8828 int i;
8829 valueT mask;
8830
8831 if (p1 != p2)
8832 {
8833 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
8834 for (i = 0; i < qp_mutexeslen; i++)
8835 {
8836 if (qp_mutexes[i].path >= path
8837 && (qp_mutexes[i].prmask & mask) == mask)
8838 return 1;
8839 }
8840 }
8841 return 0;
8842 }
8843
 8844 /* Return whether the given resource is in the given insn's list of chks.
8845 Return 1 if the conflict is absolutely determined, 2 if it's a potential
8846 conflict. */
8847
8848 static int
8849 resources_match (rs, idesc, note, qp_regno, path)
8850 struct rsrc *rs;
8851 struct ia64_opcode *idesc;
8852 int note;
8853 int qp_regno;
8854 int path;
8855 {
8856 struct rsrc specs[MAX_SPECS];
8857 int count;
8858
8859 /* If the marked resource's qp_regno and the given qp_regno are mutex,
8860 we don't need to check. One exception is note 11, which indicates that
8861 target predicates are written regardless of PR[qp]. */
8862 if (qp_mutex (rs->qp_regno, qp_regno, path)
8863 && note != 11)
8864 return 0;
8865
8866 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
8867 while (count-- > 0)
8868 {
8869 /* UNAT checking is a bit more specific than other resources */
8870 if (rs->dependency->specifier == IA64_RS_AR_UNAT
8871 && specs[count].mem_offset.hint
8872 && rs->mem_offset.hint)
8873 {
8874 if (rs->mem_offset.base == specs[count].mem_offset.base)
8875 {
8876 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
8877 ((specs[count].mem_offset.offset >> 3) & 0x3F))
8878 return 1;
8879 else
8880 continue;
8881 }
8882 }
8883
8884 /* Skip apparent PR write conflicts where both writes are an AND or both
8885 writes are an OR. */
8886 if (rs->dependency->specifier == IA64_RS_PR
8887 || rs->dependency->specifier == IA64_RS_PRr
8888 || rs->dependency->specifier == IA64_RS_PR63)
8889 {
8890 if (specs[count].cmp_type != CMP_NONE
8891 && specs[count].cmp_type == rs->cmp_type)
8892 {
8893 if (md.debug_dv)
8894 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
8895 dv_mode[rs->dependency->mode],
8896 rs->dependency->specifier != IA64_RS_PR63 ?
8897 specs[count].index : 63);
8898 continue;
8899 }
8900 if (md.debug_dv)
8901 fprintf (stderr,
8902 " %s on parallel compare conflict %s vs %s on PR%d\n",
8903 dv_mode[rs->dependency->mode],
8904 dv_cmp_type[rs->cmp_type],
8905 dv_cmp_type[specs[count].cmp_type],
8906 rs->dependency->specifier != IA64_RS_PR63 ?
8907 specs[count].index : 63);
8908
8909 }
8910
 8911 	  /* If either resource is not specific, conservatively assume a
 8912 	     conflict.  */
8913 if (!specs[count].specific || !rs->specific)
8914 return 2;
8915 else if (specs[count].index == rs->index)
8916 return 1;
8917 }
8918 #if 0
8919 if (md.debug_dv)
8920 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
8921 #endif
8922
8923 return 0;
8924 }
8925
8926 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
8927 insert a stop to create the break. Update all resource dependencies
8928 appropriately. If QP_REGNO is non-zero, only apply the break to resources
8929 which use the same QP_REGNO and have the link_to_qp_branch flag set.
8930 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
8931 instruction. */
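/* For example, an explicit ";;" stop in the source, a taken branch, or an
   rfi all end the current instruction group; that alone is enough to
   resolve dependencies with "implied" semantics (see check_dv below).  */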
8932
8933 static void
8934 insn_group_break (insert_stop, qp_regno, save_current)
8935 int insert_stop;
8936 int qp_regno;
8937 int save_current;
8938 {
8939 int i;
8940
8941 if (insert_stop && md.num_slots_in_use > 0)
8942 PREV_SLOT.end_of_insn_group = 1;
8943
8944 if (md.debug_dv)
8945 {
8946 fprintf (stderr, " Insn group break%s",
8947 (insert_stop ? " (w/stop)" : ""));
8948 if (qp_regno != 0)
8949 fprintf (stderr, " effective for QP=%d", qp_regno);
8950 fprintf (stderr, "\n");
8951 }
8952
8953 i = 0;
8954 while (i < regdepslen)
8955 {
8956 const struct ia64_dependency *dep = regdeps[i].dependency;
8957
8958 if (qp_regno != 0
8959 && regdeps[i].qp_regno != qp_regno)
8960 {
8961 ++i;
8962 continue;
8963 }
8964
8965 if (save_current
8966 && CURR_SLOT.src_file == regdeps[i].file
8967 && CURR_SLOT.src_line == regdeps[i].line)
8968 {
8969 ++i;
8970 continue;
8971 }
8972
8973 /* clear dependencies which are automatically cleared by a stop, or
8974 those that have reached the appropriate state of insn serialization */
8975 if (dep->semantics == IA64_DVS_IMPLIED
8976 || dep->semantics == IA64_DVS_IMPLIEDF
8977 || regdeps[i].insn_srlz == STATE_SRLZ)
8978 {
8979 print_dependency ("Removing", i);
8980 regdeps[i] = regdeps[--regdepslen];
8981 }
8982 else
8983 {
8984 if (dep->semantics == IA64_DVS_DATA
8985 || dep->semantics == IA64_DVS_INSTR
8986 || dep->semantics == IA64_DVS_SPECIFIC)
8987 {
8988 if (regdeps[i].insn_srlz == STATE_NONE)
8989 regdeps[i].insn_srlz = STATE_STOP;
8990 if (regdeps[i].data_srlz == STATE_NONE)
8991 regdeps[i].data_srlz = STATE_STOP;
8992 }
8993 ++i;
8994 }
8995 }
8996 }
8997
8998 /* Add the given resource usage spec to the list of active dependencies. */
8999
9000 static void
9001 mark_resource (idesc, dep, spec, depind, path)
9002 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
9003 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
9004 struct rsrc *spec;
9005 int depind;
9006 int path;
9007 {
9008 if (regdepslen == regdepstotlen)
9009 {
9010 regdepstotlen += 20;
9011 regdeps = (struct rsrc *)
9012 xrealloc ((void *) regdeps,
9013 regdepstotlen * sizeof (struct rsrc));
9014 }
9015
9016 regdeps[regdepslen] = *spec;
9017 regdeps[regdepslen].depind = depind;
9018 regdeps[regdepslen].path = path;
9019 regdeps[regdepslen].file = CURR_SLOT.src_file;
9020 regdeps[regdepslen].line = CURR_SLOT.src_line;
9021
9022 print_dependency ("Adding", regdepslen);
9023
9024 ++regdepslen;
9025 }
9026
9027 static void
9028 print_dependency (action, depind)
9029 const char *action;
9030 int depind;
9031 {
9032 if (md.debug_dv)
9033 {
9034 fprintf (stderr, " %s %s '%s'",
9035 action, dv_mode[(regdeps[depind].dependency)->mode],
9036 (regdeps[depind].dependency)->name);
9037 if (regdeps[depind].specific && regdeps[depind].index != 0)
9038 fprintf (stderr, " (%d)", regdeps[depind].index);
9039 if (regdeps[depind].mem_offset.hint)
9040 {
9041 fputs (" ", stderr);
9042 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9043 fputs ("+", stderr);
9044 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9045 }
9046 fprintf (stderr, "\n");
9047 }
9048 }
9049
9050 static void
9051 instruction_serialization ()
9052 {
9053 int i;
9054 if (md.debug_dv)
9055 fprintf (stderr, " Instruction serialization\n");
9056 for (i = 0; i < regdepslen; i++)
9057 if (regdeps[i].insn_srlz == STATE_STOP)
9058 regdeps[i].insn_srlz = STATE_SRLZ;
9059 }
9060
9061 static void
9062 data_serialization ()
9063 {
9064 int i = 0;
9065 if (md.debug_dv)
9066 fprintf (stderr, " Data serialization\n");
9067 while (i < regdepslen)
9068 {
9069 if (regdeps[i].data_srlz == STATE_STOP
9070 /* Note: as of 991210, all "other" dependencies are cleared by a
9071 data serialization. This might change with new tables */
9072 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9073 {
9074 print_dependency ("Removing", i);
9075 regdeps[i] = regdeps[--regdepslen];
9076 }
9077 else
9078 ++i;
9079 }
9080 }
9081
9082 /* Insert stops and serializations as needed to avoid DVs. */
9083
9084 static void
9085 remove_marked_resource (rs)
9086 struct rsrc *rs;
9087 {
9088 switch (rs->dependency->semantics)
9089 {
9090 case IA64_DVS_SPECIFIC:
9091 if (md.debug_dv)
9092 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9093 /* ...fall through... */
9094 case IA64_DVS_INSTR:
9095 if (md.debug_dv)
9096 fprintf (stderr, "Inserting instr serialization\n");
9097 if (rs->insn_srlz < STATE_STOP)
9098 insn_group_break (1, 0, 0);
9099 if (rs->insn_srlz < STATE_SRLZ)
9100 {
9101 int oldqp = CURR_SLOT.qp_regno;
9102 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9103 /* Manually jam a srlz.i insn into the stream */
9104 CURR_SLOT.qp_regno = 0;
9105 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9106 instruction_serialization ();
9107 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9108 if (++md.num_slots_in_use >= NUM_SLOTS)
9109 emit_one_bundle ();
9110 CURR_SLOT.qp_regno = oldqp;
9111 CURR_SLOT.idesc = oldidesc;
9112 }
9113 insn_group_break (1, 0, 0);
9114 break;
9115 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9116 "other" types of DV are eliminated
9117 by a data serialization */
9118 case IA64_DVS_DATA:
9119 if (md.debug_dv)
9120 fprintf (stderr, "Inserting data serialization\n");
9121 if (rs->data_srlz < STATE_STOP)
9122 insn_group_break (1, 0, 0);
9123 {
9124 int oldqp = CURR_SLOT.qp_regno;
9125 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9126 /* Manually jam a srlz.d insn into the stream */
9127 CURR_SLOT.qp_regno = 0;
9128 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9129 data_serialization ();
9130 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9131 if (++md.num_slots_in_use >= NUM_SLOTS)
9132 emit_one_bundle ();
9133 CURR_SLOT.qp_regno = oldqp;
9134 CURR_SLOT.idesc = oldidesc;
9135 }
9136 break;
9137 case IA64_DVS_IMPLIED:
9138 case IA64_DVS_IMPLIEDF:
9139 if (md.debug_dv)
9140 fprintf (stderr, "Inserting stop\n");
9141 insn_group_break (1, 0, 0);
9142 break;
9143 default:
9144 break;
9145 }
9146 }
9147
9148 /* Check the resources used by the given opcode against the current dependency
9149 list.
9150
9151 The check is run once for each execution path encountered. In this case,
9152 a unique execution path is the sequence of instructions following a code
9153 entry point, e.g. the following has three execution paths, one starting
9154 at L0, one at L1, and one at L2.
9155
9156 L0: nop
9157 L1: add
9158 L2: add
9159 br.ret
9160 */
9161
9162 static void
9163 check_dependencies (idesc)
9164 struct ia64_opcode *idesc;
9165 {
9166 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9167 int path;
9168 int i;
9169
9170 /* Note that the number of marked resources may change within the
9171 loop if in auto mode. */
9172 i = 0;
9173 while (i < regdepslen)
9174 {
9175 struct rsrc *rs = &regdeps[i];
9176 const struct ia64_dependency *dep = rs->dependency;
9177 int chkind;
9178 int note;
9179 int start_over = 0;
9180
9181 if (dep->semantics == IA64_DVS_NONE
9182 || (chkind = depends_on (rs->depind, idesc)) == -1)
9183 {
9184 ++i;
9185 continue;
9186 }
9187
9188 note = NOTE (opdeps->chks[chkind]);
9189
9190 /* Check this resource against each execution path seen thus far. */
9191 for (path = 0; path <= md.path; path++)
9192 {
9193 int matchtype;
9194
9195 /* If the dependency wasn't on the path being checked, ignore it. */
9196 if (rs->path < path)
9197 continue;
9198
9199 /* If the QP for this insn implies a QP which has branched, don't
9200 bother checking. Ed. NOTE: I don't think this check is terribly
9201 useful; what's the point of generating code which will only be
9202 reached if its QP is zero?
9203 This code was specifically inserted to handle the following code,
9204 based on notes from Intel's DV checking code, where p1 implies p2.
9205
9206 mov r4 = 2
9207 (p2) br.cond L
9208 (p1) mov r4 = 7
9209 */
9210 if (CURR_SLOT.qp_regno != 0)
9211 {
9212 int skip = 0;
9213 int implies;
9214 for (implies = 0; implies < qp_implieslen; implies++)
9215 {
9216 if (qp_implies[implies].path >= path
9217 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9218 && qp_implies[implies].p2_branched)
9219 {
9220 skip = 1;
9221 break;
9222 }
9223 }
9224 if (skip)
9225 continue;
9226 }
9227
9228 if ((matchtype = resources_match (rs, idesc, note,
9229 CURR_SLOT.qp_regno, path)) != 0)
9230 {
9231 char msg[1024];
9232 char pathmsg[256] = "";
9233 char indexmsg[256] = "";
9234 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9235
9236 if (path != 0)
9237 sprintf (pathmsg, " when entry is at label '%s'",
9238 md.entry_labels[path - 1]);
9239 if (rs->specific && rs->index != 0)
9240 sprintf (indexmsg, ", specific resource number is %d",
9241 rs->index);
9242 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9243 idesc->name,
9244 (certain ? "violates" : "may violate"),
9245 dv_mode[dep->mode], dep->name,
9246 dv_sem[dep->semantics],
9247 pathmsg, indexmsg);
9248
9249 if (md.explicit_mode)
9250 {
9251 as_warn ("%s", msg);
9252 if (path < md.path)
9253 as_warn (_("Only the first path encountering the conflict "
9254 "is reported"));
9255 as_warn_where (rs->file, rs->line,
9256 _("This is the location of the "
9257 "conflicting usage"));
9258 /* Don't bother checking other paths, to avoid duplicating
9259 the same warning */
9260 break;
9261 }
9262 else
9263 {
9264 if (md.debug_dv)
9265 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9266
9267 remove_marked_resource (rs);
9268
9269 /* since the set of dependencies has changed, start over */
9270 /* FIXME -- since we're removing dvs as we go, we
9271 probably don't really need to start over... */
9272 start_over = 1;
9273 break;
9274 }
9275 }
9276 }
9277 if (start_over)
9278 i = 0;
9279 else
9280 ++i;
9281 }
9282 }
9283
9284 /* Register new dependencies based on the given opcode. */
9285
9286 static void
9287 mark_resources (idesc)
9288 struct ia64_opcode *idesc;
9289 {
9290 int i;
9291 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9292 int add_only_qp_reads = 0;
9293
9294 /* A conditional branch only uses its resources if it is taken; if it is
9295 taken, we stop following that path. The other branch types effectively
9296 *always* write their resources. If it's not taken, register only QP
9297 reads. */
9298 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9299 {
9300 add_only_qp_reads = 1;
9301 }
9302
9303 if (md.debug_dv)
9304 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9305
9306 for (i = 0; i < opdeps->nregs; i++)
9307 {
9308 const struct ia64_dependency *dep;
9309 struct rsrc specs[MAX_SPECS];
9310 int note;
9311 int path;
9312 int count;
9313
9314 dep = ia64_find_dependency (opdeps->regs[i]);
9315 note = NOTE (opdeps->regs[i]);
9316
9317 if (add_only_qp_reads
9318 && !(dep->mode == IA64_DV_WAR
9319 && (dep->specifier == IA64_RS_PR
9320 || dep->specifier == IA64_RS_PRr
9321 || dep->specifier == IA64_RS_PR63)))
9322 continue;
9323
9324 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9325
9326 #if 0
9327 if (md.debug_dv && !count)
9328 fprintf (stderr, " No %s %s usage found (path %d)\n",
9329 dv_mode[dep->mode], dep->name, md.path);
9330 #endif
9331
9332 while (count-- > 0)
9333 {
9334 mark_resource (idesc, dep, &specs[count],
9335 DEP (opdeps->regs[i]), md.path);
9336 }
9337
9338 /* The execution path may affect register values, which may in turn
9339 affect which indirect-access resources are accessed. */
9340 switch (dep->specifier)
9341 {
9342 default:
9343 break;
9344 case IA64_RS_CPUID:
9345 case IA64_RS_DBR:
9346 case IA64_RS_IBR:
9347 case IA64_RS_MSR:
9348 case IA64_RS_PKR:
9349 case IA64_RS_PMC:
9350 case IA64_RS_PMD:
9351 case IA64_RS_RR:
9352 for (path = 0; path < md.path; path++)
9353 {
9354 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9355 while (count-- > 0)
9356 mark_resource (idesc, dep, &specs[count],
9357 DEP (opdeps->regs[i]), path);
9358 }
9359 break;
9360 }
9361 }
9362 }
9363
9364 /* Remove dependencies when they no longer apply. */
9365
9366 static void
9367 update_dependencies (idesc)
9368 struct ia64_opcode *idesc;
9369 {
9370 int i;
9371
9372 if (strcmp (idesc->name, "srlz.i") == 0)
9373 {
9374 instruction_serialization ();
9375 }
9376 else if (strcmp (idesc->name, "srlz.d") == 0)
9377 {
9378 data_serialization ();
9379 }
9380 else if (is_interruption_or_rfi (idesc)
9381 || is_taken_branch (idesc))
9382 {
9383 /* Although technically the taken branch doesn't clear dependencies
9384 which require a srlz.[id], we don't follow the branch; the next
9385 instruction is assumed to start with a clean slate. */
9386 regdepslen = 0;
9387 md.path = 0;
9388 }
9389 else if (is_conditional_branch (idesc)
9390 && CURR_SLOT.qp_regno != 0)
9391 {
9392 int is_call = strstr (idesc->name, ".call") != NULL;
9393
9394 for (i = 0; i < qp_implieslen; i++)
9395 {
9396 /* If the conditional branch's predicate is implied by the predicate
9397 in an existing dependency, remove that dependency. */
9398 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9399 {
9400 int depind = 0;
9401 /* Note that this implied predicate takes a branch so that if
9402 a later insn generates a DV but its predicate implies this
9403 one, we can avoid the false DV warning. */
9404 qp_implies[i].p2_branched = 1;
9405 while (depind < regdepslen)
9406 {
9407 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9408 {
9409 print_dependency ("Removing", depind);
9410 regdeps[depind] = regdeps[--regdepslen];
9411 }
9412 else
9413 ++depind;
9414 }
9415 }
9416 }
9417 /* Any marked resources which have this same predicate should be
9418 cleared, provided that the QP hasn't been modified between the
9419 marking instruction and the branch. */
9420 if (is_call)
9421 {
9422 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9423 }
9424 else
9425 {
9426 i = 0;
9427 while (i < regdepslen)
9428 {
9429 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9430 && regdeps[i].link_to_qp_branch
9431 && (regdeps[i].file != CURR_SLOT.src_file
9432 || regdeps[i].line != CURR_SLOT.src_line))
9433 {
9434 /* Treat like a taken branch */
9435 print_dependency ("Removing", i);
9436 regdeps[i] = regdeps[--regdepslen];
9437 }
9438 else
9439 ++i;
9440 }
9441 }
9442 }
9443 }
9444
9445 /* Examine the current instruction for dependency violations. */
9446
9447 static int
9448 check_dv (idesc)
9449 struct ia64_opcode *idesc;
9450 {
9451 if (md.debug_dv)
9452 {
9453 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9454 idesc->name, CURR_SLOT.src_line,
9455 idesc->dependencies->nchks,
9456 idesc->dependencies->nregs);
9457 }
9458
9459 /* Look through the list of currently marked resources; if the current
9460 instruction has the dependency in its chks list which uses that resource,
9461 check against the specific resources used. */
9462 check_dependencies (idesc);
9463
9464 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9465 then add them to the list of marked resources. */
9466 mark_resources (idesc);
9467
9468 /* There are several types of dependency semantics, and each has its own
9469 requirements for being cleared
9470
9471 Instruction serialization (insns separated by interruption, rfi, or
9472 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9473
9474 Data serialization (instruction serialization, or writer + srlz.d +
9475 reader, where writer and srlz.d are in separate groups) clears
9476 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9477 always be the case).
9478
9479 Instruction group break (groups separated by stop, taken branch,
9480 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9481 */
9482 update_dependencies (idesc);
9483
9484 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9485 warning. Keep track of as many as possible that are useful. */
9486 note_register_values (idesc);
9487
9488 /* We don't need or want this anymore. */
9489 md.mem_offset.hint = 0;
9490
9491 return 0;
9492 }
9493
9494 /* Translate one line of assembly. Pseudo ops and labels do not show
9495 here. */
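/* For example, for a line such as "(p7) adds r14 = 8, r32" (illustrative),
   the qualifying predicate has already been stripped and recorded in md.qp
   by the time we get here; the mnemonic is looked up below and the rest of
   the line is handed to parse_operands.  */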
9496 void
9497 md_assemble (str)
9498 char *str;
9499 {
9500 char *saved_input_line_pointer, *mnemonic;
9501 const struct pseudo_opcode *pdesc;
9502 struct ia64_opcode *idesc;
9503 unsigned char qp_regno;
9504 unsigned int flags;
9505 int ch;
9506
9507 saved_input_line_pointer = input_line_pointer;
9508 input_line_pointer = str;
9509
9510 /* extract the opcode (mnemonic): */
9511
9512 mnemonic = input_line_pointer;
9513 ch = get_symbol_end ();
9514 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9515 if (pdesc)
9516 {
9517 *input_line_pointer = ch;
9518 (*pdesc->handler) (pdesc->arg);
9519 goto done;
9520 }
9521
9522 /* Find the instruction descriptor matching the arguments. */
9523
9524 idesc = ia64_find_opcode (mnemonic);
9525 *input_line_pointer = ch;
9526 if (!idesc)
9527 {
9528 as_bad ("Unknown opcode `%s'", mnemonic);
9529 goto done;
9530 }
9531
9532 idesc = parse_operands (idesc);
9533 if (!idesc)
9534 goto done;
9535
9536 /* Handle the dynamic ops we can handle now: */
9537 if (idesc->type == IA64_TYPE_DYN)
9538 {
9539 if (strcmp (idesc->name, "add") == 0)
9540 {
9541 if (CURR_SLOT.opnd[2].X_op == O_register
9542 && CURR_SLOT.opnd[2].X_add_number < 4)
9543 mnemonic = "addl";
9544 else
9545 mnemonic = "adds";
9546 ia64_free_opcode (idesc);
9547 idesc = ia64_find_opcode (mnemonic);
9548 #if 0
9549 know (!idesc->next);
9550 #endif
9551 }
9552 else if (strcmp (idesc->name, "mov") == 0)
9553 {
9554 enum ia64_opnd opnd1, opnd2;
9555 int rop;
9556
9557 opnd1 = idesc->operands[0];
9558 opnd2 = idesc->operands[1];
9559 if (opnd1 == IA64_OPND_AR3)
9560 rop = 0;
9561 else if (opnd2 == IA64_OPND_AR3)
9562 rop = 1;
9563 else
9564 abort ();
9565 if (CURR_SLOT.opnd[rop].X_op == O_register
9566 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9567 mnemonic = "mov.i";
9568 else
9569 mnemonic = "mov.m";
9570 ia64_free_opcode (idesc);
9571 idesc = ia64_find_opcode (mnemonic);
9572 while (idesc != NULL
9573 && (idesc->operands[0] != opnd1
9574 || idesc->operands[1] != opnd2))
9575 idesc = get_next_opcode (idesc);
9576 }
9577 }
9578
9579 qp_regno = 0;
9580 if (md.qp.X_op == O_register)
9581 {
9582 qp_regno = md.qp.X_add_number - REG_P;
9583 md.qp.X_op = O_absent;
9584 }
9585
9586 flags = idesc->flags;
9587
9588 if ((flags & IA64_OPCODE_FIRST) != 0)
9589 insn_group_break (1, 0, 0);
9590
9591 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9592 {
9593 as_bad ("`%s' cannot be predicated", idesc->name);
9594 goto done;
9595 }
9596
9597 /* Build the instruction. */
9598 CURR_SLOT.qp_regno = qp_regno;
9599 CURR_SLOT.idesc = idesc;
9600 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9601 dwarf2_where (&CURR_SLOT.debug_line);
9602
9603 /* Add unwind entry, if there is one. */
9604 if (unwind.current_entry)
9605 {
9606 CURR_SLOT.unwind_record = unwind.current_entry;
9607 unwind.current_entry = NULL;
9608 }
9609
9610 /* Check for dependency violations. */
9611 if (md.detect_dv)
9612 check_dv (idesc);
9613
9614 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9615 if (++md.num_slots_in_use >= NUM_SLOTS)
9616 emit_one_bundle ();
9617
9618 if ((flags & IA64_OPCODE_LAST) != 0)
9619 insn_group_break (1, 0, 0);
9620
9621 md.last_text_seg = now_seg;
9622
9623 done:
9624 input_line_pointer = saved_input_line_pointer;
9625 }
9626
9627 /* Called when symbol NAME cannot be found in the symbol table.
9628 Should be used for dynamic valued symbols only. */
9629
9630 symbolS *
9631 md_undefined_symbol (name)
9632 char *name ATTRIBUTE_UNUSED;
9633 {
9634 return 0;
9635 }
9636
 9637 /* Called for any expression that cannot be recognized. When the
9638 function is called, `input_line_pointer' will point to the start of
9639 the expression. */
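/* For example, relocation pseudo-functions such as "@ltoff(foo)" or
   "@gprel(foo)", and the "[reg]" index used with indirect register files
   (e.g. "pmc[r3]"), end up here.  */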
9640
9641 void
9642 md_operand (e)
9643 expressionS *e;
9644 {
9645 enum pseudo_type pseudo_type;
9646 const char *name;
9647 size_t len;
9648 int ch, i;
9649
9650 switch (*input_line_pointer)
9651 {
9652 case '@':
9653 /* Find what relocation pseudo-function we're dealing with. */
9654 pseudo_type = 0;
9655 ch = *++input_line_pointer;
9656 for (i = 0; i < NELEMS (pseudo_func); ++i)
9657 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9658 {
9659 len = strlen (pseudo_func[i].name);
9660 if (strncmp (pseudo_func[i].name + 1,
9661 input_line_pointer + 1, len - 1) == 0
9662 && !is_part_of_name (input_line_pointer[len]))
9663 {
9664 input_line_pointer += len;
9665 pseudo_type = pseudo_func[i].type;
9666 break;
9667 }
9668 }
9669 switch (pseudo_type)
9670 {
9671 case PSEUDO_FUNC_RELOC:
9672 SKIP_WHITESPACE ();
9673 if (*input_line_pointer != '(')
9674 {
9675 as_bad ("Expected '('");
9676 goto err;
9677 }
9678 /* Skip '('. */
9679 ++input_line_pointer;
9680 expression (e);
9681 if (*input_line_pointer++ != ')')
9682 {
9683 as_bad ("Missing ')'");
9684 goto err;
9685 }
9686 if (e->X_op != O_symbol)
9687 {
9688 if (e->X_op != O_pseudo_fixup)
9689 {
9690 as_bad ("Not a symbolic expression");
9691 goto err;
9692 }
9693 if (S_GET_VALUE (e->X_op_symbol) == FUNC_FPTR_RELATIVE
9694 && i == FUNC_LT_RELATIVE)
9695 i = FUNC_LT_FPTR_RELATIVE;
9696 else
9697 {
9698 as_bad ("Illegal combination of relocation functions");
9699 goto err;
9700 }
9701 }
9702 /* Make sure gas doesn't get rid of local symbols that are used
9703 in relocs. */
9704 e->X_op = O_pseudo_fixup;
9705 e->X_op_symbol = pseudo_func[i].u.sym;
9706 break;
9707
9708 case PSEUDO_FUNC_CONST:
9709 e->X_op = O_constant;
9710 e->X_add_number = pseudo_func[i].u.ival;
9711 break;
9712
9713 case PSEUDO_FUNC_REG:
9714 e->X_op = O_register;
9715 e->X_add_number = pseudo_func[i].u.ival;
9716 break;
9717
9718 default:
9719 name = input_line_pointer - 1;
9720 get_symbol_end ();
9721 as_bad ("Unknown pseudo function `%s'", name);
9722 goto err;
9723 }
9724 break;
9725
9726 case '[':
9727 ++input_line_pointer;
9728 expression (e);
9729 if (*input_line_pointer != ']')
9730 {
 9731 	      as_bad ("Closing bracket missing");
9732 goto err;
9733 }
9734 else
9735 {
9736 if (e->X_op != O_register)
9737 as_bad ("Register expected as index");
9738
9739 ++input_line_pointer;
9740 e->X_op = O_index;
9741 }
9742 break;
9743
9744 default:
9745 break;
9746 }
9747 return;
9748
9749 err:
9750 ignore_rest_of_line ();
9751 }
9752
9753 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
 9754 	 a section symbol plus some offset. For relocs involving @fptr()
 9755 	 directives, we don't want such adjustments since we need to have the
9756 original symbol's name in the reloc. */
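/* For instance, a "data8 @fptr(foo)" reference must keep "foo" itself in
   the reloc; folding it into a section symbol plus an offset would no
   longer identify the function whose descriptor is wanted.  */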
9757 int
9758 ia64_fix_adjustable (fix)
9759 fixS *fix;
9760 {
9761 /* Prevent all adjustments to global symbols */
9762 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
9763 return 0;
9764
9765 switch (fix->fx_r_type)
9766 {
9767 case BFD_RELOC_IA64_FPTR64I:
9768 case BFD_RELOC_IA64_FPTR32MSB:
9769 case BFD_RELOC_IA64_FPTR32LSB:
9770 case BFD_RELOC_IA64_FPTR64MSB:
9771 case BFD_RELOC_IA64_FPTR64LSB:
9772 case BFD_RELOC_IA64_LTOFF_FPTR22:
9773 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9774 return 0;
9775 default:
9776 break;
9777 }
9778
9779 return 1;
9780 }
9781
9782 int
9783 ia64_force_relocation (fix)
9784 fixS *fix;
9785 {
9786 switch (fix->fx_r_type)
9787 {
9788 case BFD_RELOC_IA64_FPTR64I:
9789 case BFD_RELOC_IA64_FPTR32MSB:
9790 case BFD_RELOC_IA64_FPTR32LSB:
9791 case BFD_RELOC_IA64_FPTR64MSB:
9792 case BFD_RELOC_IA64_FPTR64LSB:
9793
9794 case BFD_RELOC_IA64_LTOFF22:
9795 case BFD_RELOC_IA64_LTOFF64I:
9796 case BFD_RELOC_IA64_LTOFF_FPTR22:
9797 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9798 case BFD_RELOC_IA64_PLTOFF22:
9799 case BFD_RELOC_IA64_PLTOFF64I:
9800 case BFD_RELOC_IA64_PLTOFF64MSB:
9801 case BFD_RELOC_IA64_PLTOFF64LSB:
9802 return 1;
9803
9804 default:
9805 return 0;
9806 }
9807 return 0;
9808 }
9809
9810 /* Decide from what point a pc-relative relocation is relative to,
9811 relative to the pc-relative fixup. Er, relatively speaking. */
9812 long
9813 ia64_pcrel_from_section (fix, sec)
9814 fixS *fix;
9815 segT sec;
9816 {
9817 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
9818
9819 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
9820 off &= ~0xfUL;
9821
9822 return off;
9823 }
9824
9825 /* This is called whenever some data item (not an instruction) needs a
9826 fixup. We pick the right reloc code depending on the byteorder
9827 currently in effect. */
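/* For example, a "data8 foo" directive yields BFD_RELOC_IA64_DIR64LSB when
   assembling little-endian and BFD_RELOC_IA64_DIR64MSB when big-endian,
   per the switch below.  */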
9828 void
9829 ia64_cons_fix_new (f, where, nbytes, exp)
9830 fragS *f;
9831 int where;
9832 int nbytes;
9833 expressionS *exp;
9834 {
9835 bfd_reloc_code_real_type code;
9836 fixS *fix;
9837
9838 switch (nbytes)
9839 {
 9840 	    /* There are no relocs for 8 and 16 bit quantities, but we allow
9841 them here since they will work fine as long as the expression
9842 is fully defined at the end of the pass over the source file. */
9843 case 1: code = BFD_RELOC_8; break;
9844 case 2: code = BFD_RELOC_16; break;
9845 case 4:
9846 if (target_big_endian)
9847 code = BFD_RELOC_IA64_DIR32MSB;
9848 else
9849 code = BFD_RELOC_IA64_DIR32LSB;
9850 break;
9851
9852 case 8:
9853 if (target_big_endian)
9854 code = BFD_RELOC_IA64_DIR64MSB;
9855 else
9856 code = BFD_RELOC_IA64_DIR64LSB;
9857 break;
9858
9859 case 16:
9860 if (exp->X_op == O_pseudo_fixup
9861 && exp->X_op_symbol
9862 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
9863 {
9864 if (target_big_endian)
9865 code = BFD_RELOC_IA64_IPLTMSB;
9866 else
9867 code = BFD_RELOC_IA64_IPLTLSB;
9868
9869 exp->X_op = O_symbol;
9870 break;
9871 }
9872 /* FALLTHRU */
9873
9874 default:
9875 as_bad ("Unsupported fixup size %d", nbytes);
9876 ignore_rest_of_line ();
9877 return;
9878 }
9879 if (exp->X_op == O_pseudo_fixup)
9880 {
9881 /* ??? */
9882 exp->X_op = O_symbol;
9883 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
9884 }
9885
9886 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
9887 /* We need to store the byte order in effect in case we're going
 9888 	 to fix an 8 or 16 bit relocation (for which there are no real
9889 relocs available). See md_apply_fix3(). */
9890 fix->tc_fix_data.bigendian = target_big_endian;
9891 }
9892
9893 /* Return the actual relocation we wish to associate with the pseudo
9894 reloc described by SYM and R_TYPE. SYM should be one of the
9895 symbols in the pseudo_func array, or NULL. */
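/* For example, "@gprel()" applied to a fixup that would otherwise be
   BFD_RELOC_IA64_IMM22 becomes BFD_RELOC_IA64_GPREL22 below.  */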
9896
9897 static bfd_reloc_code_real_type
9898 ia64_gen_real_reloc_type (sym, r_type)
9899 struct symbol *sym;
9900 bfd_reloc_code_real_type r_type;
9901 {
9902 bfd_reloc_code_real_type new = 0;
9903
9904 if (sym == NULL)
9905 {
9906 return r_type;
9907 }
9908
9909 switch (S_GET_VALUE (sym))
9910 {
9911 case FUNC_FPTR_RELATIVE:
9912 switch (r_type)
9913 {
9914 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
9915 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
9916 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
9917 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
9918 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
9919 default: break;
9920 }
9921 break;
9922
9923 case FUNC_GP_RELATIVE:
9924 switch (r_type)
9925 {
9926 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
9927 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
9928 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
9929 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
9930 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
9931 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
9932 default: break;
9933 }
9934 break;
9935
9936 case FUNC_LT_RELATIVE:
9937 switch (r_type)
9938 {
9939 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
9940 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
9941 default: break;
9942 }
9943 break;
9944
9945 case FUNC_PC_RELATIVE:
9946 switch (r_type)
9947 {
9948 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
9949 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
9950 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
9951 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
9952 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
9953 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
9954 default: break;
9955 }
9956 break;
9957
9958 case FUNC_PLT_RELATIVE:
9959 switch (r_type)
9960 {
9961 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
9962 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
9963 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
9964 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
9965 default: break;
9966 }
9967 break;
9968
9969 case FUNC_SEC_RELATIVE:
9970 switch (r_type)
9971 {
9972 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
9973 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
9974 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
9975 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
9976 default: break;
9977 }
9978 break;
9979
9980 case FUNC_SEG_RELATIVE:
9981 switch (r_type)
9982 {
9983 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
9984 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
9985 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
9986 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
9987 default: break;
9988 }
9989 break;
9990
9991 case FUNC_LTV_RELATIVE:
9992 switch (r_type)
9993 {
9994 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
9995 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
9996 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
9997 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
9998 default: break;
9999 }
10000 break;
10001
10002 case FUNC_LT_FPTR_RELATIVE:
10003 switch (r_type)
10004 {
10005 case BFD_RELOC_IA64_IMM22:
10006 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10007 case BFD_RELOC_IA64_IMM64:
10008 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10009 default:
10010 break;
10011 }
10012 break;
10013
10014 default:
10015 abort ();
10016 }
10017 /* Hmmmm. Should this ever occur? */
10018 if (new)
10019 return new;
10020 else
10021 return r_type;
10022 }
10023
 10024 /* Here is where we generate the appropriate reloc for pseudo relocation
10025 functions. */
10026 void
10027 ia64_validate_fix (fix)
10028 fixS *fix;
10029 {
10030 switch (fix->fx_r_type)
10031 {
10032 case BFD_RELOC_IA64_FPTR64I:
10033 case BFD_RELOC_IA64_FPTR32MSB:
10034 case BFD_RELOC_IA64_FPTR64LSB:
10035 case BFD_RELOC_IA64_LTOFF_FPTR22:
10036 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10037 if (fix->fx_offset != 0)
10038 as_bad_where (fix->fx_file, fix->fx_line,
10039 "No addend allowed in @fptr() relocation");
10040 break;
10041 default:
10042 break;
10043 }
10044
10045 return;
10046 }
10047
10048 static void
10049 fix_insn (fix, odesc, value)
10050 fixS *fix;
10051 const struct ia64_operand *odesc;
10052 valueT value;
10053 {
10054 bfd_vma insn[3], t0, t1, control_bits;
10055 const char *err;
10056 char *fixpos;
10057 long slot;
10058
10059 slot = fix->fx_where & 0x3;
10060 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10061
10062 /* Bundles are always in little-endian byte order */
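  /* An IA-64 bundle is 128 bits: a 5-bit template field followed by three
     41-bit instruction slots.  The code below splits the two 64-bit halves
     into the template bits and the three slots so that the operand value
     can be patched into the slot FIX refers to.  */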
10063 t0 = bfd_getl64 (fixpos);
10064 t1 = bfd_getl64 (fixpos + 8);
10065 control_bits = t0 & 0x1f;
10066 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10067 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10068 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
10069
10070 err = NULL;
10071 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10072 {
10073 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10074 insn[2] |= (((value & 0x7f) << 13)
10075 | (((value >> 7) & 0x1ff) << 27)
10076 | (((value >> 16) & 0x1f) << 22)
10077 | (((value >> 21) & 0x1) << 21)
10078 | (((value >> 63) & 0x1) << 36));
10079 }
10080 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10081 {
10082 if (value & ~0x3fffffffffffffffULL)
10083 err = "integer operand out of range";
10084 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10085 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10086 }
10087 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10088 {
10089 value >>= 4;
10090 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10091 insn[2] |= ((((value >> 59) & 0x1) << 36)
10092 | (((value >> 0) & 0xfffff) << 13));
10093 }
10094 else
10095 err = (*odesc->insert) (odesc, value, insn + slot);
10096
10097 if (err)
10098 as_bad_where (fix->fx_file, fix->fx_line, err);
10099
10100 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
10101 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
10102 number_to_chars_littleendian (fixpos + 0, t0, 8);
10103 number_to_chars_littleendian (fixpos + 8, t1, 8);
10104 }
10105
10106 /* Attempt to simplify or even eliminate a fixup. The return value is
10107 ignored; perhaps it was once meaningful, but now it is historical.
10108 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
10109
10110 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
10111 (if possible). */
10112
10113 void
10114 md_apply_fix3 (fix, valP, seg)
10115 fixS *fix;
10116 valueT * valP;
10117 segT seg ATTRIBUTE_UNUSED;
10118 {
10119 char *fixpos;
10120 valueT value = * valP;
10121 int adjust = 0;
10122
10123 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
10124
10125 if (fix->fx_pcrel)
10126 {
10127 switch (fix->fx_r_type)
10128 {
10129 case BFD_RELOC_IA64_DIR32MSB:
10130 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10131 adjust = 1;
10132 break;
10133
10134 case BFD_RELOC_IA64_DIR32LSB:
10135 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10136 adjust = 1;
10137 break;
10138
10139 case BFD_RELOC_IA64_DIR64MSB:
10140 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10141 adjust = 1;
10142 break;
10143
10144 case BFD_RELOC_IA64_DIR64LSB:
10145 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10146 adjust = 1;
10147 break;
10148
10149 default:
10150 break;
10151 }
10152 }
10153 if (fix->fx_addsy)
10154 {
10155 if (fix->fx_r_type == (int) BFD_RELOC_UNUSED)
10156 {
10157 /* This must be a TAG13 or TAG13b operand. There are no external
10158 relocs defined for them, so we must give an error. */
10159 as_bad_where (fix->fx_file, fix->fx_line,
10160 "%s must have a constant value",
10161 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10162 fix->fx_done = 1;
10163 return;
10164 }
10165
10166 /* ??? This is a hack copied from tc-i386.c to make PCREL relocs
10167 work. There should be a better way to handle this. */
10168 if (adjust)
10169 fix->fx_offset += fix->fx_where + fix->fx_frag->fr_address;
10170 }
10171 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10172 {
10173 if (fix->tc_fix_data.bigendian)
10174 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10175 else
10176 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10177 fix->fx_done = 1;
10178 }
10179 else
10180 {
10181 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10182 fix->fx_done = 1;
10183 }
10184 }
10185
10186 /* Generate the BFD reloc to be stuck in the object file from the
10187 fixup used internally in the assembler. */
10188
10189 arelent *
10190 tc_gen_reloc (sec, fixp)
10191 asection *sec ATTRIBUTE_UNUSED;
10192 fixS *fixp;
10193 {
10194 arelent *reloc;
10195
10196 reloc = xmalloc (sizeof (*reloc));
10197 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10198 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10199 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10200 reloc->addend = fixp->fx_offset;
10201 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10202
10203 if (!reloc->howto)
10204 {
10205 as_bad_where (fixp->fx_file, fixp->fx_line,
10206 "Cannot represent %s relocation in object file",
10207 bfd_get_reloc_code_name (fixp->fx_r_type));
10208 }
10209 return reloc;
10210 }
10211
10212 /* Turn a string in input_line_pointer into a floating point constant
10213 of type TYPE, and store the appropriate bytes in *LIT. The number
10214 of LITTLENUMS emitted is stored in *SIZE. An error message is
10215 returned, or NULL on OK. */
10216
10217 #define MAX_LITTLENUMS 5
10218
10219 char *
10220 md_atof (type, lit, size)
10221 int type;
10222 char *lit;
10223 int *size;
10224 {
10225 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10226 LITTLENUM_TYPE *word;
10227 char *t;
10228 int prec;
10229
10230 switch (type)
10231 {
10232 /* IEEE floats */
10233 case 'f':
10234 case 'F':
10235 case 's':
10236 case 'S':
10237 prec = 2;
10238 break;
10239
10240 case 'd':
10241 case 'D':
10242 case 'r':
10243 case 'R':
10244 prec = 4;
10245 break;
10246
10247 case 'x':
10248 case 'X':
10249 case 'p':
10250 case 'P':
10251 prec = 5;
10252 break;
10253
10254 default:
10255 *size = 0;
10256 return "Bad call to MD_ATOF()";
10257 }
10258 t = atof_ieee (input_line_pointer, type, words);
10259 if (t)
10260 input_line_pointer = t;
10261 *size = prec * sizeof (LITTLENUM_TYPE);
10262
10263 for (word = words + prec - 1; prec--;)
10264 {
10265 md_number_to_chars (lit, (long) (*word--), sizeof (LITTLENUM_TYPE));
10266 lit += sizeof (LITTLENUM_TYPE);
10267 }
10268 return 0;
10269 }
10270
10271 /* Round up a section's size to the appropriate boundary. */
10272 valueT
10273 md_section_align (seg, size)
10274 segT seg;
10275 valueT size;
10276 {
10277 int align = bfd_get_section_alignment (stdoutput, seg);
10278 valueT mask = ((valueT) 1 << align) - 1;
10279
10280 return (size + mask) & ~mask;
10281 }
10282
10283 /* Handle ia64 specific semantics of the align directive. */
10284
10285 void
10286 ia64_md_do_align (n, fill, len, max)
10287 int n ATTRIBUTE_UNUSED;
10288 const char *fill ATTRIBUTE_UNUSED;
10289 int len ATTRIBUTE_UNUSED;
10290 int max ATTRIBUTE_UNUSED;
10291 {
10292 if (subseg_text_p (now_seg))
10293 ia64_flush_insns ();
10294 }
10295
10296 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10297 of an rs_align_code fragment. */
10298
10299 void
10300 ia64_handle_align (fragp)
10301 fragS *fragp;
10302 {
10303 /* Use mfi bundle of nops with no stop bits. */
10304 static const unsigned char be_nop[]
10305 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
10306 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
10307 static const unsigned char le_nop[]
10308 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10309 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
10310
10311 int bytes;
10312 char *p;
10313
10314 if (fragp->fr_type != rs_align_code)
10315 return;
10316
10317 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
10318 p = fragp->fr_literal + fragp->fr_fix;
10319
10320 /* Make sure we are on a 16-byte boundary, in case someone has been
10321 putting data into a text section. */
10322 if (bytes & 15)
10323 {
10324 int fix = bytes & 15;
10325 memset (p, 0, fix);
10326 p += fix;
10327 bytes -= fix;
10328 fragp->fr_fix += fix;
10329 }
10330
10331 memcpy (p, (target_big_endian ? be_nop : le_nop), 16);
10332 fragp->fr_var = 16;
10333 }