1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .alias
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52
53 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
54 #define MIN(a,b) ((a) < (b) ? (a) : (b))
55
56 #define NUM_SLOTS 4
57 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
58 #define CURR_SLOT md.slot[md.curr_slot]
59
60 #define O_pseudo_fixup (O_max + 1)
61
62 enum special_section
63 {
64 SPECIAL_SECTION_BSS = 0,
65 SPECIAL_SECTION_SBSS,
66 SPECIAL_SECTION_SDATA,
67 SPECIAL_SECTION_RODATA,
68 SPECIAL_SECTION_COMMENT,
69 SPECIAL_SECTION_UNWIND,
70 SPECIAL_SECTION_UNWIND_INFO
71 };
72
73 enum reloc_func
74 {
75 FUNC_FPTR_RELATIVE,
76 FUNC_GP_RELATIVE,
77 FUNC_LT_RELATIVE,
78 FUNC_PC_RELATIVE,
79 FUNC_PLT_RELATIVE,
80 FUNC_SEC_RELATIVE,
81 FUNC_SEG_RELATIVE,
82 FUNC_LTV_RELATIVE,
83 FUNC_LT_FPTR_RELATIVE,
84 };
85
86 enum reg_symbol
87 {
88 REG_GR = 0,
89 REG_FR = (REG_GR + 128),
90 REG_AR = (REG_FR + 128),
91 REG_CR = (REG_AR + 128),
92 REG_P = (REG_CR + 128),
93 REG_BR = (REG_P + 64),
94 REG_IP = (REG_BR + 8),
95 REG_CFM,
96 REG_PR,
97 REG_PR_ROT,
98 REG_PSR,
99 REG_PSR_L,
100 REG_PSR_UM,
101 /* The following are pseudo-registers for use by gas only. */
102 IND_CPUID,
103 IND_DBR,
104 IND_DTR,
105 IND_ITR,
106 IND_IBR,
107 IND_MEM,
108 IND_MSR,
109 IND_PKR,
110 IND_PMC,
111 IND_PMD,
112 IND_RR,
113 /* The following pseudo-registers are used for unwind directives only: */
114 REG_PSP,
115 REG_PRIUNAT,
116 REG_NUM
117 };
118
119 enum dynreg_type
120 {
121 DYNREG_GR = 0, /* dynamic general purpose register */
122 DYNREG_FR, /* dynamic floating point register */
123 DYNREG_PR, /* dynamic predicate register */
124 DYNREG_NUM_TYPES
125 };
126
127 enum operand_match_result
128 {
129 OPERAND_MATCH,
130 OPERAND_OUT_OF_RANGE,
131 OPERAND_MISMATCH
132 };
133
134 /* On the ia64, we can't know the address of a text label until the
135 instructions are packed into a bundle. To handle this, we keep
136 track of the list of labels that appear in front of each
137 instruction. */
138 struct label_fix
139 {
140 struct label_fix *next;
141 struct symbol *sym;
142 };
143
144 extern int target_big_endian;
145
146 /* Characters which always start a comment. */
147 const char comment_chars[] = "";
148
149 /* Characters which start a comment at the beginning of a line. */
150 const char line_comment_chars[] = "#";
151
152 /* Characters which may be used to separate multiple commands on a
153 single line. */
154 const char line_separator_chars[] = ";";
155
156 /* Characters which are used to indicate an exponent in a floating
157 point number. */
158 const char EXP_CHARS[] = "eE";
159
160 /* Characters which mean that a number is a floating point constant,
161 as in 0d1.0. */
162 const char FLT_CHARS[] = "rRsSfFdDxXpP";
163
164 /* ia64-specific option processing: */
165
166 const char *md_shortopts = "m:N:x::";
167
168 struct option md_longopts[] =
169 {
170 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
171 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
172 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
173 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
174 };
175
176 size_t md_longopts_size = sizeof (md_longopts);
177
178 static struct
179 {
180 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
181 struct hash_control *reg_hash; /* register name hash table */
182 struct hash_control *dynreg_hash; /* dynamic register hash table */
183 struct hash_control *const_hash; /* constant hash table */
184 struct hash_control *entry_hash; /* code entry hint hash table */
185
186 symbolS *regsym[REG_NUM];
187
188 /* If X_op is != O_absent, the register name for the instruction's
189 qualifying predicate. If NULL, p0 is assumed for instructions
190 that are predicatable. */
191 expressionS qp;
192
193 unsigned int
194 manual_bundling : 1,
195 debug_dv: 1,
196 detect_dv: 1,
197 explicit_mode : 1, /* which mode we're in */
198 default_explicit_mode : 1, /* which mode is the default */
199 mode_explicitly_set : 1, /* was the current mode explicitly set? */
200 auto_align : 1,
201 keep_pending_output : 1;
202
203 /* Each bundle consists of up to three instructions. We keep
204 track of the four most recent instructions so we can correctly set
205 the end_of_insn_group for the last instruction in a bundle. */
206 int curr_slot;
207 int num_slots_in_use;
208 struct slot
209 {
210 unsigned int
211 end_of_insn_group : 1,
212 manual_bundling_on : 1,
213 manual_bundling_off : 1;
214 signed char user_template; /* user-selected template, if any */
215 unsigned char qp_regno; /* qualifying predicate */
216 /* This duplicates a good fraction of "struct fix" but we
217 can't use a "struct fix" instead since we can't call
218 fix_new_exp() until we know the address of the instruction. */
219 int num_fixups;
220 struct insn_fix
221 {
222 bfd_reloc_code_real_type code;
223 enum ia64_opnd opnd; /* type of operand in need of fix */
224 unsigned int is_pcrel : 1; /* is operand pc-relative? */
225 expressionS expr; /* the value to be inserted */
226 }
227 fixup[2]; /* at most two fixups per insn */
228 struct ia64_opcode *idesc;
229 struct label_fix *label_fixups;
230 struct label_fix *tag_fixups;
231 struct unw_rec_list *unwind_record; /* Unwind directive. */
232 expressionS opnd[6];
233 char *src_file;
234 unsigned int src_line;
235 struct dwarf2_line_info debug_line;
236 }
237 slot[NUM_SLOTS];
238
239 segT last_text_seg;
240
241 struct dynreg
242 {
243 struct dynreg *next; /* next dynamic register */
244 const char *name;
245 unsigned short base; /* the base register number */
246 unsigned short num_regs; /* # of registers in this set */
247 }
248 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
249
250 flagword flags; /* ELF-header flags */
251
252 struct mem_offset {
253 unsigned hint:1; /* is this hint currently valid? */
254 bfd_vma offset; /* mem.offset offset */
255 bfd_vma base; /* mem.offset base */
256 } mem_offset;
257
258 int path; /* number of alt. entry points seen */
259 const char **entry_labels; /* labels of all alternate paths in
260 the current DV-checking block. */
261 int maxpaths; /* size currently allocated for
262 entry_labels */
263 /* Support for hardware errata workarounds. */
264
265 /* Record data about the last three insn groups. */
266 struct group
267 {
268 /* B-step workaround.
269 For each predicate register, this is set if the corresponding insn
270 group conditionally sets this register with one of the affected
271 instructions. */
272 int p_reg_set[64];
273 /* B-step workaround.
274 For each general register, this is set if the corresponding insn
275 a) is conditional on one of the predicate registers for which
276 P_REG_SET is 1 in the corresponding entry of the previous group,
277 b) sets this general register with one of the affected
278 instructions. */
279 int g_reg_set_conditionally[128];
280 } last_groups[3];
281 int group_idx;
282 }
283 md;
284
285 /* application registers: */
286
287 #define AR_K0 0
288 #define AR_K7 7
289 #define AR_RSC 16
290 #define AR_BSP 17
291 #define AR_BSPSTORE 18
292 #define AR_RNAT 19
293 #define AR_UNAT 36
294 #define AR_FPSR 40
295 #define AR_ITC 44
296 #define AR_PFS 64
297 #define AR_LC 65
298
299 static const struct
300 {
301 const char *name;
302 int regnum;
303 }
304 ar[] =
305 {
306 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
307 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
308 {"ar.rsc", 16}, {"ar.bsp", 17},
309 {"ar.bspstore", 18}, {"ar.rnat", 19},
310 {"ar.fcr", 21}, {"ar.eflag", 24},
311 {"ar.csd", 25}, {"ar.ssd", 26},
312 {"ar.cflg", 27}, {"ar.fsr", 28},
313 {"ar.fir", 29}, {"ar.fdr", 30},
314 {"ar.ccv", 32}, {"ar.unat", 36},
315 {"ar.fpsr", 40}, {"ar.itc", 44},
316 {"ar.pfs", 64}, {"ar.lc", 65},
317 {"ar.ec", 66},
318 };
319
320 #define CR_IPSR 16
321 #define CR_ISR 17
322 #define CR_IIP 19
323 #define CR_IFA 20
324 #define CR_ITIR 21
325 #define CR_IIPA 22
326 #define CR_IFS 23
327 #define CR_IIM 24
328 #define CR_IHA 25
329 #define CR_IVR 65
330 #define CR_TPR 66
331 #define CR_EOI 67
332 #define CR_IRR0 68
333 #define CR_IRR3 71
334 #define CR_LRR0 80
335 #define CR_LRR1 81
336
337 /* control registers: */
338 static const struct
339 {
340 const char *name;
341 int regnum;
342 }
343 cr[] =
344 {
345 {"cr.dcr", 0},
346 {"cr.itm", 1},
347 {"cr.iva", 2},
348 {"cr.pta", 8},
349 {"cr.gpta", 9},
350 {"cr.ipsr", 16},
351 {"cr.isr", 17},
352 {"cr.iip", 19},
353 {"cr.ifa", 20},
354 {"cr.itir", 21},
355 {"cr.iipa", 22},
356 {"cr.ifs", 23},
357 {"cr.iim", 24},
358 {"cr.iha", 25},
359 {"cr.lid", 64},
360 {"cr.ivr", 65},
361 {"cr.tpr", 66},
362 {"cr.eoi", 67},
363 {"cr.irr0", 68},
364 {"cr.irr1", 69},
365 {"cr.irr2", 70},
366 {"cr.irr3", 71},
367 {"cr.itv", 72},
368 {"cr.pmv", 73},
369 {"cr.cmcv", 74},
370 {"cr.lrr0", 80},
371 {"cr.lrr1", 81}
372 };
373
374 #define PSR_MFL 4
375 #define PSR_IC 13
376 #define PSR_DFL 18
377 #define PSR_CPL 32
378
379 static const struct const_desc
380 {
381 const char *name;
382 valueT value;
383 }
384 const_bits[] =
385 {
386 /* PSR constant masks: */
387
388 /* 0: reserved */
389 {"psr.be", ((valueT) 1) << 1},
390 {"psr.up", ((valueT) 1) << 2},
391 {"psr.ac", ((valueT) 1) << 3},
392 {"psr.mfl", ((valueT) 1) << 4},
393 {"psr.mfh", ((valueT) 1) << 5},
394 /* 6-12: reserved */
395 {"psr.ic", ((valueT) 1) << 13},
396 {"psr.i", ((valueT) 1) << 14},
397 {"psr.pk", ((valueT) 1) << 15},
398 /* 16: reserved */
399 {"psr.dt", ((valueT) 1) << 17},
400 {"psr.dfl", ((valueT) 1) << 18},
401 {"psr.dfh", ((valueT) 1) << 19},
402 {"psr.sp", ((valueT) 1) << 20},
403 {"psr.pp", ((valueT) 1) << 21},
404 {"psr.di", ((valueT) 1) << 22},
405 {"psr.si", ((valueT) 1) << 23},
406 {"psr.db", ((valueT) 1) << 24},
407 {"psr.lp", ((valueT) 1) << 25},
408 {"psr.tb", ((valueT) 1) << 26},
409 {"psr.rt", ((valueT) 1) << 27},
410 /* 28-31: reserved */
411 /* 32-33: cpl (current privilege level) */
412 {"psr.is", ((valueT) 1) << 34},
413 {"psr.mc", ((valueT) 1) << 35},
414 {"psr.it", ((valueT) 1) << 36},
415 {"psr.id", ((valueT) 1) << 37},
416 {"psr.da", ((valueT) 1) << 38},
417 {"psr.dd", ((valueT) 1) << 39},
418 {"psr.ss", ((valueT) 1) << 40},
419 /* 41-42: ri (restart instruction) */
420 {"psr.ed", ((valueT) 1) << 43},
421 {"psr.bn", ((valueT) 1) << 44},
422 };
423
424 /* indirect register-sets/memory: */
425
426 static const struct
427 {
428 const char *name;
429 int regnum;
430 }
431 indirect_reg[] =
432 {
433 { "CPUID", IND_CPUID },
434 { "cpuid", IND_CPUID },
435 { "dbr", IND_DBR },
436 { "dtr", IND_DTR },
437 { "itr", IND_ITR },
438 { "ibr", IND_IBR },
439 { "msr", IND_MSR },
440 { "pkr", IND_PKR },
441 { "pmc", IND_PMC },
442 { "pmd", IND_PMD },
443 { "rr", IND_RR },
444 };
445
446 /* Pseudo functions used to indicate relocation types (these functions
447 start with an at sign (@)).  */
448 static struct
449 {
450 const char *name;
451 enum pseudo_type
452 {
453 PSEUDO_FUNC_NONE,
454 PSEUDO_FUNC_RELOC,
455 PSEUDO_FUNC_CONST,
456 PSEUDO_FUNC_REG,
457 PSEUDO_FUNC_FLOAT
458 }
459 type;
460 union
461 {
462 unsigned long ival;
463 symbolS *sym;
464 }
465 u;
466 }
467 pseudo_func[] =
468 {
469 /* reloc pseudo functions (these must come first!): */
470 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
471 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
472 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
473 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
474 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
475 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
476 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
477 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
478 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
479
480 /* mbtype4 constants: */
481 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
482 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
483 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
484 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
485 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
486
487 /* fclass constants: */
488 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
489 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
490 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
491 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
492 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
493 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
494 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
495 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
496 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
497
498 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
499
500 /* unwind-related constants: */
501 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
502 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
503 { "nt", PSEUDO_FUNC_CONST, { 2 } },
504
505 /* unwind-related registers: */
506 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
507 };
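/* For example, operands such as @gprel(sym) or @ltoff(sym) ("sym" being just
   an illustrative symbol name) pick up the "gprel" and "ltoff"
   PSEUDO_FUNC_RELOC entries above to select the relocation to emit.  */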
508
509 /* 41-bit nop opcodes (one per unit): */
510 static const bfd_vma nop[IA64_NUM_UNITS] =
511 {
512 0x0000000000LL, /* NIL => break 0 */
513 0x0008000000LL, /* I-unit nop */
514 0x0008000000LL, /* M-unit nop */
515 0x4000000000LL, /* B-unit nop */
516 0x0008000000LL, /* F-unit nop */
517 0x0008000000LL, /* L-"unit" nop */
518 0x0008000000LL, /* X-unit nop */
519 };
520
521 /* Can't be `const' as it's passed to input routines (which have the
522 habit of setting temporary sentinels).  */
523 static char special_section_name[][20] =
524 {
525 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
526 {".IA_64.unwind"}, {".IA_64.unwind_info"}
527 };
528
529 static char *special_linkonce_name[] =
530 {
531 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
532 };
533
534 /* The best template for a particular sequence of up to three
535 instructions: */
536 #define N IA64_NUM_TYPES
537 static unsigned char best_template[N][N][N];
538 #undef N
539
540 /* Resource dependencies currently in effect */
541 static struct rsrc {
542 int depind; /* dependency index */
543 const struct ia64_dependency *dependency; /* actual dependency */
544 unsigned specific:1, /* is this a specific bit/regno? */
545 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
546 int index; /* specific regno/bit within dependency */
547 int note; /* optional qualifying note (0 if none) */
548 #define STATE_NONE 0
549 #define STATE_STOP 1
550 #define STATE_SRLZ 2
551 int insn_srlz; /* current insn serialization state */
552 int data_srlz; /* current data serialization state */
553 int qp_regno; /* qualifying predicate for this usage */
554 char *file; /* what file marked this dependency */
555 unsigned int line; /* what line marked this dependency */
556 struct mem_offset mem_offset; /* optional memory offset hint */
557 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
558 int path; /* corresponding code entry index */
559 } *regdeps = NULL;
560 static int regdepslen = 0;
561 static int regdepstotlen = 0;
562 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
563 static const char *dv_sem[] = { "none", "implied", "impliedf",
564 "data", "instr", "specific", "stop", "other" };
565 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
566
567 /* Current state of PR mutexation */
568 static struct qpmutex {
569 valueT prmask;
570 int path;
571 } *qp_mutexes = NULL; /* QP mutex bitmasks */
572 static int qp_mutexeslen = 0;
573 static int qp_mutexestotlen = 0;
574 static valueT qp_safe_across_calls = 0;
575
576 /* Current state of PR implications */
577 static struct qp_imply {
578 unsigned p1:6;
579 unsigned p2:6;
580 unsigned p2_branched:1;
581 int path;
582 } *qp_implies = NULL;
583 static int qp_implieslen = 0;
584 static int qp_impliestotlen = 0;
585
586 /* Keep track of static GR values so that indirect register usage can
587 sometimes be tracked. */
588 static struct gr {
589 unsigned known:1;
590 int path;
591 valueT value;
592 } gr_values[128] = {{ 1, 0, 0 }};
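/* Only gr_values[0] is statically initialized above: it describes r0, which
   always reads as zero, so it starts out "known" with value 0.  */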
593
594 /* These are the routines required to output the various types of
595 unwind records. */
596
597 /* A slot_number is a frag address plus the slot index (0-2). We use the
598 frag address here so that if there is a section switch in the middle of
599 a function, then instructions emitted to a different section are not
600 counted. Since there may be more than one frag for a function, this
601 means we also need to keep track of which frag this address belongs to
602 so we can compute inter-frag distances. This also nicely solves the
603 problem with nops emitted for align directives, which can't easily be
604 counted, but can easily be derived from frag sizes. */
605
606 typedef struct unw_rec_list {
607 unwind_record r;
608 unsigned long slot_number;
609 fragS *slot_frag;
610 struct unw_rec_list *next;
611 } unw_rec_list;
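/* A sketch of the encoding described above: the insn in slot 2 of a bundle
   whose bytes begin at address ADDR within slot_frag gets slot_number
   ADDR + 2, so distances between slots in the same frag are plain
   differences of slot_number values.  */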
612
613 #define SLOT_NUM_NOT_SET (unsigned)-1
614
615 static struct
616 {
617 unsigned long next_slot_number;
618 fragS *next_slot_frag;
619
620 /* Maintain a list of unwind entries for the current function. */
621 unw_rec_list *list;
622 unw_rec_list *tail;
623
624 /* Any unwind entries that should be attached to the current slot
625 that an insn is being constructed for. */
626 unw_rec_list *current_entry;
627
628 /* These are used to create the unwind table entry for this function. */
629 symbolS *proc_start;
630 symbolS *proc_end;
631 symbolS *info; /* pointer to unwind info */
632 symbolS *personality_routine;
633 segT saved_text_seg;
634 subsegT saved_text_subseg;
635 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
636
637 /* TRUE if processing unwind directives in a prologue region. */
638 int prologue;
639 int prologue_mask;
640 unsigned int prologue_count; /* number of .prologues seen so far */
641 } unwind;
642
643 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
644
645 /* Forward declarations: */
646 static int ar_is_in_integer_unit PARAMS ((int regnum));
647 static void set_section PARAMS ((char *name));
648 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
649 unsigned int, unsigned int));
650 static void dot_radix PARAMS ((int));
651 static void dot_special_section PARAMS ((int));
652 static void dot_proc PARAMS ((int));
653 static void dot_fframe PARAMS ((int));
654 static void dot_vframe PARAMS ((int));
655 static void dot_vframesp PARAMS ((int));
656 static void dot_vframepsp PARAMS ((int));
657 static void dot_save PARAMS ((int));
658 static void dot_restore PARAMS ((int));
659 static void dot_restorereg PARAMS ((int));
660 static void dot_restorereg_p PARAMS ((int));
661 static void dot_handlerdata PARAMS ((int));
662 static void dot_unwentry PARAMS ((int));
663 static void dot_altrp PARAMS ((int));
664 static void dot_savemem PARAMS ((int));
665 static void dot_saveg PARAMS ((int));
666 static void dot_savef PARAMS ((int));
667 static void dot_saveb PARAMS ((int));
668 static void dot_savegf PARAMS ((int));
669 static void dot_spill PARAMS ((int));
670 static void dot_spillreg PARAMS ((int));
671 static void dot_spillmem PARAMS ((int));
672 static void dot_spillreg_p PARAMS ((int));
673 static void dot_spillmem_p PARAMS ((int));
674 static void dot_label_state PARAMS ((int));
675 static void dot_copy_state PARAMS ((int));
676 static void dot_unwabi PARAMS ((int));
677 static void dot_personality PARAMS ((int));
678 static void dot_body PARAMS ((int));
679 static void dot_prologue PARAMS ((int));
680 static void dot_endp PARAMS ((int));
681 static void dot_template PARAMS ((int));
682 static void dot_regstk PARAMS ((int));
683 static void dot_rot PARAMS ((int));
684 static void dot_byteorder PARAMS ((int));
685 static void dot_psr PARAMS ((int));
686 static void dot_alias PARAMS ((int));
687 static void dot_ln PARAMS ((int));
688 static char *parse_section_name PARAMS ((void));
689 static void dot_xdata PARAMS ((int));
690 static void stmt_float_cons PARAMS ((int));
691 static void stmt_cons_ua PARAMS ((int));
692 static void dot_xfloat_cons PARAMS ((int));
693 static void dot_xstringer PARAMS ((int));
694 static void dot_xdata_ua PARAMS ((int));
695 static void dot_xfloat_cons_ua PARAMS ((int));
696 static void print_prmask PARAMS ((valueT mask));
697 static void dot_pred_rel PARAMS ((int));
698 static void dot_reg_val PARAMS ((int));
699 static void dot_dv_mode PARAMS ((int));
700 static void dot_entry PARAMS ((int));
701 static void dot_mem_offset PARAMS ((int));
702 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
703 static symbolS *declare_register PARAMS ((const char *name, int regnum));
704 static void declare_register_set PARAMS ((const char *, int, int));
705 static unsigned int operand_width PARAMS ((enum ia64_opnd));
706 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
707 int index,
708 expressionS *e));
709 static int parse_operand PARAMS ((expressionS *e));
710 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
711 static void build_insn PARAMS ((struct slot *, bfd_vma *));
712 static void emit_one_bundle PARAMS ((void));
713 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
714 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
715 bfd_reloc_code_real_type r_type));
716 static void insn_group_break PARAMS ((int, int, int));
717 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
718 struct rsrc *, int depind, int path));
719 static void add_qp_mutex PARAMS((valueT mask));
720 static void add_qp_imply PARAMS((int p1, int p2));
721 static void clear_qp_branch_flag PARAMS((valueT mask));
722 static void clear_qp_mutex PARAMS((valueT mask));
723 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
724 static void clear_register_values PARAMS ((void));
725 static void print_dependency PARAMS ((const char *action, int depind));
726 static void instruction_serialization PARAMS ((void));
727 static void data_serialization PARAMS ((void));
728 static void remove_marked_resource PARAMS ((struct rsrc *));
729 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
730 static int is_taken_branch PARAMS ((struct ia64_opcode *));
731 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
732 static int depends_on PARAMS ((int, struct ia64_opcode *));
733 static int specify_resource PARAMS ((const struct ia64_dependency *,
734 struct ia64_opcode *, int, struct rsrc [], int, int));
735 static int check_dv PARAMS((struct ia64_opcode *idesc));
736 static void check_dependencies PARAMS((struct ia64_opcode *));
737 static void mark_resources PARAMS((struct ia64_opcode *));
738 static void update_dependencies PARAMS((struct ia64_opcode *));
739 static void note_register_values PARAMS((struct ia64_opcode *));
740 static int qp_mutex PARAMS ((int, int, int));
741 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
742 static void output_vbyte_mem PARAMS ((int, char *, char *));
743 static void count_output PARAMS ((int, char *, char *));
744 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
745 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
746 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
747 static void output_P1_format PARAMS ((vbyte_func, int));
748 static void output_P2_format PARAMS ((vbyte_func, int, int));
749 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
750 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
751 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
752 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
753 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
754 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
755 static void output_P9_format PARAMS ((vbyte_func, int, int));
756 static void output_P10_format PARAMS ((vbyte_func, int, int));
757 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
758 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
759 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
760 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
761 static char format_ab_reg PARAMS ((int, int));
762 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
763 unsigned long));
764 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
765 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
766 unsigned long));
767 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
768 static void free_list_records PARAMS ((unw_rec_list *));
769 static unw_rec_list *output_prologue PARAMS ((void));
770 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
771 static unw_rec_list *output_body PARAMS ((void));
772 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
773 static unw_rec_list *output_mem_stack_v PARAMS ((void));
774 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
775 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
776 static unw_rec_list *output_rp_when PARAMS ((void));
777 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
778 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
779 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
780 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
781 static unw_rec_list *output_pfs_when PARAMS ((void));
782 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
783 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
784 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
785 static unw_rec_list *output_preds_when PARAMS ((void));
786 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
787 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
788 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
789 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
790 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
791 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
792 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
793 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
794 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
795 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
796 static unw_rec_list *output_unat_when PARAMS ((void));
797 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
798 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
799 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
800 static unw_rec_list *output_lc_when PARAMS ((void));
801 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
802 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
803 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
804 static unw_rec_list *output_fpsr_when PARAMS ((void));
805 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
806 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
807 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
808 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
809 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
810 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
811 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
812 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
813 static unw_rec_list *output_bsp_when PARAMS ((void));
814 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
815 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
816 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
817 static unw_rec_list *output_bspstore_when PARAMS ((void));
818 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
819 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
820 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
821 static unw_rec_list *output_rnat_when PARAMS ((void));
822 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
823 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
824 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
825 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
826 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
827 static unw_rec_list *output_label_state PARAMS ((unsigned long));
828 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
829 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
830 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
831 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
832 unsigned int));
833 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
834 unsigned int));
835 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
836 unsigned int));
837 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
838 unsigned int, unsigned int));
839 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
840 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
841 static int calc_record_size PARAMS ((unw_rec_list *));
842 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
843 static int count_bits PARAMS ((unsigned long));
844 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
845 unsigned long, fragS *));
846 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
847 static void fixup_unw_records PARAMS ((unw_rec_list *));
848 static int output_unw_records PARAMS ((unw_rec_list *, void **));
849 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
850 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
851 static int generate_unwind_image PARAMS ((const char *));
852
853 /* Build the unwind section name by appending the (possibly stripped)
854 text section NAME to the unwind PREFIX. The resulting string
855 pointer is assigned to RESULT. The string is allocated on the
856 stack, so this must be a macro... */
857 #define make_unw_section_name(special, text_name, result) \
858 { \
859 char *_prefix = special_section_name[special]; \
860 char *_suffix = text_name; \
861 size_t _prefix_len, _suffix_len; \
862 char *_result; \
863 if (strncmp (text_name, ".gnu.linkonce.t.", \
864 sizeof (".gnu.linkonce.t.") - 1) == 0) \
865 { \
866 _prefix = special_linkonce_name[special - SPECIAL_SECTION_UNWIND]; \
867 _suffix += sizeof (".gnu.linkonce.t.") - 1; \
868 } \
869 _prefix_len = strlen (_prefix), _suffix_len = strlen (_suffix); \
870 _result = alloca (_prefix_len + _suffix_len + 1); \
871 memcpy(_result, _prefix, _prefix_len); \
872 memcpy(_result + _prefix_len, _suffix, _suffix_len); \
873 _result[_prefix_len + _suffix_len] = '\0'; \
874 result = _result; \
875 } \
876 while (0)
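/* For illustration, a plain ".text" section yields ".IA_64.unwind.text"
   (or ".IA_64.unwind_info.text"), while ".gnu.linkonce.t.foo" maps to
   ".gnu.linkonce.ia64unw.foo" ("foo" is just an example name).  */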
877
878 /* Determine if application register REGNUM resides in the integer
879 unit (as opposed to the memory unit). */
880 static int
881 ar_is_in_integer_unit (reg)
882 int reg;
883 {
884 reg -= REG_AR;
885
886 return (reg == 64 /* pfs */
887 || reg == 65 /* lc */
888 || reg == 66 /* ec */
889 /* ??? ias accepts and puts these in the integer unit. */
890 || (reg >= 112 && reg <= 127));
891 }
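/* Thus ar.pfs (64), ar.lc (65) and ar.ec (66) are treated as integer-unit
   application registers, while e.g. ar.unat (36) and ar.fpsr (40) are not.  */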
892
893 /* Switch to section NAME and create section if necessary. It's
894 rather ugly that we have to manipulate input_line_pointer but I
895 don't see any other way to accomplish the same thing without
896 changing obj-elf.c (which may be the Right Thing, in the end). */
897 static void
898 set_section (name)
899 char *name;
900 {
901 char *saved_input_line_pointer;
902
903 saved_input_line_pointer = input_line_pointer;
904 input_line_pointer = name;
905 obj_elf_section (0);
906 input_line_pointer = saved_input_line_pointer;
907 }
908
909 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
910
911 flagword
912 ia64_elf_section_flags (flags, attr, type)
913 flagword flags;
914 int attr, type ATTRIBUTE_UNUSED;
915 {
916 if (attr & SHF_IA_64_SHORT)
917 flags |= SEC_SMALL_DATA;
918 return flags;
919 }
920
921 int
922 ia64_elf_section_type (str, len)
923 const char *str;
924 size_t len;
925 {
926 len = sizeof (ELF_STRING_ia64_unwind_info) - 1;
927 if (strncmp (str, ELF_STRING_ia64_unwind_info, len) == 0)
928 return SHT_PROGBITS;
929
930 len = sizeof (ELF_STRING_ia64_unwind_info_once) - 1;
931 if (strncmp (str, ELF_STRING_ia64_unwind_info_once, len) == 0)
932 return SHT_PROGBITS;
933
934 len = sizeof (ELF_STRING_ia64_unwind) - 1;
935 if (strncmp (str, ELF_STRING_ia64_unwind, len) == 0)
936 return SHT_IA_64_UNWIND;
937
938 len = sizeof (ELF_STRING_ia64_unwind_once) - 1;
939 if (strncmp (str, ELF_STRING_ia64_unwind_once, len) == 0)
940 return SHT_IA_64_UNWIND;
941
942 return -1;
943 }
944
945 static unsigned int
946 set_regstack (ins, locs, outs, rots)
947 unsigned int ins, locs, outs, rots;
948 {
949 /* Size of frame. */
950 unsigned int sof;
951
952 sof = ins + locs + outs;
953 if (sof > 96)
954 {
955 as_bad ("Size of frame exceeds maximum of 96 registers");
956 return 0;
957 }
958 if (rots > sof)
959 {
960 as_warn ("Size of rotating registers exceeds frame size");
961 return 0;
962 }
963 md.in.base = REG_GR + 32;
964 md.loc.base = md.in.base + ins;
965 md.out.base = md.loc.base + locs;
966
967 md.in.num_regs = ins;
968 md.loc.num_regs = locs;
969 md.out.num_regs = outs;
970 md.rot.num_regs = rots;
971 return sof;
972 }
973
974 void
975 ia64_flush_insns ()
976 {
977 struct label_fix *lfix;
978 segT saved_seg;
979 subsegT saved_subseg;
980 unw_rec_list *ptr;
981
982 if (!md.last_text_seg)
983 return;
984
985 saved_seg = now_seg;
986 saved_subseg = now_subseg;
987
988 subseg_set (md.last_text_seg, 0);
989
990 while (md.num_slots_in_use > 0)
991 emit_one_bundle (); /* force out queued instructions */
992
993 /* In case there are labels following the last instruction, resolve
994 those now: */
995 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
996 {
997 S_SET_VALUE (lfix->sym, frag_now_fix ());
998 symbol_set_frag (lfix->sym, frag_now);
999 }
1000 CURR_SLOT.label_fixups = 0;
1001 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1002 {
1003 S_SET_VALUE (lfix->sym, frag_now_fix ());
1004 symbol_set_frag (lfix->sym, frag_now);
1005 }
1006 CURR_SLOT.tag_fixups = 0;
1007
1008 /* In case there are unwind directives following the last instruction,
1009 resolve those now. We only handle body and prologue directives here.
1010 Give an error for others. */
1011 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1012 {
1013 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
1014 || ptr->r.type == body)
1015 {
1016 ptr->slot_number = (unsigned long) frag_more (0);
1017 ptr->slot_frag = frag_now;
1018 }
1019 else
1020 as_bad (_("Unwind directive not followed by an instruction."));
1021 }
1022 unwind.current_entry = NULL;
1023
1024 subseg_set (saved_seg, saved_subseg);
1025
1026 if (md.qp.X_op == O_register)
1027 as_bad ("qualifying predicate not followed by instruction");
1028 }
1029
1030 void
1031 ia64_do_align (nbytes)
1032 int nbytes;
1033 {
1034 char *saved_input_line_pointer = input_line_pointer;
1035
1036 input_line_pointer = "";
1037 s_align_bytes (nbytes);
1038 input_line_pointer = saved_input_line_pointer;
1039 }
1040
1041 void
1042 ia64_cons_align (nbytes)
1043 int nbytes;
1044 {
1045 if (md.auto_align)
1046 {
1047 char *saved_input_line_pointer = input_line_pointer;
1048 input_line_pointer = "";
1049 s_align_bytes (nbytes);
1050 input_line_pointer = saved_input_line_pointer;
1051 }
1052 }
1053
1054 /* Output COUNT bytes to a memory location. */
1055 static unsigned char *vbyte_mem_ptr = NULL;
1056
1057 void
1058 output_vbyte_mem (count, ptr, comment)
1059 int count;
1060 char *ptr;
1061 char *comment ATTRIBUTE_UNUSED;
1062 {
1063 int x;
1064 if (vbyte_mem_ptr == NULL)
1065 abort ();
1066
1067 if (count == 0)
1068 return;
1069 for (x = 0; x < count; x++)
1070 *(vbyte_mem_ptr++) = ptr[x];
1071 }
1072
1073 /* Count the number of bytes required for records. */
1074 static int vbyte_count = 0;
1075 void
1076 count_output (count, ptr, comment)
1077 int count;
1078 char *ptr ATTRIBUTE_UNUSED;
1079 char *comment ATTRIBUTE_UNUSED;
1080 {
1081 vbyte_count += count;
1082 }
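/* Together these two vbyte_func callbacks support a two-pass scheme: the
   unwind records are first walked with count_output to size the unwind
   image, then walked again with output_vbyte_mem to emit the bytes.  */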
1083
1084 static void
1085 output_R1_format (f, rtype, rlen)
1086 vbyte_func f;
1087 unw_record_type rtype;
1088 int rlen;
1089 {
1090 int r = 0;
1091 char byte;
1092 if (rlen > 0x1f)
1093 {
1094 output_R3_format (f, rtype, rlen);
1095 return;
1096 }
1097
1098 if (rtype == body)
1099 r = 1;
1100 else if (rtype != prologue)
1101 as_bad ("record type is not valid");
1102
1103 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1104 (*f) (1, &byte, NULL);
1105 }
1106
1107 static void
1108 output_R2_format (f, mask, grsave, rlen)
1109 vbyte_func f;
1110 int mask, grsave;
1111 unsigned long rlen;
1112 {
1113 char bytes[20];
1114 int count = 2;
1115 mask = (mask & 0x0f);
1116 grsave = (grsave & 0x7f);
1117
1118 bytes[0] = (UNW_R2 | (mask >> 1));
1119 bytes[1] = (((mask & 0x01) << 7) | grsave);
1120 count += output_leb128 (bytes + 2, rlen, 0);
1121 (*f) (count, bytes, NULL);
1122 }
1123
1124 static void
1125 output_R3_format (f, rtype, rlen)
1126 vbyte_func f;
1127 unw_record_type rtype;
1128 unsigned long rlen;
1129 {
1130 int r = 0, count;
1131 char bytes[20];
1132 if (rlen <= 0x1f)
1133 {
1134 output_R1_format (f, rtype, rlen);
1135 return;
1136 }
1137
1138 if (rtype == body)
1139 r = 1;
1140 else if (rtype != prologue)
1141 as_bad ("record type is not valid");
1142 bytes[0] = (UNW_R3 | r);
1143 count = output_leb128 (bytes + 1, rlen, 0);
1144 (*f) (count + 1, bytes, NULL);
1145 }
1146
1147 static void
1148 output_P1_format (f, brmask)
1149 vbyte_func f;
1150 int brmask;
1151 {
1152 char byte;
1153 byte = UNW_P1 | (brmask & 0x1f);
1154 (*f) (1, &byte, NULL);
1155 }
1156
1157 static void
1158 output_P2_format (f, brmask, gr)
1159 vbyte_func f;
1160 int brmask;
1161 int gr;
1162 {
1163 char bytes[2];
1164 brmask = (brmask & 0x1f);
1165 bytes[0] = UNW_P2 | (brmask >> 1);
1166 bytes[1] = (((brmask & 1) << 7) | gr);
1167 (*f) (2, bytes, NULL);
1168 }
1169
1170 static void
1171 output_P3_format (f, rtype, reg)
1172 vbyte_func f;
1173 unw_record_type rtype;
1174 int reg;
1175 {
1176 char bytes[2];
1177 int r = 0;
1178 reg = (reg & 0x7f);
1179 switch (rtype)
1180 {
1181 case psp_gr:
1182 r = 0;
1183 break;
1184 case rp_gr:
1185 r = 1;
1186 break;
1187 case pfs_gr:
1188 r = 2;
1189 break;
1190 case preds_gr:
1191 r = 3;
1192 break;
1193 case unat_gr:
1194 r = 4;
1195 break;
1196 case lc_gr:
1197 r = 5;
1198 break;
1199 case rp_br:
1200 r = 6;
1201 break;
1202 case rnat_gr:
1203 r = 7;
1204 break;
1205 case bsp_gr:
1206 r = 8;
1207 break;
1208 case bspstore_gr:
1209 r = 9;
1210 break;
1211 case fpsr_gr:
1212 r = 10;
1213 break;
1214 case priunat_gr:
1215 r = 11;
1216 break;
1217 default:
1218 as_bad ("Invalid record type for P3 format.");
1219 }
1220 bytes[0] = (UNW_P3 | (r >> 1));
1221 bytes[1] = (((r & 1) << 7) | reg);
1222 (*f) (2, bytes, NULL);
1223 }
1224
1225 static void
1226 output_P4_format (f, imask, imask_size)
1227 vbyte_func f;
1228 unsigned char *imask;
1229 unsigned long imask_size;
1230 {
1231 imask[0] = UNW_P4;
1232 (*f) (imask_size, imask, NULL);
1233 }
1234
1235 static void
1236 output_P5_format (f, grmask, frmask)
1237 vbyte_func f;
1238 int grmask;
1239 unsigned long frmask;
1240 {
1241 char bytes[4];
1242 grmask = (grmask & 0x0f);
1243
1244 bytes[0] = UNW_P5;
1245 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1246 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1247 bytes[3] = (frmask & 0x000000ff);
1248 (*f) (4, bytes, NULL);
1249 }
1250
1251 static void
1252 output_P6_format (f, rtype, rmask)
1253 vbyte_func f;
1254 unw_record_type rtype;
1255 int rmask;
1256 {
1257 char byte;
1258 int r = 0;
1259
1260 if (rtype == gr_mem)
1261 r = 1;
1262 else if (rtype != fr_mem)
1263 as_bad ("Invalid record type for format P6");
1264 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1265 (*f) (1, &byte, NULL);
1266 }
1267
1268 static void
1269 output_P7_format (f, rtype, w1, w2)
1270 vbyte_func f;
1271 unw_record_type rtype;
1272 unsigned long w1;
1273 unsigned long w2;
1274 {
1275 char bytes[20];
1276 int count = 1;
1277 int r = 0;
1278 count += output_leb128 (bytes + 1, w1, 0);
1279 switch (rtype)
1280 {
1281 case mem_stack_f:
1282 r = 0;
1283 count += output_leb128 (bytes + count, w2 >> 4, 0);
1284 break;
1285 case mem_stack_v:
1286 r = 1;
1287 break;
1288 case spill_base:
1289 r = 2;
1290 break;
1291 case psp_sprel:
1292 r = 3;
1293 break;
1294 case rp_when:
1295 r = 4;
1296 break;
1297 case rp_psprel:
1298 r = 5;
1299 break;
1300 case pfs_when:
1301 r = 6;
1302 break;
1303 case pfs_psprel:
1304 r = 7;
1305 break;
1306 case preds_when:
1307 r = 8;
1308 break;
1309 case preds_psprel:
1310 r = 9;
1311 break;
1312 case lc_when:
1313 r = 10;
1314 break;
1315 case lc_psprel:
1316 r = 11;
1317 break;
1318 case unat_when:
1319 r = 12;
1320 break;
1321 case unat_psprel:
1322 r = 13;
1323 break;
1324 case fpsr_when:
1325 r = 14;
1326 break;
1327 case fpsr_psprel:
1328 r = 15;
1329 break;
1330 default:
1331 break;
1332 }
1333 bytes[0] = (UNW_P7 | r);
1334 (*f) (count, bytes, NULL);
1335 }
1336
1337 static void
1338 output_P8_format (f, rtype, t)
1339 vbyte_func f;
1340 unw_record_type rtype;
1341 unsigned long t;
1342 {
1343 char bytes[20];
1344 int r = 0;
1345 int count = 2;
1346 bytes[0] = UNW_P8;
1347 switch (rtype)
1348 {
1349 case rp_sprel:
1350 r = 1;
1351 break;
1352 case pfs_sprel:
1353 r = 2;
1354 break;
1355 case preds_sprel:
1356 r = 3;
1357 break;
1358 case lc_sprel:
1359 r = 4;
1360 break;
1361 case unat_sprel:
1362 r = 5;
1363 break;
1364 case fpsr_sprel:
1365 r = 6;
1366 break;
1367 case bsp_when:
1368 r = 7;
1369 break;
1370 case bsp_psprel:
1371 r = 8;
1372 break;
1373 case bsp_sprel:
1374 r = 9;
1375 break;
1376 case bspstore_when:
1377 r = 10;
1378 break;
1379 case bspstore_psprel:
1380 r = 11;
1381 break;
1382 case bspstore_sprel:
1383 r = 12;
1384 break;
1385 case rnat_when:
1386 r = 13;
1387 break;
1388 case rnat_psprel:
1389 r = 14;
1390 break;
1391 case rnat_sprel:
1392 r = 15;
1393 break;
1394 case priunat_when_gr:
1395 r = 16;
1396 break;
1397 case priunat_psprel:
1398 r = 17;
1399 break;
1400 case priunat_sprel:
1401 r = 18;
1402 break;
1403 case priunat_when_mem:
1404 r = 19;
1405 break;
1406 default:
1407 break;
1408 }
1409 bytes[1] = r;
1410 count += output_leb128 (bytes + 2, t, 0);
1411 (*f) (count, bytes, NULL);
1412 }
1413
1414 static void
1415 output_P9_format (f, grmask, gr)
1416 vbyte_func f;
1417 int grmask;
1418 int gr;
1419 {
1420 char bytes[3];
1421 bytes[0] = UNW_P9;
1422 bytes[1] = (grmask & 0x0f);
1423 bytes[2] = (gr & 0x7f);
1424 (*f) (3, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P10_format (f, abi, context)
1429 vbyte_func f;
1430 int abi;
1431 int context;
1432 {
1433 char bytes[3];
1434 bytes[0] = UNW_P10;
1435 bytes[1] = (abi & 0xff);
1436 bytes[2] = (context & 0xff);
1437 (*f) (3, bytes, NULL);
1438 }
1439
1440 static void
1441 output_B1_format (f, rtype, label)
1442 vbyte_func f;
1443 unw_record_type rtype;
1444 unsigned long label;
1445 {
1446 char byte;
1447 int r = 0;
1448 if (label > 0x1f)
1449 {
1450 output_B4_format (f, rtype, label);
1451 return;
1452 }
1453 if (rtype == copy_state)
1454 r = 1;
1455 else if (rtype != label_state)
1456 as_bad ("Invalid record type for format B1");
1457
1458 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1459 (*f) (1, &byte, NULL);
1460 }
1461
1462 static void
1463 output_B2_format (f, ecount, t)
1464 vbyte_func f;
1465 unsigned long ecount;
1466 unsigned long t;
1467 {
1468 char bytes[20];
1469 int count = 1;
1470 if (ecount > 0x1f)
1471 {
1472 output_B3_format (f, ecount, t);
1473 return;
1474 }
1475 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1476 count += output_leb128 (bytes + 1, t, 0);
1477 (*f) (count, bytes, NULL);
1478 }
1479
1480 static void
1481 output_B3_format (f, ecount, t)
1482 vbyte_func f;
1483 unsigned long ecount;
1484 unsigned long t;
1485 {
1486 char bytes[20];
1487 int count = 1;
1488 if (ecount <= 0x1f)
1489 {
1490 output_B2_format (f, ecount, t);
1491 return;
1492 }
1493 bytes[0] = UNW_B3;
1494 count += output_leb128 (bytes + 1, t, 0);
1495 count += output_leb128 (bytes + count, ecount, 0);
1496 (*f) (count, bytes, NULL);
1497 }
1498
1499 static void
1500 output_B4_format (f, rtype, label)
1501 vbyte_func f;
1502 unw_record_type rtype;
1503 unsigned long label;
1504 {
1505 char bytes[20];
1506 int r = 0;
1507 int count = 1;
1508 if (label <= 0x1f)
1509 {
1510 output_B1_format (f, rtype, label);
1511 return;
1512 }
1513
1514 if (rtype == copy_state)
1515 r = 1;
1516 else if (rtype != label_state)
1517   else if (rtype != label_state)
1518     as_bad ("Invalid record type for format B4");
1518
1519 bytes[0] = (UNW_B4 | (r << 3));
1520 count += output_leb128 (bytes + 1, label, 0);
1521 (*f) (count, bytes, NULL);
1522 }
1523
1524 static char
1525 format_ab_reg (ab, reg)
1526 int ab;
1527 int reg;
1528 {
1529 int ret;
1530 ab = (ab & 3);
1531 reg = (reg & 0x1f);
1532 ret = (ab << 5) | reg;
1533 return ret;
1534 }
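/* E.g. format_ab_reg (1, 3) yields 0x23: the two "ab" bits land in bits 5-6
   and the five "reg" bits in bits 0-4.  */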
1535
1536 static void
1537 output_X1_format (f, rtype, ab, reg, t, w1)
1538 vbyte_func f;
1539 unw_record_type rtype;
1540 int ab, reg;
1541 unsigned long t;
1542 unsigned long w1;
1543 {
1544 char bytes[20];
1545 int r = 0;
1546 int count = 2;
1547 bytes[0] = UNW_X1;
1548
1549 if (rtype == spill_sprel)
1550 r = 1;
1551 else if (rtype != spill_psprel)
1552 as_bad ("Invalid record type for format X1");
1553 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1554 count += output_leb128 (bytes + 2, t, 0);
1555 count += output_leb128 (bytes + count, w1, 0);
1556 (*f) (count, bytes, NULL);
1557 }
1558
1559 static void
1560 output_X2_format (f, ab, reg, x, y, treg, t)
1561 vbyte_func f;
1562 int ab, reg;
1563 int x, y, treg;
1564 unsigned long t;
1565 {
1566 char bytes[20];
1567 int count = 3;
1568 bytes[0] = UNW_X2;
1569 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1570 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1571 count += output_leb128 (bytes + 3, t, 0);
1572 (*f) (count, bytes, NULL);
1573 }
1574
1575 static void
1576 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1577 vbyte_func f;
1578 unw_record_type rtype;
1579 int qp;
1580 int ab, reg;
1581 unsigned long t;
1582 unsigned long w1;
1583 {
1584 char bytes[20];
1585 int r = 0;
1586 int count = 3;
1587 bytes[0] = UNW_X3;
1588
1589 if (rtype == spill_sprel_p)
1590 r = 1;
1591 else if (rtype != spill_psprel_p)
1592 as_bad ("Invalid record type for format X3");
1593 bytes[1] = ((r << 7) | (qp & 0x3f));
1594 bytes[2] = format_ab_reg (ab, reg);
1595 count += output_leb128 (bytes + 3, t, 0);
1596 count += output_leb128 (bytes + count, w1, 0);
1597 (*f) (count, bytes, NULL);
1598 }
1599
1600 static void
1601 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1602 vbyte_func f;
1603 int qp;
1604 int ab, reg;
1605 int x, y, treg;
1606 unsigned long t;
1607 {
1608 char bytes[20];
1609 int count = 4;
1610 bytes[0] = UNW_X4;
1611 bytes[1] = (qp & 0x3f);
1612 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1613 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1614 count += output_leb128 (bytes + 4, t, 0);
1615 (*f) (count, bytes, NULL);
1616 }
1617
1618 /* This function allocates a record list structure and initializes its fields.  */
1619
1620 static unw_rec_list *
1621 alloc_record (unw_record_type t)
1622 {
1623 unw_rec_list *ptr;
1624 ptr = xmalloc (sizeof (*ptr));
1625 ptr->next = NULL;
1626 ptr->slot_number = SLOT_NUM_NOT_SET;
1627 ptr->r.type = t;
1628 return ptr;
1629 }
1630
1631 /* This function frees an entire list of record structures. */
1632
1633 void
1634 free_list_records (unw_rec_list *first)
1635 {
1636 unw_rec_list *ptr;
1637 for (ptr = first; ptr != NULL;)
1638 {
1639 unw_rec_list *tmp = ptr;
1640
1641 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1642 && tmp->r.record.r.mask.i)
1643 free (tmp->r.record.r.mask.i);
1644
1645 ptr = ptr->next;
1646 free (tmp);
1647 }
1648 }
1649
1650 static unw_rec_list *
1651 output_prologue ()
1652 {
1653 unw_rec_list *ptr = alloc_record (prologue);
1654 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1655 return ptr;
1656 }
1657
1658 static unw_rec_list *
1659 output_prologue_gr (saved_mask, reg)
1660 unsigned int saved_mask;
1661 unsigned int reg;
1662 {
1663 unw_rec_list *ptr = alloc_record (prologue_gr);
1664 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1665 ptr->r.record.r.grmask = saved_mask;
1666 ptr->r.record.r.grsave = reg;
1667 return ptr;
1668 }
1669
1670 static unw_rec_list *
1671 output_body ()
1672 {
1673 unw_rec_list *ptr = alloc_record (body);
1674 return ptr;
1675 }
1676
1677 static unw_rec_list *
1678 output_mem_stack_f (size)
1679 unsigned int size;
1680 {
1681 unw_rec_list *ptr = alloc_record (mem_stack_f);
1682 ptr->r.record.p.size = size;
1683 return ptr;
1684 }
1685
1686 static unw_rec_list *
1687 output_mem_stack_v ()
1688 {
1689 unw_rec_list *ptr = alloc_record (mem_stack_v);
1690 return ptr;
1691 }
1692
1693 static unw_rec_list *
1694 output_psp_gr (gr)
1695 unsigned int gr;
1696 {
1697 unw_rec_list *ptr = alloc_record (psp_gr);
1698 ptr->r.record.p.gr = gr;
1699 return ptr;
1700 }
1701
1702 static unw_rec_list *
1703 output_psp_sprel (offset)
1704 unsigned int offset;
1705 {
1706 unw_rec_list *ptr = alloc_record (psp_sprel);
1707 ptr->r.record.p.spoff = offset / 4;
1708 return ptr;
1709 }
1710
1711 static unw_rec_list *
1712 output_rp_when ()
1713 {
1714 unw_rec_list *ptr = alloc_record (rp_when);
1715 return ptr;
1716 }
1717
1718 static unw_rec_list *
1719 output_rp_gr (gr)
1720 unsigned int gr;
1721 {
1722 unw_rec_list *ptr = alloc_record (rp_gr);
1723 ptr->r.record.p.gr = gr;
1724 return ptr;
1725 }
1726
1727 static unw_rec_list *
1728 output_rp_br (br)
1729 unsigned int br;
1730 {
1731 unw_rec_list *ptr = alloc_record (rp_br);
1732 ptr->r.record.p.br = br;
1733 return ptr;
1734 }
1735
1736 static unw_rec_list *
1737 output_rp_psprel (offset)
1738 unsigned int offset;
1739 {
1740 unw_rec_list *ptr = alloc_record (rp_psprel);
1741 ptr->r.record.p.pspoff = offset / 4;
1742 return ptr;
1743 }
1744
1745 static unw_rec_list *
1746 output_rp_sprel (offset)
1747 unsigned int offset;
1748 {
1749 unw_rec_list *ptr = alloc_record (rp_sprel);
1750 ptr->r.record.p.spoff = offset / 4;
1751 return ptr;
1752 }
1753
1754 static unw_rec_list *
1755 output_pfs_when ()
1756 {
1757 unw_rec_list *ptr = alloc_record (pfs_when);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_pfs_gr (gr)
1763 unsigned int gr;
1764 {
1765 unw_rec_list *ptr = alloc_record (pfs_gr);
1766 ptr->r.record.p.gr = gr;
1767 return ptr;
1768 }
1769
1770 static unw_rec_list *
1771 output_pfs_psprel (offset)
1772 unsigned int offset;
1773 {
1774 unw_rec_list *ptr = alloc_record (pfs_psprel);
1775 ptr->r.record.p.pspoff = offset / 4;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_pfs_sprel (offset)
1781 unsigned int offset;
1782 {
1783 unw_rec_list *ptr = alloc_record (pfs_sprel);
1784 ptr->r.record.p.spoff = offset / 4;
1785 return ptr;
1786 }
1787
1788 static unw_rec_list *
1789 output_preds_when ()
1790 {
1791 unw_rec_list *ptr = alloc_record (preds_when);
1792 return ptr;
1793 }
1794
1795 static unw_rec_list *
1796 output_preds_gr (gr)
1797 unsigned int gr;
1798 {
1799 unw_rec_list *ptr = alloc_record (preds_gr);
1800 ptr->r.record.p.gr = gr;
1801 return ptr;
1802 }
1803
1804 static unw_rec_list *
1805 output_preds_psprel (offset)
1806 unsigned int offset;
1807 {
1808 unw_rec_list *ptr = alloc_record (preds_psprel);
1809 ptr->r.record.p.pspoff = offset / 4;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_preds_sprel (offset)
1815 unsigned int offset;
1816 {
1817 unw_rec_list *ptr = alloc_record (preds_sprel);
1818 ptr->r.record.p.spoff = offset / 4;
1819 return ptr;
1820 }
1821
1822 static unw_rec_list *
1823 output_fr_mem (mask)
1824 unsigned int mask;
1825 {
1826 unw_rec_list *ptr = alloc_record (fr_mem);
1827 ptr->r.record.p.rmask = mask;
1828 return ptr;
1829 }
1830
1831 static unw_rec_list *
1832 output_frgr_mem (gr_mask, fr_mask)
1833 unsigned int gr_mask;
1834 unsigned int fr_mask;
1835 {
1836 unw_rec_list *ptr = alloc_record (frgr_mem);
1837 ptr->r.record.p.grmask = gr_mask;
1838 ptr->r.record.p.frmask = fr_mask;
1839 return ptr;
1840 }
1841
1842 static unw_rec_list *
1843 output_gr_gr (mask, reg)
1844 unsigned int mask;
1845 unsigned int reg;
1846 {
1847 unw_rec_list *ptr = alloc_record (gr_gr);
1848 ptr->r.record.p.grmask = mask;
1849 ptr->r.record.p.gr = reg;
1850 return ptr;
1851 }
1852
1853 static unw_rec_list *
1854 output_gr_mem (mask)
1855 unsigned int mask;
1856 {
1857 unw_rec_list *ptr = alloc_record (gr_mem);
1858 ptr->r.record.p.rmask = mask;
1859 return ptr;
1860 }
1861
1862 static unw_rec_list *
1863 output_br_mem (unsigned int mask)
1864 {
1865 unw_rec_list *ptr = alloc_record (br_mem);
1866 ptr->r.record.p.brmask = mask;
1867 return ptr;
1868 }
1869
1870 static unw_rec_list *
1871 output_br_gr (save_mask, reg)
1872 unsigned int save_mask;
1873 unsigned int reg;
1874 {
1875 unw_rec_list *ptr = alloc_record (br_gr);
1876 ptr->r.record.p.brmask = save_mask;
1877 ptr->r.record.p.gr = reg;
1878 return ptr;
1879 }
1880
1881 static unw_rec_list *
1882 output_spill_base (offset)
1883 unsigned int offset;
1884 {
1885 unw_rec_list *ptr = alloc_record (spill_base);
1886 ptr->r.record.p.pspoff = offset / 4;
1887 return ptr;
1888 }
1889
1890 static unw_rec_list *
1891 output_unat_when ()
1892 {
1893 unw_rec_list *ptr = alloc_record (unat_when);
1894 return ptr;
1895 }
1896
1897 static unw_rec_list *
1898 output_unat_gr (gr)
1899 unsigned int gr;
1900 {
1901 unw_rec_list *ptr = alloc_record (unat_gr);
1902 ptr->r.record.p.gr = gr;
1903 return ptr;
1904 }
1905
1906 static unw_rec_list *
1907 output_unat_psprel (offset)
1908 unsigned int offset;
1909 {
1910 unw_rec_list *ptr = alloc_record (unat_psprel);
1911 ptr->r.record.p.pspoff = offset / 4;
1912 return ptr;
1913 }
1914
1915 static unw_rec_list *
1916 output_unat_sprel (offset)
1917 unsigned int offset;
1918 {
1919 unw_rec_list *ptr = alloc_record (unat_sprel);
1920 ptr->r.record.p.spoff = offset / 4;
1921 return ptr;
1922 }
1923
1924 static unw_rec_list *
1925 output_lc_when ()
1926 {
1927 unw_rec_list *ptr = alloc_record (lc_when);
1928 return ptr;
1929 }
1930
1931 static unw_rec_list *
1932 output_lc_gr (gr)
1933 unsigned int gr;
1934 {
1935 unw_rec_list *ptr = alloc_record (lc_gr);
1936 ptr->r.record.p.gr = gr;
1937 return ptr;
1938 }
1939
1940 static unw_rec_list *
1941 output_lc_psprel (offset)
1942 unsigned int offset;
1943 {
1944 unw_rec_list *ptr = alloc_record (lc_psprel);
1945 ptr->r.record.p.pspoff = offset / 4;
1946 return ptr;
1947 }
1948
1949 static unw_rec_list *
1950 output_lc_sprel (offset)
1951 unsigned int offset;
1952 {
1953 unw_rec_list *ptr = alloc_record (lc_sprel);
1954 ptr->r.record.p.spoff = offset / 4;
1955 return ptr;
1956 }
1957
1958 static unw_rec_list *
1959 output_fpsr_when ()
1960 {
1961 unw_rec_list *ptr = alloc_record (fpsr_when);
1962 return ptr;
1963 }
1964
1965 static unw_rec_list *
1966 output_fpsr_gr (gr)
1967 unsigned int gr;
1968 {
1969 unw_rec_list *ptr = alloc_record (fpsr_gr);
1970 ptr->r.record.p.gr = gr;
1971 return ptr;
1972 }
1973
1974 static unw_rec_list *
1975 output_fpsr_psprel (offset)
1976 unsigned int offset;
1977 {
1978 unw_rec_list *ptr = alloc_record (fpsr_psprel);
1979 ptr->r.record.p.pspoff = offset / 4;
1980 return ptr;
1981 }
1982
1983 static unw_rec_list *
1984 output_fpsr_sprel (offset)
1985 unsigned int offset;
1986 {
1987 unw_rec_list *ptr = alloc_record (fpsr_sprel);
1988 ptr->r.record.p.spoff = offset / 4;
1989 return ptr;
1990 }
1991
1992 static unw_rec_list *
1993 output_priunat_when_gr ()
1994 {
1995 unw_rec_list *ptr = alloc_record (priunat_when_gr);
1996 return ptr;
1997 }
1998
1999 static unw_rec_list *
2000 output_priunat_when_mem ()
2001 {
2002 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2003 return ptr;
2004 }
2005
2006 static unw_rec_list *
2007 output_priunat_gr (gr)
2008 unsigned int gr;
2009 {
2010 unw_rec_list *ptr = alloc_record (priunat_gr);
2011 ptr->r.record.p.gr = gr;
2012 return ptr;
2013 }
2014
2015 static unw_rec_list *
2016 output_priunat_psprel (offset)
2017 unsigned int offset;
2018 {
2019 unw_rec_list *ptr = alloc_record (priunat_psprel);
2020 ptr->r.record.p.pspoff = offset / 4;
2021 return ptr;
2022 }
2023
2024 static unw_rec_list *
2025 output_priunat_sprel (offset)
2026 unsigned int offset;
2027 {
2028 unw_rec_list *ptr = alloc_record (priunat_sprel);
2029 ptr->r.record.p.spoff = offset / 4;
2030 return ptr;
2031 }
2032
2033 static unw_rec_list *
2034 output_bsp_when ()
2035 {
2036 unw_rec_list *ptr = alloc_record (bsp_when);
2037 return ptr;
2038 }
2039
2040 static unw_rec_list *
2041 output_bsp_gr (gr)
2042 unsigned int gr;
2043 {
2044 unw_rec_list *ptr = alloc_record (bsp_gr);
2045 ptr->r.record.p.gr = gr;
2046 return ptr;
2047 }
2048
2049 static unw_rec_list *
2050 output_bsp_psprel (offset)
2051 unsigned int offset;
2052 {
2053 unw_rec_list *ptr = alloc_record (bsp_psprel);
2054 ptr->r.record.p.pspoff = offset / 4;
2055 return ptr;
2056 }
2057
2058 static unw_rec_list *
2059 output_bsp_sprel (offset)
2060 unsigned int offset;
2061 {
2062 unw_rec_list *ptr = alloc_record (bsp_sprel);
2063 ptr->r.record.p.spoff = offset / 4;
2064 return ptr;
2065 }
2066
2067 static unw_rec_list *
2068 output_bspstore_when ()
2069 {
2070 unw_rec_list *ptr = alloc_record (bspstore_when);
2071 return ptr;
2072 }
2073
2074 static unw_rec_list *
2075 output_bspstore_gr (gr)
2076 unsigned int gr;
2077 {
2078 unw_rec_list *ptr = alloc_record (bspstore_gr);
2079 ptr->r.record.p.gr = gr;
2080 return ptr;
2081 }
2082
2083 static unw_rec_list *
2084 output_bspstore_psprel (offset)
2085 unsigned int offset;
2086 {
2087 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2088 ptr->r.record.p.pspoff = offset / 4;
2089 return ptr;
2090 }
2091
2092 static unw_rec_list *
2093 output_bspstore_sprel (offset)
2094 unsigned int offset;
2095 {
2096 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2097 ptr->r.record.p.spoff = offset / 4;
2098 return ptr;
2099 }
2100
2101 static unw_rec_list *
2102 output_rnat_when ()
2103 {
2104 unw_rec_list *ptr = alloc_record (rnat_when);
2105 return ptr;
2106 }
2107
2108 static unw_rec_list *
2109 output_rnat_gr (gr)
2110 unsigned int gr;
2111 {
2112 unw_rec_list *ptr = alloc_record (rnat_gr);
2113 ptr->r.record.p.gr = gr;
2114 return ptr;
2115 }
2116
2117 static unw_rec_list *
2118 output_rnat_psprel (offset)
2119 unsigned int offset;
2120 {
2121 unw_rec_list *ptr = alloc_record (rnat_psprel);
2122 ptr->r.record.p.pspoff = offset / 4;
2123 return ptr;
2124 }
2125
2126 static unw_rec_list *
2127 output_rnat_sprel (offset)
2128 unsigned int offset;
2129 {
2130 unw_rec_list *ptr = alloc_record (rnat_sprel);
2131 ptr->r.record.p.spoff = offset / 4;
2132 return ptr;
2133 }
2134
2135 static unw_rec_list *
2136 output_unwabi (abi, context)
2137 unsigned long abi;
2138 unsigned long context;
2139 {
2140 unw_rec_list *ptr = alloc_record (unwabi);
2141 ptr->r.record.p.abi = abi;
2142 ptr->r.record.p.context = context;
2143 return ptr;
2144 }
2145
2146 static unw_rec_list *
2147 output_epilogue (unsigned long ecount)
2148 {
2149 unw_rec_list *ptr = alloc_record (epilogue);
2150 ptr->r.record.b.ecount = ecount;
2151 return ptr;
2152 }
2153
2154 static unw_rec_list *
2155 output_label_state (unsigned long label)
2156 {
2157 unw_rec_list *ptr = alloc_record (label_state);
2158 ptr->r.record.b.label = label;
2159 return ptr;
2160 }
2161
2162 static unw_rec_list *
2163 output_copy_state (unsigned long label)
2164 {
2165 unw_rec_list *ptr = alloc_record (copy_state);
2166 ptr->r.record.b.label = label;
2167 return ptr;
2168 }
2169
2170 static unw_rec_list *
2171 output_spill_psprel (ab, reg, offset)
2172 unsigned int ab;
2173 unsigned int reg;
2174 unsigned int offset;
2175 {
2176 unw_rec_list *ptr = alloc_record (spill_psprel);
2177 ptr->r.record.x.ab = ab;
2178 ptr->r.record.x.reg = reg;
2179 ptr->r.record.x.pspoff = offset / 4;
2180 return ptr;
2181 }
2182
2183 static unw_rec_list *
2184 output_spill_sprel (ab, reg, offset)
2185 unsigned int ab;
2186 unsigned int reg;
2187 unsigned int offset;
2188 {
2189 unw_rec_list *ptr = alloc_record (spill_sprel);
2190 ptr->r.record.x.ab = ab;
2191 ptr->r.record.x.reg = reg;
2192 ptr->r.record.x.spoff = offset / 4;
2193 return ptr;
2194 }
2195
2196 static unw_rec_list *
2197 output_spill_psprel_p (ab, reg, offset, predicate)
2198 unsigned int ab;
2199 unsigned int reg;
2200 unsigned int offset;
2201 unsigned int predicate;
2202 {
2203 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2204 ptr->r.record.x.ab = ab;
2205 ptr->r.record.x.reg = reg;
2206 ptr->r.record.x.pspoff = offset / 4;
2207 ptr->r.record.x.qp = predicate;
2208 return ptr;
2209 }
2210
2211 static unw_rec_list *
2212 output_spill_sprel_p (ab, reg, offset, predicate)
2213 unsigned int ab;
2214 unsigned int reg;
2215 unsigned int offset;
2216 unsigned int predicate;
2217 {
2218 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2219 ptr->r.record.x.ab = ab;
2220 ptr->r.record.x.reg = reg;
2221 ptr->r.record.x.spoff = offset / 4;
2222 ptr->r.record.x.qp = predicate;
2223 return ptr;
2224 }
2225
2226 static unw_rec_list *
2227 output_spill_reg (ab, reg, targ_reg, xy)
2228 unsigned int ab;
2229 unsigned int reg;
2230 unsigned int targ_reg;
2231 unsigned int xy;
2232 {
2233 unw_rec_list *ptr = alloc_record (spill_reg);
2234 ptr->r.record.x.ab = ab;
2235 ptr->r.record.x.reg = reg;
2236 ptr->r.record.x.treg = targ_reg;
2237 ptr->r.record.x.xy = xy;
2238 return ptr;
2239 }
2240
2241 static unw_rec_list *
2242 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2243 unsigned int ab;
2244 unsigned int reg;
2245 unsigned int targ_reg;
2246 unsigned int xy;
2247 unsigned int predicate;
2248 {
2249 unw_rec_list *ptr = alloc_record (spill_reg_p);
2250 ptr->r.record.x.ab = ab;
2251 ptr->r.record.x.reg = reg;
2252 ptr->r.record.x.treg = targ_reg;
2253 ptr->r.record.x.xy = xy;
2254 ptr->r.record.x.qp = predicate;
2255 return ptr;
2256 }
2257
2258 /* Given an unw_rec_list record, process it into the correct output format
2259 with the specified function. */
2260
2261 static void
2262 process_one_record (ptr, f)
2263 unw_rec_list *ptr;
2264 vbyte_func f;
2265 {
2266 unsigned long fr_mask, gr_mask;
2267
2268 switch (ptr->r.type)
2269 {
2270 case gr_mem:
2271 case fr_mem:
2272 case br_mem:
2273 case frgr_mem:
2274 /* These are taken care of by prologue/prologue_gr. */
2275 break;
2276
2277 case prologue_gr:
2278 case prologue:
2279 if (ptr->r.type == prologue_gr)
2280 output_R2_format (f, ptr->r.record.r.grmask,
2281 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2282 else
2283 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2284
2285 /* Output descriptor(s) for union of register spills (if any). */
2286 gr_mask = ptr->r.record.r.mask.gr_mem;
2287 fr_mask = ptr->r.record.r.mask.fr_mem;
2288 if (fr_mask)
2289 {
2290 if ((fr_mask & ~0xfUL) == 0)
2291 output_P6_format (f, fr_mem, fr_mask);
2292 else
2293 {
2294 output_P5_format (f, gr_mask, fr_mask);
2295 gr_mask = 0;
2296 }
2297 }
2298 if (gr_mask)
2299 output_P6_format (f, gr_mem, gr_mask);
2300 if (ptr->r.record.r.mask.br_mem)
2301 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2302
2303 /* output imask descriptor if necessary: */
2304 if (ptr->r.record.r.mask.i)
2305 output_P4_format (f, ptr->r.record.r.mask.i,
2306 ptr->r.record.r.imask_size);
2307 break;
2308
2309 case body:
2310 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2311 break;
2312 case mem_stack_f:
2313 case mem_stack_v:
2314 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2315 ptr->r.record.p.size);
2316 break;
2317 case psp_gr:
2318 case rp_gr:
2319 case pfs_gr:
2320 case preds_gr:
2321 case unat_gr:
2322 case lc_gr:
2323 case fpsr_gr:
2324 case priunat_gr:
2325 case bsp_gr:
2326 case bspstore_gr:
2327 case rnat_gr:
2328 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2329 break;
2330 case rp_br:
2331 output_P3_format (f, rp_br, ptr->r.record.p.br);
2332 break;
2333 case psp_sprel:
2334 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2335 break;
2336 case rp_when:
2337 case pfs_when:
2338 case preds_when:
2339 case unat_when:
2340 case lc_when:
2341 case fpsr_when:
2342 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2343 break;
2344 case rp_psprel:
2345 case pfs_psprel:
2346 case preds_psprel:
2347 case unat_psprel:
2348 case lc_psprel:
2349 case fpsr_psprel:
2350 case spill_base:
2351 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2352 break;
2353 case rp_sprel:
2354 case pfs_sprel:
2355 case preds_sprel:
2356 case unat_sprel:
2357 case lc_sprel:
2358 case fpsr_sprel:
2359 case priunat_sprel:
2360 case bsp_sprel:
2361 case bspstore_sprel:
2362 case rnat_sprel:
2363 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2364 break;
2365 case gr_gr:
2366 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2367 break;
2368 case br_gr:
2369 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2370 break;
2371 case spill_mask:
2372 as_bad ("spill_mask record unimplemented.");
2373 break;
2374 case priunat_when_gr:
2375 case priunat_when_mem:
2376 case bsp_when:
2377 case bspstore_when:
2378 case rnat_when:
2379 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2380 break;
2381 case priunat_psprel:
2382 case bsp_psprel:
2383 case bspstore_psprel:
2384 case rnat_psprel:
2385 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2386 break;
2387 case unwabi:
2388 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2389 break;
2390 case epilogue:
2391 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2392 break;
2393 case label_state:
2394 case copy_state:
2395 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2396 break;
2397 case spill_psprel:
2398 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2399 ptr->r.record.x.reg, ptr->r.record.x.t,
2400 ptr->r.record.x.pspoff);
2401 break;
2402 case spill_sprel:
2403 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2404 ptr->r.record.x.reg, ptr->r.record.x.t,
2405 ptr->r.record.x.spoff);
2406 break;
2407 case spill_reg:
2408 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2409 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2410 ptr->r.record.x.treg, ptr->r.record.x.t);
2411 break;
2412 case spill_psprel_p:
2413 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2414 ptr->r.record.x.ab, ptr->r.record.x.reg,
2415 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2416 break;
2417 case spill_sprel_p:
2418 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2419 ptr->r.record.x.ab, ptr->r.record.x.reg,
2420 ptr->r.record.x.t, ptr->r.record.x.spoff);
2421 break;
2422 case spill_reg_p:
2423 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2424 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2425 ptr->r.record.x.xy, ptr->r.record.x.treg,
2426 ptr->r.record.x.t);
2427 break;
2428 default:
2429 as_bad ("record_type_not_valid");
2430 break;
2431 }
2432 }
2433
2434 /* Given a unw_rec_list list, process all the records with
2435 the specified function. */
2436 static void
2437 process_unw_records (list, f)
2438 unw_rec_list *list;
2439 vbyte_func f;
2440 {
2441 unw_rec_list *ptr;
2442 for (ptr = list; ptr; ptr = ptr->next)
2443 process_one_record (ptr, f);
2444 }
2445
2446 /* Determine the size of a record list in bytes. */
2447 static int
2448 calc_record_size (list)
2449 unw_rec_list *list;
2450 {
2451 vbyte_count = 0;
2452 process_unw_records (list, count_output);
2453 return vbyte_count;
2454 }
2455
2456 /* Update IMASK bitmask to reflect the fact that one or more registers
2457 of type TYPE are saved starting at instruction with index T. If N
2458 bits are set in REGMASK, it is assumed that instructions T through
2459 T+N-1 save these registers.
2460
2461 TYPE values:
2462 0: no save
2463 1: instruction saves next fp reg
2464 2: instruction saves next general reg
2465 3: instruction saves next branch reg */
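/* Layout sketch, as implied by the code below: imask entries are 2-bit
   fields packed four per byte, most significant pair first, starting at
   imask[1].  For example, slot T = 5 lands in imask[5/4 + 1] = imask[2]
   at bit position 2 * (3 - 5 % 4) = 4, i.e. bits 5:4 of that byte.  */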
2466 static void
2467 set_imask (region, regmask, t, type)
2468 unw_rec_list *region;
2469 unsigned long regmask;
2470 unsigned long t;
2471 unsigned int type;
2472 {
2473 unsigned char *imask;
2474 unsigned long imask_size;
2475 unsigned int i;
2476 int pos;
2477
2478 imask = region->r.record.r.mask.i;
2479 imask_size = region->r.record.r.imask_size;
2480 if (!imask)
2481 {
2482 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2483 imask = xmalloc (imask_size);
2484 memset (imask, 0, imask_size);
2485
2486 region->r.record.r.imask_size = imask_size;
2487 region->r.record.r.mask.i = imask;
2488 }
2489
2490 i = (t / 4) + 1;
2491 pos = 2 * (3 - t % 4);
2492 while (regmask)
2493 {
2494 if (i >= imask_size)
2495 {
2496 as_bad ("Ignoring attempt to spill beyond end of region");
2497 return;
2498 }
2499
2500 imask[i] |= (type & 0x3) << pos;
2501
2502 regmask &= (regmask - 1);
2503 pos -= 2;
2504 if (pos < 0)
2505 {
2506 pos = 6;
2507 ++i;
2508 }
2509 }
2510 }
2511
2512 static int
2513 count_bits (unsigned long mask)
2514 {
2515 int n = 0;
2516
2517 while (mask)
2518 {
2519 mask &= mask - 1;
2520 ++n;
2521 }
2522 return n;
2523 }
2524
2525 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2526 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2527 containing FIRST_ADDR. */
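/* A worked example, assuming the encoding used below: each 16-byte bundle
   holds three instruction slots and a slot address carries the slot number
   in its low two bits, so going from slot 1 of one bundle to slot 0 of the
   bundle two bundles later spans 3 * 2 + (0 - 1) = 5 slots.  */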
2528
2529 unsigned long
2530 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2531 unsigned long slot_addr;
2532 fragS *slot_frag;
2533 unsigned long first_addr;
2534 fragS *first_frag;
2535 {
2536 unsigned long index = 0;
2537
2538 /* First time we are called, the initial address and frag are invalid. */
2539 if (first_addr == 0)
2540 return 0;
2541
2542 /* If the two addresses are in different frags, then we need to add in
2543 the remaining size of this frag, and then the entire size of intermediate
2544 frags. */
2545 while (slot_frag != first_frag)
2546 {
2547 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2548
2549 /* Add in the full size of the frag converted to instruction slots. */
2550 index += 3 * (first_frag->fr_fix >> 4);
2551 /* Subtract away the initial part before first_addr. */
2552 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2553 + ((first_addr & 0x3) - (start_addr & 0x3)));
2554
2555 /* Move to the beginning of the next frag. */
2556 first_frag = first_frag->fr_next;
2557 first_addr = (unsigned long) &first_frag->fr_literal;
2558 }
2559
2560 /* Add in the used part of the last frag. */
2561 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2562 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2563 return index;
2564 }
2565
2566 /* Optimize unwind record directives. */
2567
2568 static unw_rec_list *
2569 optimize_unw_records (list)
2570 unw_rec_list *list;
2571 {
2572 if (!list)
2573 return NULL;
2574
2575 /* If the only unwind record is ".prologue" or ".prologue" followed
2576 by ".body", then we can optimize the unwind directives away. */
2577 if (list->r.type == prologue
2578 && (list->next == NULL
2579 || (list->next->r.type == body && list->next->next == NULL)))
2580 return NULL;
2581
2582 return list;
2583 }
2584
2585 /* Given a complete record list, process any records which have
2586 unresolved fields (i.e., length counts for a prologue). After
2587 this has been run, all necessary information should be available
2588 within each record to generate an image. */
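/* In outline (following the switch below): prologue, prologue_gr and body
   records get their region length from the distance to the next region
   start or to the end of the directives; "when" and spill records get
   their slot offset T within the current region; and the fr/gr/br
   save-mask records are folded into the enclosing region's masks and
   imask.  */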
2589
2590 static void
2591 fixup_unw_records (list)
2592 unw_rec_list *list;
2593 {
2594 unw_rec_list *ptr, *region = 0;
2595 unsigned long first_addr = 0, rlen = 0, t;
2596 fragS *first_frag = 0;
2597
2598 for (ptr = list; ptr; ptr = ptr->next)
2599 {
2600 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2601 as_bad ("Insn slot not set in unwind record.");
2602 t = slot_index (ptr->slot_number, ptr->slot_frag,
2603 first_addr, first_frag);
2604 switch (ptr->r.type)
2605 {
2606 case prologue:
2607 case prologue_gr:
2608 case body:
2609 {
2610 unw_rec_list *last;
2611 int size, dir_len = 0;
2612 unsigned long last_addr;
2613 fragS *last_frag;
2614
2615 first_addr = ptr->slot_number;
2616 first_frag = ptr->slot_frag;
2617 ptr->slot_number = 0;
2618 /* Find either the next body/prologue start, or the end of
2619 the list, and determine the size of the region. */
2620 last_addr = unwind.next_slot_number;
2621 last_frag = unwind.next_slot_frag;
2622 for (last = ptr->next; last != NULL; last = last->next)
2623 if (last->r.type == prologue || last->r.type == prologue_gr
2624 || last->r.type == body)
2625 {
2626 last_addr = last->slot_number;
2627 last_frag = last->slot_frag;
2628 break;
2629 }
2630 else if (!last->next)
2631 {
2632 /* In the absence of an explicit .body directive,
2633 the prologue ends after the last instruction
2634 covered by an unwind directive. */
2635 if (ptr->r.type != body)
2636 {
2637 last_addr = last->slot_number;
2638 last_frag = last->slot_frag;
2639 switch (last->r.type)
2640 {
2641 case frgr_mem:
2642 dir_len = (count_bits (last->r.record.p.frmask)
2643 + count_bits (last->r.record.p.grmask));
2644 break;
2645 case fr_mem:
2646 case gr_mem:
2647 dir_len += count_bits (last->r.record.p.rmask);
2648 break;
2649 case br_mem:
2650 case br_gr:
2651 dir_len += count_bits (last->r.record.p.brmask);
2652 break;
2653 case gr_gr:
2654 dir_len += count_bits (last->r.record.p.grmask);
2655 break;
2656 default:
2657 dir_len = 1;
2658 break;
2659 }
2660 }
2661 break;
2662 }
2663 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2664 + dir_len);
2665 rlen = ptr->r.record.r.rlen = size;
2666 region = ptr;
2667 break;
2668 }
2669 case epilogue:
2670 ptr->r.record.b.t = rlen - 1 - t;
2671 break;
2672
2673 case mem_stack_f:
2674 case mem_stack_v:
2675 case rp_when:
2676 case pfs_when:
2677 case preds_when:
2678 case unat_when:
2679 case lc_when:
2680 case fpsr_when:
2681 case priunat_when_gr:
2682 case priunat_when_mem:
2683 case bsp_when:
2684 case bspstore_when:
2685 case rnat_when:
2686 ptr->r.record.p.t = t;
2687 break;
2688
2689 case spill_reg:
2690 case spill_sprel:
2691 case spill_psprel:
2692 case spill_reg_p:
2693 case spill_sprel_p:
2694 case spill_psprel_p:
2695 ptr->r.record.x.t = t;
2696 break;
2697
2698 case frgr_mem:
2699 if (!region)
2700 {
2701 as_bad ("frgr_mem record before region record!\n");
2702 return;
2703 }
2704 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2705 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2706 set_imask (region, ptr->r.record.p.frmask, t, 1);
2707 set_imask (region, ptr->r.record.p.grmask, t, 2);
2708 break;
2709 case fr_mem:
2710 if (!region)
2711 {
2712 as_bad ("fr_mem record before region record!\n");
2713 return;
2714 }
2715 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2716 set_imask (region, ptr->r.record.p.rmask, t, 1);
2717 break;
2718 case gr_mem:
2719 if (!region)
2720 {
2721 as_bad ("gr_mem record before region record!\n");
2722 return;
2723 }
2724 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2725 set_imask (region, ptr->r.record.p.rmask, t, 2);
2726 break;
2727 case br_mem:
2728 if (!region)
2729 {
2730 as_bad ("br_mem record before region record!\n");
2731 return;
2732 }
2733 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2734 set_imask (region, ptr->r.record.p.brmask, t, 3);
2735 break;
2736
2737 case gr_gr:
2738 if (!region)
2739 {
2740 as_bad ("gr_gr record before region record!\n");
2741 return;
2742 }
2743 set_imask (region, ptr->r.record.p.grmask, t, 2);
2744 break;
2745 case br_gr:
2746 if (!region)
2747 {
2748 as_bad ("br_gr record before region record!\n");
2749 return;
2750 }
2751 set_imask (region, ptr->r.record.p.brmask, t, 3);
2752 break;
2753
2754 default:
2755 break;
2756 }
2757 }
2758 }
2759
2760 /* Generate an unwind image from a record list. Returns the number of
2761 bytes in the resulting image. The memory image itself is returned
2762 in the 'ptr' parameter. */
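/* Image layout sketch, matching the code below: an 8-byte header whose
   64-bit value packs the version (1 << 48), the U and E handler flag bits
   (3 << 32, present only with a personality routine), and the descriptor
   length in 8-byte units; then the descriptor bytes emitted by
   process_unw_records; zero padding up to an 8-byte boundary; and finally
   an 8-byte slot that generate_unwind_image may fix up with the
   personality routine's address.  */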
2763 static int
2764 output_unw_records (list, ptr)
2765 unw_rec_list *list;
2766 void **ptr;
2767 {
2768 int size, x, extra = 0;
2769 unsigned char *mem;
2770
2771 *ptr = NULL;
2772
2773 list = optimize_unw_records (list);
2774 fixup_unw_records (list);
2775 size = calc_record_size (list);
2776
2777 /* Pad to an 8 byte boundary. */
2778 x = size % 8;
2779 if (x != 0)
2780 extra = 8 - x;
2781
2782 if (size > 0 || unwind.force_unwind_entry)
2783 {
2784 unwind.force_unwind_entry = 0;
2785
2786 /* Add 8 for the header + 8 more bytes for the personality offset. */
2787 mem = xmalloc (size + extra + 16);
2788
2789 vbyte_mem_ptr = mem + 8;
2790 /* Clear the padding area and personality. */
2791 memset (mem + 8 + size, 0 , extra + 8);
2792 /* Initialize the header area. */
2793 md_number_to_chars (mem,
2794 (((bfd_vma) 1 << 48) /* version */
2795 | (unwind.personality_routine
2796 ? ((bfd_vma) 3 << 32) /* U & E handler flags */
2797 : 0)
2798 | ((size + extra) / 8)), /* length (dwords) */
2799 8);
2800
2801 process_unw_records (list, output_vbyte_mem);
2802
2803 *ptr = mem;
2804
2805 size += extra + 16;
2806 }
2807 return size;
2808 }
2809
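/* Map a register expression onto the (ab, reg) pair used by the X-format
   spill records.  Judging from the cases below: ab 0 covers the preserved
   general registers r4-r7, ab 1 the preserved floating-point registers
   f2-f5 and f16-f31, ab 2 the preserved branch registers b1-b5, and ab 3 a
   small set of special registers (pr, psp, priunat, rp and several ar
   registers).  Returns nonzero on success.  */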
2810 static int
2811 convert_expr_to_ab_reg (e, ab, regp)
2812 expressionS *e;
2813 unsigned int *ab;
2814 unsigned int *regp;
2815 {
2816 unsigned int reg;
2817
2818 if (e->X_op != O_register)
2819 return 0;
2820
2821 reg = e->X_add_number;
2822 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2823 {
2824 *ab = 0;
2825 *regp = reg - REG_GR;
2826 }
2827 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2828 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2829 {
2830 *ab = 1;
2831 *regp = reg - REG_FR;
2832 }
2833 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2834 {
2835 *ab = 2;
2836 *regp = reg - REG_BR;
2837 }
2838 else
2839 {
2840 *ab = 3;
2841 switch (reg)
2842 {
2843 case REG_PR: *regp = 0; break;
2844 case REG_PSP: *regp = 1; break;
2845 case REG_PRIUNAT: *regp = 2; break;
2846 case REG_BR + 0: *regp = 3; break;
2847 case REG_AR + AR_BSP: *regp = 4; break;
2848 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2849 case REG_AR + AR_RNAT: *regp = 6; break;
2850 case REG_AR + AR_UNAT: *regp = 7; break;
2851 case REG_AR + AR_FPSR: *regp = 8; break;
2852 case REG_AR + AR_PFS: *regp = 9; break;
2853 case REG_AR + AR_LC: *regp = 10; break;
2854
2855 default:
2856 return 0;
2857 }
2858 }
2859 return 1;
2860 }
2861
2862 static int
2863 convert_expr_to_xy_reg (e, xy, regp)
2864 expressionS *e;
2865 unsigned int *xy;
2866 unsigned int *regp;
2867 {
2868 unsigned int reg;
2869
2870 if (e->X_op != O_register)
2871 return 0;
2872
2873 reg = e->X_add_number;
2874
2875 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
2876 {
2877 *xy = 0;
2878 *regp = reg - REG_GR;
2879 }
2880 else if (reg >= REG_FR && reg <= (REG_FR + 127))
2881 {
2882 *xy = 1;
2883 *regp = reg - REG_FR;
2884 }
2885 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2886 {
2887 *xy = 2;
2888 *regp = reg - REG_BR;
2889 }
2890 else
2891 return 0;
2892 return 1;
2893 }
2894
2895 static void
2896 dot_radix (dummy)
2897 int dummy ATTRIBUTE_UNUSED;
2898 {
2899 int radix;
2900
2901 SKIP_WHITESPACE ();
2902 radix = *input_line_pointer++;
2903
2904 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
2905 {
2906 as_bad ("Radix `%c' unsupported", radix);
2907 ignore_rest_of_line ();
2908 return;
2909 }
2910 }
2911
2912 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
2913 static void
2914 dot_special_section (which)
2915 int which;
2916 {
2917 set_section ((char *) special_section_name[which]);
2918 }
2919
2920 static void
2921 add_unwind_entry (ptr)
2922 unw_rec_list *ptr;
2923 {
2924 if (unwind.tail)
2925 unwind.tail->next = ptr;
2926 else
2927 unwind.list = ptr;
2928 unwind.tail = ptr;
2929
2930 /* The current entry can in fact be a chain of unwind entries. */
2931 if (unwind.current_entry == NULL)
2932 unwind.current_entry = ptr;
2933 }
2934
2935 static void
2936 dot_fframe (dummy)
2937 int dummy ATTRIBUTE_UNUSED;
2938 {
2939 expressionS e;
2940
2941 parse_operand (&e);
2942
2943 if (e.X_op != O_constant)
2944 as_bad ("Operand to .fframe must be a constant");
2945 else
2946 add_unwind_entry (output_mem_stack_f (e.X_add_number));
2947 }
2948
2949 static void
2950 dot_vframe (dummy)
2951 int dummy ATTRIBUTE_UNUSED;
2952 {
2953 expressionS e;
2954 unsigned reg;
2955
2956 parse_operand (&e);
2957 reg = e.X_add_number - REG_GR;
2958 if (e.X_op == O_register && reg < 128)
2959 {
2960 add_unwind_entry (output_mem_stack_v ());
2961 if (! (unwind.prologue_mask & 2))
2962 add_unwind_entry (output_psp_gr (reg));
2963 }
2964 else
2965 as_bad ("First operand to .vframe must be a general register");
2966 }
2967
2968 static void
2969 dot_vframesp (dummy)
2970 int dummy ATTRIBUTE_UNUSED;
2971 {
2972 expressionS e;
2973
2974 parse_operand (&e);
2975 if (e.X_op == O_constant)
2976 {
2977 add_unwind_entry (output_mem_stack_v ());
2978 add_unwind_entry (output_psp_sprel (e.X_add_number));
2979 }
2980 else
2981 as_bad ("Operand to .vframesp must be a constant");
2982 }
2983
2984 static void
2985 dot_vframepsp (dummy)
2986 int dummy ATTRIBUTE_UNUSED;
2987 {
2988 expressionS e;
2989
2990 parse_operand (&e);
2991 if (e.X_op == O_constant)
2992 {
2993 add_unwind_entry (output_mem_stack_v ());
2994 add_unwind_entry (output_psp_sprel (e.X_add_number));
2995 }
2996 else
2997 as_bad ("Operand to .vframepsp must be a constant");
2998 }
2999
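/* Handle the .save directive: ".save REG, GR" records both when REG is
   saved (the current slot) and where (general register GR).  A typical
   hand-written prologue might contain ".save ar.pfs, r35", though the
   register choices in that example are purely illustrative.  */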
3000 static void
3001 dot_save (dummy)
3002 int dummy ATTRIBUTE_UNUSED;
3003 {
3004 expressionS e1, e2;
3005 int sep;
3006 int reg1, reg2;
3007
3008 sep = parse_operand (&e1);
3009 if (sep != ',')
3010 as_bad ("No second operand to .save");
3011 sep = parse_operand (&e2);
3012
3013 reg1 = e1.X_add_number;
3014 reg2 = e2.X_add_number - REG_GR;
3015
3016 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3017 if (e1.X_op == O_register)
3018 {
3019 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3020 {
3021 switch (reg1)
3022 {
3023 case REG_AR + AR_BSP:
3024 add_unwind_entry (output_bsp_when ());
3025 add_unwind_entry (output_bsp_gr (reg2));
3026 break;
3027 case REG_AR + AR_BSPSTORE:
3028 add_unwind_entry (output_bspstore_when ());
3029 add_unwind_entry (output_bspstore_gr (reg2));
3030 break;
3031 case REG_AR + AR_RNAT:
3032 add_unwind_entry (output_rnat_when ());
3033 add_unwind_entry (output_rnat_gr (reg2));
3034 break;
3035 case REG_AR + AR_UNAT:
3036 add_unwind_entry (output_unat_when ());
3037 add_unwind_entry (output_unat_gr (reg2));
3038 break;
3039 case REG_AR + AR_FPSR:
3040 add_unwind_entry (output_fpsr_when ());
3041 add_unwind_entry (output_fpsr_gr (reg2));
3042 break;
3043 case REG_AR + AR_PFS:
3044 add_unwind_entry (output_pfs_when ());
3045 if (! (unwind.prologue_mask & 4))
3046 add_unwind_entry (output_pfs_gr (reg2));
3047 break;
3048 case REG_AR + AR_LC:
3049 add_unwind_entry (output_lc_when ());
3050 add_unwind_entry (output_lc_gr (reg2));
3051 break;
3052 case REG_BR:
3053 add_unwind_entry (output_rp_when ());
3054 if (! (unwind.prologue_mask & 8))
3055 add_unwind_entry (output_rp_gr (reg2));
3056 break;
3057 case REG_PR:
3058 add_unwind_entry (output_preds_when ());
3059 if (! (unwind.prologue_mask & 1))
3060 add_unwind_entry (output_preds_gr (reg2));
3061 break;
3062 case REG_PRIUNAT:
3063 add_unwind_entry (output_priunat_when_gr ());
3064 add_unwind_entry (output_priunat_gr (reg2));
3065 break;
3066 default:
3067 as_bad ("First operand not a valid register");
3068 }
3069 }
3070 else
3071 as_bad ("Second operand not a valid register");
3072 }
3073 else
3074 as_bad ("First operand not a register");
3075 }
3076
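/* Handle .restore: the first operand must be the stack pointer, and an
   optional second constant gives the number of additional prologue regions
   to pop (see the epilogue record handling).  The common form is simply
   ".restore sp".  */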
3077 static void
3078 dot_restore (dummy)
3079 int dummy ATTRIBUTE_UNUSED;
3080 {
3081 expressionS e1, e2;
3082 unsigned long ecount; /* # of _additional_ regions to pop */
3083 int sep;
3084
3085 sep = parse_operand (&e1);
3086 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3087 {
3088 as_bad ("First operand to .restore must be stack pointer (sp)");
3089 return;
3090 }
3091
3092 if (sep == ',')
3093 {
3094 parse_operand (&e2);
3095 if (e2.X_op != O_constant || e2.X_add_number < 0)
3096 {
3097 as_bad ("Second operand to .restore must be a constant >= 0");
3098 return;
3099 }
3100 ecount = e2.X_add_number;
3101 }
3102 else
3103 ecount = unwind.prologue_count - 1;
3104 add_unwind_entry (output_epilogue (ecount));
3105
3106 if (ecount < unwind.prologue_count)
3107 unwind.prologue_count -= ecount + 1;
3108 else
3109 unwind.prologue_count = 0;
3110 }
3111
3112 static void
3113 dot_restorereg (dummy)
3114 int dummy ATTRIBUTE_UNUSED;
3115 {
3116 unsigned int ab, reg;
3117 expressionS e;
3118
3119 parse_operand (&e);
3120
3121 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3122 {
3123 as_bad ("First operand to .restorereg must be a preserved register");
3124 return;
3125 }
3126 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3127 }
3128
3129 static void
3130 dot_restorereg_p (dummy)
3131 int dummy ATTRIBUTE_UNUSED;
3132 {
3133 unsigned int qp, ab, reg;
3134 expressionS e1, e2;
3135 int sep;
3136
3137 sep = parse_operand (&e1);
3138 if (sep != ',')
3139 {
3140 as_bad ("No second operand to .restorereg.p");
3141 return;
3142 }
3143
3144 parse_operand (&e2);
3145
3146 qp = e1.X_add_number - REG_P;
3147 if (e1.X_op != O_register || qp > 63)
3148 {
3149 as_bad ("First operand to .restorereg.p must be a predicate");
3150 return;
3151 }
3152
3153 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3154 {
3155 as_bad ("Second operand to .restorereg.p must be a preserved register");
3156 return;
3157 }
3158 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3159 }
3160
3161 static int
3162 generate_unwind_image (text_name)
3163 const char *text_name;
3164 {
3165 int size;
3166 unsigned char *unw_rec;
3167
3168 /* Force out pending instructions, to make sure all unwind records have
3169 a valid slot_number field. */
3170 ia64_flush_insns ();
3171
3172 /* Generate the unwind record. */
3173 size = output_unw_records (unwind.list, (void **) &unw_rec);
3174 if (size % 8 != 0)
3175 as_bad ("Unwind record is not a multiple of 8 bytes.");
3176
3177 /* If there are unwind records, switch sections, and output the info. */
3178 if (size != 0)
3179 {
3180 unsigned char *where;
3181 char *sec_name;
3182 expressionS exp;
3183
3184 make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO, text_name, sec_name);
3185 set_section (sec_name);
3186 bfd_set_section_flags (stdoutput, now_seg,
3187 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3188
3189 /* Make sure the section has 8 byte alignment. */
3190 frag_align (3, 0, 0);
3191 record_alignment (now_seg, 3);
3192
3193 /* Set expression which points to start of unwind descriptor area. */
3194 unwind.info = expr_build_dot ();
3195
3196 where = (unsigned char *) frag_more (size);
3197
3198 /* Issue a label for this address, and keep track of it to put it
3199 in the unwind section. */
3200
3201 /* Copy the information from the unwind record into this section. The
3202 data is already in the correct byte order. */
3203 memcpy (where, unw_rec, size);
3204
3205 /* Add the personality address to the image. */
3206 if (unwind.personality_routine != 0)
3207 {
3208 exp.X_op = O_symbol;
3209 exp.X_add_symbol = unwind.personality_routine;
3210 exp.X_add_number = 0;
3211 fix_new_exp (frag_now, frag_now_fix () - 8, 8,
3212 &exp, 0, BFD_RELOC_IA64_LTOFF_FPTR64LSB);
3213 unwind.personality_routine = 0;
3214 }
3215 }
3216
3217 free_list_records (unwind.list);
3218 unwind.list = unwind.tail = unwind.current_entry = NULL;
3219
3220 return size;
3221 }
3222
3223 static void
3224 dot_handlerdata (dummy)
3225 int dummy ATTRIBUTE_UNUSED;
3226 {
3227 const char *text_name = segment_name (now_seg);
3228
3229 /* If the text section name is exactly ".text", drop it so the unwind
3230 section names carry no suffix (see the naming scheme in dot_endp). */
3231 if (strcmp (text_name, ".text") == 0)
3232 text_name = "";
3233
3234 unwind.force_unwind_entry = 1;
3235
3236 /* Remember which segment we're in so we can switch back after .endp */
3237 unwind.saved_text_seg = now_seg;
3238 unwind.saved_text_subseg = now_subseg;
3239
3240 /* Generate unwind info into unwind-info section and then leave that
3241 section as the currently active one so dataXX directives go into
3242 the language specific data area of the unwind info block. */
3243 generate_unwind_image (text_name);
3244 demand_empty_rest_of_line ();
3245 }
3246
3247 static void
3248 dot_unwentry (dummy)
3249 int dummy ATTRIBUTE_UNUSED;
3250 {
3251 unwind.force_unwind_entry = 1;
3252 demand_empty_rest_of_line ();
3253 }
3254
3255 static void
3256 dot_altrp (dummy)
3257 int dummy ATTRIBUTE_UNUSED;
3258 {
3259 expressionS e;
3260 unsigned reg;
3261
3262 parse_operand (&e);
3263 reg = e.X_add_number - REG_BR;
3264 if (e.X_op == O_register && reg < 8)
3265 add_unwind_entry (output_rp_br (reg));
3266 else
3267 as_bad ("First operand not a valid branch register");
3268 }
3269
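/* Handle .savesp (PSPREL == 0) and .savepsp (PSPREL != 0): ".savesp REG,
   OFFSET" records that REG is saved at the given sp-relative offset, while
   .savepsp takes a psp-relative offset.  Something like ".savesp ar.unat, 16"
   would be an illustrative (not prescriptive) use.  */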
3270 static void
3271 dot_savemem (psprel)
3272 int psprel;
3273 {
3274 expressionS e1, e2;
3275 int sep;
3276 int reg1, val;
3277
3278 sep = parse_operand (&e1);
3279 if (sep != ',')
3280 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3281 sep = parse_operand (&e2);
3282
3283 reg1 = e1.X_add_number;
3284 val = e2.X_add_number;
3285
3286 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3287 if (e1.X_op == O_register)
3288 {
3289 if (e2.X_op == O_constant)
3290 {
3291 switch (reg1)
3292 {
3293 case REG_AR + AR_BSP:
3294 add_unwind_entry (output_bsp_when ());
3295 add_unwind_entry ((psprel
3296 ? output_bsp_psprel
3297 : output_bsp_sprel) (val));
3298 break;
3299 case REG_AR + AR_BSPSTORE:
3300 add_unwind_entry (output_bspstore_when ());
3301 add_unwind_entry ((psprel
3302 ? output_bspstore_psprel
3303 : output_bspstore_sprel) (val));
3304 break;
3305 case REG_AR + AR_RNAT:
3306 add_unwind_entry (output_rnat_when ());
3307 add_unwind_entry ((psprel
3308 ? output_rnat_psprel
3309 : output_rnat_sprel) (val));
3310 break;
3311 case REG_AR + AR_UNAT:
3312 add_unwind_entry (output_unat_when ());
3313 add_unwind_entry ((psprel
3314 ? output_unat_psprel
3315 : output_unat_sprel) (val));
3316 break;
3317 case REG_AR + AR_FPSR:
3318 add_unwind_entry (output_fpsr_when ());
3319 add_unwind_entry ((psprel
3320 ? output_fpsr_psprel
3321 : output_fpsr_sprel) (val));
3322 break;
3323 case REG_AR + AR_PFS:
3324 add_unwind_entry (output_pfs_when ());
3325 add_unwind_entry ((psprel
3326 ? output_pfs_psprel
3327 : output_pfs_sprel) (val));
3328 break;
3329 case REG_AR + AR_LC:
3330 add_unwind_entry (output_lc_when ());
3331 add_unwind_entry ((psprel
3332 ? output_lc_psprel
3333 : output_lc_sprel) (val));
3334 break;
3335 case REG_BR:
3336 add_unwind_entry (output_rp_when ());
3337 add_unwind_entry ((psprel
3338 ? output_rp_psprel
3339 : output_rp_sprel) (val));
3340 break;
3341 case REG_PR:
3342 add_unwind_entry (output_preds_when ());
3343 add_unwind_entry ((psprel
3344 ? output_preds_psprel
3345 : output_preds_sprel) (val));
3346 break;
3347 case REG_PRIUNAT:
3348 add_unwind_entry (output_priunat_when_mem ());
3349 add_unwind_entry ((psprel
3350 ? output_priunat_psprel
3351 : output_priunat_sprel) (val));
3352 break;
3353 default:
3354 as_bad ("First operand not a valid register");
3355 }
3356 }
3357 else
3358 as_bad ("Second operand not a valid constant");
3359 }
3360 else
3361 as_bad ("First operand not a register");
3362 }
3363
3364 static void
3365 dot_saveg (dummy)
3366 int dummy ATTRIBUTE_UNUSED;
3367 {
3368 expressionS e1, e2;
3369 int sep;
3370 sep = parse_operand (&e1);
3371 if (sep == ',')
3372 parse_operand (&e2);
3373
3374 if (e1.X_op != O_constant)
3375 as_bad ("First operand to .save.g must be a constant.");
3376 else
3377 {
3378 int grmask = e1.X_add_number;
3379 if (sep != ',')
3380 add_unwind_entry (output_gr_mem (grmask));
3381 else
3382 {
3383 int reg = e2.X_add_number - REG_GR;
3384 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3385 add_unwind_entry (output_gr_gr (grmask, reg));
3386 else
3387 as_bad ("Second operand is an invalid register.");
3388 }
3389 }
3390 }
3391
3392 static void
3393 dot_savef (dummy)
3394 int dummy ATTRIBUTE_UNUSED;
3395 {
3396 expressionS e1;
3397 int sep;
3398 sep = parse_operand (&e1);
3399
3400 if (e1.X_op != O_constant)
3401 as_bad ("Operand to .save.f must be a constant.");
3402 else
3403 add_unwind_entry (output_fr_mem (e1.X_add_number));
3404 }
3405
3406 static void
3407 dot_saveb (dummy)
3408 int dummy ATTRIBUTE_UNUSED;
3409 {
3410 expressionS e1, e2;
3411 unsigned int reg;
3412 unsigned char sep;
3413 int brmask;
3414
3415 sep = parse_operand (&e1);
3416 if (e1.X_op != O_constant)
3417 {
3418 as_bad ("First operand to .save.b must be a constant.");
3419 return;
3420 }
3421 brmask = e1.X_add_number;
3422
3423 if (sep == ',')
3424 {
3425 sep = parse_operand (&e2);
3426 reg = e2.X_add_number - REG_GR;
3427 if (e2.X_op != O_register || reg > 127)
3428 {
3429 as_bad ("Second operand to .save.b must be a general register.");
3430 return;
3431 }
3432 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3433 }
3434 else
3435 add_unwind_entry (output_br_mem (brmask));
3436
3437 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3438 ignore_rest_of_line ();
3439 }
3440
3441 static void
3442 dot_savegf (dummy)
3443 int dummy ATTRIBUTE_UNUSED;
3444 {
3445 expressionS e1, e2;
3446 int sep;
3447 sep = parse_operand (&e1);
3448 if (sep == ',')
3449 parse_operand (&e2);
3450
3451 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3452 as_bad ("Both operands of .save.gf must be constants.");
3453 else
3454 {
3455 int grmask = e1.X_add_number;
3456 int frmask = e2.X_add_number;
3457 add_unwind_entry (output_frgr_mem (grmask, frmask));
3458 }
3459 }
3460
3461 static void
3462 dot_spill (dummy)
3463 int dummy ATTRIBUTE_UNUSED;
3464 {
3465 expressionS e;
3466 unsigned char sep;
3467
3468 sep = parse_operand (&e);
3469 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3470 ignore_rest_of_line ();
3471
3472 if (e.X_op != O_constant)
3473 as_bad ("Operand to .spill must be a constant");
3474 else
3475 add_unwind_entry (output_spill_base (e.X_add_number));
3476 }
3477
3478 static void
3479 dot_spillreg (dummy)
3480 int dummy ATTRIBUTE_UNUSED;
3481 {
3482 int sep, ab, xy, reg, treg;
3483 expressionS e1, e2;
3484
3485 sep = parse_operand (&e1);
3486 if (sep != ',')
3487 {
3488 as_bad ("No second operand to .spillreg");
3489 return;
3490 }
3491
3492 parse_operand (&e2);
3493
3494 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3495 {
3496 as_bad ("First operand to .spillreg must be a preserved register");
3497 return;
3498 }
3499
3500 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3501 {
3502 as_bad ("Second operand to .spillreg must be a register");
3503 return;
3504 }
3505
3506 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3507 }
3508
3509 static void
3510 dot_spillmem (psprel)
3511 int psprel;
3512 {
3513 expressionS e1, e2;
3514 int sep, ab, reg;
3515
3516 sep = parse_operand (&e1);
3517 if (sep != ',')
3518 {
3519 as_bad ("Second operand missing");
3520 return;
3521 }
3522
3523 parse_operand (&e2);
3524
3525 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3526 {
3527 as_bad ("First operand to .spill%s must be a preserved register",
3528 psprel ? "psp" : "sp");
3529 return;
3530 }
3531
3532 if (e2.X_op != O_constant)
3533 {
3534 as_bad ("Second operand to .spill%s must be a constant",
3535 psprel ? "psp" : "sp");
3536 return;
3537 }
3538
3539 if (psprel)
3540 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3541 else
3542 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3543 }
3544
3545 static void
3546 dot_spillreg_p (dummy)
3547 int dummy ATTRIBUTE_UNUSED;
3548 {
3549 int sep, ab, xy, reg, treg;
3550 expressionS e1, e2, e3;
3551 unsigned int qp;
3552
3553 sep = parse_operand (&e1);
3554 if (sep != ',')
3555 {
3556 as_bad ("No second and third operand to .spillreg.p");
3557 return;
3558 }
3559
3560 sep = parse_operand (&e2);
3561 if (sep != ',')
3562 {
3563 as_bad ("No third operand to .spillreg.p");
3564 return;
3565 }
3566
3567 parse_operand (&e3);
3568
3569 qp = e1.X_add_number - REG_P;
3570
3571 if (e1.X_op != O_register || qp > 63)
3572 {
3573 as_bad ("First operand to .spillreg.p must be a predicate");
3574 return;
3575 }
3576
3577 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3578 {
3579 as_bad ("Second operand to .spillreg.p must be a preserved register");
3580 return;
3581 }
3582
3583 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3584 {
3585 as_bad ("Third operand to .spillreg.p must be a register");
3586 return;
3587 }
3588
3589 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3590 }
3591
3592 static void
3593 dot_spillmem_p (psprel)
3594 int psprel;
3595 {
3596 expressionS e1, e2, e3;
3597 int sep, ab, reg;
3598 unsigned int qp;
3599
3600 sep = parse_operand (&e1);
3601 if (sep != ',')
3602 {
3603 as_bad ("Second operand missing");
3604 return;
3605 }
3606
3607 sep = parse_operand (&e2);
3608 if (sep != ',')
3609 {
3610 as_bad ("Third operand missing");
3611 return;
3612 }
3613
3614 parse_operand (&e3);
3615
3616 qp = e1.X_add_number - REG_P;
3617 if (e1.X_op != O_register || qp > 63)
3618 {
3619 as_bad ("First operand to .spill%s_p must be a predicate",
3620 psprel ? "psp" : "sp");
3621 return;
3622 }
3623
3624 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3625 {
3626 as_bad ("Second operand to .spill%s_p must be a preserved register",
3627 psprel ? "psp" : "sp");
3628 return;
3629 }
3630
3631 if (e3.X_op != O_constant)
3632 {
3633 as_bad ("Third operand to .spill%s_p must be a constant",
3634 psprel ? "psp" : "sp");
3635 return;
3636 }
3637
3638 if (psprel)
3639 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3640 else
3641 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3642 }
3643
3644 static void
3645 dot_label_state (dummy)
3646 int dummy ATTRIBUTE_UNUSED;
3647 {
3648 expressionS e;
3649
3650 parse_operand (&e);
3651 if (e.X_op != O_constant)
3652 {
3653 as_bad ("Operand to .label_state must be a constant");
3654 return;
3655 }
3656 add_unwind_entry (output_label_state (e.X_add_number));
3657 }
3658
3659 static void
3660 dot_copy_state (dummy)
3661 int dummy ATTRIBUTE_UNUSED;
3662 {
3663 expressionS e;
3664
3665 parse_operand (&e);
3666 if (e.X_op != O_constant)
3667 {
3668 as_bad ("Operand to .copy_state must be a constant");
3669 return;
3670 }
3671 add_unwind_entry (output_copy_state (e.X_add_number));
3672 }
3673
3674 static void
3675 dot_unwabi (dummy)
3676 int dummy ATTRIBUTE_UNUSED;
3677 {
3678 expressionS e1, e2;
3679 unsigned char sep;
3680
3681 sep = parse_operand (&e1);
3682 if (sep != ',')
3683 {
3684 as_bad ("Second operand to .unwabi missing");
3685 return;
3686 }
3687 sep = parse_operand (&e2);
3688 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3689 ignore_rest_of_line ();
3690
3691 if (e1.X_op != O_constant)
3692 {
3693 as_bad ("First operand to .unwabi must be a constant");
3694 return;
3695 }
3696
3697 if (e2.X_op != O_constant)
3698 {
3699 as_bad ("Second operand to .unwabi must be a constant");
3700 return;
3701 }
3702
3703 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3704 }
3705
3706 static void
3707 dot_personality (dummy)
3708 int dummy ATTRIBUTE_UNUSED;
3709 {
3710 char *name, *p, c;
3711 SKIP_WHITESPACE ();
3712 name = input_line_pointer;
3713 c = get_symbol_end ();
3714 p = input_line_pointer;
3715 unwind.personality_routine = symbol_find_or_make (name);
3716 unwind.force_unwind_entry = 1;
3717 *p = c;
3718 SKIP_WHITESPACE ();
3719 demand_empty_rest_of_line ();
3720 }
3721
3722 static void
3723 dot_proc (dummy)
3724 int dummy ATTRIBUTE_UNUSED;
3725 {
3726 char *name, *p, c;
3727 symbolS *sym;
3728
3729 unwind.proc_start = expr_build_dot ();
3730 /* Parse names of main and alternate entry points and mark them as
3731 function symbols: */
3732 while (1)
3733 {
3734 SKIP_WHITESPACE ();
3735 name = input_line_pointer;
3736 c = get_symbol_end ();
3737 p = input_line_pointer;
3738 sym = symbol_find_or_make (name);
3739 if (unwind.proc_start == 0)
3740 {
3741 unwind.proc_start = sym;
3742 }
3743 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3744 *p = c;
3745 SKIP_WHITESPACE ();
3746 if (*input_line_pointer != ',')
3747 break;
3748 ++input_line_pointer;
3749 }
3750 demand_empty_rest_of_line ();
3751 ia64_do_align (16);
3752
3753 unwind.prologue_count = 0;
3754 unwind.list = unwind.tail = unwind.current_entry = NULL;
3755 unwind.personality_routine = 0;
3756 }
3757
3758 static void
3759 dot_body (dummy)
3760 int dummy ATTRIBUTE_UNUSED;
3761 {
3762 unwind.prologue = 0;
3763 unwind.prologue_mask = 0;
3764
3765 add_unwind_entry (output_body ());
3766 demand_empty_rest_of_line ();
3767 }
3768
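/* Handle .prologue, optionally with a mask and a first save register, as
   in ".prologue MASK, GRSAVE".  Going by how prologue_mask is tested
   elsewhere in this file, bit 0x1 of MASK stands for the predicates, 0x2
   for psp, 0x4 for ar.pfs and 0x8 for rp, each meaning "already saved
   starting at GRSAVE", so the matching .save/.vframe directives omit the
   extra save record.  */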
3769 static void
3770 dot_prologue (dummy)
3771 int dummy ATTRIBUTE_UNUSED;
3772 {
3773 unsigned char sep;
3774 int mask = 0, grsave = 0;
3775
3776 if (!is_it_end_of_statement ())
3777 {
3778 expressionS e1, e2;
3779 sep = parse_operand (&e1);
3780 if (sep != ',')
3781 as_bad ("No second operand to .prologue");
3782 sep = parse_operand (&e2);
3783 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3784 ignore_rest_of_line ();
3785
3786 if (e1.X_op == O_constant)
3787 {
3788 mask = e1.X_add_number;
3789
3790 if (e2.X_op == O_constant)
3791 grsave = e2.X_add_number;
3792 else if (e2.X_op == O_register
3793 && (grsave = e2.X_add_number - REG_GR) < 128)
3794 ;
3795 else
3796 as_bad ("Second operand not a constant or general register");
3797
3798 add_unwind_entry (output_prologue_gr (mask, grsave));
3799 }
3800 else
3801 as_bad ("First operand not a constant");
3802 }
3803 else
3804 add_unwind_entry (output_prologue ());
3805
3806 unwind.prologue = 1;
3807 unwind.prologue_mask = mask;
3808 ++unwind.prologue_count;
3809 }
3810
3811 static void
3812 dot_endp (dummy)
3813 int dummy ATTRIBUTE_UNUSED;
3814 {
3815 expressionS e;
3816 unsigned char *ptr;
3817 int bytes_per_address;
3818 long where;
3819 segT saved_seg;
3820 subsegT saved_subseg;
3821 const char *sec_name, *text_name;
3822
3823 if (unwind.saved_text_seg)
3824 {
3825 saved_seg = unwind.saved_text_seg;
3826 saved_subseg = unwind.saved_text_subseg;
3827 unwind.saved_text_seg = NULL;
3828 }
3829 else
3830 {
3831 saved_seg = now_seg;
3832 saved_subseg = now_subseg;
3833 }
3834
3835 /*
3836 Use a slightly ugly scheme to derive the unwind section names from
3837 the text section name:
3838
3839 text sect.              unwind table sect.
3840 name:                   name:                          comments:
3841 ----------              -----------------              --------------------------------
3842 .text                   .IA_64.unwind
3843 .text.foo               .IA_64.unwind.text.foo
3844 .foo                    .IA_64.unwind.foo
3845 .gnu.linkonce.t.foo
3846                         .gnu.linkonce.ia64unw.foo
3847 _info                   .IA_64.unwind_info             gas issues error message (ditto)
3848 _infoFOO                .IA_64.unwind_infoFOO          gas issues error message (ditto)
3849
3850 This mapping is done so that:
3851
3852 (a) An object file with unwind info only in .text will use
3853 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3854 This follows the letter of the ABI and also ensures backwards
3855 compatibility with older toolchains.
3856
3857 (b) An object file with unwind info in multiple text sections
3858 will use separate unwind sections for each text section.
3859 This allows us to properly set the "sh_info" and "sh_link"
3860 fields in SHT_IA_64_UNWIND as required by the ABI and also
3861 lets GNU ld support programs with multiple segments
3862 containing unwind info (as might be the case for certain
3863 embedded applications).
3864
3865 (c) An error is issued if there would be a name clash.
3866 */
3867 text_name = segment_name (saved_seg);
3868 if (strncmp (text_name, "_info", 5) == 0)
3869 {
3870 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3871 text_name);
3872 ignore_rest_of_line ();
3873 return;
3874 }
3875 if (strcmp (text_name, ".text") == 0)
3876 text_name = "";
3877
3878 expression (&e);
3879 demand_empty_rest_of_line ();
3880
3881 insn_group_break (1, 0, 0);
3882
3883 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
3884 if (!unwind.info)
3885 generate_unwind_image (text_name);
3886
3887 if (unwind.info || unwind.force_unwind_entry)
3888 {
3889 subseg_set (md.last_text_seg, 0);
3890 unwind.proc_end = expr_build_dot ();
3891
3892 make_unw_section_name (SPECIAL_SECTION_UNWIND, text_name, sec_name);
3893 set_section ((char *) sec_name);
3894 bfd_set_section_flags (stdoutput, now_seg,
3895 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3896
3897 /* Make sure the section has 8 byte alignment. */
3898 record_alignment (now_seg, 3);
3899
3900 ptr = frag_more (24);
3901 where = frag_now_fix () - 24;
3902 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
3903
3904 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
3905 e.X_op = O_pseudo_fixup;
3906 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3907 e.X_add_number = 0;
3908 e.X_add_symbol = unwind.proc_start;
3909 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
3910
3911 e.X_op = O_pseudo_fixup;
3912 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3913 e.X_add_number = 0;
3914 e.X_add_symbol = unwind.proc_end;
3915 ia64_cons_fix_new (frag_now, where + bytes_per_address,
3916 bytes_per_address, &e);
3917
3918 if (unwind.info)
3919 {
3920 e.X_op = O_pseudo_fixup;
3921 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3922 e.X_add_number = 0;
3923 e.X_add_symbol = unwind.info;
3924 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
3925 bytes_per_address, &e);
3926 }
3927 else
3928 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
3929 bytes_per_address);
3930
3931 }
3932 subseg_set (saved_seg, saved_subseg);
3933 unwind.proc_start = unwind.proc_end = unwind.info = 0;
3934 }
3935
3936 static void
3937 dot_template (template)
3938 int template;
3939 {
3940 CURR_SLOT.user_template = template;
3941 }
3942
3943 static void
3944 dot_regstk (dummy)
3945 int dummy ATTRIBUTE_UNUSED;
3946 {
3947 int ins, locs, outs, rots;
3948
3949 if (is_it_end_of_statement ())
3950 ins = locs = outs = rots = 0;
3951 else
3952 {
3953 ins = get_absolute_expression ();
3954 if (*input_line_pointer++ != ',')
3955 goto err;
3956 locs = get_absolute_expression ();
3957 if (*input_line_pointer++ != ',')
3958 goto err;
3959 outs = get_absolute_expression ();
3960 if (*input_line_pointer++ != ',')
3961 goto err;
3962 rots = get_absolute_expression ();
3963 }
3964 set_regstack (ins, locs, outs, rots);
3965 return;
3966
3967 err:
3968 as_bad ("Comma expected");
3969 ignore_rest_of_line ();
3970 }
3971
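/* Handle the rotating-register naming directives (.rotr, .rotf and .rotp).
   As parsed below, the argument is a comma-separated list of "name[count]"
   entries; something like ".rotr in[4], out[4]" would be a purely
   illustrative example.  */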
3972 static void
3973 dot_rot (type)
3974 int type;
3975 {
3976 unsigned num_regs, num_alloced = 0;
3977 struct dynreg **drpp, *dr;
3978 int ch, base_reg = 0;
3979 char *name, *start;
3980 size_t len;
3981
3982 switch (type)
3983 {
3984 case DYNREG_GR: base_reg = REG_GR + 32; break;
3985 case DYNREG_FR: base_reg = REG_FR + 32; break;
3986 case DYNREG_PR: base_reg = REG_P + 16; break;
3987 default: break;
3988 }
3989
3990 /* First, remove existing names from hash table. */
3991 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
3992 {
3993 hash_delete (md.dynreg_hash, dr->name);
3994 dr->num_regs = 0;
3995 }
3996
3997 drpp = &md.dynreg[type];
3998 while (1)
3999 {
4000 start = input_line_pointer;
4001 ch = get_symbol_end ();
4002 *input_line_pointer = ch;
4003 len = (input_line_pointer - start);
4004
4005 SKIP_WHITESPACE ();
4006 if (*input_line_pointer != '[')
4007 {
4008 as_bad ("Expected '['");
4009 goto err;
4010 }
4011 ++input_line_pointer; /* skip '[' */
4012
4013 num_regs = get_absolute_expression ();
4014
4015 if (*input_line_pointer++ != ']')
4016 {
4017 as_bad ("Expected ']'");
4018 goto err;
4019 }
4020 SKIP_WHITESPACE ();
4021
4022 num_alloced += num_regs;
4023 switch (type)
4024 {
4025 case DYNREG_GR:
4026 if (num_alloced > md.rot.num_regs)
4027 {
4028 as_bad ("Used more than the declared %d rotating registers",
4029 md.rot.num_regs);
4030 goto err;
4031 }
4032 break;
4033 case DYNREG_FR:
4034 if (num_alloced > 96)
4035 {
4036 as_bad ("Used more than the available 96 rotating registers");
4037 goto err;
4038 }
4039 break;
4040 case DYNREG_PR:
4041 if (num_alloced > 48)
4042 {
4043 as_bad ("Used more than the available 48 rotating registers");
4044 goto err;
4045 }
4046 break;
4047
4048 default:
4049 break;
4050 }
4051
4052 name = obstack_alloc (&notes, len + 1);
4053 memcpy (name, start, len);
4054 name[len] = '\0';
4055
4056 if (!*drpp)
4057 {
4058 *drpp = obstack_alloc (&notes, sizeof (*dr));
4059 memset (*drpp, 0, sizeof (*dr));
4060 }
4061
4062 dr = *drpp;
4063 dr->name = name;
4064 dr->num_regs = num_regs;
4065 dr->base = base_reg;
4066 drpp = &dr->next;
4067 base_reg += num_regs;
4068
4069 if (hash_insert (md.dynreg_hash, name, dr))
4070 {
4071 as_bad ("Attempt to redefine register set `%s'", name);
4072 goto err;
4073 }
4074
4075 if (*input_line_pointer != ',')
4076 break;
4077 ++input_line_pointer; /* skip comma */
4078 SKIP_WHITESPACE ();
4079 }
4080 demand_empty_rest_of_line ();
4081 return;
4082
4083 err:
4084 ignore_rest_of_line ();
4085 }
4086
4087 static void
4088 dot_byteorder (byteorder)
4089 int byteorder;
4090 {
4091 target_big_endian = byteorder;
4092 }
4093
4094 static void
4095 dot_psr (dummy)
4096 int dummy ATTRIBUTE_UNUSED;
4097 {
4098 char *option;
4099 int ch;
4100
4101 while (1)
4102 {
4103 option = input_line_pointer;
4104 ch = get_symbol_end ();
4105 if (strcmp (option, "lsb") == 0)
4106 md.flags &= ~EF_IA_64_BE;
4107 else if (strcmp (option, "msb") == 0)
4108 md.flags |= EF_IA_64_BE;
4109 else if (strcmp (option, "abi32") == 0)
4110 md.flags &= ~EF_IA_64_ABI64;
4111 else if (strcmp (option, "abi64") == 0)
4112 md.flags |= EF_IA_64_ABI64;
4113 else
4114 as_bad ("Unknown psr option `%s'", option);
4115 *input_line_pointer = ch;
4116
4117 SKIP_WHITESPACE ();
4118 if (*input_line_pointer != ',')
4119 break;
4120
4121 ++input_line_pointer;
4122 SKIP_WHITESPACE ();
4123 }
4124 demand_empty_rest_of_line ();
4125 }
4126
4127 static void
4128 dot_alias (dummy)
4129 int dummy ATTRIBUTE_UNUSED;
4130 {
4131 as_bad (".alias not implemented yet");
4132 }
4133
4134 static void
4135 dot_ln (dummy)
4136 int dummy ATTRIBUTE_UNUSED;
4137 {
4138 new_logical_line (0, get_absolute_expression ());
4139 demand_empty_rest_of_line ();
4140 }
4141
4142 static char *
4143 parse_section_name ()
4144 {
4145 char *name;
4146 int len;
4147
4148 SKIP_WHITESPACE ();
4149 if (*input_line_pointer != '"')
4150 {
4151 as_bad ("Missing section name");
4152 ignore_rest_of_line ();
4153 return 0;
4154 }
4155 name = demand_copy_C_string (&len);
4156 if (!name)
4157 {
4158 ignore_rest_of_line ();
4159 return 0;
4160 }
4161 SKIP_WHITESPACE ();
4162 if (*input_line_pointer != ',')
4163 {
4164 as_bad ("Comma expected after section name");
4165 ignore_rest_of_line ();
4166 return 0;
4167 }
4168 ++input_line_pointer; /* skip comma */
4169 return name;
4170 }
4171
4172 static void
4173 dot_xdata (size)
4174 int size;
4175 {
4176 char *name = parse_section_name ();
4177 if (!name)
4178 return;
4179
4180 md.keep_pending_output = 1;
4181 set_section (name);
4182 cons (size);
4183 obj_elf_previous (0);
4184 md.keep_pending_output = 0;
4185 }
4186
4187 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4188
4189 static void
4190 stmt_float_cons (kind)
4191 int kind;
4192 {
4193 size_t size;
4194
4195 switch (kind)
4196 {
4197 case 'd': size = 8; break;
4198 case 'x': size = 10; break;
4199
4200 case 'f':
4201 default:
4202 size = 4;
4203 break;
4204 }
4205 ia64_do_align (size);
4206 float_cons (kind);
4207 }
4208
4209 static void
4210 stmt_cons_ua (size)
4211 int size;
4212 {
4213 int saved_auto_align = md.auto_align;
4214
4215 md.auto_align = 0;
4216 cons (size);
4217 md.auto_align = saved_auto_align;
4218 }
4219
4220 static void
4221 dot_xfloat_cons (kind)
4222 int kind;
4223 {
4224 char *name = parse_section_name ();
4225 if (!name)
4226 return;
4227
4228 md.keep_pending_output = 1;
4229 set_section (name);
4230 stmt_float_cons (kind);
4231 obj_elf_previous (0);
4232 md.keep_pending_output = 0;
4233 }
4234
4235 static void
4236 dot_xstringer (zero)
4237 int zero;
4238 {
4239 char *name = parse_section_name ();
4240 if (!name)
4241 return;
4242
4243 md.keep_pending_output = 1;
4244 set_section (name);
4245 stringer (zero);
4246 obj_elf_previous (0);
4247 md.keep_pending_output = 0;
4248 }
4249
4250 static void
4251 dot_xdata_ua (size)
4252 int size;
4253 {
4254 int saved_auto_align = md.auto_align;
4255 char *name = parse_section_name ();
4256 if (!name)
4257 return;
4258
4259 md.keep_pending_output = 1;
4260 set_section (name);
4261 md.auto_align = 0;
4262 cons (size);
4263 md.auto_align = saved_auto_align;
4264 obj_elf_previous (0);
4265 md.keep_pending_output = 0;
4266 }
4267
4268 static void
4269 dot_xfloat_cons_ua (kind)
4270 int kind;
4271 {
4272 int saved_auto_align = md.auto_align;
4273 char *name = parse_section_name ();
4274 if (!name)
4275 return;
4276
4277 md.keep_pending_output = 1;
4278 set_section (name);
4279 md.auto_align = 0;
4280 stmt_float_cons (kind);
4281 md.auto_align = saved_auto_align;
4282 obj_elf_previous (0);
4283 md.keep_pending_output = 0;
4284 }
4285
4286 /* .reg.val <regname>,value */
4287
4288 static void
4289 dot_reg_val (dummy)
4290 int dummy ATTRIBUTE_UNUSED;
4291 {
4292 expressionS reg;
4293
4294 expression (&reg);
4295 if (reg.X_op != O_register)
4296 {
4297 as_bad (_("Register name expected"));
4298 ignore_rest_of_line ();
4299 }
4300 else if (*input_line_pointer++ != ',')
4301 {
4302 as_bad (_("Comma expected"));
4303 ignore_rest_of_line ();
4304 }
4305 else
4306 {
4307 valueT value = get_absolute_expression ();
4308 int regno = reg.X_add_number;
4309           if (regno < REG_GR || regno > REG_GR + 127)
4310 as_warn (_("Register value annotation ignored"));
4311 else
4312 {
4313 gr_values[regno - REG_GR].known = 1;
4314 gr_values[regno - REG_GR].value = value;
4315 gr_values[regno - REG_GR].path = md.path;
4316 }
4317 }
4318 demand_empty_rest_of_line ();
4319 }
4320
4321 /* select dv checking mode
4322 .auto
4323 .explicit
4324 .default
4325
4326 A stop is inserted when changing modes
4327 */
4328
4329 static void
4330 dot_dv_mode (type)
4331 int type;
4332 {
4333 if (md.manual_bundling)
4334 as_warn (_("Directive invalid within a bundle"));
4335
4336 if (type == 'E' || type == 'A')
4337 md.mode_explicitly_set = 0;
4338 else
4339 md.mode_explicitly_set = 1;
4340
4341 md.detect_dv = 1;
4342 switch (type)
4343 {
4344 case 'A':
4345 case 'a':
4346 if (md.explicit_mode)
4347 insn_group_break (1, 0, 0);
4348 md.explicit_mode = 0;
4349 break;
4350 case 'E':
4351 case 'e':
4352 if (!md.explicit_mode)
4353 insn_group_break (1, 0, 0);
4354 md.explicit_mode = 1;
4355 break;
4356 default:
4357 case 'd':
4358 if (md.explicit_mode != md.default_explicit_mode)
4359 insn_group_break (1, 0, 0);
4360 md.explicit_mode = md.default_explicit_mode;
4361 md.mode_explicitly_set = 0;
4362 break;
4363 }
4364 }
4365
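/* Print the predicate registers selected by MASK to stderr as a
   comma-separated list (used for DV debugging output).  */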
4366 static void
4367 print_prmask (mask)
4368 valueT mask;
4369 {
4370 int regno;
4371 char *comma = "";
4372 for (regno = 0; regno < 64; regno++)
4373 {
4374 if (mask & ((valueT) 1 << regno))
4375 {
4376 fprintf (stderr, "%s p%d", comma, regno);
4377 comma = ",";
4378 }
4379 }
4380 }
4381
4382 /*
4383 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4384 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4385 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4386 .pred.safe_across_calls p1 [, p2 [,...]]
4387 */
4388
4389 static void
4390 dot_pred_rel (type)
4391 int type;
4392 {
4393 valueT mask = 0;
4394 int count = 0;
4395 int p1 = -1, p2 = -1;
4396
4397 if (type == 0)
4398 {
4399 if (*input_line_pointer != '"')
4400 {
4401 as_bad (_("Missing predicate relation type"));
4402 ignore_rest_of_line ();
4403 return;
4404 }
4405 else
4406 {
4407 int len;
4408 char *form = demand_copy_C_string (&len);
4409 if (strcmp (form, "mutex") == 0)
4410 type = 'm';
4411 else if (strcmp (form, "clear") == 0)
4412 type = 'c';
4413 else if (strcmp (form, "imply") == 0)
4414 type = 'i';
4415 else
4416 {
4417 as_bad (_("Unrecognized predicate relation type"));
4418 ignore_rest_of_line ();
4419 return;
4420 }
4421 }
4422 if (*input_line_pointer == ',')
4423 ++input_line_pointer;
4424 SKIP_WHITESPACE ();
4425 }
4426
4427 SKIP_WHITESPACE ();
4428 while (1)
4429 {
4430 valueT bit = 1;
4431 int regno;
4432
4433 if (toupper (*input_line_pointer) != 'P'
4434 || (regno = atoi (++input_line_pointer)) < 0
4435 || regno > 63)
4436 {
4437 as_bad (_("Predicate register expected"));
4438 ignore_rest_of_line ();
4439 return;
4440 }
4441 while (isdigit (*input_line_pointer))
4442 ++input_line_pointer;
4443 if (p1 == -1)
4444 p1 = regno;
4445 else if (p2 == -1)
4446 p2 = regno;
4447 bit <<= regno;
4448 if (mask & bit)
4449 as_warn (_("Duplicate predicate register ignored"));
4450 mask |= bit;
4451 count++;
4452 /* See if it's a range. */
4453 if (*input_line_pointer == '-')
4454 {
4455 valueT stop = 1;
4456 ++input_line_pointer;
4457
4458 if (toupper (*input_line_pointer) != 'P'
4459 || (regno = atoi (++input_line_pointer)) < 0
4460 || regno > 63)
4461 {
4462 as_bad (_("Predicate register expected"));
4463 ignore_rest_of_line ();
4464 return;
4465 }
4466 while (isdigit (*input_line_pointer))
4467 ++input_line_pointer;
4468 stop <<= regno;
4469 if (bit >= stop)
4470 {
4471 as_bad (_("Bad register range"));
4472 ignore_rest_of_line ();
4473 return;
4474 }
4475 while (bit < stop)
4476 {
4477 bit <<= 1;
4478 mask |= bit;
4479 count++;
4480 }
4481 SKIP_WHITESPACE ();
4482 }
4483 if (*input_line_pointer != ',')
4484 break;
4485 ++input_line_pointer;
4486 SKIP_WHITESPACE ();
4487 }
4488
4489 switch (type)
4490 {
4491 case 'c':
4492 if (count == 0)
4493 mask = ~(valueT) 0;
4494 clear_qp_mutex (mask);
4495 clear_qp_implies (mask, (valueT) 0);
4496 break;
4497 case 'i':
4498 if (count != 2 || p1 == -1 || p2 == -1)
4499 as_bad (_("Predicate source and target required"));
4500 else if (p1 == 0 || p2 == 0)
4501 as_bad (_("Use of p0 is not valid in this context"));
4502 else
4503 add_qp_imply (p1, p2);
4504 break;
4505 case 'm':
4506 if (count < 2)
4507 {
4508 as_bad (_("At least two PR arguments expected"));
4509 break;
4510 }
4511 else if (mask & 1)
4512 {
4513 as_bad (_("Use of p0 is not valid in this context"));
4514 break;
4515 }
4516 add_qp_mutex (mask);
4517 break;
4518 case 's':
4519 /* note that we don't override any existing relations */
4520 if (count == 0)
4521 {
4522 as_bad (_("At least one PR argument expected"));
4523 break;
4524 }
4525 if (md.debug_dv)
4526 {
4527 fprintf (stderr, "Safe across calls: ");
4528 print_prmask (mask);
4529 fprintf (stderr, "\n");
4530 }
4531 qp_safe_across_calls = mask;
4532 break;
4533 }
4534 demand_empty_rest_of_line ();
4535 }
4536
4537 /* .entry label [, label [, ...]]
4538 Hint to DV code that the given labels are to be considered entry points.
4539 Otherwise, only global labels are considered entry points. */
4540
4541 static void
4542 dot_entry (dummy)
4543 int dummy ATTRIBUTE_UNUSED;
4544 {
4545 const char *err;
4546 char *name;
4547 int c;
4548 symbolS *symbolP;
4549
4550 do
4551 {
4552 name = input_line_pointer;
4553 c = get_symbol_end ();
4554 symbolP = symbol_find_or_make (name);
4555
4556 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4557 if (err)
4558 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4559 name, err);
4560
4561 *input_line_pointer = c;
4562 SKIP_WHITESPACE ();
4563 c = *input_line_pointer;
4564 if (c == ',')
4565 {
4566 input_line_pointer++;
4567 SKIP_WHITESPACE ();
4568 if (*input_line_pointer == '\n')
4569 c = '\n';
4570 }
4571 }
4572 while (c == ',');
4573
4574 demand_empty_rest_of_line ();
4575 }
4576
4577 /* .mem.offset offset, base
4578    "base" is used to distinguish between offsets from different bases. */
4579
4580 static void
4581 dot_mem_offset (dummy)
4582 int dummy ATTRIBUTE_UNUSED;
4583 {
4584 md.mem_offset.hint = 1;
4585 md.mem_offset.offset = get_absolute_expression ();
4586 if (*input_line_pointer != ',')
4587 {
4588 as_bad (_("Comma expected"));
4589 ignore_rest_of_line ();
4590 return;
4591 }
4592 ++input_line_pointer;
4593 md.mem_offset.base = get_absolute_expression ();
4594 demand_empty_rest_of_line ();
4595 }
4596
4597 /* ia64-specific pseudo-ops: */
4598 const pseudo_typeS md_pseudo_table[] =
4599 {
4600 { "radix", dot_radix, 0 },
4601 { "lcomm", s_lcomm_bytes, 1 },
4602 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4603 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4604 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4605 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4606 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4607 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4608 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4609 { "proc", dot_proc, 0 },
4610 { "body", dot_body, 0 },
4611 { "prologue", dot_prologue, 0 },
4612 { "endp", dot_endp, 0 },
4613 { "file", dwarf2_directive_file, 0 },
4614 { "loc", dwarf2_directive_loc, 0 },
4615
4616 { "fframe", dot_fframe, 0 },
4617 { "vframe", dot_vframe, 0 },
4618 { "vframesp", dot_vframesp, 0 },
4619 { "vframepsp", dot_vframepsp, 0 },
4620 { "save", dot_save, 0 },
4621 { "restore", dot_restore, 0 },
4622 { "restorereg", dot_restorereg, 0 },
4623 { "restorereg.p", dot_restorereg_p, 0 },
4624 { "handlerdata", dot_handlerdata, 0 },
4625 { "unwentry", dot_unwentry, 0 },
4626 { "altrp", dot_altrp, 0 },
4627 { "savesp", dot_savemem, 0 },
4628 { "savepsp", dot_savemem, 1 },
4629 { "save.g", dot_saveg, 0 },
4630 { "save.f", dot_savef, 0 },
4631 { "save.b", dot_saveb, 0 },
4632 { "save.gf", dot_savegf, 0 },
4633 { "spill", dot_spill, 0 },
4634 { "spillreg", dot_spillreg, 0 },
4635 { "spillsp", dot_spillmem, 0 },
4636 { "spillpsp", dot_spillmem, 1 },
4637 { "spillreg.p", dot_spillreg_p, 0 },
4638 { "spillsp.p", dot_spillmem_p, 0 },
4639 { "spillpsp.p", dot_spillmem_p, 1 },
4640 { "label_state", dot_label_state, 0 },
4641 { "copy_state", dot_copy_state, 0 },
4642 { "unwabi", dot_unwabi, 0 },
4643 { "personality", dot_personality, 0 },
4644 #if 0
4645 { "estate", dot_estate, 0 },
4646 #endif
4647 { "mii", dot_template, 0x0 },
4648 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4649 { "mlx", dot_template, 0x2 },
4650 { "mmi", dot_template, 0x4 },
4651 { "mfi", dot_template, 0x6 },
4652 { "mmf", dot_template, 0x7 },
4653 { "mib", dot_template, 0x8 },
4654 { "mbb", dot_template, 0x9 },
4655 { "bbb", dot_template, 0xb },
4656 { "mmb", dot_template, 0xc },
4657 { "mfb", dot_template, 0xe },
4658 #if 0
4659 { "lb", dot_scope, 0 },
4660 { "le", dot_scope, 1 },
4661 #endif
4662 { "align", s_align_bytes, 0 },
4663 { "regstk", dot_regstk, 0 },
4664 { "rotr", dot_rot, DYNREG_GR },
4665 { "rotf", dot_rot, DYNREG_FR },
4666 { "rotp", dot_rot, DYNREG_PR },
4667 { "lsb", dot_byteorder, 0 },
4668 { "msb", dot_byteorder, 1 },
4669 { "psr", dot_psr, 0 },
4670 { "alias", dot_alias, 0 },
4671 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4672
4673 { "xdata1", dot_xdata, 1 },
4674 { "xdata2", dot_xdata, 2 },
4675 { "xdata4", dot_xdata, 4 },
4676 { "xdata8", dot_xdata, 8 },
4677 { "xreal4", dot_xfloat_cons, 'f' },
4678 { "xreal8", dot_xfloat_cons, 'd' },
4679 { "xreal10", dot_xfloat_cons, 'x' },
4680 { "xstring", dot_xstringer, 0 },
4681 { "xstringz", dot_xstringer, 1 },
4682
4683 /* unaligned versions: */
4684 { "xdata2.ua", dot_xdata_ua, 2 },
4685 { "xdata4.ua", dot_xdata_ua, 4 },
4686 { "xdata8.ua", dot_xdata_ua, 8 },
4687 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4688 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4689 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4690
4691 /* annotations/DV checking support */
4692 { "entry", dot_entry, 0 },
4693 { "mem.offset", dot_mem_offset, 0 },
4694 { "pred.rel", dot_pred_rel, 0 },
4695 { "pred.rel.clear", dot_pred_rel, 'c' },
4696 { "pred.rel.imply", dot_pred_rel, 'i' },
4697 { "pred.rel.mutex", dot_pred_rel, 'm' },
4698 { "pred.safe_across_calls", dot_pred_rel, 's' },
4699 { "reg.val", dot_reg_val, 0 },
4700 { "auto", dot_dv_mode, 'a' },
4701 { "explicit", dot_dv_mode, 'e' },
4702 { "default", dot_dv_mode, 'd' },
4703
4704 { NULL, 0, 0 }
4705 };
4706
4707 static const struct pseudo_opcode
4708 {
4709 const char *name;
4710 void (*handler) (int);
4711 int arg;
4712 }
4713 pseudo_opcode[] =
4714 {
4715 /* these are more like pseudo-ops, but don't start with a dot */
4716 { "data1", cons, 1 },
4717 { "data2", cons, 2 },
4718 { "data4", cons, 4 },
4719 { "data8", cons, 8 },
4720 { "real4", stmt_float_cons, 'f' },
4721 { "real8", stmt_float_cons, 'd' },
4722 { "real10", stmt_float_cons, 'x' },
4723 { "string", stringer, 0 },
4724 { "stringz", stringer, 1 },
4725
4726 /* unaligned versions: */
4727 { "data2.ua", stmt_cons_ua, 2 },
4728 { "data4.ua", stmt_cons_ua, 4 },
4729 { "data8.ua", stmt_cons_ua, 8 },
4730 { "real4.ua", float_cons, 'f' },
4731 { "real8.ua", float_cons, 'd' },
4732 { "real10.ua", float_cons, 'x' },
4733 };
4734
4735 /* Declare a register by creating a symbol for it and entering it in
4736 the symbol table. */
4737
4738 static symbolS *
4739 declare_register (name, regnum)
4740 const char *name;
4741 int regnum;
4742 {
4743 const char *err;
4744 symbolS *sym;
4745
4746 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
4747
4748 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
4749 if (err)
4750 as_fatal ("Inserting \"%s\" into register table failed: %s",
4751 name, err);
4752
4753 return sym;
4754 }
4755
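/* Declare NUM_REGS registers named PREFIX0..PREFIX<num_regs-1>, with
   register numbers starting at BASE_REGNUM.  */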
4756 static void
4757 declare_register_set (prefix, num_regs, base_regnum)
4758 const char *prefix;
4759 int num_regs;
4760 int base_regnum;
4761 {
4762 char name[8];
4763 int i;
4764
4765 for (i = 0; i < num_regs; ++i)
4766 {
4767 sprintf (name, "%s%u", prefix, i);
4768 declare_register (name, base_regnum + i);
4769 }
4770 }
4771
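/* Return the total number of bits occupied by the encoding fields of
   operand OPND.  */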
4772 static unsigned int
4773 operand_width (opnd)
4774 enum ia64_opnd opnd;
4775 {
4776 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
4777 unsigned int bits = 0;
4778 int i;
4779
4780 bits = 0;
4781 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
4782 bits += odesc->field[i].bits;
4783
4784 return bits;
4785 }
4786
4787 static enum operand_match_result
4788 operand_match (idesc, index, e)
4789 const struct ia64_opcode *idesc;
4790 int index;
4791 expressionS *e;
4792 {
4793 enum ia64_opnd opnd = idesc->operands[index];
4794 int bits, relocatable = 0;
4795 struct insn_fix *fix;
4796 bfd_signed_vma val;
4797
4798 switch (opnd)
4799 {
4800 /* constants: */
4801
4802 case IA64_OPND_AR_CCV:
4803 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
4804 return OPERAND_MATCH;
4805 break;
4806
4807 case IA64_OPND_AR_PFS:
4808 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
4809 return OPERAND_MATCH;
4810 break;
4811
4812 case IA64_OPND_GR0:
4813 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
4814 return OPERAND_MATCH;
4815 break;
4816
4817 case IA64_OPND_IP:
4818 if (e->X_op == O_register && e->X_add_number == REG_IP)
4819 return OPERAND_MATCH;
4820 break;
4821
4822 case IA64_OPND_PR:
4823 if (e->X_op == O_register && e->X_add_number == REG_PR)
4824 return OPERAND_MATCH;
4825 break;
4826
4827 case IA64_OPND_PR_ROT:
4828 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
4829 return OPERAND_MATCH;
4830 break;
4831
4832 case IA64_OPND_PSR:
4833 if (e->X_op == O_register && e->X_add_number == REG_PSR)
4834 return OPERAND_MATCH;
4835 break;
4836
4837 case IA64_OPND_PSR_L:
4838 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
4839 return OPERAND_MATCH;
4840 break;
4841
4842 case IA64_OPND_PSR_UM:
4843 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
4844 return OPERAND_MATCH;
4845 break;
4846
4847 case IA64_OPND_C1:
4848 if (e->X_op == O_constant)
4849 {
4850 if (e->X_add_number == 1)
4851 return OPERAND_MATCH;
4852 else
4853 return OPERAND_OUT_OF_RANGE;
4854 }
4855 break;
4856
4857 case IA64_OPND_C8:
4858 if (e->X_op == O_constant)
4859 {
4860 if (e->X_add_number == 8)
4861 return OPERAND_MATCH;
4862 else
4863 return OPERAND_OUT_OF_RANGE;
4864 }
4865 break;
4866
4867 case IA64_OPND_C16:
4868 if (e->X_op == O_constant)
4869 {
4870 if (e->X_add_number == 16)
4871 return OPERAND_MATCH;
4872 else
4873 return OPERAND_OUT_OF_RANGE;
4874 }
4875 break;
4876
4877 /* register operands: */
4878
4879 case IA64_OPND_AR3:
4880 if (e->X_op == O_register && e->X_add_number >= REG_AR
4881 && e->X_add_number < REG_AR + 128)
4882 return OPERAND_MATCH;
4883 break;
4884
4885 case IA64_OPND_B1:
4886 case IA64_OPND_B2:
4887 if (e->X_op == O_register && e->X_add_number >= REG_BR
4888 && e->X_add_number < REG_BR + 8)
4889 return OPERAND_MATCH;
4890 break;
4891
4892 case IA64_OPND_CR3:
4893 if (e->X_op == O_register && e->X_add_number >= REG_CR
4894 && e->X_add_number < REG_CR + 128)
4895 return OPERAND_MATCH;
4896 break;
4897
4898 case IA64_OPND_F1:
4899 case IA64_OPND_F2:
4900 case IA64_OPND_F3:
4901 case IA64_OPND_F4:
4902 if (e->X_op == O_register && e->X_add_number >= REG_FR
4903 && e->X_add_number < REG_FR + 128)
4904 return OPERAND_MATCH;
4905 break;
4906
4907 case IA64_OPND_P1:
4908 case IA64_OPND_P2:
4909 if (e->X_op == O_register && e->X_add_number >= REG_P
4910 && e->X_add_number < REG_P + 64)
4911 return OPERAND_MATCH;
4912 break;
4913
4914 case IA64_OPND_R1:
4915 case IA64_OPND_R2:
4916 case IA64_OPND_R3:
4917 if (e->X_op == O_register && e->X_add_number >= REG_GR
4918 && e->X_add_number < REG_GR + 128)
4919 return OPERAND_MATCH;
4920 break;
4921
4922 case IA64_OPND_R3_2:
4923 if (e->X_op == O_register && e->X_add_number >= REG_GR)
4924 {
4925 if (e->X_add_number < REG_GR + 4)
4926 return OPERAND_MATCH;
4927 else if (e->X_add_number < REG_GR + 128)
4928 return OPERAND_OUT_OF_RANGE;
4929 }
4930 break;
4931
4932 /* indirect operands: */
4933 case IA64_OPND_CPUID_R3:
4934 case IA64_OPND_DBR_R3:
4935 case IA64_OPND_DTR_R3:
4936 case IA64_OPND_ITR_R3:
4937 case IA64_OPND_IBR_R3:
4938 case IA64_OPND_MSR_R3:
4939 case IA64_OPND_PKR_R3:
4940 case IA64_OPND_PMC_R3:
4941 case IA64_OPND_PMD_R3:
4942 case IA64_OPND_RR_R3:
4943 if (e->X_op == O_index && e->X_op_symbol
4944 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
4945 == opnd - IA64_OPND_CPUID_R3))
4946 return OPERAND_MATCH;
4947 break;
4948
4949 case IA64_OPND_MR3:
4950 if (e->X_op == O_index && !e->X_op_symbol)
4951 return OPERAND_MATCH;
4952 break;
4953
4954 /* immediate operands: */
4955 case IA64_OPND_CNT2a:
4956 case IA64_OPND_LEN4:
4957 case IA64_OPND_LEN6:
4958 bits = operand_width (idesc->operands[index]);
4959 if (e->X_op == O_constant)
4960 {
4961 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
4962 return OPERAND_MATCH;
4963 else
4964 return OPERAND_OUT_OF_RANGE;
4965 }
4966 break;
4967
4968 case IA64_OPND_CNT2b:
4969 if (e->X_op == O_constant)
4970 {
4971 if ((bfd_vma) (e->X_add_number - 1) < 3)
4972 return OPERAND_MATCH;
4973 else
4974 return OPERAND_OUT_OF_RANGE;
4975 }
4976 break;
4977
4978 case IA64_OPND_CNT2c:
4979 val = e->X_add_number;
4980 if (e->X_op == O_constant)
4981 {
4982 if ((val == 0 || val == 7 || val == 15 || val == 16))
4983 return OPERAND_MATCH;
4984 else
4985 return OPERAND_OUT_OF_RANGE;
4986 }
4987 break;
4988
4989 case IA64_OPND_SOR:
4990 /* SOR must be an integer multiple of 8 */
4991 if (e->X_op == O_constant && e->X_add_number & 0x7)
4992 return OPERAND_OUT_OF_RANGE;
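      /* Fall through to the common SOF/SOL range check.  */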
4993 case IA64_OPND_SOF:
4994 case IA64_OPND_SOL:
4995 if (e->X_op == O_constant)
4996 {
4997 if ((bfd_vma) e->X_add_number <= 96)
4998 return OPERAND_MATCH;
4999 else
5000 return OPERAND_OUT_OF_RANGE;
5001 }
5002 break;
5003
5004 case IA64_OPND_IMMU62:
5005 if (e->X_op == O_constant)
5006 {
5007 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5008 return OPERAND_MATCH;
5009 else
5010 return OPERAND_OUT_OF_RANGE;
5011 }
5012 else
5013 {
5014 /* FIXME -- need 62-bit relocation type */
5015 as_bad (_("62-bit relocation not yet implemented"));
5016 }
5017 break;
5018
5019 case IA64_OPND_IMMU64:
5020 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5021 || e->X_op == O_subtract)
5022 {
5023 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5024 fix->code = BFD_RELOC_IA64_IMM64;
5025 if (e->X_op != O_subtract)
5026 {
5027 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5028 if (e->X_op == O_pseudo_fixup)
5029 e->X_op = O_symbol;
5030 }
5031
5032 fix->opnd = idesc->operands[index];
5033 fix->expr = *e;
5034 fix->is_pcrel = 0;
5035 ++CURR_SLOT.num_fixups;
5036 return OPERAND_MATCH;
5037 }
5038 else if (e->X_op == O_constant)
5039 return OPERAND_MATCH;
5040 break;
5041
5042 case IA64_OPND_CCNT5:
5043 case IA64_OPND_CNT5:
5044 case IA64_OPND_CNT6:
5045 case IA64_OPND_CPOS6a:
5046 case IA64_OPND_CPOS6b:
5047 case IA64_OPND_CPOS6c:
5048 case IA64_OPND_IMMU2:
5049 case IA64_OPND_IMMU7a:
5050 case IA64_OPND_IMMU7b:
5051 case IA64_OPND_IMMU21:
5052 case IA64_OPND_IMMU24:
5053 case IA64_OPND_MBTYPE4:
5054 case IA64_OPND_MHTYPE8:
5055 case IA64_OPND_POS6:
5056 bits = operand_width (idesc->operands[index]);
5057 if (e->X_op == O_constant)
5058 {
5059 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5060 return OPERAND_MATCH;
5061 else
5062 return OPERAND_OUT_OF_RANGE;
5063 }
5064 break;
5065
5066 case IA64_OPND_IMMU9:
5067 bits = operand_width (idesc->operands[index]);
5068 if (e->X_op == O_constant)
5069 {
5070 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5071 {
5072 int lobits = e->X_add_number & 0x3;
5073 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5074 e->X_add_number |= (bfd_vma) 0x3;
5075 return OPERAND_MATCH;
5076 }
5077 else
5078 return OPERAND_OUT_OF_RANGE;
5079 }
5080 break;
5081
5082 case IA64_OPND_IMM44:
5083       /* least significant 16 bits must be zero */
5084 if ((e->X_add_number & 0xffff) != 0)
5085 /* XXX technically, this is wrong: we should not be issuing warning
5086 messages until we're sure this instruction pattern is going to
5087 be used! */
5088 as_warn (_("lower 16 bits of mask ignored"));
5089
5090 if (e->X_op == O_constant)
5091 {
5092 if (((e->X_add_number >= 0
5093 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5094 || (e->X_add_number < 0
5095 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5096 {
5097 /* sign-extend */
5098 if (e->X_add_number >= 0
5099 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5100 {
5101 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5102 }
5103 return OPERAND_MATCH;
5104 }
5105 else
5106 return OPERAND_OUT_OF_RANGE;
5107 }
5108 break;
5109
5110 case IA64_OPND_IMM17:
5111 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5112 if (e->X_op == O_constant)
5113 {
5114 if (((e->X_add_number >= 0
5115 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5116 || (e->X_add_number < 0
5117 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5118 {
5119 /* sign-extend */
5120 if (e->X_add_number >= 0
5121 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5122 {
5123 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5124 }
5125 return OPERAND_MATCH;
5126 }
5127 else
5128 return OPERAND_OUT_OF_RANGE;
5129 }
5130 break;
5131
5132 case IA64_OPND_IMM14:
5133 case IA64_OPND_IMM22:
5134 relocatable = 1;
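      /* Fall through.  */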
5135 case IA64_OPND_IMM1:
5136 case IA64_OPND_IMM8:
5137 case IA64_OPND_IMM8U4:
5138 case IA64_OPND_IMM8M1:
5139 case IA64_OPND_IMM8M1U4:
5140 case IA64_OPND_IMM8M1U8:
5141 case IA64_OPND_IMM9a:
5142 case IA64_OPND_IMM9b:
5143 bits = operand_width (idesc->operands[index]);
5144 if (relocatable && (e->X_op == O_symbol
5145 || e->X_op == O_subtract
5146 || e->X_op == O_pseudo_fixup))
5147 {
5148 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5149
5150 if (idesc->operands[index] == IA64_OPND_IMM14)
5151 fix->code = BFD_RELOC_IA64_IMM14;
5152 else
5153 fix->code = BFD_RELOC_IA64_IMM22;
5154
5155 if (e->X_op != O_subtract)
5156 {
5157 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5158 if (e->X_op == O_pseudo_fixup)
5159 e->X_op = O_symbol;
5160 }
5161
5162 fix->opnd = idesc->operands[index];
5163 fix->expr = *e;
5164 fix->is_pcrel = 0;
5165 ++CURR_SLOT.num_fixups;
5166 return OPERAND_MATCH;
5167 }
5168 else if (e->X_op != O_constant
5169 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5170 return OPERAND_MISMATCH;
5171
5172 if (opnd == IA64_OPND_IMM8M1U4)
5173 {
5174 /* Zero is not valid for unsigned compares that take an adjusted
5175 constant immediate range. */
5176 if (e->X_add_number == 0)
5177 return OPERAND_OUT_OF_RANGE;
5178
5179 /* Sign-extend 32-bit unsigned numbers, so that the following range
5180 checks will work. */
5181 val = e->X_add_number;
5182 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5183 && ((val & ((bfd_vma) 1 << 31)) != 0))
5184 val = ((val << 32) >> 32);
5185
5186 /* Check for 0x100000000. This is valid because
5187 0x100000000-1 is the same as ((uint32_t) -1). */
5188 if (val == ((bfd_signed_vma) 1 << 32))
5189 return OPERAND_MATCH;
5190
5191 val = val - 1;
5192 }
5193 else if (opnd == IA64_OPND_IMM8M1U8)
5194 {
5195 /* Zero is not valid for unsigned compares that take an adjusted
5196 constant immediate range. */
5197 if (e->X_add_number == 0)
5198 return OPERAND_OUT_OF_RANGE;
5199
5200 /* Check for 0x10000000000000000. */
5201 if (e->X_op == O_big)
5202 {
5203 if (generic_bignum[0] == 0
5204 && generic_bignum[1] == 0
5205 && generic_bignum[2] == 0
5206 && generic_bignum[3] == 0
5207 && generic_bignum[4] == 1)
5208 return OPERAND_MATCH;
5209 else
5210 return OPERAND_OUT_OF_RANGE;
5211 }
5212 else
5213 val = e->X_add_number - 1;
5214 }
5215 else if (opnd == IA64_OPND_IMM8M1)
5216 val = e->X_add_number - 1;
5217 else if (opnd == IA64_OPND_IMM8U4)
5218 {
5219 /* Sign-extend 32-bit unsigned numbers, so that the following range
5220 checks will work. */
5221 val = e->X_add_number;
5222 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5223 && ((val & ((bfd_vma) 1 << 31)) != 0))
5224 val = ((val << 32) >> 32);
5225 }
5226 else
5227 val = e->X_add_number;
5228
5229 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5230 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5231 return OPERAND_MATCH;
5232 else
5233 return OPERAND_OUT_OF_RANGE;
5234
5235 case IA64_OPND_INC3:
5236 /* +/- 1, 4, 8, 16 */
5237 val = e->X_add_number;
5238 if (val < 0)
5239 val = -val;
5240 if (e->X_op == O_constant)
5241 {
5242 if ((val == 1 || val == 4 || val == 8 || val == 16))
5243 return OPERAND_MATCH;
5244 else
5245 return OPERAND_OUT_OF_RANGE;
5246 }
5247 break;
5248
5249 case IA64_OPND_TGT25:
5250 case IA64_OPND_TGT25b:
5251 case IA64_OPND_TGT25c:
5252 case IA64_OPND_TGT64:
5253 if (e->X_op == O_symbol)
5254 {
5255 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5256 if (opnd == IA64_OPND_TGT25)
5257 fix->code = BFD_RELOC_IA64_PCREL21F;
5258 else if (opnd == IA64_OPND_TGT25b)
5259 fix->code = BFD_RELOC_IA64_PCREL21M;
5260 else if (opnd == IA64_OPND_TGT25c)
5261 fix->code = BFD_RELOC_IA64_PCREL21B;
5262 else if (opnd == IA64_OPND_TGT64)
5263 fix->code = BFD_RELOC_IA64_PCREL60B;
5264 else
5265 abort ();
5266
5267 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5268 fix->opnd = idesc->operands[index];
5269 fix->expr = *e;
5270 fix->is_pcrel = 1;
5271 ++CURR_SLOT.num_fixups;
5272 return OPERAND_MATCH;
5273 }
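      /* Fall through for non-symbolic operands.  */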
5274 case IA64_OPND_TAG13:
5275 case IA64_OPND_TAG13b:
5276 switch (e->X_op)
5277 {
5278 case O_constant:
5279 return OPERAND_MATCH;
5280
5281 case O_symbol:
5282 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5283 /* There are no external relocs for TAG13/TAG13b fields, so we
5284 create a dummy reloc. This will not live past md_apply_fix3. */
5285 fix->code = BFD_RELOC_UNUSED;
5286 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5287 fix->opnd = idesc->operands[index];
5288 fix->expr = *e;
5289 fix->is_pcrel = 1;
5290 ++CURR_SLOT.num_fixups;
5291 return OPERAND_MATCH;
5292
5293 default:
5294 break;
5295 }
5296 break;
5297
5298 default:
5299 break;
5300 }
5301 return OPERAND_MISMATCH;
5302 }
5303
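/* Parse a single operand into E.  Return the character that terminated
   the operand; a closing '}' turns off manual bundling and is reported
   as '\0'.  */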
5304 static int
5305 parse_operand (e)
5306 expressionS *e;
5307 {
5308 int sep = '\0';
5309
5310 memset (e, 0, sizeof (*e));
5311 e->X_op = O_absent;
5312 SKIP_WHITESPACE ();
5313 if (*input_line_pointer != '}')
5314 expression (e);
5315 sep = *input_line_pointer++;
5316
5317 if (sep == '}')
5318 {
5319 if (!md.manual_bundling)
5320 as_warn ("Found '}' when manual bundling is off");
5321 else
5322 CURR_SLOT.manual_bundling_off = 1;
5323 md.manual_bundling = 0;
5324 sep = '\0';
5325 }
5326 return sep;
5327 }
5328
5329 /* Returns the next entry in the opcode table that matches the one in
5330 IDESC, and frees the entry in IDESC. If no matching entry is
5331 found, NULL is returned instead. */
5332
5333 static struct ia64_opcode *
5334 get_next_opcode (struct ia64_opcode *idesc)
5335 {
5336 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5337 ia64_free_opcode (idesc);
5338 return next;
5339 }
5340
5341 /* Parse the operands for the opcode and find the opcode variant that
5342 matches the specified operands, or NULL if no match is possible. */
5343
5344 static struct ia64_opcode *
5345 parse_operands (idesc)
5346 struct ia64_opcode *idesc;
5347 {
5348 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5349 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5350 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5351 enum operand_match_result result;
5352 char mnemonic[129];
5353 char *first_arg = 0, *end, *saved_input_pointer;
5354 unsigned int sof;
5355
5356 assert (strlen (idesc->name) <= 128);
5357
5358 strcpy (mnemonic, idesc->name);
5359 if (idesc->operands[2] == IA64_OPND_SOF)
5360 {
5361 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5362 can't parse the first operand until we have parsed the
5363 remaining operands of the "alloc" instruction. */
5364 SKIP_WHITESPACE ();
5365 first_arg = input_line_pointer;
5366 end = strchr (input_line_pointer, '=');
5367 if (!end)
5368 {
5369 as_bad ("Expected separator `='");
5370 return 0;
5371 }
5372 input_line_pointer = end + 1;
5373 ++i;
5374 ++num_outputs;
5375 }
5376
5377 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5378 {
5379 sep = parse_operand (CURR_SLOT.opnd + i);
5380 if (CURR_SLOT.opnd[i].X_op == O_absent)
5381 break;
5382
5383 ++num_operands;
5384
5385 if (sep != '=' && sep != ',')
5386 break;
5387
5388 if (sep == '=')
5389 {
5390 if (num_outputs > 0)
5391 as_bad ("Duplicate equal sign (=) in instruction");
5392 else
5393 num_outputs = i + 1;
5394 }
5395 }
5396 if (sep != '\0')
5397 {
5398 as_bad ("Illegal operand separator `%c'", sep);
5399 return 0;
5400 }
5401
5402 if (idesc->operands[2] == IA64_OPND_SOF)
5403 {
5404 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5405 know (strcmp (idesc->name, "alloc") == 0);
5406 if (num_operands == 5 /* first_arg not included in this count! */
5407 && CURR_SLOT.opnd[2].X_op == O_constant
5408 && CURR_SLOT.opnd[3].X_op == O_constant
5409 && CURR_SLOT.opnd[4].X_op == O_constant
5410 && CURR_SLOT.opnd[5].X_op == O_constant)
5411 {
5412 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5413 CURR_SLOT.opnd[3].X_add_number,
5414 CURR_SLOT.opnd[4].X_add_number,
5415 CURR_SLOT.opnd[5].X_add_number);
5416
5417 /* now we can parse the first arg: */
5418 saved_input_pointer = input_line_pointer;
5419 input_line_pointer = first_arg;
5420 sep = parse_operand (CURR_SLOT.opnd + 0);
5421 if (sep != '=')
5422 --num_outputs; /* force error */
5423 input_line_pointer = saved_input_pointer;
5424
5425 CURR_SLOT.opnd[2].X_add_number = sof;
5426 CURR_SLOT.opnd[3].X_add_number
5427 = sof - CURR_SLOT.opnd[4].X_add_number;
5428 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5429 }
5430 }
5431
5432 highest_unmatched_operand = 0;
5433 curr_out_of_range_pos = -1;
5434 error_pos = 0;
5435 expected_operand = idesc->operands[0];
5436 for (; idesc; idesc = get_next_opcode (idesc))
5437 {
5438 if (num_outputs != idesc->num_outputs)
5439 continue; /* mismatch in # of outputs */
5440
5441 CURR_SLOT.num_fixups = 0;
5442
5443 /* Try to match all operands. If we see an out-of-range operand,
5444 then continue trying to match the rest of the operands, since if
5445 the rest match, then this idesc will give the best error message. */
5446
5447 out_of_range_pos = -1;
5448 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5449 {
5450 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5451 if (result != OPERAND_MATCH)
5452 {
5453 if (result != OPERAND_OUT_OF_RANGE)
5454 break;
5455 if (out_of_range_pos < 0)
5456 /* remember position of the first out-of-range operand: */
5457 out_of_range_pos = i;
5458 }
5459 }
5460
5461 /* If we did not match all operands, or if at least one operand was
5462 out-of-range, then this idesc does not match. Keep track of which
5463 idesc matched the most operands before failing. If we have two
5464 idescs that failed at the same position, and one had an out-of-range
5465          operand, then prefer the one with the out-of-range operand. Thus if we have
5466 "add r0=0x1000000,r1" we get an error saying the constant is out
5467 of range instead of an error saying that the constant should have been
5468 a register. */
5469
5470 if (i != num_operands || out_of_range_pos >= 0)
5471 {
5472 if (i > highest_unmatched_operand
5473 || (i == highest_unmatched_operand
5474 && out_of_range_pos > curr_out_of_range_pos))
5475 {
5476 highest_unmatched_operand = i;
5477 if (out_of_range_pos >= 0)
5478 {
5479 expected_operand = idesc->operands[out_of_range_pos];
5480 error_pos = out_of_range_pos;
5481 }
5482 else
5483 {
5484 expected_operand = idesc->operands[i];
5485 error_pos = i;
5486 }
5487 curr_out_of_range_pos = out_of_range_pos;
5488 }
5489 continue;
5490 }
5491
5492 if (num_operands < NELEMS (idesc->operands)
5493 && idesc->operands[num_operands])
5494 continue; /* mismatch in number of arguments */
5495
5496 break;
5497 }
5498 if (!idesc)
5499 {
5500 if (expected_operand)
5501 as_bad ("Operand %u of `%s' should be %s",
5502 error_pos + 1, mnemonic,
5503 elf64_ia64_operands[expected_operand].desc);
5504 else
5505 as_bad ("Operand mismatch");
5506 return 0;
5507 }
5508 return idesc;
5509 }
5510
5511 /* Keep track of the state needed to determine whether a NOP is required
5512    to avoid an erratum in A- and B-step Itanium chips, and return 1 if we
5513    detect a case where additional NOPs may be necessary. */
5514 static int
5515 errata_nop_necessary_p (slot, insn_unit)
5516 struct slot *slot;
5517 enum ia64_unit insn_unit;
5518 {
5519 int i;
5520 struct group *this_group = md.last_groups + md.group_idx;
5521 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5522 struct ia64_opcode *idesc = slot->idesc;
5523
5524 /* Test whether this could be the first insn in a problematic sequence. */
5525 if (insn_unit == IA64_UNIT_F)
5526 {
5527 for (i = 0; i < idesc->num_outputs; i++)
5528 if (idesc->operands[i] == IA64_OPND_P1
5529 || idesc->operands[i] == IA64_OPND_P2)
5530 {
5531 int regno = slot->opnd[i].X_add_number - REG_P;
5532 /* Ignore invalid operands; they generate errors elsewhere. */
5533 if (regno >= 64)
5534 return 0;
5535 this_group->p_reg_set[regno] = 1;
5536 }
5537 }
5538
5539 /* Test whether this could be the second insn in a problematic sequence. */
5540 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5541 && prev_group->p_reg_set[slot->qp_regno])
5542 {
5543 for (i = 0; i < idesc->num_outputs; i++)
5544 if (idesc->operands[i] == IA64_OPND_R1
5545 || idesc->operands[i] == IA64_OPND_R2
5546 || idesc->operands[i] == IA64_OPND_R3)
5547 {
5548 int regno = slot->opnd[i].X_add_number - REG_GR;
5549 /* Ignore invalid operands; they generate errors elsewhere. */
5550 if (regno >= 128)
5551 return 0;
5552 if (strncmp (idesc->name, "add", 3) != 0
5553 && strncmp (idesc->name, "sub", 3) != 0
5554 && strncmp (idesc->name, "shladd", 6) != 0
5555 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5556 this_group->g_reg_set_conditionally[regno] = 1;
5557 }
5558 }
5559
5560 /* Test whether this could be the third insn in a problematic sequence. */
5561 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5562 {
5563       if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe. */
5564 idesc->operands[i] == IA64_OPND_R3
5565 /* For mov indirect. */
5566 || idesc->operands[i] == IA64_OPND_RR_R3
5567 || idesc->operands[i] == IA64_OPND_DBR_R3
5568 || idesc->operands[i] == IA64_OPND_IBR_R3
5569 || idesc->operands[i] == IA64_OPND_PKR_R3
5570 || idesc->operands[i] == IA64_OPND_PMC_R3
5571 || idesc->operands[i] == IA64_OPND_PMD_R3
5572 || idesc->operands[i] == IA64_OPND_MSR_R3
5573 || idesc->operands[i] == IA64_OPND_CPUID_R3
5574 /* For itr. */
5575 || idesc->operands[i] == IA64_OPND_ITR_R3
5576 || idesc->operands[i] == IA64_OPND_DTR_R3
5577 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5578 || idesc->operands[i] == IA64_OPND_MR3)
5579 {
5580 int regno = slot->opnd[i].X_add_number - REG_GR;
5581 /* Ignore invalid operands; they generate errors elsewhere. */
5582 if (regno >= 128)
5583 return 0;
5584 if (idesc->operands[i] == IA64_OPND_R3)
5585 {
5586 if (strcmp (idesc->name, "fc") != 0
5587 && strcmp (idesc->name, "tak") != 0
5588 && strcmp (idesc->name, "thash") != 0
5589 && strcmp (idesc->name, "tpa") != 0
5590 && strcmp (idesc->name, "ttag") != 0
5591 && strncmp (idesc->name, "ptr", 3) != 0
5592 && strncmp (idesc->name, "ptc", 3) != 0
5593 && strncmp (idesc->name, "probe", 5) != 0)
5594 return 0;
5595 }
5596 if (prev_group->g_reg_set_conditionally[regno])
5597 return 1;
5598 }
5599 }
5600 return 0;
5601 }
5602
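/* Encode the instruction in SLOT and store the result at INSNP.  For
   long (X-unit) forms, the extended immediate is written to *INSNP and
   the instruction word itself to the following slot.  */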
5603 static void
5604 build_insn (slot, insnp)
5605 struct slot *slot;
5606 bfd_vma *insnp;
5607 {
5608 const struct ia64_operand *odesc, *o2desc;
5609 struct ia64_opcode *idesc = slot->idesc;
5610 bfd_signed_vma insn, val;
5611 const char *err;
5612 int i;
5613
5614 insn = idesc->opcode | slot->qp_regno;
5615
5616 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5617 {
5618 if (slot->opnd[i].X_op == O_register
5619 || slot->opnd[i].X_op == O_constant
5620 || slot->opnd[i].X_op == O_index)
5621 val = slot->opnd[i].X_add_number;
5622 else if (slot->opnd[i].X_op == O_big)
5623 {
5624 /* This must be the value 0x10000000000000000. */
5625 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5626 val = 0;
5627 }
5628 else
5629 val = 0;
5630
5631 switch (idesc->operands[i])
5632 {
5633 case IA64_OPND_IMMU64:
5634 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5635 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5636 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5637 | (((val >> 63) & 0x1) << 36));
5638 continue;
5639
5640 case IA64_OPND_IMMU62:
5641 val &= 0x3fffffffffffffffULL;
5642 if (val != slot->opnd[i].X_add_number)
5643 as_warn (_("Value truncated to 62 bits"));
5644 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5645 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5646 continue;
5647
5648 case IA64_OPND_TGT64:
5649 val >>= 4;
5650 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5651 insn |= ((((val >> 59) & 0x1) << 36)
5652 | (((val >> 0) & 0xfffff) << 13));
5653 continue;
5654
5655 case IA64_OPND_AR3:
5656 val -= REG_AR;
5657 break;
5658
5659 case IA64_OPND_B1:
5660 case IA64_OPND_B2:
5661 val -= REG_BR;
5662 break;
5663
5664 case IA64_OPND_CR3:
5665 val -= REG_CR;
5666 break;
5667
5668 case IA64_OPND_F1:
5669 case IA64_OPND_F2:
5670 case IA64_OPND_F3:
5671 case IA64_OPND_F4:
5672 val -= REG_FR;
5673 break;
5674
5675 case IA64_OPND_P1:
5676 case IA64_OPND_P2:
5677 val -= REG_P;
5678 break;
5679
5680 case IA64_OPND_R1:
5681 case IA64_OPND_R2:
5682 case IA64_OPND_R3:
5683 case IA64_OPND_R3_2:
5684 case IA64_OPND_CPUID_R3:
5685 case IA64_OPND_DBR_R3:
5686 case IA64_OPND_DTR_R3:
5687 case IA64_OPND_ITR_R3:
5688 case IA64_OPND_IBR_R3:
5689 case IA64_OPND_MR3:
5690 case IA64_OPND_MSR_R3:
5691 case IA64_OPND_PKR_R3:
5692 case IA64_OPND_PMC_R3:
5693 case IA64_OPND_PMD_R3:
5694 case IA64_OPND_RR_R3:
5695 val -= REG_GR;
5696 break;
5697
5698 default:
5699 break;
5700 }
5701
5702 odesc = elf64_ia64_operands + idesc->operands[i];
5703 err = (*odesc->insert) (odesc, val, &insn);
5704 if (err)
5705 as_bad_where (slot->src_file, slot->src_line,
5706 "Bad operand value: %s", err);
5707 if (idesc->flags & IA64_OPCODE_PSEUDO)
5708 {
5709 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
5710 && odesc == elf64_ia64_operands + IA64_OPND_F3)
5711 {
5712 o2desc = elf64_ia64_operands + IA64_OPND_F2;
5713 (*o2desc->insert) (o2desc, val, &insn);
5714 }
5715 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
5716 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
5717 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
5718 {
5719 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
5720 (*o2desc->insert) (o2desc, 64 - val, &insn);
5721 }
5722 }
5723 }
5724 *insnp = insn;
5725 }
5726
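/* Emit as many queued instructions as will fit into one 16-byte bundle,
   selecting a template (the user's, if one was given) and filling any
   unused slots with NOPs.  */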
5727 static void
5728 emit_one_bundle ()
5729 {
5730 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
5731 unsigned int manual_bundling = 0;
5732 enum ia64_unit required_unit, insn_unit = 0;
5733 enum ia64_insn_type type[3], insn_type;
5734 unsigned int template, orig_template;
5735 bfd_vma insn[3] = { -1, -1, -1 };
5736 struct ia64_opcode *idesc;
5737 int end_of_insn_group = 0, user_template = -1;
5738 int n, i, j, first, curr;
5739 unw_rec_list *ptr;
5740 bfd_vma t0 = 0, t1 = 0;
5741 struct label_fix *lfix;
5742 struct insn_fix *ifix;
5743 char mnemonic[16];
5744 fixS *fix;
5745 char *f;
5746
5747 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
5748   know (first >= 0 && first < NUM_SLOTS);
5749 n = MIN (3, md.num_slots_in_use);
5750
5751   /* Determine the template: use user_template if specified, best match
5752 otherwise: */
5753
5754 if (md.slot[first].user_template >= 0)
5755 user_template = template = md.slot[first].user_template;
5756 else
5757 {
5758 /* Auto select appropriate template. */
5759 memset (type, 0, sizeof (type));
5760 curr = first;
5761 for (i = 0; i < n; ++i)
5762 {
5763 if (md.slot[curr].label_fixups && i != 0)
5764 break;
5765 type[i] = md.slot[curr].idesc->type;
5766 curr = (curr + 1) % NUM_SLOTS;
5767 }
5768 template = best_template[type[0]][type[1]][type[2]];
5769 }
5770
5771 /* initialize instructions with appropriate nops: */
5772 for (i = 0; i < 3; ++i)
5773 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
5774
5775 f = frag_more (16);
5776
5777 /* now fill in slots with as many insns as possible: */
5778 curr = first;
5779 idesc = md.slot[curr].idesc;
5780 end_of_insn_group = 0;
5781 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
5782 {
5783 /* Set the slot number for prologue/body records now as those
5784 refer to the current point, not the point after the
5785 instruction has been issued: */
5786 /* Don't try to delete prologue/body records here, as that will cause
5787 them to also be deleted from the master list of unwind records. */
5788 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5789 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
5790 || ptr->r.type == body)
5791 {
5792 ptr->slot_number = (unsigned long) f + i;
5793 ptr->slot_frag = frag_now;
5794 }
5795
5796 if (idesc->flags & IA64_OPCODE_SLOT2)
5797 {
5798 if (manual_bundling && i != 2)
5799 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5800 "`%s' must be last in bundle", idesc->name);
5801 else
5802 i = 2;
5803 }
5804 if (idesc->flags & IA64_OPCODE_LAST)
5805 {
5806 int required_slot;
5807 unsigned int required_template;
5808
5809 /* If we need a stop bit after an M slot, our only choice is
5810 template 5 (M;;MI). If we need a stop bit after a B
5811 slot, our only choice is to place it at the end of the
5812 bundle, because the only available templates are MIB,
5813 MBB, BBB, MMB, and MFB. We don't handle anything other
5814 than M and B slots because these are the only kind of
5815 instructions that can have the IA64_OPCODE_LAST bit set. */
5816 required_template = template;
5817 switch (idesc->type)
5818 {
5819 case IA64_TYPE_M:
5820 required_slot = 0;
5821 required_template = 5;
5822 break;
5823
5824 case IA64_TYPE_B:
5825 required_slot = 2;
5826 break;
5827
5828 default:
5829 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5830                          "Internal error: don't know how to force %s to end "
5831                          "of instruction group", idesc->name);
5832 required_slot = i;
5833 break;
5834 }
5835 if (manual_bundling && i != required_slot)
5836 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5837 "`%s' must be last in instruction group",
5838 idesc->name);
5839 if (required_slot < i)
5840 /* Can't fit this instruction. */
5841 break;
5842
5843 i = required_slot;
5844 if (required_template != template)
5845 {
5846 /* If we switch the template, we need to reset the NOPs
5847 after slot i. The slot-types of the instructions ahead
5848 of i never change, so we don't need to worry about
5849 changing NOPs in front of this slot. */
5850 for (j = i; j < 3; ++j)
5851 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
5852 }
5853 template = required_template;
5854 }
5855 if (curr != first && md.slot[curr].label_fixups)
5856 {
5857 if (manual_bundling_on)
5858 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5859 "Label must be first in a bundle");
5860 /* This insn must go into the first slot of a bundle. */
5861 break;
5862 }
5863
5864 manual_bundling_on = md.slot[curr].manual_bundling_on;
5865 manual_bundling_off = md.slot[curr].manual_bundling_off;
5866
5867 if (manual_bundling_on)
5868 {
5869 if (curr == first)
5870 manual_bundling = 1;
5871 else
5872 break; /* need to start a new bundle */
5873 }
5874
5875 if (end_of_insn_group && md.num_slots_in_use >= 1)
5876 {
5877 /* We need an instruction group boundary in the middle of a
5878              bundle.  See if we can switch to another template with
5879 an appropriate boundary. */
5880
5881 orig_template = template;
5882 if (i == 1 && (user_template == 4
5883 || (user_template < 0
5884 && (ia64_templ_desc[template].exec_unit[0]
5885 == IA64_UNIT_M))))
5886 {
5887 template = 5;
5888 end_of_insn_group = 0;
5889 }
5890 else if (i == 2 && (user_template == 0
5891 || (user_template < 0
5892 && (ia64_templ_desc[template].exec_unit[1]
5893 == IA64_UNIT_I)))
5894 /* This test makes sure we don't switch the template if
5895 the next instruction is one that needs to be first in
5896 an instruction group. Since all those instructions are
5897 in the M group, there is no way such an instruction can
5898 fit in this bundle even if we switch the template. The
5899 reason we have to check for this is that otherwise we
5900 may end up generating "MI;;I M.." which has the deadly
5901 effect that the second M instruction is no longer the
5902 first in the bundle! --davidm 99/12/16 */
5903 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
5904 {
5905 template = 1;
5906 end_of_insn_group = 0;
5907 }
5908 else if (curr != first)
5909 /* can't fit this insn */
5910 break;
5911
5912 if (template != orig_template)
5913 /* if we switch the template, we need to reset the NOPs
5914 after slot i. The slot-types of the instructions ahead
5915 of i never change, so we don't need to worry about
5916 changing NOPs in front of this slot. */
5917 for (j = i; j < 3; ++j)
5918 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
5919 }
5920 required_unit = ia64_templ_desc[template].exec_unit[i];
5921
5922 /* resolve dynamic opcodes such as "break" and "nop": */
5923 if (idesc->type == IA64_TYPE_DYN)
5924 {
5925 if ((strcmp (idesc->name, "nop") == 0)
5926 || (strcmp (idesc->name, "break") == 0))
5927 insn_unit = required_unit;
5928 else if (strcmp (idesc->name, "chk.s") == 0)
5929 {
5930 insn_unit = IA64_UNIT_M;
5931 if (required_unit == IA64_UNIT_I)
5932 insn_unit = IA64_UNIT_I;
5933 }
5934 else
5935 as_fatal ("emit_one_bundle: unexpected dynamic op");
5936
5937 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
5938 ia64_free_opcode (idesc);
5939 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
5940 #if 0
5941 know (!idesc->next); /* no resolved dynamic ops have collisions */
5942 #endif
5943 }
5944 else
5945 {
5946 insn_type = idesc->type;
5947 insn_unit = IA64_UNIT_NIL;
5948 switch (insn_type)
5949 {
5950 case IA64_TYPE_A:
5951 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
5952 insn_unit = required_unit;
5953 break;
5954 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
5955 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
5956 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
5957 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
5958 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
5959 default: break;
5960 }
5961 }
5962
5963 if (insn_unit != required_unit)
5964 {
5965 if (required_unit == IA64_UNIT_L
5966 && insn_unit == IA64_UNIT_I
5967 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
5968 {
5969 /* we got ourselves an MLX template but the current
5970                  instruction isn't an X-unit instruction, nor an I-unit
5971                  instruction that can go into the X slot of an MLX template. Duh. */
5972 if (md.num_slots_in_use >= NUM_SLOTS)
5973 {
5974 as_bad_where (md.slot[curr].src_file,
5975 md.slot[curr].src_line,
5976 "`%s' can't go in X slot of "
5977 "MLX template", idesc->name);
5978 /* drop this insn so we don't livelock: */
5979 --md.num_slots_in_use;
5980 }
5981 break;
5982 }
5983 continue; /* try next slot */
5984 }
5985
5986 {
5987 bfd_vma addr;
5988
5989 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
5990 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
5991 }
5992
5993 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
5994         as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
5995
5996 build_insn (md.slot + curr, insn + i);
5997
5998 /* Set slot counts for non prologue/body unwind records. */
5999 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6000 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
6001 && ptr->r.type != body)
6002 {
6003 ptr->slot_number = (unsigned long) f + i;
6004 ptr->slot_frag = frag_now;
6005 }
6006 md.slot[curr].unwind_record = NULL;
6007
6008 if (required_unit == IA64_UNIT_L)
6009 {
6010 know (i == 1);
6011 /* skip one slot for long/X-unit instructions */
6012 ++i;
6013 }
6014 --md.num_slots_in_use;
6015
6016 /* now is a good time to fix up the labels for this insn: */
6017 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6018 {
6019 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6020 symbol_set_frag (lfix->sym, frag_now);
6021 }
6022 /* and fix up the tags also. */
6023 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6024 {
6025 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6026 symbol_set_frag (lfix->sym, frag_now);
6027 }
6028
6029 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6030 {
6031 ifix = md.slot[curr].fixup + j;
6032 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6033 &ifix->expr, ifix->is_pcrel, ifix->code);
6034 fix->tc_fix_data.opnd = ifix->opnd;
6035 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6036 fix->fx_file = md.slot[curr].src_file;
6037 fix->fx_line = md.slot[curr].src_line;
6038 }
6039
6040 end_of_insn_group = md.slot[curr].end_of_insn_group;
6041
6042 if (end_of_insn_group)
6043 {
6044 md.group_idx = (md.group_idx + 1) % 3;
6045 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6046 }
6047
6048 /* clear slot: */
6049 ia64_free_opcode (md.slot[curr].idesc);
6050 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6051 md.slot[curr].user_template = -1;
6052
6053 if (manual_bundling_off)
6054 {
6055 manual_bundling = 0;
6056 break;
6057 }
6058 curr = (curr + 1) % NUM_SLOTS;
6059 idesc = md.slot[curr].idesc;
6060 }
6061 if (manual_bundling)
6062 {
6063 if (md.num_slots_in_use > 0)
6064 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6065 "`%s' does not fit into %s template",
6066 idesc->name, ia64_templ_desc[template].name);
6067 else
6068 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6069 "Missing '}' at end of file");
6070 }
6071 know (md.num_slots_in_use < NUM_SLOTS);
6072
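  /* Assemble the 128-bit bundle: bit 0 carries the trailing stop bit,
     bits 1-4 the template, and the three 41-bit instruction slots follow,
     with slot 1 straddling the two 64-bit words written below.  */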
6073 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6074 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6075
6076 number_to_chars_littleendian (f + 0, t0, 8);
6077 number_to_chars_littleendian (f + 8, t1, 8);
6078
6079 unwind.next_slot_number = (unsigned long) f + 16;
6080 unwind.next_slot_frag = frag_now;
6081 }
6082
6083 int
6084 md_parse_option (c, arg)
6085 int c;
6086 char *arg;
6087 {
6088
6089 switch (c)
6090 {
6091 /* Switches from the Intel assembler. */
6092 case 'm':
6093 if (strcmp (arg, "ilp64") == 0
6094 || strcmp (arg, "lp64") == 0
6095 || strcmp (arg, "p64") == 0)
6096 {
6097 md.flags |= EF_IA_64_ABI64;
6098 }
6099 else if (strcmp (arg, "ilp32") == 0)
6100 {
6101 md.flags &= ~EF_IA_64_ABI64;
6102 }
6103 else if (strcmp (arg, "le") == 0)
6104 {
6105 md.flags &= ~EF_IA_64_BE;
6106 }
6107 else if (strcmp (arg, "be") == 0)
6108 {
6109 md.flags |= EF_IA_64_BE;
6110 }
6111 else
6112 return 0;
6113 break;
6114
6115 case 'N':
6116 if (strcmp (arg, "so") == 0)
6117 {
6118 /* Suppress signon message. */
6119 }
6120 else if (strcmp (arg, "pi") == 0)
6121 {
6122 /* Reject privileged instructions. FIXME */
6123 }
6124 else if (strcmp (arg, "us") == 0)
6125 {
6126 /* Allow union of signed and unsigned range. FIXME */
6127 }
6128 else if (strcmp (arg, "close_fcalls") == 0)
6129 {
6130 /* Do not resolve global function calls. */
6131 }
6132 else
6133 return 0;
6134 break;
6135
6136 case 'C':
6137 /* temp[="prefix"] Insert temporary labels into the object file
6138 symbol table prefixed by "prefix".
6139 Default prefix is ":temp:".
6140 */
6141 break;
6142
6143 case 'a':
6144 /* indirect=<tgt> Assume unannotated indirect branches behavior
6145 according to <tgt> --
6146 exit: branch out from the current context (default)
6147 labels: all labels in context may be branch targets
6148 */
6149 if (strncmp (arg, "indirect=", 9) != 0)
6150 return 0;
6151 break;
6152
6153 case 'x':
6154 /* -X conflicts with an ignored option, use -x instead */
6155 md.detect_dv = 1;
6156 if (!arg || strcmp (arg, "explicit") == 0)
6157 {
6158 /* set default mode to explicit */
6159 md.default_explicit_mode = 1;
6160 break;
6161 }
6162 else if (strcmp (arg, "auto") == 0)
6163 {
6164 md.default_explicit_mode = 0;
6165 }
6166 else if (strcmp (arg, "debug") == 0)
6167 {
6168 md.debug_dv = 1;
6169 }
6170 else if (strcmp (arg, "debugx") == 0)
6171 {
6172 md.default_explicit_mode = 1;
6173 md.debug_dv = 1;
6174 }
6175 else
6176 {
6177 as_bad (_("Unrecognized option '-x%s'"), arg);
6178 }
6179 break;
6180
6181 case 'S':
6182 /* nops Print nops statistics. */
6183 break;
6184
6185 /* GNU specific switches for gcc. */
6186 case OPTION_MCONSTANT_GP:
6187 md.flags |= EF_IA_64_CONS_GP;
6188 break;
6189
6190 case OPTION_MAUTO_PIC:
6191 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6192 break;
6193
6194 default:
6195 return 0;
6196 }
6197
6198 return 1;
6199 }
6200
6201 void
6202 md_show_usage (stream)
6203 FILE *stream;
6204 {
6205 fputs (_("\
6206 IA-64 options:\n\
6207 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6208 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6209 -x | -xexplicit turn on dependency violation checking (default)\n\
6210 -xauto automagically remove dependency violations\n\
6211 -xdebug debug dependency violation checker\n"),
6212 stream);
6213 }
6214
6215 /* Return true if TYPE fits in TEMPL at SLOT. */
6216
6217 static int
6218 match (int templ, int type, int slot)
6219 {
6220 enum ia64_unit unit;
6221 int result;
6222
6223 unit = ia64_templ_desc[templ].exec_unit[slot];
6224 switch (type)
6225 {
6226 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6227 case IA64_TYPE_A:
6228 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6229 break;
6230 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6231 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6232 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6233 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6234 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6235 default: result = 0; break;
6236 }
6237 return result;
6238 }
6239
6240 /* Add a bit of extra goodness if a nop of type F or B would fit
6241 in TEMPL at SLOT. */
6242
6243 static inline int
6244 extra_goodness (int templ, int slot)
6245 {
6246 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6247 return 2;
6248 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6249 return 1;
6250 return 0;
6251 }
6252
6253 /* This function is called once, at assembler startup time. It sets
6254 up all the tables, etc. that the MD part of the assembler will need
6255 that can be determined before arguments are parsed. */
6256 void
6257 md_begin ()
6258 {
6259 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6260 const char *err;
6261 char name[8];
6262
6263 md.auto_align = 1;
6264 md.explicit_mode = md.default_explicit_mode;
6265
6266 bfd_set_section_alignment (stdoutput, text_section, 4);
6267
6268 target_big_endian = TARGET_BYTES_BIG_ENDIAN;
6269 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6270 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6271 &zero_address_frag);
6272
6273 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6274 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6275 &zero_address_frag);
6276
6277 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6278 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6279 &zero_address_frag);
6280
6281 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6282 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6283 &zero_address_frag);
6284
6285 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6286 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6287 &zero_address_frag);
6288
6289 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6290 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6291 &zero_address_frag);
6292
6293 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6294 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6295 &zero_address_frag);
6296
6297 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6298 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6299 &zero_address_frag);
6300
6301 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6302 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6303 &zero_address_frag);
6304
6305 /* Compute the table of best templates. We compute goodness as a
6306 base 4 value, in which each match counts for 3, each F counts
6307 for 2, each B counts for 1. This should maximize the number of
6308 F and B nops in the chosen bundles, which is good because these
6309 pipelines are least likely to be overcommitted. */
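  /* For example (illustrative): a template that matches the insn types
     in slots 0 and 1 and whose remaining slot could still take a B-unit
     nop scores 3 + 3 + 1 = 7, beating a template whose remaining slot
     could take neither an F- nor a B-unit nop, which scores only
     3 + 3 + 0 = 6.  */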
6310 for (i = 0; i < IA64_NUM_TYPES; ++i)
6311 for (j = 0; j < IA64_NUM_TYPES; ++j)
6312 for (k = 0; k < IA64_NUM_TYPES; ++k)
6313 {
6314 best = 0;
6315 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6316 {
6317 goodness = 0;
6318 if (match (t, i, 0))
6319 {
6320 if (match (t, j, 1))
6321 {
6322 if (match (t, k, 2))
6323 goodness = 3 + 3 + 3;
6324 else
6325 goodness = 3 + 3 + extra_goodness (t, 2);
6326 }
6327 else if (match (t, j, 2))
6328 goodness = 3 + 3 + extra_goodness (t, 1);
6329 else
6330 {
6331 goodness = 3;
6332 goodness += extra_goodness (t, 1);
6333 goodness += extra_goodness (t, 2);
6334 }
6335 }
6336 else if (match (t, i, 1))
6337 {
6338 if (match (t, j, 2))
6339 goodness = 3 + 3;
6340 else
6341 goodness = 3 + extra_goodness (t, 2);
6342 }
6343 else if (match (t, i, 2))
6344 goodness = 3 + extra_goodness (t, 1);
6345
6346 if (goodness > best)
6347 {
6348 best = goodness;
6349 best_template[i][j][k] = t;
6350 }
6351 }
6352 }
6353
6354 for (i = 0; i < NUM_SLOTS; ++i)
6355 md.slot[i].user_template = -1;
6356
6357 md.pseudo_hash = hash_new ();
6358 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6359 {
6360 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6361 (void *) (pseudo_opcode + i));
6362 if (err)
6363 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6364 pseudo_opcode[i].name, err);
6365 }
6366
6367 md.reg_hash = hash_new ();
6368 md.dynreg_hash = hash_new ();
6369 md.const_hash = hash_new ();
6370 md.entry_hash = hash_new ();
6371
6372 /* general registers: */
6373
6374 total = 128;
6375 for (i = 0; i < total; ++i)
6376 {
6377 sprintf (name, "r%d", i - REG_GR);
6378 md.regsym[i] = declare_register (name, i);
6379 }
6380
6381 /* floating point registers: */
6382 total += 128;
6383 for (; i < total; ++i)
6384 {
6385 sprintf (name, "f%d", i - REG_FR);
6386 md.regsym[i] = declare_register (name, i);
6387 }
6388
6389 /* application registers: */
6390 total += 128;
6391 ar_base = i;
6392 for (; i < total; ++i)
6393 {
6394 sprintf (name, "ar%d", i - REG_AR);
6395 md.regsym[i] = declare_register (name, i);
6396 }
6397
6398 /* control registers: */
6399 total += 128;
6400 cr_base = i;
6401 for (; i < total; ++i)
6402 {
6403 sprintf (name, "cr%d", i - REG_CR);
6404 md.regsym[i] = declare_register (name, i);
6405 }
6406
6407 /* predicate registers: */
6408 total += 64;
6409 for (; i < total; ++i)
6410 {
6411 sprintf (name, "p%d", i - REG_P);
6412 md.regsym[i] = declare_register (name, i);
6413 }
6414
6415 /* branch registers: */
6416 total += 8;
6417 for (; i < total; ++i)
6418 {
6419 sprintf (name, "b%d", i - REG_BR);
6420 md.regsym[i] = declare_register (name, i);
6421 }
6422
6423 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6424 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6425 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6426 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6427 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6428 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6429 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6430
6431 for (i = 0; i < NELEMS (indirect_reg); ++i)
6432 {
6433 regnum = indirect_reg[i].regnum;
6434 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6435 }
6436
6437 /* define synonyms for application registers: */
6438 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6439 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6440 REG_AR + ar[i - REG_AR].regnum);
6441
6442 /* define synonyms for control registers: */
6443 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6444 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6445 REG_CR + cr[i - REG_CR].regnum);
6446
6447 declare_register ("gp", REG_GR + 1);
6448 declare_register ("sp", REG_GR + 12);
6449 declare_register ("rp", REG_BR + 0);
6450
6451 /* pseudo-registers used to specify unwind info: */
6452 declare_register ("psp", REG_PSP);
6453
6454 declare_register_set ("ret", 4, REG_GR + 8);
6455 declare_register_set ("farg", 8, REG_FR + 8);
6456 declare_register_set ("fret", 8, REG_FR + 8);
6457
6458 for (i = 0; i < NELEMS (const_bits); ++i)
6459 {
6460 err = hash_insert (md.const_hash, const_bits[i].name,
6461 (PTR) (const_bits + i));
6462 if (err)
6463 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6464 		  const_bits[i].name, err);
6465 }
6466
6467 /* Set the architecture and machine depending on defaults and command line
6468 options. */
6469 if (md.flags & EF_IA_64_ABI64)
6470 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6471 else
6472 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6473
6474 if (! ok)
6475 as_warn (_("Could not set architecture and machine"));
6476
6477 md.mem_offset.hint = 0;
6478 md.path = 0;
6479 md.maxpaths = 0;
6480 md.entry_labels = NULL;
6481 }
6482
6483 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6484 because that is called after md_parse_option which is where we do the
6485 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6486 default endianness. */
6487
6488 void
6489 ia64_init (argc, argv)
6490 int argc ATTRIBUTE_UNUSED;
6491 char **argv ATTRIBUTE_UNUSED;
6492 {
6493 md.flags = EF_IA_64_ABI64;
6494 if (TARGET_BYTES_BIG_ENDIAN)
6495 md.flags |= EF_IA_64_BE;
6496 }
6497
6498 /* Return a string for the target object file format. */
6499
6500 const char *
6501 ia64_target_format ()
6502 {
6503 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6504 {
6505 if (md.flags & EF_IA_64_BE)
6506 {
6507 if (md.flags & EF_IA_64_ABI64)
6508 #ifdef TE_AIX50
6509 return "elf64-ia64-aix-big";
6510 #else
6511 return "elf64-ia64-big";
6512 #endif
6513 else
6514 #ifdef TE_AIX50
6515 return "elf32-ia64-aix-big";
6516 #else
6517 return "elf32-ia64-big";
6518 #endif
6519 }
6520 else
6521 {
6522 if (md.flags & EF_IA_64_ABI64)
6523 #ifdef TE_AIX50
6524 return "elf64-ia64-aix-little";
6525 #else
6526 return "elf64-ia64-little";
6527 #endif
6528 else
6529 #ifdef TE_AIX50
6530 return "elf32-ia64-aix-little";
6531 #else
6532 return "elf32-ia64-little";
6533 #endif
6534 }
6535 }
6536 else
6537 return "unknown-format";
6538 }
6539
6540 void
6541 ia64_end_of_source ()
6542 {
6543 /* terminate insn group upon reaching end of file: */
6544 insn_group_break (1, 0, 0);
6545
6546 /* emits slots we haven't written yet: */
6547 ia64_flush_insns ();
6548
6549 bfd_set_private_flags (stdoutput, md.flags);
6550
6551 md.mem_offset.hint = 0;
6552 }
6553
6554 void
6555 ia64_start_line ()
6556 {
6557 if (md.qp.X_op == O_register)
6558 as_bad ("qualifying predicate not followed by instruction");
6559 md.qp.X_op = O_absent;
6560
6561 if (ignore_input ())
6562 return;
6563
6564 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6565 {
6566 if (md.detect_dv && !md.explicit_mode)
6567 as_warn (_("Explicit stops are ignored in auto mode"));
6568 else
6569 insn_group_break (1, 0, 0);
6570 }
6571 }
6572
6573 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
6574 labels. */
6575 static int defining_tag = 0;
6576
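/* Handle ia64-specific syntax that the generic reader does not know
   about: "(qp)" qualifying predicates, "{" and "}" manual bundling
   braces, and "[tag:]" tag definitions.  Illustrative examples of the
   accepted forms: "(p7) add r1 = r2, r3" and "[mytag:]".  Returns
   nonzero if CH introduced a construct that was handled here.  */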
6577 int
6578 ia64_unrecognized_line (ch)
6579 int ch;
6580 {
6581 switch (ch)
6582 {
6583 case '(':
6584 expression (&md.qp);
6585 if (*input_line_pointer++ != ')')
6586 {
6587 as_bad ("Expected ')'");
6588 return 0;
6589 }
6590 if (md.qp.X_op != O_register)
6591 {
6592 as_bad ("Qualifying predicate expected");
6593 return 0;
6594 }
6595 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6596 {
6597 as_bad ("Predicate register expected");
6598 return 0;
6599 }
6600 return 1;
6601
6602 case '{':
6603 if (md.manual_bundling)
6604 as_warn ("Found '{' when manual bundling is already turned on");
6605 else
6606 CURR_SLOT.manual_bundling_on = 1;
6607 md.manual_bundling = 1;
6608
6609 /* Bundling is only acceptable in explicit mode
6610 or when in default automatic mode. */
6611 if (md.detect_dv && !md.explicit_mode)
6612 {
6613 if (!md.mode_explicitly_set
6614 && !md.default_explicit_mode)
6615 dot_dv_mode ('E');
6616 else
6617 as_warn (_("Found '{' after explicit switch to automatic mode"));
6618 }
6619 return 1;
6620
6621 case '}':
6622 if (!md.manual_bundling)
6623 as_warn ("Found '}' when manual bundling is off");
6624 else
6625 PREV_SLOT.manual_bundling_off = 1;
6626 md.manual_bundling = 0;
6627
6628 /* switch back to automatic mode, if applicable */
6629 if (md.detect_dv
6630 && md.explicit_mode
6631 && !md.mode_explicitly_set
6632 && !md.default_explicit_mode)
6633 dot_dv_mode ('A');
6634
6635 /* Allow '{' to follow on the same line. We also allow ";;", but that
6636 happens automatically because ';' is an end of line marker. */
6637 SKIP_WHITESPACE ();
6638 if (input_line_pointer[0] == '{')
6639 {
6640 input_line_pointer++;
6641 return ia64_unrecognized_line ('{');
6642 }
6643
6644 demand_empty_rest_of_line ();
6645 return 1;
6646
6647 case '[':
6648 {
6649 char *s;
6650 char c;
6651 symbolS *tag;
6652 int temp;
6653
6654 if (md.qp.X_op == O_register)
6655 {
6656 as_bad ("Tag must come before qualifying predicate.");
6657 return 0;
6658 }
6659
6660 /* This implements just enough of read_a_source_file in read.c to
6661 recognize labels. */
6662 if (is_name_beginner (*input_line_pointer))
6663 {
6664 s = input_line_pointer;
6665 c = get_symbol_end ();
6666 }
6667 else if (LOCAL_LABELS_FB
6668 && isdigit ((unsigned char) *input_line_pointer))
6669 {
6670 temp = 0;
6671 while (isdigit ((unsigned char) *input_line_pointer))
6672 temp = (temp * 10) + *input_line_pointer++ - '0';
6673 fb_label_instance_inc (temp);
6674 s = fb_label_name (temp, 0);
6675 c = *input_line_pointer;
6676 }
6677 else
6678 {
6679 s = NULL;
6680 c = '\0';
6681 }
6682 if (c != ':')
6683 {
6684 /* Put ':' back for error messages' sake. */
6685 *input_line_pointer++ = ':';
6686 as_bad ("Expected ':'");
6687 return 0;
6688 }
6689
6690 defining_tag = 1;
6691 tag = colon (s);
6692 defining_tag = 0;
6693 /* Put ':' back for error messages' sake. */
6694 *input_line_pointer++ = ':';
6695 if (*input_line_pointer++ != ']')
6696 {
6697 as_bad ("Expected ']'");
6698 return 0;
6699 }
6700 if (! tag)
6701 {
6702 as_bad ("Tag name expected");
6703 return 0;
6704 }
6705 return 1;
6706 }
6707
6708 default:
6709 break;
6710 }
6711
6712 /* Not a valid line. */
6713 return 0;
6714 }
6715
6716 void
6717 ia64_frob_label (sym)
6718 struct symbol *sym;
6719 {
6720 struct label_fix *fix;
6721
6722 /* Tags need special handling since they are not bundle breaks like
6723 labels. */
6724 if (defining_tag)
6725 {
6726 fix = obstack_alloc (&notes, sizeof (*fix));
6727 fix->sym = sym;
6728 fix->next = CURR_SLOT.tag_fixups;
6729 CURR_SLOT.tag_fixups = fix;
6730
6731 return;
6732 }
6733
6734 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6735 {
6736 md.last_text_seg = now_seg;
6737 fix = obstack_alloc (&notes, sizeof (*fix));
6738 fix->sym = sym;
6739 fix->next = CURR_SLOT.label_fixups;
6740 CURR_SLOT.label_fixups = fix;
6741
6742 /* Keep track of how many code entry points we've seen. */
6743 if (md.path == md.maxpaths)
6744 {
6745 md.maxpaths += 20;
6746 md.entry_labels = (const char **)
6747 xrealloc ((void *) md.entry_labels,
6748 md.maxpaths * sizeof (char *));
6749 }
6750 md.entry_labels[md.path++] = S_GET_NAME (sym);
6751 }
6752 }
6753
6754 void
6755 ia64_flush_pending_output ()
6756 {
6757 if (!md.keep_pending_output
6758 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6759 {
6760 /* ??? This causes many unnecessary stop bits to be emitted.
6761 Unfortunately, it isn't clear if it is safe to remove this. */
6762 insn_group_break (1, 0, 0);
6763 ia64_flush_insns ();
6764 }
6765 }
6766
6767 /* Do ia64-specific expression optimization. All that's done here is
6768 to transform index expressions that are either due to the indexing
6769 of rotating registers or due to the indexing of indirect register
6770 sets. */
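/* For example (illustrative): if "foo" names a rotating register set,
   "foo[2]" (a register indexed by a constant) folds down to the single
   register foo+2, while "dbr[r3]" (a register indexed by a register)
   becomes an O_index expression whose X_op_symbol identifies the dbr
   register file and whose X_add_number is the index register.  */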
6771 int
6772 ia64_optimize_expr (l, op, r)
6773 expressionS *l;
6774 operatorT op;
6775 expressionS *r;
6776 {
6777 unsigned num_regs;
6778
6779 if (op == O_index)
6780 {
6781 if (l->X_op == O_register && r->X_op == O_constant)
6782 {
6783 num_regs = (l->X_add_number >> 16);
6784 if ((unsigned) r->X_add_number >= num_regs)
6785 {
6786 if (!num_regs)
6787 as_bad ("No current frame");
6788 else
6789 as_bad ("Index out of range 0..%u", num_regs - 1);
6790 r->X_add_number = 0;
6791 }
6792 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
6793 return 1;
6794 }
6795 else if (l->X_op == O_register && r->X_op == O_register)
6796 {
6797 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
6798 || l->X_add_number == IND_MEM)
6799 {
6800 as_bad ("Indirect register set name expected");
6801 l->X_add_number = IND_CPUID;
6802 }
6803 l->X_op = O_index;
6804 l->X_op_symbol = md.regsym[l->X_add_number];
6805 l->X_add_number = r->X_add_number;
6806 return 1;
6807 }
6808 }
6809 return 0;
6810 }
6811
6812 int
6813 ia64_parse_name (name, e)
6814 char *name;
6815 expressionS *e;
6816 {
6817 struct const_desc *cdesc;
6818 struct dynreg *dr = 0;
6819 unsigned int regnum;
6820 struct symbol *sym;
6821 char *end;
6822
6823 /* first see if NAME is a known register name: */
6824 sym = hash_find (md.reg_hash, name);
6825 if (sym)
6826 {
6827 e->X_op = O_register;
6828 e->X_add_number = S_GET_VALUE (sym);
6829 return 1;
6830 }
6831
6832 cdesc = hash_find (md.const_hash, name);
6833 if (cdesc)
6834 {
6835 e->X_op = O_constant;
6836 e->X_add_number = cdesc->value;
6837 return 1;
6838 }
6839
6840 /* check for inN, locN, or outN: */
6841 switch (name[0])
6842 {
6843 case 'i':
6844 if (name[1] == 'n' && isdigit (name[2]))
6845 {
6846 dr = &md.in;
6847 name += 2;
6848 }
6849 break;
6850
6851 case 'l':
6852 if (name[1] == 'o' && name[2] == 'c' && isdigit (name[3]))
6853 {
6854 dr = &md.loc;
6855 name += 3;
6856 }
6857 break;
6858
6859 case 'o':
6860 if (name[1] == 'u' && name[2] == 't' && isdigit (name[3]))
6861 {
6862 dr = &md.out;
6863 name += 3;
6864 }
6865 break;
6866
6867 default:
6868 break;
6869 }
6870
6871 if (dr)
6872 {
6873 /* The name is inN, locN, or outN; parse the register number. */
6874 regnum = strtoul (name, &end, 10);
6875 if (end > name && *end == '\0')
6876 {
6877 if ((unsigned) regnum >= dr->num_regs)
6878 {
6879 if (!dr->num_regs)
6880 as_bad ("No current frame");
6881 else
6882 as_bad ("Register number out of range 0..%u",
6883 dr->num_regs - 1);
6884 regnum = 0;
6885 }
6886 e->X_op = O_register;
6887 e->X_add_number = dr->base + regnum;
6888 return 1;
6889 }
6890 }
6891
6892 if ((dr = hash_find (md.dynreg_hash, name)))
6893 {
6894 /* We've got ourselves the name of a rotating register set.
6895 Store the base register number in the low 16 bits of
6896 X_add_number and the size of the register set in the top 16
6897 bits. */
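      /* For example (illustrative): a rotating set of 8 registers is
	 encoded as (8 << 16) | base, and ia64_optimize_expr later folds
	 an index such as name[3] back down to base + 3.  */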
6898 e->X_op = O_register;
6899 e->X_add_number = dr->base | (dr->num_regs << 16);
6900 return 1;
6901 }
6902 return 0;
6903 }
6904
6905 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
6906
6907 char *
6908 ia64_canonicalize_symbol_name (name)
6909 char *name;
6910 {
6911 size_t len = strlen (name);
6912 if (len > 1 && name[len - 1] == '#')
6913 name[len - 1] = '\0';
6914 return name;
6915 }
6916
6917 /* Return true if idesc is a conditional branch instruction. This excludes
6918 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
6919 because they always read/write resources regardless of the value of the
6920 qualifying predicate. br.ia must always use p0, and hence is always
6921 taken. Thus this function returns true for branches which can fall
6922 through, and which use no resources if they do fall through. */
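/* For example (illustrative): "br", "br.cond", "br.call", and
   "brl.cond" are treated as conditional here, while "br.ia",
   "br.cloop", and "br.wtop" are not.  */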
6923
6924 static int
6925 is_conditional_branch (idesc)
6926 struct ia64_opcode *idesc;
6927 {
6928 /* br is a conditional branch. Everything that starts with br. except
6929 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
6930 Everything that starts with brl is a conditional branch. */
6931 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
6932 && (idesc->name[2] == '\0'
6933 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
6934 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
6935 || idesc->name[2] == 'l'
6936 /* br.cond, br.call, br.clr */
6937 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
6938 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
6939 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
6940 }
6941
6942 /* Return whether the given opcode is a taken branch. If there's any doubt,
6943 returns zero. */
6944
6945 static int
6946 is_taken_branch (idesc)
6947 struct ia64_opcode *idesc;
6948 {
6949 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
6950 || strncmp (idesc->name, "br.ia", 5) == 0);
6951 }
6952
6953 /* Return whether the given opcode is an interruption or rfi. If there's any
6954 doubt, returns zero. */
6955
6956 static int
6957 is_interruption_or_rfi (idesc)
6958 struct ia64_opcode *idesc;
6959 {
6960 if (strcmp (idesc->name, "rfi") == 0)
6961 return 1;
6962 return 0;
6963 }
6964
6965 /* Returns the index of the given dependency in the opcode's list of chks, or
6966 -1 if there is no dependency. */
6967
6968 static int
6969 depends_on (depind, idesc)
6970 int depind;
6971 struct ia64_opcode *idesc;
6972 {
6973 int i;
6974 const struct ia64_opcode_dependency *dep = idesc->dependencies;
6975 for (i = 0; i < dep->nchks; i++)
6976 {
6977 if (depind == DEP (dep->chks[i]))
6978 return i;
6979 }
6980 return -1;
6981 }
6982
6983 /* Determine a set of specific resources used for a particular resource
6984    class.  Returns the number of specific resources identified.  For those
6985 cases which are not determinable statically, the resource returned is
6986 marked nonspecific.
6987
6988   Meanings of the value in 'NOTE':
6989 1) only read/write when the register number is explicitly encoded in the
6990 insn.
6991 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
6992 accesses CFM when qualifying predicate is in the rotating region.
6993 3) general register value is used to specify an indirect register; not
6994 determinable statically.
6995 4) only read the given resource when bits 7:0 of the indirect index
6996      register value do not match the register number of the resource; not
6997 determinable statically.
6998 5) all rules are implementation specific.
6999 6) only when both the index specified by the reader and the index specified
7000 by the writer have the same value in bits 63:61; not determinable
7001 statically.
7002 7) only access the specified resource when the corresponding mask bit is
7003      set.
7004 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7005      only read when these insns reference FR2-31.
7006 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7007      written when these insns write FR32-127.
7008 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7009      instruction.
7010 11) The target predicates are written independently of PR[qp], but source
7011 registers are only read if PR[qp] is true. Since the state of PR[qp]
7012 cannot statically be determined, all source registers are marked used.
7013 12) This insn only reads the specified predicate register when that
7014 register is the PR[qp].
7015   13) This reference to ld-c only applies to the GR whose value is loaded
7016 with data returned from memory, not the post-incremented address register.
7017 14) The RSE resource includes the implementation-specific RSE internal
7018 state resources. At least one (and possibly more) of these resources are
7019 read by each instruction listed in IC:rse-readers. At least one (and
7020 possibly more) of these resources are written by each insn listed in
7021 IC:rse-writers.
7022 15+16) Represents reserved instructions, which the assembler does not
7023 generate.
7024
7025 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7026 this code; there are no dependency violations based on memory access.
7027 */
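/* For example (illustrative): a write such as "mov ar.k3 = r2" falls
   under the AR[K] class with note 1 and yields one specific resource
   with index 3, while an access whose register number cannot be
   determined statically yields an entry with its "specific" flag
   cleared, which conflicts with every index in the class.  */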
7028
7029 #define MAX_SPECS 256
7030 #define DV_CHK 1
7031 #define DV_REG 0
7032
7033 static int
7034 specify_resource (dep, idesc, type, specs, note, path)
7035 const struct ia64_dependency *dep;
7036 struct ia64_opcode *idesc;
7037 int type; /* is this a DV chk or a DV reg? */
7038 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7039 int note; /* resource note for this insn's usage */
7040 int path; /* which execution path to examine */
7041 {
7042 int count = 0;
7043 int i;
7044 int rsrc_write = 0;
7045 struct rsrc tmpl;
7046
7047 if (dep->mode == IA64_DV_WAW
7048 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7049 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7050 rsrc_write = 1;
7051
7052 /* template for any resources we identify */
7053 tmpl.dependency = dep;
7054 tmpl.note = note;
7055 tmpl.insn_srlz = tmpl.data_srlz = 0;
7056 tmpl.qp_regno = CURR_SLOT.qp_regno;
7057 tmpl.link_to_qp_branch = 1;
7058 tmpl.mem_offset.hint = 0;
7059 tmpl.specific = 1;
7060 tmpl.index = 0;
7061 tmpl.cmp_type = CMP_NONE;
7062
7063 #define UNHANDLED \
7064 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7065 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7066 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7067
7068 /* we don't need to track these */
7069 if (dep->semantics == IA64_DVS_NONE)
7070 return 0;
7071
7072 switch (dep->specifier)
7073 {
7074 case IA64_RS_AR_K:
7075 if (note == 1)
7076 {
7077 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7078 {
7079 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7080 if (regno >= 0 && regno <= 7)
7081 {
7082 specs[count] = tmpl;
7083 specs[count++].index = regno;
7084 }
7085 }
7086 }
7087 else if (note == 0)
7088 {
7089 for (i = 0; i < 8; i++)
7090 {
7091 specs[count] = tmpl;
7092 specs[count++].index = i;
7093 }
7094 }
7095 else
7096 {
7097 UNHANDLED;
7098 }
7099 break;
7100
7101 case IA64_RS_AR_UNAT:
7102 /* This is a mov =AR or mov AR= instruction. */
7103 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7104 {
7105 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7106 if (regno == AR_UNAT)
7107 {
7108 specs[count++] = tmpl;
7109 }
7110 }
7111 else
7112 {
7113 /* This is a spill/fill, or other instruction that modifies the
7114 unat register. */
7115
7116 /* Unless we can determine the specific bits used, mark the whole
7117 thing; bits 8:3 of the memory address indicate the bit used in
7118 UNAT. The .mem.offset hint may be used to eliminate a small
7119 subset of conflicts. */
7120 specs[count] = tmpl;
7121 if (md.mem_offset.hint)
7122 {
7123 if (md.debug_dv)
7124 fprintf (stderr, " Using hint for spill/fill\n");
7125 	      /* The index isn't actually used; just set it to something
7126 approximating the bit index. */
7127 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7128 specs[count].mem_offset.hint = 1;
7129 specs[count].mem_offset.offset = md.mem_offset.offset;
7130 specs[count++].mem_offset.base = md.mem_offset.base;
7131 }
7132 else
7133 {
7134 specs[count++].specific = 0;
7135 }
7136 }
7137 break;
7138
7139 case IA64_RS_AR:
7140 if (note == 1)
7141 {
7142 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7143 {
7144 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7145 if ((regno >= 8 && regno <= 15)
7146 || (regno >= 20 && regno <= 23)
7147 || (regno >= 31 && regno <= 39)
7148 || (regno >= 41 && regno <= 47)
7149 || (regno >= 67 && regno <= 111))
7150 {
7151 specs[count] = tmpl;
7152 specs[count++].index = regno;
7153 }
7154 }
7155 }
7156 else
7157 {
7158 UNHANDLED;
7159 }
7160 break;
7161
7162 case IA64_RS_ARb:
7163 if (note == 1)
7164 {
7165 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7166 {
7167 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7168 if ((regno >= 48 && regno <= 63)
7169 || (regno >= 112 && regno <= 127))
7170 {
7171 specs[count] = tmpl;
7172 specs[count++].index = regno;
7173 }
7174 }
7175 }
7176 else if (note == 0)
7177 {
7178 for (i = 48; i < 64; i++)
7179 {
7180 specs[count] = tmpl;
7181 specs[count++].index = i;
7182 }
7183 for (i = 112; i < 128; i++)
7184 {
7185 specs[count] = tmpl;
7186 specs[count++].index = i;
7187 }
7188 }
7189 else
7190 {
7191 UNHANDLED;
7192 }
7193 break;
7194
7195 case IA64_RS_BR:
7196 if (note != 1)
7197 {
7198 UNHANDLED;
7199 }
7200 else
7201 {
7202 if (rsrc_write)
7203 {
7204 for (i = 0; i < idesc->num_outputs; i++)
7205 if (idesc->operands[i] == IA64_OPND_B1
7206 || idesc->operands[i] == IA64_OPND_B2)
7207 {
7208 specs[count] = tmpl;
7209 specs[count++].index =
7210 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7211 }
7212 }
7213 else
7214 {
7215 	      for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7216 if (idesc->operands[i] == IA64_OPND_B1
7217 || idesc->operands[i] == IA64_OPND_B2)
7218 {
7219 specs[count] = tmpl;
7220 specs[count++].index =
7221 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7222 }
7223 }
7224 }
7225 break;
7226
7227 case IA64_RS_CPUID: /* four or more registers */
7228 if (note == 3)
7229 {
7230 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7231 {
7232 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7233 if (regno >= 0 && regno < NELEMS (gr_values)
7234 && KNOWN (regno))
7235 {
7236 specs[count] = tmpl;
7237 specs[count++].index = gr_values[regno].value & 0xFF;
7238 }
7239 else
7240 {
7241 specs[count] = tmpl;
7242 specs[count++].specific = 0;
7243 }
7244 }
7245 }
7246 else
7247 {
7248 UNHANDLED;
7249 }
7250 break;
7251
7252 case IA64_RS_DBR: /* four or more registers */
7253 if (note == 3)
7254 {
7255 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7256 {
7257 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7258 if (regno >= 0 && regno < NELEMS (gr_values)
7259 && KNOWN (regno))
7260 {
7261 specs[count] = tmpl;
7262 specs[count++].index = gr_values[regno].value & 0xFF;
7263 }
7264 else
7265 {
7266 specs[count] = tmpl;
7267 specs[count++].specific = 0;
7268 }
7269 }
7270 }
7271 else if (note == 0 && !rsrc_write)
7272 {
7273 specs[count] = tmpl;
7274 specs[count++].specific = 0;
7275 }
7276 else
7277 {
7278 UNHANDLED;
7279 }
7280 break;
7281
7282 case IA64_RS_IBR: /* four or more registers */
7283 if (note == 3)
7284 {
7285 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7286 {
7287 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7288 if (regno >= 0 && regno < NELEMS (gr_values)
7289 && KNOWN (regno))
7290 {
7291 specs[count] = tmpl;
7292 specs[count++].index = gr_values[regno].value & 0xFF;
7293 }
7294 else
7295 {
7296 specs[count] = tmpl;
7297 specs[count++].specific = 0;
7298 }
7299 }
7300 }
7301 else
7302 {
7303 UNHANDLED;
7304 }
7305 break;
7306
7307 case IA64_RS_MSR:
7308 if (note == 5)
7309 {
7310 /* These are implementation specific. Force all references to
7311 conflict with all other references. */
7312 specs[count] = tmpl;
7313 specs[count++].specific = 0;
7314 }
7315 else
7316 {
7317 UNHANDLED;
7318 }
7319 break;
7320
7321 case IA64_RS_PKR: /* 16 or more registers */
7322 if (note == 3 || note == 4)
7323 {
7324 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7325 {
7326 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7327 if (regno >= 0 && regno < NELEMS (gr_values)
7328 && KNOWN (regno))
7329 {
7330 if (note == 3)
7331 {
7332 specs[count] = tmpl;
7333 specs[count++].index = gr_values[regno].value & 0xFF;
7334 }
7335 else
7336 for (i = 0; i < NELEMS (gr_values); i++)
7337 {
7338 /* Uses all registers *except* the one in R3. */
7339 		      if ((unsigned) i != (gr_values[regno].value & 0xFF))
7340 {
7341 specs[count] = tmpl;
7342 specs[count++].index = i;
7343 }
7344 }
7345 }
7346 else
7347 {
7348 specs[count] = tmpl;
7349 specs[count++].specific = 0;
7350 }
7351 }
7352 }
7353 else if (note == 0)
7354 {
7355 /* probe et al. */
7356 specs[count] = tmpl;
7357 specs[count++].specific = 0;
7358 }
7359 break;
7360
7361 case IA64_RS_PMC: /* four or more registers */
7362 if (note == 3)
7363 {
7364 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7365 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7366
7367 {
7368 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7369 ? 1 : !rsrc_write);
7370 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7371 if (regno >= 0 && regno < NELEMS (gr_values)
7372 && KNOWN (regno))
7373 {
7374 specs[count] = tmpl;
7375 specs[count++].index = gr_values[regno].value & 0xFF;
7376 }
7377 else
7378 {
7379 specs[count] = tmpl;
7380 specs[count++].specific = 0;
7381 }
7382 }
7383 }
7384 else
7385 {
7386 UNHANDLED;
7387 }
7388 break;
7389
7390 case IA64_RS_PMD: /* four or more registers */
7391 if (note == 3)
7392 {
7393 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7394 {
7395 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7396 if (regno >= 0 && regno < NELEMS (gr_values)
7397 && KNOWN (regno))
7398 {
7399 specs[count] = tmpl;
7400 specs[count++].index = gr_values[regno].value & 0xFF;
7401 }
7402 else
7403 {
7404 specs[count] = tmpl;
7405 specs[count++].specific = 0;
7406 }
7407 }
7408 }
7409 else
7410 {
7411 UNHANDLED;
7412 }
7413 break;
7414
7415 case IA64_RS_RR: /* eight registers */
7416 if (note == 6)
7417 {
7418 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7419 {
7420 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7421 if (regno >= 0 && regno < NELEMS (gr_values)
7422 && KNOWN (regno))
7423 {
7424 specs[count] = tmpl;
7425 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7426 }
7427 else
7428 {
7429 specs[count] = tmpl;
7430 specs[count++].specific = 0;
7431 }
7432 }
7433 }
7434 else if (note == 0 && !rsrc_write)
7435 {
7436 specs[count] = tmpl;
7437 specs[count++].specific = 0;
7438 }
7439 else
7440 {
7441 UNHANDLED;
7442 }
7443 break;
7444
7445 case IA64_RS_CR_IRR:
7446 if (note == 0)
7447 {
7448 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7449 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7450 if (rsrc_write
7451 && idesc->operands[1] == IA64_OPND_CR3
7452 && regno == CR_IVR)
7453 {
7454 for (i = 0; i < 4; i++)
7455 {
7456 specs[count] = tmpl;
7457 specs[count++].index = CR_IRR0 + i;
7458 }
7459 }
7460 }
7461 else if (note == 1)
7462 {
7463 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7464 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7465 && regno >= CR_IRR0
7466 && regno <= CR_IRR3)
7467 {
7468 specs[count] = tmpl;
7469 specs[count++].index = regno;
7470 }
7471 }
7472 else
7473 {
7474 UNHANDLED;
7475 }
7476 break;
7477
7478 case IA64_RS_CR_LRR:
7479 if (note != 1)
7480 {
7481 UNHANDLED;
7482 }
7483 else
7484 {
7485 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7486 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7487 && (regno == CR_LRR0 || regno == CR_LRR1))
7488 {
7489 specs[count] = tmpl;
7490 specs[count++].index = regno;
7491 }
7492 }
7493 break;
7494
7495 case IA64_RS_CR:
7496 if (note == 1)
7497 {
7498 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7499 {
7500 specs[count] = tmpl;
7501 specs[count++].index =
7502 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7503 }
7504 }
7505 else
7506 {
7507 UNHANDLED;
7508 }
7509 break;
7510
7511 case IA64_RS_FR:
7512 case IA64_RS_FRb:
7513 if (note != 1)
7514 {
7515 UNHANDLED;
7516 }
7517 else if (rsrc_write)
7518 {
7519 if (dep->specifier == IA64_RS_FRb
7520 && idesc->operands[0] == IA64_OPND_F1)
7521 {
7522 specs[count] = tmpl;
7523 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7524 }
7525 }
7526 else
7527 {
7528 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7529 {
7530 if (idesc->operands[i] == IA64_OPND_F2
7531 || idesc->operands[i] == IA64_OPND_F3
7532 || idesc->operands[i] == IA64_OPND_F4)
7533 {
7534 specs[count] = tmpl;
7535 specs[count++].index =
7536 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7537 }
7538 }
7539 }
7540 break;
7541
7542 case IA64_RS_GR:
7543 if (note == 13)
7544 {
7545 /* This reference applies only to the GR whose value is loaded with
7546 data returned from memory. */
7547 specs[count] = tmpl;
7548 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7549 }
7550 else if (note == 1)
7551 {
7552 if (rsrc_write)
7553 {
7554 for (i = 0; i < idesc->num_outputs; i++)
7555 if (idesc->operands[i] == IA64_OPND_R1
7556 || idesc->operands[i] == IA64_OPND_R2
7557 || idesc->operands[i] == IA64_OPND_R3)
7558 {
7559 specs[count] = tmpl;
7560 specs[count++].index =
7561 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7562 }
7563 if (idesc->flags & IA64_OPCODE_POSTINC)
7564 for (i = 0; i < NELEMS (idesc->operands); i++)
7565 if (idesc->operands[i] == IA64_OPND_MR3)
7566 {
7567 specs[count] = tmpl;
7568 specs[count++].index =
7569 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7570 }
7571 }
7572 else
7573 {
7574 /* Look for anything that reads a GR. */
7575 for (i = 0; i < NELEMS (idesc->operands); i++)
7576 {
7577 if (idesc->operands[i] == IA64_OPND_MR3
7578 || idesc->operands[i] == IA64_OPND_CPUID_R3
7579 || idesc->operands[i] == IA64_OPND_DBR_R3
7580 || idesc->operands[i] == IA64_OPND_IBR_R3
7581 || idesc->operands[i] == IA64_OPND_MSR_R3
7582 || idesc->operands[i] == IA64_OPND_PKR_R3
7583 || idesc->operands[i] == IA64_OPND_PMC_R3
7584 || idesc->operands[i] == IA64_OPND_PMD_R3
7585 || idesc->operands[i] == IA64_OPND_RR_R3
7586 || ((i >= idesc->num_outputs)
7587 && (idesc->operands[i] == IA64_OPND_R1
7588 || idesc->operands[i] == IA64_OPND_R2
7589 || idesc->operands[i] == IA64_OPND_R3
7590 /* addl source register. */
7591 || idesc->operands[i] == IA64_OPND_R3_2)))
7592 {
7593 specs[count] = tmpl;
7594 specs[count++].index =
7595 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7596 }
7597 }
7598 }
7599 }
7600 else
7601 {
7602 UNHANDLED;
7603 }
7604 break;
7605
7606 /* This is the same as IA64_RS_PRr, except that the register range is
7607 from 1 - 15, and there are no rotating register reads/writes here. */
7608 case IA64_RS_PR:
7609 if (note == 0)
7610 {
7611 for (i = 1; i < 16; i++)
7612 {
7613 specs[count] = tmpl;
7614 specs[count++].index = i;
7615 }
7616 }
7617 else if (note == 7)
7618 {
7619 valueT mask = 0;
7620 /* Mark only those registers indicated by the mask. */
7621 if (rsrc_write)
7622 {
7623 mask = CURR_SLOT.opnd[2].X_add_number;
7624 for (i = 1; i < 16; i++)
7625 if (mask & ((valueT) 1 << i))
7626 {
7627 specs[count] = tmpl;
7628 specs[count++].index = i;
7629 }
7630 }
7631 else
7632 {
7633 UNHANDLED;
7634 }
7635 }
7636 else if (note == 11) /* note 11 implies note 1 as well */
7637 {
7638 if (rsrc_write)
7639 {
7640 for (i = 0; i < idesc->num_outputs; i++)
7641 {
7642 if (idesc->operands[i] == IA64_OPND_P1
7643 || idesc->operands[i] == IA64_OPND_P2)
7644 {
7645 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7646 if (regno >= 1 && regno < 16)
7647 {
7648 specs[count] = tmpl;
7649 specs[count++].index = regno;
7650 }
7651 }
7652 }
7653 }
7654 else
7655 {
7656 UNHANDLED;
7657 }
7658 }
7659 else if (note == 12)
7660 {
7661 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7662 {
7663 specs[count] = tmpl;
7664 specs[count++].index = CURR_SLOT.qp_regno;
7665 }
7666 }
7667 else if (note == 1)
7668 {
7669 if (rsrc_write)
7670 {
7671 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7672 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7673 	  int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7674 	  int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7675
7676 if ((idesc->operands[0] == IA64_OPND_P1
7677 || idesc->operands[0] == IA64_OPND_P2)
7678 && p1 >= 1 && p1 < 16)
7679 {
7680 specs[count] = tmpl;
7681 specs[count].cmp_type =
7682 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7683 specs[count++].index = p1;
7684 }
7685 if ((idesc->operands[1] == IA64_OPND_P1
7686 || idesc->operands[1] == IA64_OPND_P2)
7687 && p2 >= 1 && p2 < 16)
7688 {
7689 specs[count] = tmpl;
7690 specs[count].cmp_type =
7691 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7692 specs[count++].index = p2;
7693 }
7694 }
7695 else
7696 {
7697 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7698 {
7699 specs[count] = tmpl;
7700 specs[count++].index = CURR_SLOT.qp_regno;
7701 }
7702 if (idesc->operands[1] == IA64_OPND_PR)
7703 {
7704 for (i = 1; i < 16; i++)
7705 {
7706 specs[count] = tmpl;
7707 specs[count++].index = i;
7708 }
7709 }
7710 }
7711 }
7712 else
7713 {
7714 UNHANDLED;
7715 }
7716 break;
7717
7718 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
7719 simplified cases of this. */
7720 case IA64_RS_PRr:
7721 if (note == 0)
7722 {
7723 for (i = 16; i < 63; i++)
7724 {
7725 specs[count] = tmpl;
7726 specs[count++].index = i;
7727 }
7728 }
7729 else if (note == 7)
7730 {
7731 valueT mask = 0;
7732 /* Mark only those registers indicated by the mask. */
7733 if (rsrc_write
7734 && idesc->operands[0] == IA64_OPND_PR)
7735 {
7736 mask = CURR_SLOT.opnd[2].X_add_number;
7737 if (mask & ((valueT) 1<<16))
7738 for (i = 16; i < 63; i++)
7739 {
7740 specs[count] = tmpl;
7741 specs[count++].index = i;
7742 }
7743 }
7744 else if (rsrc_write
7745 && idesc->operands[0] == IA64_OPND_PR_ROT)
7746 {
7747 for (i = 16; i < 63; i++)
7748 {
7749 specs[count] = tmpl;
7750 specs[count++].index = i;
7751 }
7752 }
7753 else
7754 {
7755 UNHANDLED;
7756 }
7757 }
7758 else if (note == 11) /* note 11 implies note 1 as well */
7759 {
7760 if (rsrc_write)
7761 {
7762 for (i = 0; i < idesc->num_outputs; i++)
7763 {
7764 if (idesc->operands[i] == IA64_OPND_P1
7765 || idesc->operands[i] == IA64_OPND_P2)
7766 {
7767 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7768 if (regno >= 16 && regno < 63)
7769 {
7770 specs[count] = tmpl;
7771 specs[count++].index = regno;
7772 }
7773 }
7774 }
7775 }
7776 else
7777 {
7778 UNHANDLED;
7779 }
7780 }
7781 else if (note == 12)
7782 {
7783 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7784 {
7785 specs[count] = tmpl;
7786 specs[count++].index = CURR_SLOT.qp_regno;
7787 }
7788 }
7789 else if (note == 1)
7790 {
7791 if (rsrc_write)
7792 {
7793 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7794 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7795 	  int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7796 	  int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7797
7798 if ((idesc->operands[0] == IA64_OPND_P1
7799 || idesc->operands[0] == IA64_OPND_P2)
7800 && p1 >= 16 && p1 < 63)
7801 {
7802 specs[count] = tmpl;
7803 specs[count].cmp_type =
7804 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7805 specs[count++].index = p1;
7806 }
7807 if ((idesc->operands[1] == IA64_OPND_P1
7808 || idesc->operands[1] == IA64_OPND_P2)
7809 && p2 >= 16 && p2 < 63)
7810 {
7811 specs[count] = tmpl;
7812 specs[count].cmp_type =
7813 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7814 specs[count++].index = p2;
7815 }
7816 }
7817 else
7818 {
7819 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7820 {
7821 specs[count] = tmpl;
7822 specs[count++].index = CURR_SLOT.qp_regno;
7823 }
7824 if (idesc->operands[1] == IA64_OPND_PR)
7825 {
7826 for (i = 16; i < 63; i++)
7827 {
7828 specs[count] = tmpl;
7829 specs[count++].index = i;
7830 }
7831 }
7832 }
7833 }
7834 else
7835 {
7836 UNHANDLED;
7837 }
7838 break;
7839
7840 case IA64_RS_PSR:
7841 /* Verify that the instruction is using the PSR bit indicated in
7842 dep->regindex. */
7843 if (note == 0)
7844 {
7845 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
7846 {
7847 if (dep->regindex < 6)
7848 {
7849 specs[count++] = tmpl;
7850 }
7851 }
7852 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
7853 {
7854 if (dep->regindex < 32
7855 || dep->regindex == 35
7856 || dep->regindex == 36
7857 || (!rsrc_write && dep->regindex == PSR_CPL))
7858 {
7859 specs[count++] = tmpl;
7860 }
7861 }
7862 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
7863 {
7864 if (dep->regindex < 32
7865 || dep->regindex == 35
7866 || dep->regindex == 36
7867 || (rsrc_write && dep->regindex == PSR_CPL))
7868 {
7869 specs[count++] = tmpl;
7870 }
7871 }
7872 else
7873 {
7874 /* Several PSR bits have very specific dependencies. */
7875 switch (dep->regindex)
7876 {
7877 default:
7878 specs[count++] = tmpl;
7879 break;
7880 case PSR_IC:
7881 if (rsrc_write)
7882 {
7883 specs[count++] = tmpl;
7884 }
7885 else
7886 {
7887 /* Only certain CR accesses use PSR.ic */
7888 if (idesc->operands[0] == IA64_OPND_CR3
7889 || idesc->operands[1] == IA64_OPND_CR3)
7890 {
7891 int index =
7892 ((idesc->operands[0] == IA64_OPND_CR3)
7893 ? 0 : 1);
7894 int regno =
7895 CURR_SLOT.opnd[index].X_add_number - REG_CR;
7896
7897 switch (regno)
7898 {
7899 default:
7900 break;
7901 case CR_ITIR:
7902 case CR_IFS:
7903 case CR_IIM:
7904 case CR_IIP:
7905 case CR_IPSR:
7906 case CR_ISR:
7907 case CR_IFA:
7908 case CR_IHA:
7909 case CR_IIPA:
7910 specs[count++] = tmpl;
7911 break;
7912 }
7913 }
7914 }
7915 break;
7916 case PSR_CPL:
7917 if (rsrc_write)
7918 {
7919 specs[count++] = tmpl;
7920 }
7921 else
7922 {
7923 /* Only some AR accesses use cpl */
7924 if (idesc->operands[0] == IA64_OPND_AR3
7925 || idesc->operands[1] == IA64_OPND_AR3)
7926 {
7927 int index =
7928 ((idesc->operands[0] == IA64_OPND_AR3)
7929 ? 0 : 1);
7930 int regno =
7931 CURR_SLOT.opnd[index].X_add_number - REG_AR;
7932
7933 if (regno == AR_ITC
7934 || (index == 0
7935 && (regno == AR_ITC
7936 || regno == AR_RSC
7937 || (regno >= AR_K0
7938 && regno <= AR_K7))))
7939 {
7940 specs[count++] = tmpl;
7941 }
7942 }
7943 else
7944 {
7945 specs[count++] = tmpl;
7946 }
7947 break;
7948 }
7949 }
7950 }
7951 }
7952 else if (note == 7)
7953 {
7954 valueT mask = 0;
7955 if (idesc->operands[0] == IA64_OPND_IMMU24)
7956 {
7957 mask = CURR_SLOT.opnd[0].X_add_number;
7958 }
7959 else
7960 {
7961 UNHANDLED;
7962 }
7963 if (mask & ((valueT) 1 << dep->regindex))
7964 {
7965 specs[count++] = tmpl;
7966 }
7967 }
7968 else if (note == 8)
7969 {
7970 int min = dep->regindex == PSR_DFL ? 2 : 32;
7971 int max = dep->regindex == PSR_DFL ? 31 : 127;
7972 /* dfh is read on FR32-127; dfl is read on FR2-31 */
7973 for (i = 0; i < NELEMS (idesc->operands); i++)
7974 {
7975 if (idesc->operands[i] == IA64_OPND_F1
7976 || idesc->operands[i] == IA64_OPND_F2
7977 || idesc->operands[i] == IA64_OPND_F3
7978 || idesc->operands[i] == IA64_OPND_F4)
7979 {
7980 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7981 if (reg >= min && reg <= max)
7982 {
7983 specs[count++] = tmpl;
7984 }
7985 }
7986 }
7987 }
7988 else if (note == 9)
7989 {
7990 int min = dep->regindex == PSR_MFL ? 2 : 32;
7991 int max = dep->regindex == PSR_MFL ? 31 : 127;
7992 /* mfh is read on writes to FR32-127; mfl is read on writes to
7993 FR2-31 */
7994 for (i = 0; i < idesc->num_outputs; i++)
7995 {
7996 if (idesc->operands[i] == IA64_OPND_F1)
7997 {
7998 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7999 if (reg >= min && reg <= max)
8000 {
8001 specs[count++] = tmpl;
8002 }
8003 }
8004 }
8005 }
8006 else if (note == 10)
8007 {
8008 for (i = 0; i < NELEMS (idesc->operands); i++)
8009 {
8010 if (idesc->operands[i] == IA64_OPND_R1
8011 || idesc->operands[i] == IA64_OPND_R2
8012 || idesc->operands[i] == IA64_OPND_R3)
8013 {
8014 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8015 if (regno >= 16 && regno <= 31)
8016 {
8017 specs[count++] = tmpl;
8018 }
8019 }
8020 }
8021 }
8022 else
8023 {
8024 UNHANDLED;
8025 }
8026 break;
8027
8028 case IA64_RS_AR_FPSR:
8029 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8030 {
8031 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8032 if (regno == AR_FPSR)
8033 {
8034 specs[count++] = tmpl;
8035 }
8036 }
8037 else
8038 {
8039 specs[count++] = tmpl;
8040 }
8041 break;
8042
8043 case IA64_RS_ARX:
8044 /* Handle all AR[REG] resources */
8045 if (note == 0 || note == 1)
8046 {
8047 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8048 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8049 && regno == dep->regindex)
8050 {
8051 specs[count++] = tmpl;
8052 }
8053 /* other AR[REG] resources may be affected by AR accesses */
8054 else if (idesc->operands[0] == IA64_OPND_AR3)
8055 {
8056 /* AR[] writes */
8057 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8058 switch (dep->regindex)
8059 {
8060 default:
8061 break;
8062 case AR_BSP:
8063 case AR_RNAT:
8064 if (regno == AR_BSPSTORE)
8065 {
8066 specs[count++] = tmpl;
8067 }
8068 case AR_RSC:
8069 		  if (!rsrc_write
8070 		      && (regno == AR_BSPSTORE
8071 			  || regno == AR_RNAT))
8072 {
8073 specs[count++] = tmpl;
8074 }
8075 break;
8076 }
8077 }
8078 else if (idesc->operands[1] == IA64_OPND_AR3)
8079 {
8080 /* AR[] reads */
8081 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8082 switch (dep->regindex)
8083 {
8084 default:
8085 break;
8086 case AR_RSC:
8087 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8088 {
8089 specs[count++] = tmpl;
8090 }
8091 break;
8092 }
8093 }
8094 else
8095 {
8096 specs[count++] = tmpl;
8097 }
8098 }
8099 else
8100 {
8101 UNHANDLED;
8102 }
8103 break;
8104
8105 case IA64_RS_CRX:
8106 /* Handle all CR[REG] resources */
8107 if (note == 0 || note == 1)
8108 {
8109 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8110 {
8111 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8112 if (regno == dep->regindex)
8113 {
8114 specs[count++] = tmpl;
8115 }
8116 else if (!rsrc_write)
8117 {
8118 /* Reads from CR[IVR] affect other resources. */
8119 if (regno == CR_IVR)
8120 {
8121 if ((dep->regindex >= CR_IRR0
8122 && dep->regindex <= CR_IRR3)
8123 || dep->regindex == CR_TPR)
8124 {
8125 specs[count++] = tmpl;
8126 }
8127 }
8128 }
8129 }
8130 else
8131 {
8132 specs[count++] = tmpl;
8133 }
8134 }
8135 else
8136 {
8137 UNHANDLED;
8138 }
8139 break;
8140
8141 case IA64_RS_INSERVICE:
8142 /* look for write of EOI (67) or read of IVR (65) */
8143 if ((idesc->operands[0] == IA64_OPND_CR3
8144 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8145 || (idesc->operands[1] == IA64_OPND_CR3
8146 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8147 {
8148 specs[count++] = tmpl;
8149 }
8150 break;
8151
8152 case IA64_RS_GR0:
8153 if (note == 1)
8154 {
8155 specs[count++] = tmpl;
8156 }
8157 else
8158 {
8159 UNHANDLED;
8160 }
8161 break;
8162
8163 case IA64_RS_CFM:
8164 if (note != 2)
8165 {
8166 specs[count++] = tmpl;
8167 }
8168 else
8169 {
8170 /* Check if any of the registers accessed are in the rotating region.
8171 mov to/from pr accesses CFM only when qp_regno is in the rotating
8172 region */
8173 for (i = 0; i < NELEMS (idesc->operands); i++)
8174 {
8175 if (idesc->operands[i] == IA64_OPND_R1
8176 || idesc->operands[i] == IA64_OPND_R2
8177 || idesc->operands[i] == IA64_OPND_R3)
8178 {
8179 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8180 /* Assumes that md.rot.num_regs is always valid */
8181 if (md.rot.num_regs > 0
8182 && num > 31
8183 && num < 31 + md.rot.num_regs)
8184 {
8185 specs[count] = tmpl;
8186 specs[count++].specific = 0;
8187 }
8188 }
8189 else if (idesc->operands[i] == IA64_OPND_F1
8190 || idesc->operands[i] == IA64_OPND_F2
8191 || idesc->operands[i] == IA64_OPND_F3
8192 || idesc->operands[i] == IA64_OPND_F4)
8193 {
8194 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8195 if (num > 31)
8196 {
8197 specs[count] = tmpl;
8198 specs[count++].specific = 0;
8199 }
8200 }
8201 else if (idesc->operands[i] == IA64_OPND_P1
8202 || idesc->operands[i] == IA64_OPND_P2)
8203 {
8204 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8205 if (num > 15)
8206 {
8207 specs[count] = tmpl;
8208 specs[count++].specific = 0;
8209 }
8210 }
8211 }
8212 if (CURR_SLOT.qp_regno > 15)
8213 {
8214 specs[count] = tmpl;
8215 specs[count++].specific = 0;
8216 }
8217 }
8218 break;
8219
8220 /* This is the same as IA64_RS_PRr, except simplified to account for
8221 the fact that there is only one register. */
8222 case IA64_RS_PR63:
8223 if (note == 0)
8224 {
8225 specs[count++] = tmpl;
8226 }
8227 else if (note == 7)
8228 {
8229 valueT mask = 0;
8230 if (idesc->operands[2] == IA64_OPND_IMM17)
8231 mask = CURR_SLOT.opnd[2].X_add_number;
8232 if (mask & ((valueT) 1 << 63))
8233 specs[count++] = tmpl;
8234 }
8235 else if (note == 11)
8236 {
8237 if ((idesc->operands[0] == IA64_OPND_P1
8238 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8239 || (idesc->operands[1] == IA64_OPND_P2
8240 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8241 {
8242 specs[count++] = tmpl;
8243 }
8244 }
8245 else if (note == 12)
8246 {
8247 if (CURR_SLOT.qp_regno == 63)
8248 {
8249 specs[count++] = tmpl;
8250 }
8251 }
8252 else if (note == 1)
8253 {
8254 if (rsrc_write)
8255 {
8256 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8257 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8258 	  int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8259 	  int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8260
8261 if (p1 == 63
8262 && (idesc->operands[0] == IA64_OPND_P1
8263 || idesc->operands[0] == IA64_OPND_P2))
8264 {
8265 specs[count] = tmpl;
8266 specs[count++].cmp_type =
8267 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8268 }
8269 if (p2 == 63
8270 && (idesc->operands[1] == IA64_OPND_P1
8271 || idesc->operands[1] == IA64_OPND_P2))
8272 {
8273 specs[count] = tmpl;
8274 specs[count++].cmp_type =
8275 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8276 }
8277 }
8278 else
8279 {
8280 if (CURR_SLOT.qp_regno == 63)
8281 {
8282 specs[count++] = tmpl;
8283 }
8284 }
8285 }
8286 else
8287 {
8288 UNHANDLED;
8289 }
8290 break;
8291
8292 case IA64_RS_RSE:
8293 /* FIXME we can identify some individual RSE written resources, but RSE
8294 read resources have not yet been completely identified, so for now
8295 treat RSE as a single resource */
8296 if (strncmp (idesc->name, "mov", 3) == 0)
8297 {
8298 if (rsrc_write)
8299 {
8300 if (idesc->operands[0] == IA64_OPND_AR3
8301 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8302 {
8303 specs[count] = tmpl;
8304 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8305 }
8306 }
8307 else
8308 {
8309 if (idesc->operands[0] == IA64_OPND_AR3)
8310 {
8311 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8312 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8313 {
8314 specs[count++] = tmpl;
8315 }
8316 }
8317 else if (idesc->operands[1] == IA64_OPND_AR3)
8318 {
8319 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8320 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8321 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8322 {
8323 specs[count++] = tmpl;
8324 }
8325 }
8326 }
8327 }
8328 else
8329 {
8330 specs[count++] = tmpl;
8331 }
8332 break;
8333
8334 case IA64_RS_ANY:
8335 /* FIXME -- do any of these need to be non-specific? */
8336 specs[count++] = tmpl;
8337 break;
8338
8339 default:
8340 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8341 break;
8342 }
8343
8344 return count;
8345 }
8346
8347 /* Clear branch flags on marked resources. This breaks the link between the
8348 QP of the marking instruction and a subsequent branch on the same QP. */
8349
8350 static void
8351 clear_qp_branch_flag (mask)
8352 valueT mask;
8353 {
8354 int i;
8355 for (i = 0; i < regdepslen; i++)
8356 {
8357 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8358 if ((bit & mask) != 0)
8359 {
8360 regdeps[i].link_to_qp_branch = 0;
8361 }
8362 }
8363 }
8364
8365 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8366
8367    Any change to a PR clears the mutex relations which include that PR.  */
8368
8369 static void
8370 clear_qp_mutex (mask)
8371 valueT mask;
8372 {
8373 int i;
8374
8375 i = 0;
8376 while (i < qp_mutexeslen)
8377 {
8378 if ((qp_mutexes[i].prmask & mask) != 0)
8379 {
8380 if (md.debug_dv)
8381 {
8382 fprintf (stderr, " Clearing mutex relation");
8383 print_prmask (qp_mutexes[i].prmask);
8384 fprintf (stderr, "\n");
8385 }
8386 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8387 }
8388 else
8389 ++i;
8390 }
8391 }
8392
8393 /* Clear implies relations which contain PRs in the given masks.
8394 P1_MASK indicates the source of the implies relation, while P2_MASK
8395 indicates the implied PR. */
8396
8397 static void
8398 clear_qp_implies (p1_mask, p2_mask)
8399 valueT p1_mask;
8400 valueT p2_mask;
8401 {
8402 int i;
8403
8404 i = 0;
8405 while (i < qp_implieslen)
8406 {
8407 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8408 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8409 {
8410 if (md.debug_dv)
8411 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8412 qp_implies[i].p1, qp_implies[i].p2);
8413 qp_implies[i] = qp_implies[--qp_implieslen];
8414 }
8415 else
8416 ++i;
8417 }
8418 }
8419
8420 /* Add the PRs specified to the list of implied relations. */
8421
8422 static void
8423 add_qp_imply (p1, p2)
8424 int p1, p2;
8425 {
8426 valueT mask;
8427 valueT bit;
8428 int i;
8429
8430 /* p0 is not meaningful here. */
8431 if (p1 == 0 || p2 == 0)
8432 abort ();
8433
8434 if (p1 == p2)
8435 return;
8436
8437 /* If it exists already, ignore it. */
8438 for (i = 0; i < qp_implieslen; i++)
8439 {
8440 if (qp_implies[i].p1 == p1
8441 && qp_implies[i].p2 == p2
8442 && qp_implies[i].path == md.path
8443 && !qp_implies[i].p2_branched)
8444 return;
8445 }
8446
8447 if (qp_implieslen == qp_impliestotlen)
8448 {
8449 qp_impliestotlen += 20;
8450 qp_implies = (struct qp_imply *)
8451 xrealloc ((void *) qp_implies,
8452 qp_impliestotlen * sizeof (struct qp_imply));
8453 }
8454 if (md.debug_dv)
8455 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8456 qp_implies[qp_implieslen].p1 = p1;
8457 qp_implies[qp_implieslen].p2 = p2;
8458 qp_implies[qp_implieslen].path = md.path;
8459 qp_implies[qp_implieslen++].p2_branched = 0;
8460
8461 /* Add in the implied transitive relations; for everything that p2 implies,
8462 make p1 imply that, too; for everything that implies p1, make it imply p2
8463 as well. */
8464 for (i = 0; i < qp_implieslen; i++)
8465 {
8466 if (qp_implies[i].p1 == p2)
8467 add_qp_imply (p1, qp_implies[i].p2);
8468 if (qp_implies[i].p2 == p1)
8469 add_qp_imply (qp_implies[i].p1, p2);
8470 }
8471 /* Add in mutex relations implied by this implies relation; for each mutex
8472 relation containing p2, duplicate it and replace p2 with p1. */
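  /* For example (illustrative): if p2 and p5 are already mutex and we
     record "p1 implies p2", then p1 and p5 must also be mutex, since
     p1 being true forces p2 true and hence p5 false.  */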
8473 bit = (valueT) 1 << p1;
8474 mask = (valueT) 1 << p2;
8475 for (i = 0; i < qp_mutexeslen; i++)
8476 {
8477 if (qp_mutexes[i].prmask & mask)
8478 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
8479 }
8480 }
8481
8482 /* Add the PRs specified in the mask to the mutex list; this means that only
8483 one of the PRs can be true at any time. PR0 should never be included in
8484 the mask. */
8485
8486 static void
8487 add_qp_mutex (mask)
8488 valueT mask;
8489 {
8490 if (mask & 0x1)
8491 abort ();
8492
8493 if (qp_mutexeslen == qp_mutexestotlen)
8494 {
8495 qp_mutexestotlen += 20;
8496 qp_mutexes = (struct qpmutex *)
8497 xrealloc ((void *) qp_mutexes,
8498 qp_mutexestotlen * sizeof (struct qpmutex));
8499 }
8500 if (md.debug_dv)
8501 {
8502 fprintf (stderr, " Registering mutex on");
8503 print_prmask (mask);
8504 fprintf (stderr, "\n");
8505 }
8506 qp_mutexes[qp_mutexeslen].path = md.path;
8507 qp_mutexes[qp_mutexeslen++].prmask = mask;
8508 }
8509
8510 static void
8511 clear_register_values ()
8512 {
8513 int i;
8514 if (md.debug_dv)
8515 fprintf (stderr, " Clearing register values\n");
8516 for (i = 1; i < NELEMS (gr_values); i++)
8517 gr_values[i].known = 0;
8518 }
8519
8520 /* Keep track of register values/changes which affect DV tracking.
8521
 8522    Optimization note: we should add a flag to classes of insns where we
 8523    otherwise have to examine a group of strings to identify them.  */
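/* As a sketch of why this tracking matters (hypothetical register numbers):
   after "mov r3 = 5", a later indirect access such as "mov r4 = pmc[r3]" can
   be treated as a reference to the specific resource PMC[5] rather than as a
   potential conflict with every PMC register.  */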
8524
8525 static void
8526 note_register_values (idesc)
8527 struct ia64_opcode *idesc;
8528 {
8529 valueT qp_changemask = 0;
8530 int i;
8531
8532 /* Invalidate values for registers being written to. */
8533 for (i = 0; i < idesc->num_outputs; i++)
8534 {
8535 if (idesc->operands[i] == IA64_OPND_R1
8536 || idesc->operands[i] == IA64_OPND_R2
8537 || idesc->operands[i] == IA64_OPND_R3)
8538 {
8539 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8540 if (regno > 0 && regno < NELEMS (gr_values))
8541 gr_values[regno].known = 0;
8542 }
8543 else if (idesc->operands[i] == IA64_OPND_R3_2)
8544 {
8545 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8546 if (regno > 0 && regno < 4)
8547 gr_values[regno].known = 0;
8548 }
8549 else if (idesc->operands[i] == IA64_OPND_P1
8550 || idesc->operands[i] == IA64_OPND_P2)
8551 {
8552 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8553 qp_changemask |= (valueT) 1 << regno;
8554 }
8555 else if (idesc->operands[i] == IA64_OPND_PR)
8556 {
 8557 	  if (CURR_SLOT.opnd[2].X_add_number & (valueT) 0x10000)
 8558 	    qp_changemask = ~(valueT) 0x1FFFF | CURR_SLOT.opnd[2].X_add_number;
 8559 	  else
 8560 	    qp_changemask = CURR_SLOT.opnd[2].X_add_number;
8561 break;
8562 }
8563 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8564 {
 8565 	  if (CURR_SLOT.opnd[1].X_add_number & ((valueT) 1 << 43))
 8566 	    qp_changemask = ~(valueT) 0xFFFFFFFFFFF | CURR_SLOT.opnd[1].X_add_number;
 8567 	  else
 8568 	    qp_changemask = CURR_SLOT.opnd[1].X_add_number;
8569 qp_changemask &= ~(valueT) 0xFFFF;
8570 break;
8571 }
8572 }
8573
8574 /* Always clear qp branch flags on any PR change. */
8575 /* FIXME there may be exceptions for certain compares. */
8576 clear_qp_branch_flag (qp_changemask);
8577
8578 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8579 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8580 {
8581 qp_changemask |= ~(valueT) 0xFFFF;
8582 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8583 {
8584 for (i = 32; i < 32 + md.rot.num_regs; i++)
8585 gr_values[i].known = 0;
8586 }
8587 clear_qp_mutex (qp_changemask);
8588 clear_qp_implies (qp_changemask, qp_changemask);
8589 }
8590 /* After a call, all register values are undefined, except those marked
8591 as "safe". */
8592 else if (strncmp (idesc->name, "br.call", 6) == 0
8593 || strncmp (idesc->name, "brl.call", 7) == 0)
8594 {
8595 /* FIXME keep GR values which are marked as "safe_across_calls" */
8596 clear_register_values ();
8597 clear_qp_mutex (~qp_safe_across_calls);
8598 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8599 clear_qp_branch_flag (~qp_safe_across_calls);
8600 }
8601 else if (is_interruption_or_rfi (idesc)
8602 || is_taken_branch (idesc))
8603 {
8604 clear_register_values ();
8605 clear_qp_mutex (~(valueT) 0);
8606 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8607 }
8608 /* Look for mutex and implies relations. */
8609 else if ((idesc->operands[0] == IA64_OPND_P1
8610 || idesc->operands[0] == IA64_OPND_P2)
8611 && (idesc->operands[1] == IA64_OPND_P1
8612 || idesc->operands[1] == IA64_OPND_P2))
8613 {
8614 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8615 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8616 valueT p1mask = (valueT) 1 << p1;
8617 valueT p2mask = (valueT) 1 << p2;
8618
8619 /* If one of the PRs is PR0, we can't really do anything. */
8620 if (p1 == 0 || p2 == 0)
8621 {
8622 if (md.debug_dv)
8623 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8624 }
8625 /* In general, clear mutexes and implies which include P1 or P2,
8626 with the following exceptions. */
8627 else if (strstr (idesc->name, ".or.andcm") != NULL)
8628 {
8629 add_qp_mutex (p1mask | p2mask);
8630 clear_qp_implies (p2mask, p1mask);
8631 }
8632 else if (strstr (idesc->name, ".and.orcm") != NULL)
8633 {
8634 add_qp_mutex (p1mask | p2mask);
8635 clear_qp_implies (p1mask, p2mask);
8636 }
8637 else if (strstr (idesc->name, ".and") != NULL)
8638 {
8639 clear_qp_implies (0, p1mask | p2mask);
8640 }
8641 else if (strstr (idesc->name, ".or") != NULL)
8642 {
8643 clear_qp_mutex (p1mask | p2mask);
8644 clear_qp_implies (p1mask | p2mask, 0);
8645 }
8646 else
8647 {
8648 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
8649 if (strstr (idesc->name, ".unc") != NULL)
8650 {
8651 add_qp_mutex (p1mask | p2mask);
8652 if (CURR_SLOT.qp_regno != 0)
8653 {
8654 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
8655 CURR_SLOT.qp_regno);
8656 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
8657 CURR_SLOT.qp_regno);
8658 }
8659 }
8660 else if (CURR_SLOT.qp_regno == 0)
8661 {
8662 add_qp_mutex (p1mask | p2mask);
8663 }
8664 else
8665 {
8666 clear_qp_mutex (p1mask | p2mask);
8667 }
8668 }
8669 }
8670 /* Look for mov imm insns into GRs. */
8671 else if (idesc->operands[0] == IA64_OPND_R1
8672 && (idesc->operands[1] == IA64_OPND_IMM22
8673 || idesc->operands[1] == IA64_OPND_IMMU64)
8674 && (strcmp (idesc->name, "mov") == 0
8675 || strcmp (idesc->name, "movl") == 0))
8676 {
8677 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8678 if (regno > 0 && regno < NELEMS (gr_values))
8679 {
8680 gr_values[regno].known = 1;
8681 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
8682 gr_values[regno].path = md.path;
8683 if (md.debug_dv)
8684 {
8685 fprintf (stderr, " Know gr%d = ", regno);
8686 fprintf_vma (stderr, gr_values[regno].value);
8687 fputs ("\n", stderr);
8688 }
8689 }
8690 }
8691 else
8692 {
8693 clear_qp_mutex (qp_changemask);
8694 clear_qp_implies (qp_changemask, qp_changemask);
8695 }
8696 }
8697
8698 /* Return whether the given predicate registers are currently mutex. */
8699
8700 static int
8701 qp_mutex (p1, p2, path)
8702 int p1;
8703 int p2;
8704 int path;
8705 {
8706 int i;
8707 valueT mask;
8708
8709 if (p1 != p2)
8710 {
8711 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
8712 for (i = 0; i < qp_mutexeslen; i++)
8713 {
8714 if (qp_mutexes[i].path >= path
8715 && (qp_mutexes[i].prmask & mask) == mask)
8716 return 1;
8717 }
8718 }
8719 return 0;
8720 }
8721
 8722 /* Return whether the given resource is in the given insn's list of chks.
8723 Return 1 if the conflict is absolutely determined, 2 if it's a potential
8724 conflict. */
8725
8726 static int
8727 resources_match (rs, idesc, note, qp_regno, path)
8728 struct rsrc *rs;
8729 struct ia64_opcode *idesc;
8730 int note;
8731 int qp_regno;
8732 int path;
8733 {
8734 struct rsrc specs[MAX_SPECS];
8735 int count;
8736
8737 /* If the marked resource's qp_regno and the given qp_regno are mutex,
8738 we don't need to check. One exception is note 11, which indicates that
8739 target predicates are written regardless of PR[qp]. */
8740 if (qp_mutex (rs->qp_regno, qp_regno, path)
8741 && note != 11)
8742 return 0;
8743
8744 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
8745 while (count-- > 0)
8746 {
8747 /* UNAT checking is a bit more specific than other resources */
8748 if (rs->dependency->specifier == IA64_RS_AR_UNAT
8749 && specs[count].mem_offset.hint
8750 && rs->mem_offset.hint)
8751 {
8752 if (rs->mem_offset.base == specs[count].mem_offset.base)
8753 {
8754 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
8755 ((specs[count].mem_offset.offset >> 3) & 0x3F))
8756 return 1;
8757 else
8758 continue;
8759 }
8760 }
8761
8762 /* Skip apparent PR write conflicts where both writes are an AND or both
8763 writes are an OR. */
8764 if (rs->dependency->specifier == IA64_RS_PR
8765 || rs->dependency->specifier == IA64_RS_PRr
8766 || rs->dependency->specifier == IA64_RS_PR63)
8767 {
8768 if (specs[count].cmp_type != CMP_NONE
8769 && specs[count].cmp_type == rs->cmp_type)
8770 {
8771 if (md.debug_dv)
8772 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
8773 dv_mode[rs->dependency->mode],
8774 rs->dependency->specifier != IA64_RS_PR63 ?
8775 specs[count].index : 63);
8776 continue;
8777 }
8778 if (md.debug_dv)
8779 fprintf (stderr,
8780 " %s on parallel compare conflict %s vs %s on PR%d\n",
8781 dv_mode[rs->dependency->mode],
8782 dv_cmp_type[rs->cmp_type],
8783 dv_cmp_type[specs[count].cmp_type],
8784 rs->dependency->specifier != IA64_RS_PR63 ?
8785 specs[count].index : 63);
8786
8787 }
8788
 8789       /* If either resource is not specific, conservatively assume a
 8790 	 conflict.  */
8791 if (!specs[count].specific || !rs->specific)
8792 return 2;
8793 else if (specs[count].index == rs->index)
8794 return 1;
8795 }
8796 #if 0
8797 if (md.debug_dv)
8798 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
8799 #endif
8800
8801 return 0;
8802 }
8803
8804 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
8805 insert a stop to create the break. Update all resource dependencies
8806 appropriately. If QP_REGNO is non-zero, only apply the break to resources
8807 which use the same QP_REGNO and have the link_to_qp_branch flag set.
8808 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
8809 instruction. */
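/* Note: a stop is what ";;" denotes in assembly source; e.g. in

     add r1 = r2, r3 ;;   // r1 written, group ends here
     add r4 = r1, r0      // r1 read in the following group

   (illustrative only) the stop keeps the write and the read of r1 in
   separate instruction groups, avoiding a RAW dependency violation.  */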
8810
8811 static void
8812 insn_group_break (insert_stop, qp_regno, save_current)
8813 int insert_stop;
8814 int qp_regno;
8815 int save_current;
8816 {
8817 int i;
8818
8819 if (insert_stop && md.num_slots_in_use > 0)
8820 PREV_SLOT.end_of_insn_group = 1;
8821
8822 if (md.debug_dv)
8823 {
8824 fprintf (stderr, " Insn group break%s",
8825 (insert_stop ? " (w/stop)" : ""));
8826 if (qp_regno != 0)
8827 fprintf (stderr, " effective for QP=%d", qp_regno);
8828 fprintf (stderr, "\n");
8829 }
8830
8831 i = 0;
8832 while (i < regdepslen)
8833 {
8834 const struct ia64_dependency *dep = regdeps[i].dependency;
8835
8836 if (qp_regno != 0
8837 && regdeps[i].qp_regno != qp_regno)
8838 {
8839 ++i;
8840 continue;
8841 }
8842
8843 if (save_current
8844 && CURR_SLOT.src_file == regdeps[i].file
8845 && CURR_SLOT.src_line == regdeps[i].line)
8846 {
8847 ++i;
8848 continue;
8849 }
8850
 8851       /* Clear dependencies which are automatically cleared by a stop, or
 8852 	 those that have reached the appropriate state of insn serialization.  */
8853 if (dep->semantics == IA64_DVS_IMPLIED
8854 || dep->semantics == IA64_DVS_IMPLIEDF
8855 || regdeps[i].insn_srlz == STATE_SRLZ)
8856 {
8857 print_dependency ("Removing", i);
8858 regdeps[i] = regdeps[--regdepslen];
8859 }
8860 else
8861 {
8862 if (dep->semantics == IA64_DVS_DATA
8863 || dep->semantics == IA64_DVS_INSTR
8864 || dep->semantics == IA64_DVS_SPECIFIC)
8865 {
8866 if (regdeps[i].insn_srlz == STATE_NONE)
8867 regdeps[i].insn_srlz = STATE_STOP;
8868 if (regdeps[i].data_srlz == STATE_NONE)
8869 regdeps[i].data_srlz = STATE_STOP;
8870 }
8871 ++i;
8872 }
8873 }
8874 }
8875
8876 /* Add the given resource usage spec to the list of active dependencies. */
8877
8878 static void
8879 mark_resource (idesc, dep, spec, depind, path)
8880 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
8881 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
8882 struct rsrc *spec;
8883 int depind;
8884 int path;
8885 {
8886 if (regdepslen == regdepstotlen)
8887 {
8888 regdepstotlen += 20;
8889 regdeps = (struct rsrc *)
8890 xrealloc ((void *) regdeps,
8891 regdepstotlen * sizeof (struct rsrc));
8892 }
8893
8894 regdeps[regdepslen] = *spec;
8895 regdeps[regdepslen].depind = depind;
8896 regdeps[regdepslen].path = path;
8897 regdeps[regdepslen].file = CURR_SLOT.src_file;
8898 regdeps[regdepslen].line = CURR_SLOT.src_line;
8899
8900 print_dependency ("Adding", regdepslen);
8901
8902 ++regdepslen;
8903 }
8904
8905 static void
8906 print_dependency (action, depind)
8907 const char *action;
8908 int depind;
8909 {
8910 if (md.debug_dv)
8911 {
8912 fprintf (stderr, " %s %s '%s'",
8913 action, dv_mode[(regdeps[depind].dependency)->mode],
8914 (regdeps[depind].dependency)->name);
8915 if (regdeps[depind].specific && regdeps[depind].index != 0)
8916 fprintf (stderr, " (%d)", regdeps[depind].index);
8917 if (regdeps[depind].mem_offset.hint)
8918 {
8919 fputs (" ", stderr);
8920 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
8921 fputs ("+", stderr);
8922 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
8923 }
8924 fprintf (stderr, "\n");
8925 }
8926 }
8927
8928 static void
8929 instruction_serialization ()
8930 {
8931 int i;
8932 if (md.debug_dv)
8933 fprintf (stderr, " Instruction serialization\n");
8934 for (i = 0; i < regdepslen; i++)
8935 if (regdeps[i].insn_srlz == STATE_STOP)
8936 regdeps[i].insn_srlz = STATE_SRLZ;
8937 }
8938
8939 static void
8940 data_serialization ()
8941 {
8942 int i = 0;
8943 if (md.debug_dv)
8944 fprintf (stderr, " Data serialization\n");
8945 while (i < regdepslen)
8946 {
8947 if (regdeps[i].data_srlz == STATE_STOP
8948 /* Note: as of 991210, all "other" dependencies are cleared by a
8949 data serialization. This might change with new tables */
8950 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
8951 {
8952 print_dependency ("Removing", i);
8953 regdeps[i] = regdeps[--regdepslen];
8954 }
8955 else
8956 ++i;
8957 }
8958 }
8959
8960 /* Insert stops and serializations as needed to avoid DVs. */
8961
8962 static void
8963 remove_marked_resource (rs)
8964 struct rsrc *rs;
8965 {
8966 switch (rs->dependency->semantics)
8967 {
8968 case IA64_DVS_SPECIFIC:
8969 if (md.debug_dv)
8970 fprintf (stderr, "Implementation-specific, assume worst case...\n");
8971 /* ...fall through... */
8972 case IA64_DVS_INSTR:
8973 if (md.debug_dv)
8974 fprintf (stderr, "Inserting instr serialization\n");
8975 if (rs->insn_srlz < STATE_STOP)
8976 insn_group_break (1, 0, 0);
8977 if (rs->insn_srlz < STATE_SRLZ)
8978 {
8979 int oldqp = CURR_SLOT.qp_regno;
8980 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
8981 /* Manually jam a srlz.i insn into the stream */
8982 CURR_SLOT.qp_regno = 0;
8983 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
8984 instruction_serialization ();
8985 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
8986 if (++md.num_slots_in_use >= NUM_SLOTS)
8987 emit_one_bundle ();
8988 CURR_SLOT.qp_regno = oldqp;
8989 CURR_SLOT.idesc = oldidesc;
8990 }
8991 insn_group_break (1, 0, 0);
8992 break;
8993 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
8994 "other" types of DV are eliminated
8995 by a data serialization */
8996 case IA64_DVS_DATA:
8997 if (md.debug_dv)
8998 fprintf (stderr, "Inserting data serialization\n");
8999 if (rs->data_srlz < STATE_STOP)
9000 insn_group_break (1, 0, 0);
9001 {
9002 int oldqp = CURR_SLOT.qp_regno;
9003 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9004 /* Manually jam a srlz.d insn into the stream */
9005 CURR_SLOT.qp_regno = 0;
9006 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9007 data_serialization ();
9008 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9009 if (++md.num_slots_in_use >= NUM_SLOTS)
9010 emit_one_bundle ();
9011 CURR_SLOT.qp_regno = oldqp;
9012 CURR_SLOT.idesc = oldidesc;
9013 }
9014 break;
9015 case IA64_DVS_IMPLIED:
9016 case IA64_DVS_IMPLIEDF:
9017 if (md.debug_dv)
9018 fprintf (stderr, "Inserting stop\n");
9019 insn_group_break (1, 0, 0);
9020 break;
9021 default:
9022 break;
9023 }
9024 }
9025
9026 /* Check the resources used by the given opcode against the current dependency
9027 list.
9028
9029 The check is run once for each execution path encountered. In this case,
9030 a unique execution path is the sequence of instructions following a code
9031 entry point, e.g. the following has three execution paths, one starting
9032 at L0, one at L1, and one at L2.
9033
9034 L0: nop
9035 L1: add
9036 L2: add
9037 br.ret
9038 */
9039
9040 static void
9041 check_dependencies (idesc)
9042 struct ia64_opcode *idesc;
9043 {
9044 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9045 int path;
9046 int i;
9047
9048 /* Note that the number of marked resources may change within the
9049 loop if in auto mode. */
9050 i = 0;
9051 while (i < regdepslen)
9052 {
9053 struct rsrc *rs = &regdeps[i];
9054 const struct ia64_dependency *dep = rs->dependency;
9055 int chkind;
9056 int note;
9057 int start_over = 0;
9058
9059 if (dep->semantics == IA64_DVS_NONE
9060 || (chkind = depends_on (rs->depind, idesc)) == -1)
9061 {
9062 ++i;
9063 continue;
9064 }
9065
9066 note = NOTE (opdeps->chks[chkind]);
9067
9068 /* Check this resource against each execution path seen thus far. */
9069 for (path = 0; path <= md.path; path++)
9070 {
9071 int matchtype;
9072
9073 /* If the dependency wasn't on the path being checked, ignore it. */
9074 if (rs->path < path)
9075 continue;
9076
9077 /* If the QP for this insn implies a QP which has branched, don't
9078 bother checking. Ed. NOTE: I don't think this check is terribly
9079 useful; what's the point of generating code which will only be
9080 reached if its QP is zero?
9081 This code was specifically inserted to handle the following code,
9082 based on notes from Intel's DV checking code, where p1 implies p2.
9083
9084 mov r4 = 2
9085 (p2) br.cond L
9086 (p1) mov r4 = 7
9087 */
9088 if (CURR_SLOT.qp_regno != 0)
9089 {
9090 int skip = 0;
9091 int implies;
9092 for (implies = 0; implies < qp_implieslen; implies++)
9093 {
9094 if (qp_implies[implies].path >= path
9095 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9096 && qp_implies[implies].p2_branched)
9097 {
9098 skip = 1;
9099 break;
9100 }
9101 }
9102 if (skip)
9103 continue;
9104 }
9105
9106 if ((matchtype = resources_match (rs, idesc, note,
9107 CURR_SLOT.qp_regno, path)) != 0)
9108 {
9109 char msg[1024];
9110 char pathmsg[256] = "";
9111 char indexmsg[256] = "";
9112 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9113
9114 if (path != 0)
9115 sprintf (pathmsg, " when entry is at label '%s'",
9116 md.entry_labels[path - 1]);
9117 if (rs->specific && rs->index != 0)
9118 sprintf (indexmsg, ", specific resource number is %d",
9119 rs->index);
9120 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9121 idesc->name,
9122 (certain ? "violates" : "may violate"),
9123 dv_mode[dep->mode], dep->name,
9124 dv_sem[dep->semantics],
9125 pathmsg, indexmsg);
9126
9127 if (md.explicit_mode)
9128 {
9129 as_warn ("%s", msg);
9130 if (path < md.path)
9131 as_warn (_("Only the first path encountering the conflict "
9132 "is reported"));
9133 as_warn_where (rs->file, rs->line,
9134 _("This is the location of the "
9135 "conflicting usage"));
9136 /* Don't bother checking other paths, to avoid duplicating
9137 the same warning */
9138 break;
9139 }
9140 else
9141 {
9142 if (md.debug_dv)
9143 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9144
9145 remove_marked_resource (rs);
9146
9147 /* since the set of dependencies has changed, start over */
9148 /* FIXME -- since we're removing dvs as we go, we
9149 probably don't really need to start over... */
9150 start_over = 1;
9151 break;
9152 }
9153 }
9154 }
9155 if (start_over)
9156 i = 0;
9157 else
9158 ++i;
9159 }
9160 }
9161
9162 /* Register new dependencies based on the given opcode. */
9163
9164 static void
9165 mark_resources (idesc)
9166 struct ia64_opcode *idesc;
9167 {
9168 int i;
9169 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9170 int add_only_qp_reads = 0;
9171
 9172   /* A conditional branch only uses its resources if it is taken; if it is
 9173      taken, we stop following that path, so if it is not taken we register
 9174      only QP reads.  The other branch types effectively *always* write
 9175      their resources.  */
9176 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9177 {
9178 add_only_qp_reads = 1;
9179 }
9180
9181 if (md.debug_dv)
9182 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9183
9184 for (i = 0; i < opdeps->nregs; i++)
9185 {
9186 const struct ia64_dependency *dep;
9187 struct rsrc specs[MAX_SPECS];
9188 int note;
9189 int path;
9190 int count;
9191
9192 dep = ia64_find_dependency (opdeps->regs[i]);
9193 note = NOTE (opdeps->regs[i]);
9194
9195 if (add_only_qp_reads
9196 && !(dep->mode == IA64_DV_WAR
9197 && (dep->specifier == IA64_RS_PR
9198 || dep->specifier == IA64_RS_PRr
9199 || dep->specifier == IA64_RS_PR63)))
9200 continue;
9201
9202 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9203
9204 #if 0
9205 if (md.debug_dv && !count)
9206 fprintf (stderr, " No %s %s usage found (path %d)\n",
9207 dv_mode[dep->mode], dep->name, md.path);
9208 #endif
9209
9210 while (count-- > 0)
9211 {
9212 mark_resource (idesc, dep, &specs[count],
9213 DEP (opdeps->regs[i]), md.path);
9214 }
9215
9216 /* The execution path may affect register values, which may in turn
9217 affect which indirect-access resources are accessed. */
9218 switch (dep->specifier)
9219 {
9220 default:
9221 break;
9222 case IA64_RS_CPUID:
9223 case IA64_RS_DBR:
9224 case IA64_RS_IBR:
9225 case IA64_RS_MSR:
9226 case IA64_RS_PKR:
9227 case IA64_RS_PMC:
9228 case IA64_RS_PMD:
9229 case IA64_RS_RR:
9230 for (path = 0; path < md.path; path++)
9231 {
9232 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9233 while (count-- > 0)
9234 mark_resource (idesc, dep, &specs[count],
9235 DEP (opdeps->regs[i]), path);
9236 }
9237 break;
9238 }
9239 }
9240 }
9241
9242 /* Remove dependencies when they no longer apply. */
9243
9244 static void
9245 update_dependencies (idesc)
9246 struct ia64_opcode *idesc;
9247 {
9248 int i;
9249
9250 if (strcmp (idesc->name, "srlz.i") == 0)
9251 {
9252 instruction_serialization ();
9253 }
9254 else if (strcmp (idesc->name, "srlz.d") == 0)
9255 {
9256 data_serialization ();
9257 }
9258 else if (is_interruption_or_rfi (idesc)
9259 || is_taken_branch (idesc))
9260 {
9261 /* Although technically the taken branch doesn't clear dependencies
9262 which require a srlz.[id], we don't follow the branch; the next
9263 instruction is assumed to start with a clean slate. */
9264 regdepslen = 0;
9265 md.path = 0;
9266 }
9267 else if (is_conditional_branch (idesc)
9268 && CURR_SLOT.qp_regno != 0)
9269 {
9270 int is_call = strstr (idesc->name, ".call") != NULL;
9271
9272 for (i = 0; i < qp_implieslen; i++)
9273 {
9274 /* If the conditional branch's predicate is implied by the predicate
9275 in an existing dependency, remove that dependency. */
9276 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9277 {
9278 int depind = 0;
9279 /* Note that this implied predicate takes a branch so that if
9280 a later insn generates a DV but its predicate implies this
9281 one, we can avoid the false DV warning. */
9282 qp_implies[i].p2_branched = 1;
9283 while (depind < regdepslen)
9284 {
9285 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9286 {
9287 print_dependency ("Removing", depind);
9288 regdeps[depind] = regdeps[--regdepslen];
9289 }
9290 else
9291 ++depind;
9292 }
9293 }
9294 }
9295 /* Any marked resources which have this same predicate should be
9296 cleared, provided that the QP hasn't been modified between the
9297 marking instruction and the branch. */
9298 if (is_call)
9299 {
9300 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9301 }
9302 else
9303 {
9304 i = 0;
9305 while (i < regdepslen)
9306 {
9307 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9308 && regdeps[i].link_to_qp_branch
9309 && (regdeps[i].file != CURR_SLOT.src_file
9310 || regdeps[i].line != CURR_SLOT.src_line))
9311 {
9312 /* Treat like a taken branch */
9313 print_dependency ("Removing", i);
9314 regdeps[i] = regdeps[--regdepslen];
9315 }
9316 else
9317 ++i;
9318 }
9319 }
9320 }
9321 }
9322
9323 /* Examine the current instruction for dependency violations. */
9324
9325 static int
9326 check_dv (idesc)
9327 struct ia64_opcode *idesc;
9328 {
9329 if (md.debug_dv)
9330 {
9331 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9332 idesc->name, CURR_SLOT.src_line,
9333 idesc->dependencies->nchks,
9334 idesc->dependencies->nregs);
9335 }
9336
9337 /* Look through the list of currently marked resources; if the current
9338 instruction has the dependency in its chks list which uses that resource,
9339 check against the specific resources used. */
9340 check_dependencies (idesc);
9341
9342 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9343 then add them to the list of marked resources. */
9344 mark_resources (idesc);
9345
9346 /* There are several types of dependency semantics, and each has its own
9347 requirements for being cleared
9348
9349 Instruction serialization (insns separated by interruption, rfi, or
9350 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9351
9352 Data serialization (instruction serialization, or writer + srlz.d +
9353 reader, where writer and srlz.d are in separate groups) clears
9354 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9355 always be the case).
9356
9357 Instruction group break (groups separated by stop, taken branch,
9358 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9359 */
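  /* Illustrative pattern for the DVS_INSTR case (register names are
     placeholders, not a specific resource):

	 mov <ctl-reg> = r2 ;;    // writer
	 srlz.i ;;                // instruction serialization
	 <reader of ctl-reg>      // now safe to read

     with the writer, srlz.i, and reader each in their own group.  */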
9360 update_dependencies (idesc);
9361
9362 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9363 warning. Keep track of as many as possible that are useful. */
9364 note_register_values (idesc);
9365
9366 /* We don't need or want this anymore. */
9367 md.mem_offset.hint = 0;
9368
9369 return 0;
9370 }
9371
 9372 /* Translate one line of assembly.  Pseudo ops and labels have already been
 9373    handled by this point and do not appear here.  */
9374 void
9375 md_assemble (str)
9376 char *str;
9377 {
9378 char *saved_input_line_pointer, *mnemonic;
9379 const struct pseudo_opcode *pdesc;
9380 struct ia64_opcode *idesc;
9381 unsigned char qp_regno;
9382 unsigned int flags;
9383 int ch;
9384
9385 saved_input_line_pointer = input_line_pointer;
9386 input_line_pointer = str;
9387
9388 /* extract the opcode (mnemonic): */
9389
9390 mnemonic = input_line_pointer;
9391 ch = get_symbol_end ();
9392 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9393 if (pdesc)
9394 {
9395 *input_line_pointer = ch;
9396 (*pdesc->handler) (pdesc->arg);
9397 goto done;
9398 }
9399
9400 /* Find the instruction descriptor matching the arguments. */
9401
9402 idesc = ia64_find_opcode (mnemonic);
9403 *input_line_pointer = ch;
9404 if (!idesc)
9405 {
9406 as_bad ("Unknown opcode `%s'", mnemonic);
9407 goto done;
9408 }
9409
9410 idesc = parse_operands (idesc);
9411 if (!idesc)
9412 goto done;
9413
9414 /* Handle the dynamic ops we can handle now: */
9415 if (idesc->type == IA64_TYPE_DYN)
9416 {
9417 if (strcmp (idesc->name, "add") == 0)
9418 {
9419 if (CURR_SLOT.opnd[2].X_op == O_register
9420 && CURR_SLOT.opnd[2].X_add_number < 4)
9421 mnemonic = "addl";
9422 else
9423 mnemonic = "adds";
9424 ia64_free_opcode (idesc);
9425 idesc = ia64_find_opcode (mnemonic);
9426 #if 0
9427 know (!idesc->next);
9428 #endif
9429 }
9430 else if (strcmp (idesc->name, "mov") == 0)
9431 {
9432 enum ia64_opnd opnd1, opnd2;
9433 int rop;
9434
9435 opnd1 = idesc->operands[0];
9436 opnd2 = idesc->operands[1];
9437 if (opnd1 == IA64_OPND_AR3)
9438 rop = 0;
9439 else if (opnd2 == IA64_OPND_AR3)
9440 rop = 1;
9441 else
9442 abort ();
9443 if (CURR_SLOT.opnd[rop].X_op == O_register
9444 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9445 mnemonic = "mov.i";
9446 else
9447 mnemonic = "mov.m";
9448 ia64_free_opcode (idesc);
9449 idesc = ia64_find_opcode (mnemonic);
9450 while (idesc != NULL
9451 && (idesc->operands[0] != opnd1
9452 || idesc->operands[1] != opnd2))
9453 idesc = get_next_opcode (idesc);
9454 }
9455 }
9456
9457 qp_regno = 0;
9458 if (md.qp.X_op == O_register)
9459 {
9460 qp_regno = md.qp.X_add_number - REG_P;
9461 md.qp.X_op = O_absent;
9462 }
9463
9464 flags = idesc->flags;
9465
9466 if ((flags & IA64_OPCODE_FIRST) != 0)
9467 insn_group_break (1, 0, 0);
9468
9469 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9470 {
9471 as_bad ("`%s' cannot be predicated", idesc->name);
9472 goto done;
9473 }
9474
9475 /* Build the instruction. */
9476 CURR_SLOT.qp_regno = qp_regno;
9477 CURR_SLOT.idesc = idesc;
9478 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9479 dwarf2_where (&CURR_SLOT.debug_line);
9480
9481 /* Add unwind entry, if there is one. */
9482 if (unwind.current_entry)
9483 {
9484 CURR_SLOT.unwind_record = unwind.current_entry;
9485 unwind.current_entry = NULL;
9486 }
9487
9488 /* Check for dependency violations. */
9489 if (md.detect_dv)
9490 check_dv (idesc);
9491
9492 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9493 if (++md.num_slots_in_use >= NUM_SLOTS)
9494 emit_one_bundle ();
9495
9496 if ((flags & IA64_OPCODE_LAST) != 0)
9497 insn_group_break (1, 0, 0);
9498
9499 md.last_text_seg = now_seg;
9500
9501 done:
9502 input_line_pointer = saved_input_line_pointer;
9503 }
9504
9505 /* Called when symbol NAME cannot be found in the symbol table.
9506 Should be used for dynamic valued symbols only. */
9507
9508 symbolS *
9509 md_undefined_symbol (name)
9510 char *name ATTRIBUTE_UNUSED;
9511 {
9512 return 0;
9513 }
9514
 9515 /* Called for any expression that cannot be recognized.  When the
9516 function is called, `input_line_pointer' will point to the start of
9517 the expression. */
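/* Sketch of the expressions handled here: "@gprel(sym)" or "@fptr(func)"
   select relocation pseudo-functions, while the "[r3]" in an indirect
   register operand such as "pmc[r3]" produces an index expression.  */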
9518
9519 void
9520 md_operand (e)
9521 expressionS *e;
9522 {
9523 enum pseudo_type pseudo_type;
9524 const char *name;
9525 size_t len;
9526 int ch, i;
9527
9528 switch (*input_line_pointer)
9529 {
9530 case '@':
9531 /* Find what relocation pseudo-function we're dealing with. */
9532 pseudo_type = 0;
9533 ch = *++input_line_pointer;
9534 for (i = 0; i < NELEMS (pseudo_func); ++i)
9535 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9536 {
9537 len = strlen (pseudo_func[i].name);
9538 if (strncmp (pseudo_func[i].name + 1,
9539 input_line_pointer + 1, len - 1) == 0
9540 && !is_part_of_name (input_line_pointer[len]))
9541 {
9542 input_line_pointer += len;
9543 pseudo_type = pseudo_func[i].type;
9544 break;
9545 }
9546 }
9547 switch (pseudo_type)
9548 {
9549 case PSEUDO_FUNC_RELOC:
9550 SKIP_WHITESPACE ();
9551 if (*input_line_pointer != '(')
9552 {
9553 as_bad ("Expected '('");
9554 goto err;
9555 }
9556 /* Skip '('. */
9557 ++input_line_pointer;
9558 expression (e);
9559 if (*input_line_pointer++ != ')')
9560 {
9561 as_bad ("Missing ')'");
9562 goto err;
9563 }
9564 if (e->X_op != O_symbol)
9565 {
9566 if (e->X_op != O_pseudo_fixup)
9567 {
9568 as_bad ("Not a symbolic expression");
9569 goto err;
9570 }
9571 if (S_GET_VALUE (e->X_op_symbol) == FUNC_FPTR_RELATIVE
9572 && i == FUNC_LT_RELATIVE)
9573 i = FUNC_LT_FPTR_RELATIVE;
9574 else
9575 {
9576 as_bad ("Illegal combination of relocation functions");
9577 goto err;
9578 }
9579 }
9580 /* Make sure gas doesn't get rid of local symbols that are used
9581 in relocs. */
9582 e->X_op = O_pseudo_fixup;
9583 e->X_op_symbol = pseudo_func[i].u.sym;
9584 break;
9585
9586 case PSEUDO_FUNC_CONST:
9587 e->X_op = O_constant;
9588 e->X_add_number = pseudo_func[i].u.ival;
9589 break;
9590
9591 case PSEUDO_FUNC_REG:
9592 e->X_op = O_register;
9593 e->X_add_number = pseudo_func[i].u.ival;
9594 break;
9595
9596 default:
9597 name = input_line_pointer - 1;
9598 get_symbol_end ();
9599 as_bad ("Unknown pseudo function `%s'", name);
9600 goto err;
9601 }
9602 break;
9603
9604 case '[':
9605 ++input_line_pointer;
9606 expression (e);
9607 if (*input_line_pointer != ']')
9608 {
 9609 	  as_bad ("Closing bracket missing");
9610 goto err;
9611 }
9612 else
9613 {
9614 if (e->X_op != O_register)
9615 as_bad ("Register expected as index");
9616
9617 ++input_line_pointer;
9618 e->X_op = O_index;
9619 }
9620 break;
9621
9622 default:
9623 break;
9624 }
9625 return;
9626
9627 err:
9628 ignore_rest_of_line ();
9629 }
9630
9631 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
 9632    a section symbol plus some offset.  For relocs involving @fptr()
 9633    directives, we don't want such adjustments since we need to have the
9634 original symbol's name in the reloc. */
9635 int
9636 ia64_fix_adjustable (fix)
9637 fixS *fix;
9638 {
9639 /* Prevent all adjustments to global symbols */
9640 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
9641 return 0;
9642
9643 switch (fix->fx_r_type)
9644 {
9645 case BFD_RELOC_IA64_FPTR64I:
9646 case BFD_RELOC_IA64_FPTR32MSB:
9647 case BFD_RELOC_IA64_FPTR32LSB:
9648 case BFD_RELOC_IA64_FPTR64MSB:
9649 case BFD_RELOC_IA64_FPTR64LSB:
9650 case BFD_RELOC_IA64_LTOFF_FPTR22:
9651 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9652 return 0;
9653 default:
9654 break;
9655 }
9656
9657 return 1;
9658 }
9659
9660 int
9661 ia64_force_relocation (fix)
9662 fixS *fix;
9663 {
9664 switch (fix->fx_r_type)
9665 {
9666 case BFD_RELOC_IA64_FPTR64I:
9667 case BFD_RELOC_IA64_FPTR32MSB:
9668 case BFD_RELOC_IA64_FPTR32LSB:
9669 case BFD_RELOC_IA64_FPTR64MSB:
9670 case BFD_RELOC_IA64_FPTR64LSB:
9671
9672 case BFD_RELOC_IA64_LTOFF22:
9673 case BFD_RELOC_IA64_LTOFF64I:
9674 case BFD_RELOC_IA64_LTOFF_FPTR22:
9675 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9676 case BFD_RELOC_IA64_PLTOFF22:
9677 case BFD_RELOC_IA64_PLTOFF64I:
9678 case BFD_RELOC_IA64_PLTOFF64MSB:
9679 case BFD_RELOC_IA64_PLTOFF64LSB:
9680 return 1;
9681
9682 default:
9683 return 0;
9684 }
9685 return 0;
9686 }
9687
 9688 /* Decide what point a pc-relative relocation is relative to,
 9689    relative to the pc-relative fixup.  Er, relatively speaking.  */
9690 long
9691 ia64_pcrel_from_section (fix, sec)
9692 fixS *fix;
9693 segT sec;
9694 {
9695 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
9696
9697 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
9698 off &= ~0xfUL;
9699
9700 return off;
9701 }
9702
9703 /* This is called whenever some data item (not an instruction) needs a
9704 fixup. We pick the right reloc code depending on the byteorder
9705 currently in effect. */
9706 void
9707 ia64_cons_fix_new (f, where, nbytes, exp)
9708 fragS *f;
9709 int where;
9710 int nbytes;
9711 expressionS *exp;
9712 {
9713 bfd_reloc_code_real_type code;
9714 fixS *fix;
9715
9716 switch (nbytes)
9717 {
 9718     /* There are no relocs for 8 and 16 bit quantities, but we allow
9719 them here since they will work fine as long as the expression
9720 is fully defined at the end of the pass over the source file. */
9721 case 1: code = BFD_RELOC_8; break;
9722 case 2: code = BFD_RELOC_16; break;
9723 case 4:
9724 if (target_big_endian)
9725 code = BFD_RELOC_IA64_DIR32MSB;
9726 else
9727 code = BFD_RELOC_IA64_DIR32LSB;
9728 break;
9729
9730 case 8:
9731 if (target_big_endian)
9732 code = BFD_RELOC_IA64_DIR64MSB;
9733 else
9734 code = BFD_RELOC_IA64_DIR64LSB;
9735 break;
9736
9737 default:
9738 as_bad ("Unsupported fixup size %d", nbytes);
9739 ignore_rest_of_line ();
9740 return;
9741 }
9742 if (exp->X_op == O_pseudo_fixup)
9743 {
9744 /* ??? */
9745 exp->X_op = O_symbol;
9746 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
9747 }
9748 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
9749 /* We need to store the byte order in effect in case we're going
 9750      to fix an 8 or 16 bit relocation (for which there are no real
9751 relocs available). See md_apply_fix(). */
9752 fix->tc_fix_data.bigendian = target_big_endian;
9753 }
9754
9755 /* Return the actual relocation we wish to associate with the pseudo
9756 reloc described by SYM and R_TYPE. SYM should be one of the
9757 symbols in the pseudo_func array, or NULL. */
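/* For example (roughly), "addl r2 = @gprel(sym), gp" arrives here with
   R_TYPE == BFD_RELOC_IA64_IMM22 and SYM naming FUNC_GP_RELATIVE, and is
   mapped to BFD_RELOC_IA64_GPREL22 below.  */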
9758
9759 static bfd_reloc_code_real_type
9760 ia64_gen_real_reloc_type (sym, r_type)
9761 struct symbol *sym;
9762 bfd_reloc_code_real_type r_type;
9763 {
9764 bfd_reloc_code_real_type new = 0;
9765
9766 if (sym == NULL)
9767 {
9768 return r_type;
9769 }
9770
9771 switch (S_GET_VALUE (sym))
9772 {
9773 case FUNC_FPTR_RELATIVE:
9774 switch (r_type)
9775 {
9776 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
9777 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
9778 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
9779 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
9780 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
9781 default: break;
9782 }
9783 break;
9784
9785 case FUNC_GP_RELATIVE:
9786 switch (r_type)
9787 {
9788 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
9789 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
9790 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
9791 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
9792 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
9793 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
9794 default: break;
9795 }
9796 break;
9797
9798 case FUNC_LT_RELATIVE:
9799 switch (r_type)
9800 {
9801 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
9802 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
9803 default: break;
9804 }
9805 break;
9806
9807 case FUNC_PC_RELATIVE:
9808 switch (r_type)
9809 {
9810 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
9811 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
9812 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
9813 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
9814 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
9815 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
9816 default: break;
9817 }
9818 break;
9819
9820 case FUNC_PLT_RELATIVE:
9821 switch (r_type)
9822 {
9823 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
9824 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
9825 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
9826 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
9827 default: break;
9828 }
9829 break;
9830
9831 case FUNC_SEC_RELATIVE:
9832 switch (r_type)
9833 {
9834 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
9835 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
9836 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
9837 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
9838 default: break;
9839 }
9840 break;
9841
9842 case FUNC_SEG_RELATIVE:
9843 switch (r_type)
9844 {
9845 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
9846 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
9847 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
9848 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
9849 default: break;
9850 }
9851 break;
9852
9853 case FUNC_LTV_RELATIVE:
9854 switch (r_type)
9855 {
9856 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
9857 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
9858 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
9859 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
9860 default: break;
9861 }
9862 break;
9863
9864 case FUNC_LT_FPTR_RELATIVE:
9865 switch (r_type)
9866 {
9867 case BFD_RELOC_IA64_IMM22:
9868 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
9869 case BFD_RELOC_IA64_IMM64:
9870 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
9871 default:
9872 break;
9873 }
9874 break;
9875 default:
9876 abort ();
9877 }
9878 /* Hmmmm. Should this ever occur? */
9879 if (new)
9880 return new;
9881 else
9882 return r_type;
9883 }
9884
 9885 /* Here is where we generate the appropriate reloc for pseudo relocation
9886 functions. */
9887 void
9888 ia64_validate_fix (fix)
9889 fixS *fix;
9890 {
9891 switch (fix->fx_r_type)
9892 {
9893 case BFD_RELOC_IA64_FPTR64I:
9894 case BFD_RELOC_IA64_FPTR32MSB:
9895 case BFD_RELOC_IA64_FPTR64LSB:
9896 case BFD_RELOC_IA64_LTOFF_FPTR22:
9897 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9898 if (fix->fx_offset != 0)
9899 as_bad_where (fix->fx_file, fix->fx_line,
9900 "No addend allowed in @fptr() relocation");
9901 break;
9902 default:
9903 break;
9904 }
9905
9906 return;
9907 }
9908
9909 static void
9910 fix_insn (fix, odesc, value)
9911 fixS *fix;
9912 const struct ia64_operand *odesc;
9913 valueT value;
9914 {
9915 bfd_vma insn[3], t0, t1, control_bits;
9916 const char *err;
9917 char *fixpos;
9918 long slot;
9919
9920 slot = fix->fx_where & 0x3;
9921 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
9922
 9923   /* Bundles are always in little-endian byte order.  */
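  /* Layout sketch: a 128-bit bundle is a 5-bit template field followed by
     three 41-bit instruction slots.  T0 holds bits 0..63 and T1 bits
     64..127, so slot 1 straddles the two halves and is reassembled below.  */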
9924 t0 = bfd_getl64 (fixpos);
9925 t1 = bfd_getl64 (fixpos + 8);
9926 control_bits = t0 & 0x1f;
9927 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
9928 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
9929 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
9930
9931 err = NULL;
9932 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
9933 {
9934 insn[1] = (value >> 22) & 0x1ffffffffffLL;
9935 insn[2] |= (((value & 0x7f) << 13)
9936 | (((value >> 7) & 0x1ff) << 27)
9937 | (((value >> 16) & 0x1f) << 22)
9938 | (((value >> 21) & 0x1) << 21)
9939 | (((value >> 63) & 0x1) << 36));
9940 }
9941 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
9942 {
9943 if (value & ~0x3fffffffffffffffULL)
9944 err = "integer operand out of range";
9945 insn[1] = (value >> 21) & 0x1ffffffffffLL;
9946 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
9947 }
9948 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
9949 {
9950 value >>= 4;
9951 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
9952 insn[2] |= ((((value >> 59) & 0x1) << 36)
9953 | (((value >> 0) & 0xfffff) << 13));
9954 }
9955 else
9956 err = (*odesc->insert) (odesc, value, insn + slot);
9957
9958 if (err)
9959 as_bad_where (fix->fx_file, fix->fx_line, err);
9960
9961 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
9962 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
9963 number_to_chars_littleendian (fixpos + 0, t0, 8);
9964 number_to_chars_littleendian (fixpos + 8, t1, 8);
9965 }
9966
9967 /* Attempt to simplify or even eliminate a fixup. The return value is
9968 ignored; perhaps it was once meaningful, but now it is historical.
9969 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
9970
9971 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
9972 (if possible). */
9973 int
9974 md_apply_fix3 (fix, valuep, seg)
9975 fixS *fix;
9976 valueT *valuep;
9977 segT seg ATTRIBUTE_UNUSED;
9978 {
9979 char *fixpos;
9980 valueT value = *valuep;
9981 int adjust = 0;
9982
9983 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
9984
9985 if (fix->fx_pcrel)
9986 {
9987 switch (fix->fx_r_type)
9988 {
9989 case BFD_RELOC_IA64_DIR32MSB:
9990 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
9991 adjust = 1;
9992 break;
9993
9994 case BFD_RELOC_IA64_DIR32LSB:
9995 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
9996 adjust = 1;
9997 break;
9998
9999 case BFD_RELOC_IA64_DIR64MSB:
10000 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10001 adjust = 1;
10002 break;
10003
10004 case BFD_RELOC_IA64_DIR64LSB:
10005 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10006 adjust = 1;
10007 break;
10008
10009 default:
10010 break;
10011 }
10012 }
10013 if (fix->fx_addsy)
10014 {
10015 if (fix->fx_r_type == (int) BFD_RELOC_UNUSED)
10016 {
10017 /* This must be a TAG13 or TAG13b operand. There are no external
10018 relocs defined for them, so we must give an error. */
10019 as_bad_where (fix->fx_file, fix->fx_line,
10020 "%s must have a constant value",
10021 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10022 fix->fx_done = 1;
10023 return 1;
10024 }
10025
10026 /* ??? This is a hack copied from tc-i386.c to make PCREL relocs
10027 work. There should be a better way to handle this. */
10028 if (adjust)
10029 fix->fx_offset += fix->fx_where + fix->fx_frag->fr_address;
10030 }
10031 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10032 {
10033 if (fix->tc_fix_data.bigendian)
10034 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10035 else
10036 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10037 fix->fx_done = 1;
10038 return 1;
10039 }
10040 else
10041 {
10042 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10043 fix->fx_done = 1;
10044 return 1;
10045 }
10046 return 1;
10047 }
10048
10049 /* Generate the BFD reloc to be stuck in the object file from the
10050 fixup used internally in the assembler. */
10051
10052 arelent *
10053 tc_gen_reloc (sec, fixp)
10054 asection *sec ATTRIBUTE_UNUSED;
10055 fixS *fixp;
10056 {
10057 arelent *reloc;
10058
10059 reloc = xmalloc (sizeof (*reloc));
10060 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10061 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10062 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10063 reloc->addend = fixp->fx_offset;
10064 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10065
10066 if (!reloc->howto)
10067 {
10068 as_bad_where (fixp->fx_file, fixp->fx_line,
10069 "Cannot represent %s relocation in object file",
10070 bfd_get_reloc_code_name (fixp->fx_r_type));
10071 }
10072 return reloc;
10073 }
10074
10075 /* Turn a string in input_line_pointer into a floating point constant
10076 of type TYPE, and store the appropriate bytes in *LIT. The number
10077 of LITTLENUMS emitted is stored in *SIZE. An error message is
10078 returned, or NULL on OK. */
10079
10080 #define MAX_LITTLENUMS 5
10081
10082 char *
10083 md_atof (type, lit, size)
10084 int type;
10085 char *lit;
10086 int *size;
10087 {
10088 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10089 LITTLENUM_TYPE *word;
10090 char *t;
10091 int prec;
10092
10093 switch (type)
10094 {
10095 /* IEEE floats */
10096 case 'f':
10097 case 'F':
10098 case 's':
10099 case 'S':
10100 prec = 2;
10101 break;
10102
10103 case 'd':
10104 case 'D':
10105 case 'r':
10106 case 'R':
10107 prec = 4;
10108 break;
10109
10110 case 'x':
10111 case 'X':
10112 case 'p':
10113 case 'P':
10114 prec = 5;
10115 break;
10116
10117 default:
10118 *size = 0;
10119 return "Bad call to MD_ATOF()";
10120 }
10121 t = atof_ieee (input_line_pointer, type, words);
10122 if (t)
10123 input_line_pointer = t;
10124 *size = prec * sizeof (LITTLENUM_TYPE);
10125
10126 for (word = words + prec - 1; prec--;)
10127 {
10128 md_number_to_chars (lit, (long) (*word--), sizeof (LITTLENUM_TYPE));
10129 lit += sizeof (LITTLENUM_TYPE);
10130 }
10131 return 0;
10132 }
10133
10134 /* Round up a section's size to the appropriate boundary. */
10135 valueT
10136 md_section_align (seg, size)
10137 segT seg;
10138 valueT size;
10139 {
10140 int align = bfd_get_section_alignment (stdoutput, seg);
10141 valueT mask = ((valueT) 1 << align) - 1;
10142
10143 return (size + mask) & ~mask;
10144 }
10145
10146 /* Handle ia64 specific semantics of the align directive. */
10147
10148 void
10149 ia64_md_do_align (n, fill, len, max)
10150 int n ATTRIBUTE_UNUSED;
10151 const char *fill ATTRIBUTE_UNUSED;
10152 int len ATTRIBUTE_UNUSED;
10153 int max ATTRIBUTE_UNUSED;
10154 {
10155 if (subseg_text_p (now_seg))
10156 ia64_flush_insns ();
10157 }
10158
10159 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10160 of an rs_align_code fragment. */
10161
10162 void
10163 ia64_handle_align (fragp)
10164 fragS *fragp;
10165 {
 10166   /* Use an MFI bundle of nops with no stop bits.  */
10167 static const unsigned char be_nop[]
10168 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
10169 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
10170 static const unsigned char le_nop[]
10171 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10172 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
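  /* For reference: the low 5 bits of the first little-endian byte (0x0c)
     form the template field -- 0x0c is MFI with no stop bits -- and each of
     the three 41-bit slots holds a nop.  */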
10173
10174 int bytes;
10175 char *p;
10176
10177 if (fragp->fr_type != rs_align_code)
10178 return;
10179
10180 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
10181 p = fragp->fr_literal + fragp->fr_fix;
10182
10183 /* Make sure we are on a 16-byte boundary, in case someone has been
10184 putting data into a text section. */
10185 if (bytes & 15)
10186 {
10187 int fix = bytes & 15;
10188 memset (p, 0, fix);
10189 p += fix;
10190 bytes -= fix;
10191 fragp->fr_fix += fix;
10192 }
10193
10194 memcpy (p, (target_big_endian ? be_nop : le_nop), 16);
10195 fragp->fr_var = 16;
10196 }