1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23 /*
24 TODO:
25
26 - optional operands
27 - directives:
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36      (e.g., check out the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
48 #include "subsegs.h"
49
50 #include "opcode/ia64.h"
51
52 #include "elf/ia64.h"
53
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
56
57 #define NUM_SLOTS 4
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
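/* md.slot[] is used as a ring buffer of NUM_SLOTS entries; adding
   NUM_SLOTS before taking the modulus keeps the PREV_SLOT index
   non-negative when md.curr_slot is 0 (e.g. (0 + 4 - 1) % 4 == 3).  */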
60
61 #define O_pseudo_fixup (O_max + 1)
62
63 enum special_section
64 {
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
67 SPECIAL_SECTION_SBSS,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
76 };
77
78 enum reloc_func
79 {
80 FUNC_DTP_MODULE,
81 FUNC_DTP_RELATIVE,
82 FUNC_FPTR_RELATIVE,
83 FUNC_GP_RELATIVE,
84 FUNC_LT_RELATIVE,
85 FUNC_LT_RELATIVE_X,
86 FUNC_PC_RELATIVE,
87 FUNC_PLT_RELATIVE,
88 FUNC_SEC_RELATIVE,
89 FUNC_SEG_RELATIVE,
90 FUNC_TP_RELATIVE,
91 FUNC_LTV_RELATIVE,
92 FUNC_LT_FPTR_RELATIVE,
93 FUNC_LT_DTP_MODULE,
94 FUNC_LT_DTP_RELATIVE,
95 FUNC_LT_TP_RELATIVE,
96 FUNC_IPLT_RELOC,
97 };
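/* These values also index the pseudo_func[] table below, so the reloc
   entries there must come first and in this same order (note the
   "placeholder for FUNC_*" entries in that table).  */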
98
99 enum reg_symbol
100 {
101 REG_GR = 0,
102 REG_FR = (REG_GR + 128),
103 REG_AR = (REG_FR + 128),
104 REG_CR = (REG_AR + 128),
105 REG_P = (REG_CR + 128),
106 REG_BR = (REG_P + 64),
107 REG_IP = (REG_BR + 8),
108 REG_CFM,
109 REG_PR,
110 REG_PR_ROT,
111 REG_PSR,
112 REG_PSR_L,
113 REG_PSR_UM,
114 /* The following are pseudo-registers for use by gas only. */
115 IND_CPUID,
116 IND_DBR,
117 IND_DTR,
118 IND_ITR,
119 IND_IBR,
120 IND_MEM,
121 IND_MSR,
122 IND_PKR,
123 IND_PMC,
124 IND_PMD,
125 IND_RR,
126 /* The following pseudo-registers are used for unwind directives only: */
127 REG_PSP,
128 REG_PRIUNAT,
129 REG_NUM
130 };
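/* A register symbol's value is its class base plus the register index
   (e.g. REG_FR + 5 for f5); the spacing above leaves room for 128
   general, floating-point, application and control registers, 64
   predicates and 8 branch registers.  */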
131
132 enum dynreg_type
133 {
134 DYNREG_GR = 0, /* dynamic general purpose register */
135 DYNREG_FR, /* dynamic floating point register */
136 DYNREG_PR, /* dynamic predicate register */
137 DYNREG_NUM_TYPES
138 };
139
140 enum operand_match_result
141 {
142 OPERAND_MATCH,
143 OPERAND_OUT_OF_RANGE,
144 OPERAND_MISMATCH
145 };
146
147 /* On the ia64, we can't know the address of a text label until the
148 instructions are packed into a bundle. To handle this, we keep
149 track of the list of labels that appear in front of each
150 instruction. */
151 struct label_fix
152 {
153 struct label_fix *next;
154 struct symbol *sym;
155 };
156
157 /* This is the endianness of the current section. */
158 extern int target_big_endian;
159
160 /* This is the default endianness. */
161 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
162
163 void (*ia64_number_to_chars) PARAMS ((char *, valueT, int));
164
165 static void ia64_float_to_chars_bigendian
166 PARAMS ((char *, LITTLENUM_TYPE *, int));
167 static void ia64_float_to_chars_littleendian
168 PARAMS ((char *, LITTLENUM_TYPE *, int));
169 static void (*ia64_float_to_chars)
170 PARAMS ((char *, LITTLENUM_TYPE *, int));
171
172 static struct hash_control *alias_hash;
173 static struct hash_control *alias_name_hash;
174 static struct hash_control *secalias_hash;
175 static struct hash_control *secalias_name_hash;
176
177 /* Characters which always start a comment. */
178 const char comment_chars[] = "";
179
180 /* Characters which start a comment at the beginning of a line. */
181 const char line_comment_chars[] = "#";
182
183 /* Characters which may be used to separate multiple commands on a
184 single line. */
185 const char line_separator_chars[] = ";";
186
187 /* Characters which are used to indicate an exponent in a floating
188 point number. */
189 const char EXP_CHARS[] = "eE";
190
191 /* Characters which mean that a number is a floating point constant,
192 as in 0d1.0. */
193 const char FLT_CHARS[] = "rRsSfFdDxXpP";
194
195 /* ia64-specific option processing: */
196
197 const char *md_shortopts = "m:N:x::";
198
199 struct option md_longopts[] =
200 {
201 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
202 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
203 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
204 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
205 };
206
207 size_t md_longopts_size = sizeof (md_longopts);
208
209 static struct
210 {
211 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
212 struct hash_control *reg_hash; /* register name hash table */
213 struct hash_control *dynreg_hash; /* dynamic register hash table */
214 struct hash_control *const_hash; /* constant hash table */
215 struct hash_control *entry_hash; /* code entry hint hash table */
216
217 symbolS *regsym[REG_NUM];
218
219     /* If X_op is != O_absent, the register name for the instruction's
220        qualifying predicate.  Otherwise, p0 is assumed for instructions
221        that can be predicated.  */
222 expressionS qp;
223
224 unsigned int
225 manual_bundling : 1,
226 debug_dv: 1,
227 detect_dv: 1,
228 explicit_mode : 1, /* which mode we're in */
229 default_explicit_mode : 1, /* which mode is the default */
230 mode_explicitly_set : 1, /* was the current mode explicitly set? */
231 auto_align : 1,
232 keep_pending_output : 1;
233
234 /* Each bundle consists of up to three instructions. We keep
235      track of the four most recent instructions so we can correctly set
236 the end_of_insn_group for the last instruction in a bundle. */
237 int curr_slot;
238 int num_slots_in_use;
239 struct slot
240 {
241 unsigned int
242 end_of_insn_group : 1,
243 manual_bundling_on : 1,
244 manual_bundling_off : 1,
245 loc_directive_seen : 1;
246 signed char user_template; /* user-selected template, if any */
247 unsigned char qp_regno; /* qualifying predicate */
248 /* This duplicates a good fraction of "struct fix" but we
249 can't use a "struct fix" instead since we can't call
250 fix_new_exp() until we know the address of the instruction. */
251 int num_fixups;
252 struct insn_fix
253 {
254 bfd_reloc_code_real_type code;
255 enum ia64_opnd opnd; /* type of operand in need of fix */
256 unsigned int is_pcrel : 1; /* is operand pc-relative? */
257 expressionS expr; /* the value to be inserted */
258 }
259 fixup[2]; /* at most two fixups per insn */
260 struct ia64_opcode *idesc;
261 struct label_fix *label_fixups;
262 struct label_fix *tag_fixups;
263 struct unw_rec_list *unwind_record; /* Unwind directive. */
264 expressionS opnd[6];
265 char *src_file;
266 unsigned int src_line;
267 struct dwarf2_line_info debug_line;
268 }
269 slot[NUM_SLOTS];
270
271 segT last_text_seg;
272
273 struct dynreg
274 {
275 struct dynreg *next; /* next dynamic register */
276 const char *name;
277 unsigned short base; /* the base register number */
278 unsigned short num_regs; /* # of registers in this set */
279 }
280 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
281
282 flagword flags; /* ELF-header flags */
283
284 struct mem_offset {
285 unsigned hint:1; /* is this hint currently valid? */
286 bfd_vma offset; /* mem.offset offset */
287 bfd_vma base; /* mem.offset base */
288 } mem_offset;
289
290 int path; /* number of alt. entry points seen */
291 const char **entry_labels; /* labels of all alternate paths in
292 the current DV-checking block. */
293 int maxpaths; /* size currently allocated for
294 entry_labels */
295 /* Support for hardware errata workarounds. */
296
297 /* Record data about the last three insn groups. */
298 struct group
299 {
300 /* B-step workaround.
301 For each predicate register, this is set if the corresponding insn
302 group conditionally sets this register with one of the affected
303 instructions. */
304 int p_reg_set[64];
305 /* B-step workaround.
306 For each general register, this is set if the corresponding insn
307        a) is conditional on one of the predicate registers for which
308 P_REG_SET is 1 in the corresponding entry of the previous group,
309 b) sets this general register with one of the affected
310 instructions. */
311 int g_reg_set_conditionally[128];
312 } last_groups[3];
313 int group_idx;
314
315 int pointer_size; /* size in bytes of a pointer */
316 int pointer_size_shift; /* shift size of a pointer for alignment */
317 }
318 md;
319
320 /* application registers: */
321
322 #define AR_K0 0
323 #define AR_K7 7
324 #define AR_RSC 16
325 #define AR_BSP 17
326 #define AR_BSPSTORE 18
327 #define AR_RNAT 19
328 #define AR_UNAT 36
329 #define AR_FPSR 40
330 #define AR_ITC 44
331 #define AR_PFS 64
332 #define AR_LC 65
333
334 static const struct
335 {
336 const char *name;
337 int regnum;
338 }
339 ar[] =
340 {
341 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
342 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
343 {"ar.rsc", 16}, {"ar.bsp", 17},
344 {"ar.bspstore", 18}, {"ar.rnat", 19},
345 {"ar.fcr", 21}, {"ar.eflag", 24},
346 {"ar.csd", 25}, {"ar.ssd", 26},
347 {"ar.cflg", 27}, {"ar.fsr", 28},
348 {"ar.fir", 29}, {"ar.fdr", 30},
349 {"ar.ccv", 32}, {"ar.unat", 36},
350 {"ar.fpsr", 40}, {"ar.itc", 44},
351 {"ar.pfs", 64}, {"ar.lc", 65},
352 {"ar.ec", 66},
353 };
354
355 #define CR_IPSR 16
356 #define CR_ISR 17
357 #define CR_IIP 19
358 #define CR_IFA 20
359 #define CR_ITIR 21
360 #define CR_IIPA 22
361 #define CR_IFS 23
362 #define CR_IIM 24
363 #define CR_IHA 25
364 #define CR_IVR 65
365 #define CR_TPR 66
366 #define CR_EOI 67
367 #define CR_IRR0 68
368 #define CR_IRR3 71
369 #define CR_LRR0 80
370 #define CR_LRR1 81
371
372 /* control registers: */
373 static const struct
374 {
375 const char *name;
376 int regnum;
377 }
378 cr[] =
379 {
380 {"cr.dcr", 0},
381 {"cr.itm", 1},
382 {"cr.iva", 2},
383 {"cr.pta", 8},
384 {"cr.gpta", 9},
385 {"cr.ipsr", 16},
386 {"cr.isr", 17},
387 {"cr.iip", 19},
388 {"cr.ifa", 20},
389 {"cr.itir", 21},
390 {"cr.iipa", 22},
391 {"cr.ifs", 23},
392 {"cr.iim", 24},
393 {"cr.iha", 25},
394 {"cr.lid", 64},
395 {"cr.ivr", 65},
396 {"cr.tpr", 66},
397 {"cr.eoi", 67},
398 {"cr.irr0", 68},
399 {"cr.irr1", 69},
400 {"cr.irr2", 70},
401 {"cr.irr3", 71},
402 {"cr.itv", 72},
403 {"cr.pmv", 73},
404 {"cr.cmcv", 74},
405 {"cr.lrr0", 80},
406 {"cr.lrr1", 81}
407 };
408
409 #define PSR_MFL 4
410 #define PSR_IC 13
411 #define PSR_DFL 18
412 #define PSR_CPL 32
413
414 static const struct const_desc
415 {
416 const char *name;
417 valueT value;
418 }
419 const_bits[] =
420 {
421 /* PSR constant masks: */
422
423 /* 0: reserved */
424 {"psr.be", ((valueT) 1) << 1},
425 {"psr.up", ((valueT) 1) << 2},
426 {"psr.ac", ((valueT) 1) << 3},
427 {"psr.mfl", ((valueT) 1) << 4},
428 {"psr.mfh", ((valueT) 1) << 5},
429 /* 6-12: reserved */
430 {"psr.ic", ((valueT) 1) << 13},
431 {"psr.i", ((valueT) 1) << 14},
432 {"psr.pk", ((valueT) 1) << 15},
433 /* 16: reserved */
434 {"psr.dt", ((valueT) 1) << 17},
435 {"psr.dfl", ((valueT) 1) << 18},
436 {"psr.dfh", ((valueT) 1) << 19},
437 {"psr.sp", ((valueT) 1) << 20},
438 {"psr.pp", ((valueT) 1) << 21},
439 {"psr.di", ((valueT) 1) << 22},
440 {"psr.si", ((valueT) 1) << 23},
441 {"psr.db", ((valueT) 1) << 24},
442 {"psr.lp", ((valueT) 1) << 25},
443 {"psr.tb", ((valueT) 1) << 26},
444 {"psr.rt", ((valueT) 1) << 27},
445 /* 28-31: reserved */
446 /* 32-33: cpl (current privilege level) */
447 {"psr.is", ((valueT) 1) << 34},
448 {"psr.mc", ((valueT) 1) << 35},
449 {"psr.it", ((valueT) 1) << 36},
450 {"psr.id", ((valueT) 1) << 37},
451 {"psr.da", ((valueT) 1) << 38},
452 {"psr.dd", ((valueT) 1) << 39},
453 {"psr.ss", ((valueT) 1) << 40},
454 /* 41-42: ri (restart instruction) */
455 {"psr.ed", ((valueT) 1) << 43},
456 {"psr.bn", ((valueT) 1) << 44},
457 };
458
459 /* indirect register-sets/memory: */
460
461 static const struct
462 {
463 const char *name;
464 int regnum;
465 }
466 indirect_reg[] =
467 {
468 { "CPUID", IND_CPUID },
469 { "cpuid", IND_CPUID },
470 { "dbr", IND_DBR },
471 { "dtr", IND_DTR },
472 { "itr", IND_ITR },
473 { "ibr", IND_IBR },
474 { "msr", IND_MSR },
475 { "pkr", IND_PKR },
476 { "pmc", IND_PMC },
477 { "pmd", IND_PMD },
478 { "rr", IND_RR },
479 };
480
481 /* Pseudo functions used to indicate relocation types; these functions
482    start with an at sign (@).  */
483 static struct
484 {
485 const char *name;
486 enum pseudo_type
487 {
488 PSEUDO_FUNC_NONE,
489 PSEUDO_FUNC_RELOC,
490 PSEUDO_FUNC_CONST,
491 PSEUDO_FUNC_REG,
492 PSEUDO_FUNC_FLOAT
493 }
494 type;
495 union
496 {
497 unsigned long ival;
498 symbolS *sym;
499 }
500 u;
501 }
502 pseudo_func[] =
503 {
504 /* reloc pseudo functions (these must come first!): */
505 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
506 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
507 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
508 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
509 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
510 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
511 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
512 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
513 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
514 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
515 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
516 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
517 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
518 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
519 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
520 { "", 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
521 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
522
523 /* mbtype4 constants: */
524 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
525 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
526 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
527 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
528 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
529
530 /* fclass constants: */
531 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
532 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
533 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
534 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
535 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
536 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
537 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
538 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
539 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
540
541 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
542
543 /* hint constants: */
544 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
545
546 /* unwind-related constants: */
547 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
548 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
549 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
550 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } },
551 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
552 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
553 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
554
555 /* unwind-related registers: */
556 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
557 };
558
559 /* 41-bit nop opcodes (one per unit): */
560 static const bfd_vma nop[IA64_NUM_UNITS] =
561 {
562 0x0000000000LL, /* NIL => break 0 */
563 0x0008000000LL, /* I-unit nop */
564 0x0008000000LL, /* M-unit nop */
565 0x4000000000LL, /* B-unit nop */
566 0x0008000000LL, /* F-unit nop */
567 0x0008000000LL, /* L-"unit" nop */
568 0x0008000000LL, /* X-unit nop */
569 };
570
571 /* Can't be `const' as it's passed to input routines (which have the
572    habit of setting temporary sentinels).  */
573 static char special_section_name[][20] =
574 {
575 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
576 {".IA_64.unwind"}, {".IA_64.unwind_info"},
577 {".init_array"}, {".fini_array"}
578 };
579
580 /* The best template for a particular sequence of up to three
581 instructions: */
582 #define N IA64_NUM_TYPES
583 static unsigned char best_template[N][N][N];
584 #undef N
585
586 /* Resource dependencies currently in effect */
587 static struct rsrc {
588 int depind; /* dependency index */
589 const struct ia64_dependency *dependency; /* actual dependency */
590 unsigned specific:1, /* is this a specific bit/regno? */
591 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
592 int index; /* specific regno/bit within dependency */
593 int note; /* optional qualifying note (0 if none) */
594 #define STATE_NONE 0
595 #define STATE_STOP 1
596 #define STATE_SRLZ 2
597 int insn_srlz; /* current insn serialization state */
598 int data_srlz; /* current data serialization state */
599 int qp_regno; /* qualifying predicate for this usage */
600 char *file; /* what file marked this dependency */
601 unsigned int line; /* what line marked this dependency */
602 struct mem_offset mem_offset; /* optional memory offset hint */
603 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
604 int path; /* corresponding code entry index */
605 } *regdeps = NULL;
606 static int regdepslen = 0;
607 static int regdepstotlen = 0;
608 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
609 static const char *dv_sem[] = { "none", "implied", "impliedf",
610 "data", "instr", "specific", "stop", "other" };
611 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
612
613 /* Current state of PR mutexation */
614 static struct qpmutex {
615 valueT prmask;
616 int path;
617 } *qp_mutexes = NULL; /* QP mutex bitmasks */
618 static int qp_mutexeslen = 0;
619 static int qp_mutexestotlen = 0;
620 static valueT qp_safe_across_calls = 0;
621
622 /* Current state of PR implications */
623 static struct qp_imply {
624 unsigned p1:6;
625 unsigned p2:6;
626 unsigned p2_branched:1;
627 int path;
628 } *qp_implies = NULL;
629 static int qp_implieslen = 0;
630 static int qp_impliestotlen = 0;
631
632 /* Keep track of static GR values so that indirect register usage can
633 sometimes be tracked. */
634 static struct gr {
635 unsigned known:1;
636 int path;
637 valueT value;
638 } gr_values[128] = {{ 1, 0, 0 }};
639
640 /* Remember the alignment frag. */
641 static fragS *align_frag;
642
643 /* These are the routines required to output the various types of
644 unwind records. */
645
646 /* A slot_number is a frag address plus the slot index (0-2). We use the
647 frag address here so that if there is a section switch in the middle of
648 a function, then instructions emitted to a different section are not
649 counted. Since there may be more than one frag for a function, this
650 means we also need to keep track of which frag this address belongs to
651 so we can compute inter-frag distances. This also nicely solves the
652 problem with nops emitted for align directives, which can't easily be
653 counted, but can easily be derived from frag sizes. */
654
655 typedef struct unw_rec_list {
656 unwind_record r;
657 unsigned long slot_number;
658 fragS *slot_frag;
659 unsigned long next_slot_number;
660 fragS *next_slot_frag;
661 struct unw_rec_list *next;
662 } unw_rec_list;
663
664 #define SLOT_NUM_NOT_SET (unsigned)-1
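/* Initial value of an unw_rec_list's slot_number until the record is
   tied to an instruction slot (see alloc_record below).  */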
665
666 /* Linked list of saved prologue counts. A very poor
667 implementation of a map from label numbers to prologue counts. */
668 typedef struct label_prologue_count
669 {
670 struct label_prologue_count *next;
671 unsigned long label_number;
672 unsigned int prologue_count;
673 } label_prologue_count;
674
675 static struct
676 {
677 /* Maintain a list of unwind entries for the current function. */
678 unw_rec_list *list;
679 unw_rec_list *tail;
680
681   /* Any unwind entries that should be attached to the current slot
682 that an insn is being constructed for. */
683 unw_rec_list *current_entry;
684
685 /* These are used to create the unwind table entry for this function. */
686 symbolS *proc_start;
687 symbolS *proc_end;
688 symbolS *info; /* pointer to unwind info */
689 symbolS *personality_routine;
690 segT saved_text_seg;
691 subsegT saved_text_subseg;
692 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
693
694 /* TRUE if processing unwind directives in a prologue region. */
695 int prologue;
696 int prologue_mask;
697 unsigned int prologue_count; /* number of .prologues seen so far */
698 /* Prologue counts at previous .label_state directives. */
699 struct label_prologue_count * saved_prologue_counts;
700 } unwind;
701
702 /* The input value is a negated offset from psp, i.e. it specifies the
703    address psp - offset.  The encoded value E satisfies the relation
704    psp - offset == psp + 16 - (4 * E), so we add 16 and divide by 4.  */
705
706 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
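/* For example, an offset of 32 (the location psp - 32) is encoded as
   (32 + 16) / 4 == 12.  */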
707
708 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
709
710 /* Forward declarations: */
711 static void set_section PARAMS ((char *name));
712 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
713 unsigned int, unsigned int));
714 static void dot_align (int);
715 static void dot_radix PARAMS ((int));
716 static void dot_special_section PARAMS ((int));
717 static void dot_proc PARAMS ((int));
718 static void dot_fframe PARAMS ((int));
719 static void dot_vframe PARAMS ((int));
720 static void dot_vframesp PARAMS ((int));
721 static void dot_vframepsp PARAMS ((int));
722 static void dot_save PARAMS ((int));
723 static void dot_restore PARAMS ((int));
724 static void dot_restorereg PARAMS ((int));
725 static void dot_restorereg_p PARAMS ((int));
726 static void dot_handlerdata PARAMS ((int));
727 static void dot_unwentry PARAMS ((int));
728 static void dot_altrp PARAMS ((int));
729 static void dot_savemem PARAMS ((int));
730 static void dot_saveg PARAMS ((int));
731 static void dot_savef PARAMS ((int));
732 static void dot_saveb PARAMS ((int));
733 static void dot_savegf PARAMS ((int));
734 static void dot_spill PARAMS ((int));
735 static void dot_spillreg PARAMS ((int));
736 static void dot_spillmem PARAMS ((int));
737 static void dot_spillreg_p PARAMS ((int));
738 static void dot_spillmem_p PARAMS ((int));
739 static void dot_label_state PARAMS ((int));
740 static void dot_copy_state PARAMS ((int));
741 static void dot_unwabi PARAMS ((int));
742 static void dot_personality PARAMS ((int));
743 static void dot_body PARAMS ((int));
744 static void dot_prologue PARAMS ((int));
745 static void dot_endp PARAMS ((int));
746 static void dot_template PARAMS ((int));
747 static void dot_regstk PARAMS ((int));
748 static void dot_rot PARAMS ((int));
749 static void dot_byteorder PARAMS ((int));
750 static void dot_psr PARAMS ((int));
751 static void dot_alias PARAMS ((int));
752 static void dot_ln PARAMS ((int));
753 static char *parse_section_name PARAMS ((void));
754 static void dot_xdata PARAMS ((int));
755 static void stmt_float_cons PARAMS ((int));
756 static void stmt_cons_ua PARAMS ((int));
757 static void dot_xfloat_cons PARAMS ((int));
758 static void dot_xstringer PARAMS ((int));
759 static void dot_xdata_ua PARAMS ((int));
760 static void dot_xfloat_cons_ua PARAMS ((int));
761 static void print_prmask PARAMS ((valueT mask));
762 static void dot_pred_rel PARAMS ((int));
763 static void dot_reg_val PARAMS ((int));
764 static void dot_serialize PARAMS ((int));
765 static void dot_dv_mode PARAMS ((int));
766 static void dot_entry PARAMS ((int));
767 static void dot_mem_offset PARAMS ((int));
768 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
769 static symbolS *declare_register PARAMS ((const char *name, int regnum));
770 static void declare_register_set PARAMS ((const char *, int, int));
771 static unsigned int operand_width PARAMS ((enum ia64_opnd));
772 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
773 int index,
774 expressionS *e));
775 static int parse_operand PARAMS ((expressionS *e));
776 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
777 static int errata_nop_necessary_p PARAMS ((struct slot *, enum ia64_unit));
778 static void build_insn PARAMS ((struct slot *, bfd_vma *));
779 static void emit_one_bundle PARAMS ((void));
780 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
781 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
782 bfd_reloc_code_real_type r_type));
783 static void insn_group_break PARAMS ((int, int, int));
784 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
785 struct rsrc *, int depind, int path));
786 static void add_qp_mutex PARAMS((valueT mask));
787 static void add_qp_imply PARAMS((int p1, int p2));
788 static void clear_qp_branch_flag PARAMS((valueT mask));
789 static void clear_qp_mutex PARAMS((valueT mask));
790 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
791 static int has_suffix_p PARAMS((const char *, const char *));
792 static void clear_register_values PARAMS ((void));
793 static void print_dependency PARAMS ((const char *action, int depind));
794 static void instruction_serialization PARAMS ((void));
795 static void data_serialization PARAMS ((void));
796 static void remove_marked_resource PARAMS ((struct rsrc *));
797 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
798 static int is_taken_branch PARAMS ((struct ia64_opcode *));
799 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
800 static int depends_on PARAMS ((int, struct ia64_opcode *));
801 static int specify_resource PARAMS ((const struct ia64_dependency *,
802 struct ia64_opcode *, int, struct rsrc [], int, int));
803 static int check_dv PARAMS((struct ia64_opcode *idesc));
804 static void check_dependencies PARAMS((struct ia64_opcode *));
805 static void mark_resources PARAMS((struct ia64_opcode *));
806 static void update_dependencies PARAMS((struct ia64_opcode *));
807 static void note_register_values PARAMS((struct ia64_opcode *));
808 static int qp_mutex PARAMS ((int, int, int));
809 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
810 static void output_vbyte_mem PARAMS ((int, char *, char *));
811 static void count_output PARAMS ((int, char *, char *));
812 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
813 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
814 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
815 static void output_P1_format PARAMS ((vbyte_func, int));
816 static void output_P2_format PARAMS ((vbyte_func, int, int));
817 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
818 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
819 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
820 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
821 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
822 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
823 static void output_P9_format PARAMS ((vbyte_func, int, int));
824 static void output_P10_format PARAMS ((vbyte_func, int, int));
825 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
826 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
827 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
828 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
829 static char format_ab_reg PARAMS ((int, int));
830 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
831 unsigned long));
832 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
833 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
834 unsigned long));
835 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
836 static unw_rec_list *output_endp PARAMS ((void));
837 static unw_rec_list *output_prologue PARAMS ((void));
838 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
839 static unw_rec_list *output_body PARAMS ((void));
840 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
841 static unw_rec_list *output_mem_stack_v PARAMS ((void));
842 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
843 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
844 static unw_rec_list *output_rp_when PARAMS ((void));
845 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
846 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
847 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
848 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
849 static unw_rec_list *output_pfs_when PARAMS ((void));
850 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
851 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
852 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
853 static unw_rec_list *output_preds_when PARAMS ((void));
854 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
855 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
856 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
857 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
858 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
859 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
860 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
861 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
862 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
863 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
864 static unw_rec_list *output_unat_when PARAMS ((void));
865 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
866 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
867 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
868 static unw_rec_list *output_lc_when PARAMS ((void));
869 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
870 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
871 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
872 static unw_rec_list *output_fpsr_when PARAMS ((void));
873 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
874 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
875 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
876 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
877 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
878 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
879 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
880 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
881 static unw_rec_list *output_bsp_when PARAMS ((void));
882 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
883 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
884 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
885 static unw_rec_list *output_bspstore_when PARAMS ((void));
886 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
887 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
888 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
889 static unw_rec_list *output_rnat_when PARAMS ((void));
890 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
891 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
892 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
893 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
894 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
895 static unw_rec_list *output_label_state PARAMS ((unsigned long));
896 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
897 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
898 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
899 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
900 unsigned int));
901 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
902 unsigned int));
903 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
904 unsigned int));
905 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
906 unsigned int, unsigned int));
907 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
908 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
909 static int calc_record_size PARAMS ((unw_rec_list *));
910 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
911 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
912 unsigned long, fragS *,
913 int));
914 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
915 static void fixup_unw_records PARAMS ((unw_rec_list *, int));
916 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
917 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
918 static unsigned int get_saved_prologue_count PARAMS ((unsigned long));
919 static void save_prologue_count PARAMS ((unsigned long, unsigned int));
920 static void free_saved_prologue_counts PARAMS ((void));
921
922 /* Determine if application register REGNUM resides only in the integer
923 unit (as opposed to the memory unit). */
924 static int
925 ar_is_only_in_integer_unit (int reg)
926 {
927 reg -= REG_AR;
928 return reg >= 64 && reg <= 111;
929 }
930
931 /* Determine if application register REGNUM resides only in the memory
932 unit (as opposed to the integer unit). */
933 static int
934 ar_is_only_in_memory_unit (int reg)
935 {
936 reg -= REG_AR;
937 return reg >= 0 && reg <= 47;
938 }
939
940 /* Switch to section NAME and create section if necessary. It's
941 rather ugly that we have to manipulate input_line_pointer but I
942 don't see any other way to accomplish the same thing without
943 changing obj-elf.c (which may be the Right Thing, in the end). */
944 static void
945 set_section (name)
946 char *name;
947 {
948 char *saved_input_line_pointer;
949
950 saved_input_line_pointer = input_line_pointer;
951 input_line_pointer = name;
952 obj_elf_section (0);
953 input_line_pointer = saved_input_line_pointer;
954 }
955
956 /* Map 's' to SHF_IA_64_SHORT, and 'o' to SHF_LINK_ORDER.  */
957
958 int
959 ia64_elf_section_letter (letter, ptr_msg)
960 int letter;
961 char **ptr_msg;
962 {
963 if (letter == 's')
964 return SHF_IA_64_SHORT;
965 else if (letter == 'o')
966 return SHF_LINK_ORDER;
967
968 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
969 return -1;
970 }
971
972 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
973
974 flagword
975 ia64_elf_section_flags (flags, attr, type)
976 flagword flags;
977 int attr, type ATTRIBUTE_UNUSED;
978 {
979 if (attr & SHF_IA_64_SHORT)
980 flags |= SEC_SMALL_DATA;
981 return flags;
982 }
983
984 int
985 ia64_elf_section_type (str, len)
986 const char *str;
987 size_t len;
988 {
989 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
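/* The length check makes this an exact match, so a section name that is
   merely a prefix of S does not match.  */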
990
991 if (STREQ (ELF_STRING_ia64_unwind_info))
992 return SHT_PROGBITS;
993
994 if (STREQ (ELF_STRING_ia64_unwind_info_once))
995 return SHT_PROGBITS;
996
997 if (STREQ (ELF_STRING_ia64_unwind))
998 return SHT_IA_64_UNWIND;
999
1000 if (STREQ (ELF_STRING_ia64_unwind_once))
1001 return SHT_IA_64_UNWIND;
1002
1003 if (STREQ ("unwind"))
1004 return SHT_IA_64_UNWIND;
1005
1006 return -1;
1007 #undef STREQ
1008 }
1009
1010 static unsigned int
1011 set_regstack (ins, locs, outs, rots)
1012 unsigned int ins, locs, outs, rots;
1013 {
1014 /* Size of frame. */
1015 unsigned int sof;
1016
1017 sof = ins + locs + outs;
1018 if (sof > 96)
1019 {
1020 as_bad ("Size of frame exceeds maximum of 96 registers");
1021 return 0;
1022 }
1023 if (rots > sof)
1024 {
1025 as_warn ("Size of rotating registers exceeds frame size");
1026 return 0;
1027 }
1028 md.in.base = REG_GR + 32;
1029 md.loc.base = md.in.base + ins;
1030 md.out.base = md.loc.base + locs;
1031
1032 md.in.num_regs = ins;
1033 md.loc.num_regs = locs;
1034 md.out.num_regs = outs;
1035 md.rot.num_regs = rots;
1036 return sof;
1037 }
1038
1039 void
1040 ia64_flush_insns ()
1041 {
1042 struct label_fix *lfix;
1043 segT saved_seg;
1044 subsegT saved_subseg;
1045 unw_rec_list *ptr;
1046
1047 if (!md.last_text_seg)
1048 return;
1049
1050 saved_seg = now_seg;
1051 saved_subseg = now_subseg;
1052
1053 subseg_set (md.last_text_seg, 0);
1054
1055 while (md.num_slots_in_use > 0)
1056 emit_one_bundle (); /* force out queued instructions */
1057
1058 /* In case there are labels following the last instruction, resolve
1059 those now: */
1060 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1061 {
1062 S_SET_VALUE (lfix->sym, frag_now_fix ());
1063 symbol_set_frag (lfix->sym, frag_now);
1064 }
1065 CURR_SLOT.label_fixups = 0;
1066 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1067 {
1068 S_SET_VALUE (lfix->sym, frag_now_fix ());
1069 symbol_set_frag (lfix->sym, frag_now);
1070 }
1071 CURR_SLOT.tag_fixups = 0;
1072
1073 /* In case there are unwind directives following the last instruction,
1074 resolve those now. We only handle prologue, body, and endp directives
1075 here. Give an error for others. */
1076 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1077 {
1078 switch (ptr->r.type)
1079 {
1080 case prologue:
1081 case prologue_gr:
1082 case body:
1083 case endp:
1084 ptr->slot_number = (unsigned long) frag_more (0);
1085 ptr->slot_frag = frag_now;
1086 break;
1087
1088 /* Allow any record which doesn't have a "t" field (i.e.,
1089 doesn't relate to a particular instruction). */
1090 case unwabi:
1091 case br_gr:
1092 case copy_state:
1093 case fr_mem:
1094 case frgr_mem:
1095 case gr_gr:
1096 case gr_mem:
1097 case label_state:
1098 case rp_br:
1099 case spill_base:
1100 case spill_mask:
1101 /* nothing */
1102 break;
1103
1104 default:
1105 as_bad (_("Unwind directive not followed by an instruction."));
1106 break;
1107 }
1108 }
1109 unwind.current_entry = NULL;
1110
1111 subseg_set (saved_seg, saved_subseg);
1112
1113 if (md.qp.X_op == O_register)
1114 as_bad ("qualifying predicate not followed by instruction");
1115 }
1116
1117 static void
1118 ia64_do_align (int nbytes)
1119 {
1120 char *saved_input_line_pointer = input_line_pointer;
1121
1122 input_line_pointer = "";
1123 s_align_bytes (nbytes);
1124 input_line_pointer = saved_input_line_pointer;
1125 }
1126
1127 void
1128 ia64_cons_align (nbytes)
1129 int nbytes;
1130 {
1131 if (md.auto_align)
1132 {
1133 char *saved_input_line_pointer = input_line_pointer;
1134 input_line_pointer = "";
1135 s_align_bytes (nbytes);
1136 input_line_pointer = saved_input_line_pointer;
1137 }
1138 }
1139
1140 /* Output COUNT bytes to a memory location. */
1141 static unsigned char *vbyte_mem_ptr = NULL;
1142
1143 void
1144 output_vbyte_mem (count, ptr, comment)
1145 int count;
1146 char *ptr;
1147 char *comment ATTRIBUTE_UNUSED;
1148 {
1149 int x;
1150 if (vbyte_mem_ptr == NULL)
1151 abort ();
1152
1153 if (count == 0)
1154 return;
1155 for (x = 0; x < count; x++)
1156 *(vbyte_mem_ptr++) = ptr[x];
1157 }
1158
1159 /* Count the number of bytes required for records. */
1160 static int vbyte_count = 0;
1161 void
1162 count_output (count, ptr, comment)
1163 int count;
1164 char *ptr ATTRIBUTE_UNUSED;
1165 char *comment ATTRIBUTE_UNUSED;
1166 {
1167 vbyte_count += count;
1168 }
1169
1170 static void
1171 output_R1_format (f, rtype, rlen)
1172 vbyte_func f;
1173 unw_record_type rtype;
1174 int rlen;
1175 {
1176 int r = 0;
1177 char byte;
1178 if (rlen > 0x1f)
1179 {
1180 output_R3_format (f, rtype, rlen);
1181 return;
1182 }
1183
1184 if (rtype == body)
1185 r = 1;
1186 else if (rtype != prologue)
1187 as_bad ("record type is not valid");
1188
1189 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1190 (*f) (1, &byte, NULL);
1191 }
1192
1193 static void
1194 output_R2_format (f, mask, grsave, rlen)
1195 vbyte_func f;
1196 int mask, grsave;
1197 unsigned long rlen;
1198 {
1199 char bytes[20];
1200 int count = 2;
1201 mask = (mask & 0x0f);
1202 grsave = (grsave & 0x7f);
1203
1204 bytes[0] = (UNW_R2 | (mask >> 1));
1205 bytes[1] = (((mask & 0x01) << 7) | grsave);
1206 count += output_leb128 (bytes + 2, rlen, 0);
1207 (*f) (count, bytes, NULL);
1208 }
1209
1210 static void
1211 output_R3_format (f, rtype, rlen)
1212 vbyte_func f;
1213 unw_record_type rtype;
1214 unsigned long rlen;
1215 {
1216 int r = 0, count;
1217 char bytes[20];
1218 if (rlen <= 0x1f)
1219 {
1220 output_R1_format (f, rtype, rlen);
1221 return;
1222 }
1223
1224 if (rtype == body)
1225 r = 1;
1226 else if (rtype != prologue)
1227 as_bad ("record type is not valid");
1228 bytes[0] = (UNW_R3 | r);
1229 count = output_leb128 (bytes + 1, rlen, 0);
1230 (*f) (count + 1, bytes, NULL);
1231 }
1232
1233 static void
1234 output_P1_format (f, brmask)
1235 vbyte_func f;
1236 int brmask;
1237 {
1238 char byte;
1239 byte = UNW_P1 | (brmask & 0x1f);
1240 (*f) (1, &byte, NULL);
1241 }
1242
1243 static void
1244 output_P2_format (f, brmask, gr)
1245 vbyte_func f;
1246 int brmask;
1247 int gr;
1248 {
1249 char bytes[2];
1250 brmask = (brmask & 0x1f);
1251 bytes[0] = UNW_P2 | (brmask >> 1);
1252 bytes[1] = (((brmask & 1) << 7) | gr);
1253 (*f) (2, bytes, NULL);
1254 }
1255
1256 static void
1257 output_P3_format (f, rtype, reg)
1258 vbyte_func f;
1259 unw_record_type rtype;
1260 int reg;
1261 {
1262 char bytes[2];
1263 int r = 0;
1264 reg = (reg & 0x7f);
1265 switch (rtype)
1266 {
1267 case psp_gr:
1268 r = 0;
1269 break;
1270 case rp_gr:
1271 r = 1;
1272 break;
1273 case pfs_gr:
1274 r = 2;
1275 break;
1276 case preds_gr:
1277 r = 3;
1278 break;
1279 case unat_gr:
1280 r = 4;
1281 break;
1282 case lc_gr:
1283 r = 5;
1284 break;
1285 case rp_br:
1286 r = 6;
1287 break;
1288 case rnat_gr:
1289 r = 7;
1290 break;
1291 case bsp_gr:
1292 r = 8;
1293 break;
1294 case bspstore_gr:
1295 r = 9;
1296 break;
1297 case fpsr_gr:
1298 r = 10;
1299 break;
1300 case priunat_gr:
1301 r = 11;
1302 break;
1303 default:
1304 as_bad ("Invalid record type for P3 format.");
1305 }
1306 bytes[0] = (UNW_P3 | (r >> 1));
1307 bytes[1] = (((r & 1) << 7) | reg);
1308 (*f) (2, bytes, NULL);
1309 }
1310
1311 static void
1312 output_P4_format (f, imask, imask_size)
1313 vbyte_func f;
1314 unsigned char *imask;
1315 unsigned long imask_size;
1316 {
1317 imask[0] = UNW_P4;
1318 (*f) (imask_size, imask, NULL);
1319 }
1320
1321 static void
1322 output_P5_format (f, grmask, frmask)
1323 vbyte_func f;
1324 int grmask;
1325 unsigned long frmask;
1326 {
1327 char bytes[4];
1328 grmask = (grmask & 0x0f);
1329
1330 bytes[0] = UNW_P5;
1331 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1332 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1333 bytes[3] = (frmask & 0x000000ff);
1334 (*f) (4, bytes, NULL);
1335 }
1336
1337 static void
1338 output_P6_format (f, rtype, rmask)
1339 vbyte_func f;
1340 unw_record_type rtype;
1341 int rmask;
1342 {
1343 char byte;
1344 int r = 0;
1345
1346 if (rtype == gr_mem)
1347 r = 1;
1348 else if (rtype != fr_mem)
1349 as_bad ("Invalid record type for format P6");
1350 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1351 (*f) (1, &byte, NULL);
1352 }
1353
1354 static void
1355 output_P7_format (f, rtype, w1, w2)
1356 vbyte_func f;
1357 unw_record_type rtype;
1358 unsigned long w1;
1359 unsigned long w2;
1360 {
1361 char bytes[20];
1362 int count = 1;
1363 int r = 0;
1364 count += output_leb128 (bytes + 1, w1, 0);
1365 switch (rtype)
1366 {
1367 case mem_stack_f:
1368 r = 0;
1369 count += output_leb128 (bytes + count, w2 >> 4, 0);
1370 break;
1371 case mem_stack_v:
1372 r = 1;
1373 break;
1374 case spill_base:
1375 r = 2;
1376 break;
1377 case psp_sprel:
1378 r = 3;
1379 break;
1380 case rp_when:
1381 r = 4;
1382 break;
1383 case rp_psprel:
1384 r = 5;
1385 break;
1386 case pfs_when:
1387 r = 6;
1388 break;
1389 case pfs_psprel:
1390 r = 7;
1391 break;
1392 case preds_when:
1393 r = 8;
1394 break;
1395 case preds_psprel:
1396 r = 9;
1397 break;
1398 case lc_when:
1399 r = 10;
1400 break;
1401 case lc_psprel:
1402 r = 11;
1403 break;
1404 case unat_when:
1405 r = 12;
1406 break;
1407 case unat_psprel:
1408 r = 13;
1409 break;
1410 case fpsr_when:
1411 r = 14;
1412 break;
1413 case fpsr_psprel:
1414 r = 15;
1415 break;
1416 default:
1417 break;
1418 }
1419 bytes[0] = (UNW_P7 | r);
1420 (*f) (count, bytes, NULL);
1421 }
1422
1423 static void
1424 output_P8_format (f, rtype, t)
1425 vbyte_func f;
1426 unw_record_type rtype;
1427 unsigned long t;
1428 {
1429 char bytes[20];
1430 int r = 0;
1431 int count = 2;
1432 bytes[0] = UNW_P8;
1433 switch (rtype)
1434 {
1435 case rp_sprel:
1436 r = 1;
1437 break;
1438 case pfs_sprel:
1439 r = 2;
1440 break;
1441 case preds_sprel:
1442 r = 3;
1443 break;
1444 case lc_sprel:
1445 r = 4;
1446 break;
1447 case unat_sprel:
1448 r = 5;
1449 break;
1450 case fpsr_sprel:
1451 r = 6;
1452 break;
1453 case bsp_when:
1454 r = 7;
1455 break;
1456 case bsp_psprel:
1457 r = 8;
1458 break;
1459 case bsp_sprel:
1460 r = 9;
1461 break;
1462 case bspstore_when:
1463 r = 10;
1464 break;
1465 case bspstore_psprel:
1466 r = 11;
1467 break;
1468 case bspstore_sprel:
1469 r = 12;
1470 break;
1471 case rnat_when:
1472 r = 13;
1473 break;
1474 case rnat_psprel:
1475 r = 14;
1476 break;
1477 case rnat_sprel:
1478 r = 15;
1479 break;
1480 case priunat_when_gr:
1481 r = 16;
1482 break;
1483 case priunat_psprel:
1484 r = 17;
1485 break;
1486 case priunat_sprel:
1487 r = 18;
1488 break;
1489 case priunat_when_mem:
1490 r = 19;
1491 break;
1492 default:
1493 break;
1494 }
1495 bytes[1] = r;
1496 count += output_leb128 (bytes + 2, t, 0);
1497 (*f) (count, bytes, NULL);
1498 }
1499
1500 static void
1501 output_P9_format (f, grmask, gr)
1502 vbyte_func f;
1503 int grmask;
1504 int gr;
1505 {
1506 char bytes[3];
1507 bytes[0] = UNW_P9;
1508 bytes[1] = (grmask & 0x0f);
1509 bytes[2] = (gr & 0x7f);
1510 (*f) (3, bytes, NULL);
1511 }
1512
1513 static void
1514 output_P10_format (f, abi, context)
1515 vbyte_func f;
1516 int abi;
1517 int context;
1518 {
1519 char bytes[3];
1520 bytes[0] = UNW_P10;
1521 bytes[1] = (abi & 0xff);
1522 bytes[2] = (context & 0xff);
1523 (*f) (3, bytes, NULL);
1524 }
1525
1526 static void
1527 output_B1_format (f, rtype, label)
1528 vbyte_func f;
1529 unw_record_type rtype;
1530 unsigned long label;
1531 {
1532 char byte;
1533 int r = 0;
1534 if (label > 0x1f)
1535 {
1536 output_B4_format (f, rtype, label);
1537 return;
1538 }
1539 if (rtype == copy_state)
1540 r = 1;
1541 else if (rtype != label_state)
1542 as_bad ("Invalid record type for format B1");
1543
1544 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1545 (*f) (1, &byte, NULL);
1546 }
1547
1548 static void
1549 output_B2_format (f, ecount, t)
1550 vbyte_func f;
1551 unsigned long ecount;
1552 unsigned long t;
1553 {
1554 char bytes[20];
1555 int count = 1;
1556 if (ecount > 0x1f)
1557 {
1558 output_B3_format (f, ecount, t);
1559 return;
1560 }
1561 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1562 count += output_leb128 (bytes + 1, t, 0);
1563 (*f) (count, bytes, NULL);
1564 }
1565
1566 static void
1567 output_B3_format (f, ecount, t)
1568 vbyte_func f;
1569 unsigned long ecount;
1570 unsigned long t;
1571 {
1572 char bytes[20];
1573 int count = 1;
1574 if (ecount <= 0x1f)
1575 {
1576 output_B2_format (f, ecount, t);
1577 return;
1578 }
1579 bytes[0] = UNW_B3;
1580 count += output_leb128 (bytes + 1, t, 0);
1581 count += output_leb128 (bytes + count, ecount, 0);
1582 (*f) (count, bytes, NULL);
1583 }
1584
1585 static void
1586 output_B4_format (f, rtype, label)
1587 vbyte_func f;
1588 unw_record_type rtype;
1589 unsigned long label;
1590 {
1591 char bytes[20];
1592 int r = 0;
1593 int count = 1;
1594 if (label <= 0x1f)
1595 {
1596 output_B1_format (f, rtype, label);
1597 return;
1598 }
1599
1600 if (rtype == copy_state)
1601 r = 1;
1602 else if (rtype != label_state)
1603     as_bad ("Invalid record type for format B4");
1604
1605 bytes[0] = (UNW_B4 | (r << 3));
1606 count += output_leb128 (bytes + 1, label, 0);
1607 (*f) (count, bytes, NULL);
1608 }
1609
1610 static char
1611 format_ab_reg (ab, reg)
1612 int ab;
1613 int reg;
1614 {
1615 int ret;
1616 ab = (ab & 3);
1617 reg = (reg & 0x1f);
1618 ret = (ab << 5) | reg;
1619 return ret;
1620 }
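/* The returned byte packs the 2-bit AB field into bits 5-6 and the
   5-bit register number into bits 0-4.  */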
1621
1622 static void
1623 output_X1_format (f, rtype, ab, reg, t, w1)
1624 vbyte_func f;
1625 unw_record_type rtype;
1626 int ab, reg;
1627 unsigned long t;
1628 unsigned long w1;
1629 {
1630 char bytes[20];
1631 int r = 0;
1632 int count = 2;
1633 bytes[0] = UNW_X1;
1634
1635 if (rtype == spill_sprel)
1636 r = 1;
1637 else if (rtype != spill_psprel)
1638 as_bad ("Invalid record type for format X1");
1639 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1640 count += output_leb128 (bytes + 2, t, 0);
1641 count += output_leb128 (bytes + count, w1, 0);
1642 (*f) (count, bytes, NULL);
1643 }
1644
1645 static void
1646 output_X2_format (f, ab, reg, x, y, treg, t)
1647 vbyte_func f;
1648 int ab, reg;
1649 int x, y, treg;
1650 unsigned long t;
1651 {
1652 char bytes[20];
1653 int count = 3;
1654 bytes[0] = UNW_X2;
1655 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1656 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1657 count += output_leb128 (bytes + 3, t, 0);
1658 (*f) (count, bytes, NULL);
1659 }
1660
1661 static void
1662 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1663 vbyte_func f;
1664 unw_record_type rtype;
1665 int qp;
1666 int ab, reg;
1667 unsigned long t;
1668 unsigned long w1;
1669 {
1670 char bytes[20];
1671 int r = 0;
1672 int count = 3;
1673 bytes[0] = UNW_X3;
1674
1675 if (rtype == spill_sprel_p)
1676 r = 1;
1677 else if (rtype != spill_psprel_p)
1678 as_bad ("Invalid record type for format X3");
1679 bytes[1] = ((r << 7) | (qp & 0x3f));
1680 bytes[2] = format_ab_reg (ab, reg);
1681 count += output_leb128 (bytes + 3, t, 0);
1682 count += output_leb128 (bytes + count, w1, 0);
1683 (*f) (count, bytes, NULL);
1684 }
1685
1686 static void
1687 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1688 vbyte_func f;
1689 int qp;
1690 int ab, reg;
1691 int x, y, treg;
1692 unsigned long t;
1693 {
1694 char bytes[20];
1695 int count = 4;
1696 bytes[0] = UNW_X4;
1697 bytes[1] = (qp & 0x3f);
1698 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1699 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1700 count += output_leb128 (bytes + 4, t, 0);
1701 (*f) (count, bytes, NULL);
1702 }
1703
1704 /* This function allocates a record list structure, and initializes fields. */
1705
1706 static unw_rec_list *
1707 alloc_record (unw_record_type t)
1708 {
1709 unw_rec_list *ptr;
1710 ptr = xmalloc (sizeof (*ptr));
1711 ptr->next = NULL;
1712 ptr->slot_number = SLOT_NUM_NOT_SET;
1713 ptr->r.type = t;
1714 ptr->next_slot_number = 0;
1715 ptr->next_slot_frag = 0;
1716 return ptr;
1717 }
1718
1719 /* Dummy unwind record used for calculating the length of the last prologue or
1720 body region. */
1721
1722 static unw_rec_list *
1723 output_endp ()
1724 {
1725 unw_rec_list *ptr = alloc_record (endp);
1726 return ptr;
1727 }
1728
1729 static unw_rec_list *
1730 output_prologue ()
1731 {
1732 unw_rec_list *ptr = alloc_record (prologue);
1733 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1734 return ptr;
1735 }
1736
1737 static unw_rec_list *
1738 output_prologue_gr (saved_mask, reg)
1739 unsigned int saved_mask;
1740 unsigned int reg;
1741 {
1742 unw_rec_list *ptr = alloc_record (prologue_gr);
1743 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1744 ptr->r.record.r.grmask = saved_mask;
1745 ptr->r.record.r.grsave = reg;
1746 return ptr;
1747 }
1748
1749 static unw_rec_list *
1750 output_body ()
1751 {
1752 unw_rec_list *ptr = alloc_record (body);
1753 return ptr;
1754 }
1755
1756 static unw_rec_list *
1757 output_mem_stack_f (size)
1758 unsigned int size;
1759 {
1760 unw_rec_list *ptr = alloc_record (mem_stack_f);
1761 ptr->r.record.p.size = size;
1762 return ptr;
1763 }
1764
1765 static unw_rec_list *
1766 output_mem_stack_v ()
1767 {
1768 unw_rec_list *ptr = alloc_record (mem_stack_v);
1769 return ptr;
1770 }
1771
1772 static unw_rec_list *
1773 output_psp_gr (gr)
1774 unsigned int gr;
1775 {
1776 unw_rec_list *ptr = alloc_record (psp_gr);
1777 ptr->r.record.p.gr = gr;
1778 return ptr;
1779 }
1780
1781 static unw_rec_list *
1782 output_psp_sprel (offset)
1783 unsigned int offset;
1784 {
1785 unw_rec_list *ptr = alloc_record (psp_sprel);
1786 ptr->r.record.p.spoff = offset / 4;
1787 return ptr;
1788 }
1789
1790 static unw_rec_list *
1791 output_rp_when ()
1792 {
1793 unw_rec_list *ptr = alloc_record (rp_when);
1794 return ptr;
1795 }
1796
1797 static unw_rec_list *
1798 output_rp_gr (gr)
1799 unsigned int gr;
1800 {
1801 unw_rec_list *ptr = alloc_record (rp_gr);
1802 ptr->r.record.p.gr = gr;
1803 return ptr;
1804 }
1805
1806 static unw_rec_list *
1807 output_rp_br (br)
1808 unsigned int br;
1809 {
1810 unw_rec_list *ptr = alloc_record (rp_br);
1811 ptr->r.record.p.br = br;
1812 return ptr;
1813 }
1814
1815 static unw_rec_list *
1816 output_rp_psprel (offset)
1817 unsigned int offset;
1818 {
1819 unw_rec_list *ptr = alloc_record (rp_psprel);
1820 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_sprel (offset)
1826 unsigned int offset;
1827 {
1828 unw_rec_list *ptr = alloc_record (rp_sprel);
1829 ptr->r.record.p.spoff = offset / 4;
1830 return ptr;
1831 }
1832
1833 static unw_rec_list *
1834 output_pfs_when ()
1835 {
1836 unw_rec_list *ptr = alloc_record (pfs_when);
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_pfs_gr (gr)
1842 unsigned int gr;
1843 {
1844 unw_rec_list *ptr = alloc_record (pfs_gr);
1845 ptr->r.record.p.gr = gr;
1846 return ptr;
1847 }
1848
1849 static unw_rec_list *
1850 output_pfs_psprel (offset)
1851 unsigned int offset;
1852 {
1853 unw_rec_list *ptr = alloc_record (pfs_psprel);
1854 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1855 return ptr;
1856 }
1857
1858 static unw_rec_list *
1859 output_pfs_sprel (offset)
1860 unsigned int offset;
1861 {
1862 unw_rec_list *ptr = alloc_record (pfs_sprel);
1863 ptr->r.record.p.spoff = offset / 4;
1864 return ptr;
1865 }
1866
1867 static unw_rec_list *
1868 output_preds_when ()
1869 {
1870 unw_rec_list *ptr = alloc_record (preds_when);
1871 return ptr;
1872 }
1873
1874 static unw_rec_list *
1875 output_preds_gr (gr)
1876 unsigned int gr;
1877 {
1878 unw_rec_list *ptr = alloc_record (preds_gr);
1879 ptr->r.record.p.gr = gr;
1880 return ptr;
1881 }
1882
1883 static unw_rec_list *
1884 output_preds_psprel (offset)
1885 unsigned int offset;
1886 {
1887 unw_rec_list *ptr = alloc_record (preds_psprel);
1888 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1889 return ptr;
1890 }
1891
1892 static unw_rec_list *
1893 output_preds_sprel (offset)
1894 unsigned int offset;
1895 {
1896 unw_rec_list *ptr = alloc_record (preds_sprel);
1897 ptr->r.record.p.spoff = offset / 4;
1898 return ptr;
1899 }
1900
1901 static unw_rec_list *
1902 output_fr_mem (mask)
1903 unsigned int mask;
1904 {
1905 unw_rec_list *ptr = alloc_record (fr_mem);
1906 ptr->r.record.p.rmask = mask;
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_frgr_mem (gr_mask, fr_mask)
1912 unsigned int gr_mask;
1913 unsigned int fr_mask;
1914 {
1915 unw_rec_list *ptr = alloc_record (frgr_mem);
1916 ptr->r.record.p.grmask = gr_mask;
1917 ptr->r.record.p.frmask = fr_mask;
1918 return ptr;
1919 }
1920
1921 static unw_rec_list *
1922 output_gr_gr (mask, reg)
1923 unsigned int mask;
1924 unsigned int reg;
1925 {
1926 unw_rec_list *ptr = alloc_record (gr_gr);
1927 ptr->r.record.p.grmask = mask;
1928 ptr->r.record.p.gr = reg;
1929 return ptr;
1930 }
1931
1932 static unw_rec_list *
1933 output_gr_mem (mask)
1934 unsigned int mask;
1935 {
1936 unw_rec_list *ptr = alloc_record (gr_mem);
1937 ptr->r.record.p.rmask = mask;
1938 return ptr;
1939 }
1940
1941 static unw_rec_list *
1942 output_br_mem (unsigned int mask)
1943 {
1944 unw_rec_list *ptr = alloc_record (br_mem);
1945 ptr->r.record.p.brmask = mask;
1946 return ptr;
1947 }
1948
1949 static unw_rec_list *
1950 output_br_gr (save_mask, reg)
1951 unsigned int save_mask;
1952 unsigned int reg;
1953 {
1954 unw_rec_list *ptr = alloc_record (br_gr);
1955 ptr->r.record.p.brmask = save_mask;
1956 ptr->r.record.p.gr = reg;
1957 return ptr;
1958 }
1959
1960 static unw_rec_list *
1961 output_spill_base (offset)
1962 unsigned int offset;
1963 {
1964 unw_rec_list *ptr = alloc_record (spill_base);
1965 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1966 return ptr;
1967 }
1968
1969 static unw_rec_list *
1970 output_unat_when ()
1971 {
1972 unw_rec_list *ptr = alloc_record (unat_when);
1973 return ptr;
1974 }
1975
1976 static unw_rec_list *
1977 output_unat_gr (gr)
1978 unsigned int gr;
1979 {
1980 unw_rec_list *ptr = alloc_record (unat_gr);
1981 ptr->r.record.p.gr = gr;
1982 return ptr;
1983 }
1984
1985 static unw_rec_list *
1986 output_unat_psprel (offset)
1987 unsigned int offset;
1988 {
1989 unw_rec_list *ptr = alloc_record (unat_psprel);
1990 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1991 return ptr;
1992 }
1993
1994 static unw_rec_list *
1995 output_unat_sprel (offset)
1996 unsigned int offset;
1997 {
1998 unw_rec_list *ptr = alloc_record (unat_sprel);
1999 ptr->r.record.p.spoff = offset / 4;
2000 return ptr;
2001 }
2002
2003 static unw_rec_list *
2004 output_lc_when ()
2005 {
2006 unw_rec_list *ptr = alloc_record (lc_when);
2007 return ptr;
2008 }
2009
2010 static unw_rec_list *
2011 output_lc_gr (gr)
2012 unsigned int gr;
2013 {
2014 unw_rec_list *ptr = alloc_record (lc_gr);
2015 ptr->r.record.p.gr = gr;
2016 return ptr;
2017 }
2018
2019 static unw_rec_list *
2020 output_lc_psprel (offset)
2021 unsigned int offset;
2022 {
2023 unw_rec_list *ptr = alloc_record (lc_psprel);
2024 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2025 return ptr;
2026 }
2027
2028 static unw_rec_list *
2029 output_lc_sprel (offset)
2030 unsigned int offset;
2031 {
2032 unw_rec_list *ptr = alloc_record (lc_sprel);
2033 ptr->r.record.p.spoff = offset / 4;
2034 return ptr;
2035 }
2036
2037 static unw_rec_list *
2038 output_fpsr_when ()
2039 {
2040 unw_rec_list *ptr = alloc_record (fpsr_when);
2041 return ptr;
2042 }
2043
2044 static unw_rec_list *
2045 output_fpsr_gr (gr)
2046 unsigned int gr;
2047 {
2048 unw_rec_list *ptr = alloc_record (fpsr_gr);
2049 ptr->r.record.p.gr = gr;
2050 return ptr;
2051 }
2052
2053 static unw_rec_list *
2054 output_fpsr_psprel (offset)
2055 unsigned int offset;
2056 {
2057 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2058 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2059 return ptr;
2060 }
2061
2062 static unw_rec_list *
2063 output_fpsr_sprel (offset)
2064 unsigned int offset;
2065 {
2066 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2067 ptr->r.record.p.spoff = offset / 4;
2068 return ptr;
2069 }
2070
2071 static unw_rec_list *
2072 output_priunat_when_gr ()
2073 {
2074 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2075 return ptr;
2076 }
2077
2078 static unw_rec_list *
2079 output_priunat_when_mem ()
2080 {
2081 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2082 return ptr;
2083 }
2084
2085 static unw_rec_list *
2086 output_priunat_gr (gr)
2087 unsigned int gr;
2088 {
2089 unw_rec_list *ptr = alloc_record (priunat_gr);
2090 ptr->r.record.p.gr = gr;
2091 return ptr;
2092 }
2093
2094 static unw_rec_list *
2095 output_priunat_psprel (offset)
2096 unsigned int offset;
2097 {
2098 unw_rec_list *ptr = alloc_record (priunat_psprel);
2099 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2100 return ptr;
2101 }
2102
2103 static unw_rec_list *
2104 output_priunat_sprel (offset)
2105 unsigned int offset;
2106 {
2107 unw_rec_list *ptr = alloc_record (priunat_sprel);
2108 ptr->r.record.p.spoff = offset / 4;
2109 return ptr;
2110 }
2111
2112 static unw_rec_list *
2113 output_bsp_when ()
2114 {
2115 unw_rec_list *ptr = alloc_record (bsp_when);
2116 return ptr;
2117 }
2118
2119 static unw_rec_list *
2120 output_bsp_gr (gr)
2121 unsigned int gr;
2122 {
2123 unw_rec_list *ptr = alloc_record (bsp_gr);
2124 ptr->r.record.p.gr = gr;
2125 return ptr;
2126 }
2127
2128 static unw_rec_list *
2129 output_bsp_psprel (offset)
2130 unsigned int offset;
2131 {
2132 unw_rec_list *ptr = alloc_record (bsp_psprel);
2133 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_bsp_sprel (offset)
2139 unsigned int offset;
2140 {
2141 unw_rec_list *ptr = alloc_record (bsp_sprel);
2142 ptr->r.record.p.spoff = offset / 4;
2143 return ptr;
2144 }
2145
2146 static unw_rec_list *
2147 output_bspstore_when ()
2148 {
2149 unw_rec_list *ptr = alloc_record (bspstore_when);
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_bspstore_gr (gr)
2155 unsigned int gr;
2156 {
2157 unw_rec_list *ptr = alloc_record (bspstore_gr);
2158 ptr->r.record.p.gr = gr;
2159 return ptr;
2160 }
2161
2162 static unw_rec_list *
2163 output_bspstore_psprel (offset)
2164 unsigned int offset;
2165 {
2166 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2167 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2168 return ptr;
2169 }
2170
2171 static unw_rec_list *
2172 output_bspstore_sprel (offset)
2173 unsigned int offset;
2174 {
2175 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2176 ptr->r.record.p.spoff = offset / 4;
2177 return ptr;
2178 }
2179
2180 static unw_rec_list *
2181 output_rnat_when ()
2182 {
2183 unw_rec_list *ptr = alloc_record (rnat_when);
2184 return ptr;
2185 }
2186
2187 static unw_rec_list *
2188 output_rnat_gr (gr)
2189 unsigned int gr;
2190 {
2191 unw_rec_list *ptr = alloc_record (rnat_gr);
2192 ptr->r.record.p.gr = gr;
2193 return ptr;
2194 }
2195
2196 static unw_rec_list *
2197 output_rnat_psprel (offset)
2198 unsigned int offset;
2199 {
2200 unw_rec_list *ptr = alloc_record (rnat_psprel);
2201 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2202 return ptr;
2203 }
2204
2205 static unw_rec_list *
2206 output_rnat_sprel (offset)
2207 unsigned int offset;
2208 {
2209 unw_rec_list *ptr = alloc_record (rnat_sprel);
2210 ptr->r.record.p.spoff = offset / 4;
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_unwabi (abi, context)
2216 unsigned long abi;
2217 unsigned long context;
2218 {
2219 unw_rec_list *ptr = alloc_record (unwabi);
2220 ptr->r.record.p.abi = abi;
2221 ptr->r.record.p.context = context;
2222 return ptr;
2223 }
2224
2225 static unw_rec_list *
2226 output_epilogue (unsigned long ecount)
2227 {
2228 unw_rec_list *ptr = alloc_record (epilogue);
2229 ptr->r.record.b.ecount = ecount;
2230 return ptr;
2231 }
2232
2233 static unw_rec_list *
2234 output_label_state (unsigned long label)
2235 {
2236 unw_rec_list *ptr = alloc_record (label_state);
2237 ptr->r.record.b.label = label;
2238 return ptr;
2239 }
2240
2241 static unw_rec_list *
2242 output_copy_state (unsigned long label)
2243 {
2244 unw_rec_list *ptr = alloc_record (copy_state);
2245 ptr->r.record.b.label = label;
2246 return ptr;
2247 }
2248
2249 static unw_rec_list *
2250 output_spill_psprel (ab, reg, offset)
2251 unsigned int ab;
2252 unsigned int reg;
2253 unsigned int offset;
2254 {
2255 unw_rec_list *ptr = alloc_record (spill_psprel);
2256 ptr->r.record.x.ab = ab;
2257 ptr->r.record.x.reg = reg;
2258 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2259 return ptr;
2260 }
2261
2262 static unw_rec_list *
2263 output_spill_sprel (ab, reg, offset)
2264 unsigned int ab;
2265 unsigned int reg;
2266 unsigned int offset;
2267 {
2268 unw_rec_list *ptr = alloc_record (spill_sprel);
2269 ptr->r.record.x.ab = ab;
2270 ptr->r.record.x.reg = reg;
2271 ptr->r.record.x.spoff = offset / 4;
2272 return ptr;
2273 }
2274
2275 static unw_rec_list *
2276 output_spill_psprel_p (ab, reg, offset, predicate)
2277 unsigned int ab;
2278 unsigned int reg;
2279 unsigned int offset;
2280 unsigned int predicate;
2281 {
2282 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2283 ptr->r.record.x.ab = ab;
2284 ptr->r.record.x.reg = reg;
2285 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2286 ptr->r.record.x.qp = predicate;
2287 return ptr;
2288 }
2289
2290 static unw_rec_list *
2291 output_spill_sprel_p (ab, reg, offset, predicate)
2292 unsigned int ab;
2293 unsigned int reg;
2294 unsigned int offset;
2295 unsigned int predicate;
2296 {
2297 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2298 ptr->r.record.x.ab = ab;
2299 ptr->r.record.x.reg = reg;
2300 ptr->r.record.x.spoff = offset / 4;
2301 ptr->r.record.x.qp = predicate;
2302 return ptr;
2303 }
2304
2305 static unw_rec_list *
2306 output_spill_reg (ab, reg, targ_reg, xy)
2307 unsigned int ab;
2308 unsigned int reg;
2309 unsigned int targ_reg;
2310 unsigned int xy;
2311 {
2312 unw_rec_list *ptr = alloc_record (spill_reg);
2313 ptr->r.record.x.ab = ab;
2314 ptr->r.record.x.reg = reg;
2315 ptr->r.record.x.treg = targ_reg;
2316 ptr->r.record.x.xy = xy;
2317 return ptr;
2318 }
2319
2320 static unw_rec_list *
2321 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2322 unsigned int ab;
2323 unsigned int reg;
2324 unsigned int targ_reg;
2325 unsigned int xy;
2326 unsigned int predicate;
2327 {
2328 unw_rec_list *ptr = alloc_record (spill_reg_p);
2329 ptr->r.record.x.ab = ab;
2330 ptr->r.record.x.reg = reg;
2331 ptr->r.record.x.treg = targ_reg;
2332 ptr->r.record.x.xy = xy;
2333 ptr->r.record.x.qp = predicate;
2334 return ptr;
2335 }
2336
2337 /* Given a unw_rec_list, process it in the correct format with the
2338 specified function. */
2339
2340 static void
2341 process_one_record (ptr, f)
2342 unw_rec_list *ptr;
2343 vbyte_func f;
2344 {
2345 unsigned long fr_mask, gr_mask;
2346
2347 switch (ptr->r.type)
2348 {
2349 /* This is a dummy record that takes up no space in the output. */
2350 case endp:
2351 break;
2352
2353 case gr_mem:
2354 case fr_mem:
2355 case br_mem:
2356 case frgr_mem:
2357 /* These are taken care of by prologue/prologue_gr. */
2358 break;
2359
2360 case prologue_gr:
2361 case prologue:
2362 if (ptr->r.type == prologue_gr)
2363 output_R2_format (f, ptr->r.record.r.grmask,
2364 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2365 else
2366 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2367
2368 /* Output descriptor(s) for union of register spills (if any). */
2369 gr_mask = ptr->r.record.r.mask.gr_mem;
2370 fr_mask = ptr->r.record.r.mask.fr_mem;
2371 if (fr_mask)
2372 {
2373 if ((fr_mask & ~0xfUL) == 0)
2374 output_P6_format (f, fr_mem, fr_mask);
2375 else
2376 {
2377 output_P5_format (f, gr_mask, fr_mask);
2378 gr_mask = 0;
2379 }
2380 }
2381 if (gr_mask)
2382 output_P6_format (f, gr_mem, gr_mask);
2383 if (ptr->r.record.r.mask.br_mem)
2384 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2385
2386 /* output imask descriptor if necessary: */
2387 if (ptr->r.record.r.mask.i)
2388 output_P4_format (f, ptr->r.record.r.mask.i,
2389 ptr->r.record.r.imask_size);
2390 break;
2391
2392 case body:
2393 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2394 break;
2395 case mem_stack_f:
2396 case mem_stack_v:
2397 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2398 ptr->r.record.p.size);
2399 break;
2400 case psp_gr:
2401 case rp_gr:
2402 case pfs_gr:
2403 case preds_gr:
2404 case unat_gr:
2405 case lc_gr:
2406 case fpsr_gr:
2407 case priunat_gr:
2408 case bsp_gr:
2409 case bspstore_gr:
2410 case rnat_gr:
2411 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2412 break;
2413 case rp_br:
2414 output_P3_format (f, rp_br, ptr->r.record.p.br);
2415 break;
2416 case psp_sprel:
2417 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2418 break;
2419 case rp_when:
2420 case pfs_when:
2421 case preds_when:
2422 case unat_when:
2423 case lc_when:
2424 case fpsr_when:
2425 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2426 break;
2427 case rp_psprel:
2428 case pfs_psprel:
2429 case preds_psprel:
2430 case unat_psprel:
2431 case lc_psprel:
2432 case fpsr_psprel:
2433 case spill_base:
2434 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2435 break;
2436 case rp_sprel:
2437 case pfs_sprel:
2438 case preds_sprel:
2439 case unat_sprel:
2440 case lc_sprel:
2441 case fpsr_sprel:
2442 case priunat_sprel:
2443 case bsp_sprel:
2444 case bspstore_sprel:
2445 case rnat_sprel:
2446 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2447 break;
2448 case gr_gr:
2449 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2450 break;
2451 case br_gr:
2452 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2453 break;
2454 case spill_mask:
2455 as_bad ("spill_mask record unimplemented.");
2456 break;
2457 case priunat_when_gr:
2458 case priunat_when_mem:
2459 case bsp_when:
2460 case bspstore_when:
2461 case rnat_when:
2462 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2463 break;
2464 case priunat_psprel:
2465 case bsp_psprel:
2466 case bspstore_psprel:
2467 case rnat_psprel:
2468 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2469 break;
2470 case unwabi:
2471 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2472 break;
2473 case epilogue:
2474 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2475 break;
2476 case label_state:
2477 case copy_state:
2478 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2479 break;
2480 case spill_psprel:
2481 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2482 ptr->r.record.x.reg, ptr->r.record.x.t,
2483 ptr->r.record.x.pspoff);
2484 break;
2485 case spill_sprel:
2486 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2487 ptr->r.record.x.reg, ptr->r.record.x.t,
2488 ptr->r.record.x.spoff);
2489 break;
2490 case spill_reg:
2491 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2492 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2493 ptr->r.record.x.treg, ptr->r.record.x.t);
2494 break;
2495 case spill_psprel_p:
2496 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2497 ptr->r.record.x.ab, ptr->r.record.x.reg,
2498 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2499 break;
2500 case spill_sprel_p:
2501 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2502 ptr->r.record.x.ab, ptr->r.record.x.reg,
2503 ptr->r.record.x.t, ptr->r.record.x.spoff);
2504 break;
2505 case spill_reg_p:
2506 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2507 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2508 ptr->r.record.x.xy, ptr->r.record.x.treg,
2509 ptr->r.record.x.t);
2510 break;
2511 default:
2512 as_bad ("record_type_not_valid");
2513 break;
2514 }
2515 }
2516
2517 /* Given a unw_rec_list list, process all the records with
2518 the specified function. */
2519 static void
2520 process_unw_records (list, f)
2521 unw_rec_list *list;
2522 vbyte_func f;
2523 {
2524 unw_rec_list *ptr;
2525 for (ptr = list; ptr; ptr = ptr->next)
2526 process_one_record (ptr, f);
2527 }
2528
2529 /* Determine the size of a record list in bytes. */
2530 static int
2531 calc_record_size (list)
2532 unw_rec_list *list;
2533 {
2534 vbyte_count = 0;
2535 process_unw_records (list, count_output);
2536 return vbyte_count;
2537 }
2538
2539 /* Update IMASK bitmask to reflect the fact that one or more registers
2540 of type TYPE are saved starting at instruction with index T. If N
2541 bits are set in REGMASK, it is assumed that instructions T through
2542 T+N-1 save these registers.
2543
2544 TYPE values:
2545 0: no save
2546 1: instruction saves next fp reg
2547 2: instruction saves next general reg
2548 3: instruction saves next branch reg */
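/* Illustrative sketch (not from the original sources): a ".save.b 0x3, r40"
   issued in slot T reaches this function with TYPE = 3 and two bits set in
   REGMASK, so two consecutive two-bit fields are written, the first one in
   byte T/4 + 1 of the imask at bit position 2 * (3 - T % 4).  */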
2549 static void
2550 set_imask (region, regmask, t, type)
2551 unw_rec_list *region;
2552 unsigned long regmask;
2553 unsigned long t;
2554 unsigned int type;
2555 {
2556 unsigned char *imask;
2557 unsigned long imask_size;
2558 unsigned int i;
2559 int pos;
2560
2561 imask = region->r.record.r.mask.i;
2562 imask_size = region->r.record.r.imask_size;
2563 if (!imask)
2564 {
2565 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2566 imask = xmalloc (imask_size);
2567 memset (imask, 0, imask_size);
2568
2569 region->r.record.r.imask_size = imask_size;
2570 region->r.record.r.mask.i = imask;
2571 }
2572
2573 i = (t / 4) + 1;
2574 pos = 2 * (3 - t % 4);
2575 while (regmask)
2576 {
2577 if (i >= imask_size)
2578 {
2579 as_bad ("Ignoring attempt to spill beyond end of region");
2580 return;
2581 }
2582
2583 imask[i] |= (type & 0x3) << pos;
2584
2585 regmask &= (regmask - 1);
2586 pos -= 2;
2587 if (pos < 0)
2588 {
2589 pos = 0;
2590 ++i;
2591 }
2592 }
2593 }
2594
2595 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2596 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2597 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2598 for frag sizes. */
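/* Note on the arithmetic below: a "slot address" is the byte address of a
   16-byte bundle with the slot number (0-2) folded into its low bits, so
   3 * (addr >> 4) + (addr & 0x3) converts an address into a count of
   instruction slots.  */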
2599
2600 unsigned long
2601 slot_index (slot_addr, slot_frag, first_addr, first_frag, before_relax)
2602 unsigned long slot_addr;
2603 fragS *slot_frag;
2604 unsigned long first_addr;
2605 fragS *first_frag;
2606 int before_relax;
2607 {
2608 unsigned long index = 0;
2609
2610 /* First time we are called, the initial address and frag are invalid. */
2611 if (first_addr == 0)
2612 return 0;
2613
2614 /* If the two addresses are in different frags, then we need to add in
2615 the remaining size of this frag, and then the entire size of intermediate
2616 frags. */
2617 while (slot_frag != first_frag)
2618 {
2619 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2620
2621 if (! before_relax)
2622 {
2623 /* We can get the final addresses only during and after
2624 relaxation. */
2625 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2626 index += 3 * ((first_frag->fr_next->fr_address
2627 - first_frag->fr_address
2628 - first_frag->fr_fix) >> 4);
2629 }
2630 else
2631 /* We don't know what the final addresses will be. We try our
2632 best to estimate. */
2633 switch (first_frag->fr_type)
2634 {
2635 default:
2636 break;
2637
2638 case rs_space:
2639 as_fatal ("only constant space allocation is supported");
2640 break;
2641
2642 case rs_align:
2643 case rs_align_code:
2644 case rs_align_test:
2645 /* Take alignment into account. Assume the worst case
2646 before relaxation. */
2647 index += 3 * ((1 << first_frag->fr_offset) >> 4);
2648 break;
2649
2650 case rs_org:
2651 if (first_frag->fr_symbol)
2652 {
2653 as_fatal ("only constant offsets are supported");
2654 break;
2655 }
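/* Fall through: a constant org offset is counted like a fill below.  */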
2656 case rs_fill:
2657 index += 3 * (first_frag->fr_offset >> 4);
2658 break;
2659 }
2660
2661 /* Add in the full size of the frag converted to instruction slots. */
2662 index += 3 * (first_frag->fr_fix >> 4);
2663 /* Subtract away the initial part before first_addr. */
2664 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2665 + ((first_addr & 0x3) - (start_addr & 0x3)));
2666
2667 /* Move to the beginning of the next frag. */
2668 first_frag = first_frag->fr_next;
2669 first_addr = (unsigned long) &first_frag->fr_literal;
2670 }
2671
2672 /* Add in the used part of the last frag. */
2673 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2674 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2675 return index;
2676 }
2677
2678 /* Optimize unwind record directives. */
2679
2680 static unw_rec_list *
2681 optimize_unw_records (list)
2682 unw_rec_list *list;
2683 {
2684 if (!list)
2685 return NULL;
2686
2687 /* If the only unwind record is ".prologue" or ".prologue" followed
2688 by ".body", then we can optimize the unwind directives away. */
2689 if (list->r.type == prologue
2690 && (list->next->r.type == endp
2691 || (list->next->r.type == body && list->next->next->r.type == endp)))
2692 return NULL;
2693
2694 return list;
2695 }
2696
2697 /* Given a complete record list, process any records which have
2698 unresolved fields (i.e., length counts for a prologue). After
2699 this has been run, all necessary information should be available
2700 within each record to generate an image. */
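/* Concretely, this pass converts each record's absolute slot number into a
   "when" value relative to the start of the enclosing prologue/body region,
   fills in region lengths, and folds fr/gr/br save masks into the region
   record via set_imask.  */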
2701
2702 static void
2703 fixup_unw_records (list, before_relax)
2704 unw_rec_list *list;
2705 int before_relax;
2706 {
2707 unw_rec_list *ptr, *region = 0;
2708 unsigned long first_addr = 0, rlen = 0, t;
2709 fragS *first_frag = 0;
2710
2711 for (ptr = list; ptr; ptr = ptr->next)
2712 {
2713 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2714 as_bad ("Insn slot not set in unwind record.");
2715 t = slot_index (ptr->slot_number, ptr->slot_frag,
2716 first_addr, first_frag, before_relax);
2717 switch (ptr->r.type)
2718 {
2719 case prologue:
2720 case prologue_gr:
2721 case body:
2722 {
2723 unw_rec_list *last;
2724 int size;
2725 unsigned long last_addr = 0;
2726 fragS *last_frag = NULL;
2727
2728 first_addr = ptr->slot_number;
2729 first_frag = ptr->slot_frag;
2730 /* Find either the next body/prologue start, or the end of
2731 the function, and determine the size of the region. */
2732 for (last = ptr->next; last != NULL; last = last->next)
2733 if (last->r.type == prologue || last->r.type == prologue_gr
2734 || last->r.type == body || last->r.type == endp)
2735 {
2736 last_addr = last->slot_number;
2737 last_frag = last->slot_frag;
2738 break;
2739 }
2740 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2741 before_relax);
2742 rlen = ptr->r.record.r.rlen = size;
2743 if (ptr->r.type == body)
2744 /* End of region. */
2745 region = 0;
2746 else
2747 region = ptr;
2748 break;
2749 }
2750 case epilogue:
2751 if (t < rlen)
2752 ptr->r.record.b.t = rlen - 1 - t;
2753 else
2754 /* This happens when a memory-stack-less procedure uses a
2755 ".restore sp" directive at the end of a region to pop
2756 the frame state. */
2757 ptr->r.record.b.t = 0;
2758 break;
2759
2760 case mem_stack_f:
2761 case mem_stack_v:
2762 case rp_when:
2763 case pfs_when:
2764 case preds_when:
2765 case unat_when:
2766 case lc_when:
2767 case fpsr_when:
2768 case priunat_when_gr:
2769 case priunat_when_mem:
2770 case bsp_when:
2771 case bspstore_when:
2772 case rnat_when:
2773 ptr->r.record.p.t = t;
2774 break;
2775
2776 case spill_reg:
2777 case spill_sprel:
2778 case spill_psprel:
2779 case spill_reg_p:
2780 case spill_sprel_p:
2781 case spill_psprel_p:
2782 ptr->r.record.x.t = t;
2783 break;
2784
2785 case frgr_mem:
2786 if (!region)
2787 {
2788 as_bad ("frgr_mem record before region record!");
2789 return;
2790 }
2791 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2792 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2793 set_imask (region, ptr->r.record.p.frmask, t, 1);
2794 set_imask (region, ptr->r.record.p.grmask, t, 2);
2795 break;
2796 case fr_mem:
2797 if (!region)
2798 {
2799 as_bad ("fr_mem record before region record!");
2800 return;
2801 }
2802 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2803 set_imask (region, ptr->r.record.p.rmask, t, 1);
2804 break;
2805 case gr_mem:
2806 if (!region)
2807 {
2808 as_bad ("gr_mem record before region record!");
2809 return;
2810 }
2811 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2812 set_imask (region, ptr->r.record.p.rmask, t, 2);
2813 break;
2814 case br_mem:
2815 if (!region)
2816 {
2817 as_bad ("br_mem record before region record!");
2818 return;
2819 }
2820 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2821 set_imask (region, ptr->r.record.p.brmask, t, 3);
2822 break;
2823
2824 case gr_gr:
2825 if (!region)
2826 {
2827 as_bad ("gr_gr record before region record!");
2828 return;
2829 }
2830 set_imask (region, ptr->r.record.p.grmask, t, 2);
2831 break;
2832 case br_gr:
2833 if (!region)
2834 {
2835 as_bad ("br_gr record before region record!");
2836 return;
2837 }
2838 set_imask (region, ptr->r.record.p.brmask, t, 3);
2839 break;
2840
2841 default:
2842 break;
2843 }
2844 }
2845 }
2846
2847 /* Estimate the size of a frag before relaxing. We only have one type of frag
2848 to handle here, which is the unwind info frag. */
2849
2850 int
2851 ia64_estimate_size_before_relax (fragS *frag,
2852 asection *segtype ATTRIBUTE_UNUSED)
2853 {
2854 unw_rec_list *list;
2855 int len, size, pad;
2856
2857 /* ??? This code is identical to the first part of ia64_convert_frag. */
2858 list = (unw_rec_list *) frag->fr_opcode;
2859 fixup_unw_records (list, 0);
2860
2861 len = calc_record_size (list);
2862 /* pad to pointer-size boundary. */
2863 pad = len % md.pointer_size;
2864 if (pad != 0)
2865 len += md.pointer_size - pad;
2866 /* Add 8 for the header. */
2867 size = len + 8;
2868 /* Add a pointer for the personality offset. */
2869 if (frag->fr_offset)
2870 size += md.pointer_size;
2871
2872 /* fr_var carries the max_chars that we created the fragment with.
2873 We must, of course, have allocated enough memory earlier. */
2874 assert (frag->fr_var >= size);
2875
2876 return frag->fr_fix + size;
2877 }
2878
2879 /* This function converts a rs_machine_dependent variant frag into a
2880 normal fill frag with the unwind image from the record list. */
2881 void
2882 ia64_convert_frag (fragS *frag)
2883 {
2884 unw_rec_list *list;
2885 int len, size, pad;
2886 valueT flag_value;
2887
2888 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2889 list = (unw_rec_list *) frag->fr_opcode;
2890 fixup_unw_records (list, 0);
2891
2892 len = calc_record_size (list);
2893 /* pad to pointer-size boundary. */
2894 pad = len % md.pointer_size;
2895 if (pad != 0)
2896 len += md.pointer_size - pad;
2897 /* Add 8 for the header. */
2898 size = len + 8;
2899 /* Add a pointer for the personality offset. */
2900 if (frag->fr_offset)
2901 size += md.pointer_size;
2902
2903 /* fr_var carries the max_chars that we created the fragment with.
2904 We must, of course, have allocated enough memory earlier. */
2905 assert (frag->fr_var >= size);
2906
2907 /* Initialize the header area. fr_offset is initialized with
2908 unwind.personality_routine. */
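/* The 8-byte header assembled below packs a version number of 1 into bits
   48 and up, the flag bits computed into flag_value (the handler flags,
   plus the 32-bit-unwind marker for ILP32) starting at bit 32, and the
   length of the descriptor area, counted in pointer-size units, into the
   low bits.  */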
2909 if (frag->fr_offset)
2910 {
2911 if (md.flags & EF_IA_64_ABI64)
2912 flag_value = (bfd_vma) 3 << 32;
2913 else
2914 /* 32-bit unwind info block. */
2915 flag_value = (bfd_vma) 0x1003 << 32;
2916 }
2917 else
2918 flag_value = 0;
2919
2920 md_number_to_chars (frag->fr_literal,
2921 (((bfd_vma) 1 << 48) /* Version. */
2922 | flag_value /* U & E handler flags. */
2923 | (len / md.pointer_size)), /* Length. */
2924 8);
2925
2926 /* Skip the header. */
2927 vbyte_mem_ptr = frag->fr_literal + 8;
2928 process_unw_records (list, output_vbyte_mem);
2929
2930 /* Fill the padding bytes with zeros. */
2931 if (pad != 0)
2932 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
2933 md.pointer_size - pad);
2934
2935 frag->fr_fix += size;
2936 frag->fr_type = rs_fill;
2937 frag->fr_var = 0;
2938 frag->fr_offset = 0;
2939 }
2940
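/* Convert a register expression into the (ab, reg) pair used by spill_reg
   style unwind records: ab 0 covers the preserved general registers r4-r7,
   ab 1 the preserved floating-point registers f2-f5 and f16-f31, ab 2 the
   preserved branch registers b1-b5, and ab 3 the special registers listed
   in the switch below.  Returns non-zero on success.  */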
2941 static int
2942 convert_expr_to_ab_reg (e, ab, regp)
2943 expressionS *e;
2944 unsigned int *ab;
2945 unsigned int *regp;
2946 {
2947 unsigned int reg;
2948
2949 if (e->X_op != O_register)
2950 return 0;
2951
2952 reg = e->X_add_number;
2953 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2954 {
2955 *ab = 0;
2956 *regp = reg - REG_GR;
2957 }
2958 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2959 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2960 {
2961 *ab = 1;
2962 *regp = reg - REG_FR;
2963 }
2964 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2965 {
2966 *ab = 2;
2967 *regp = reg - REG_BR;
2968 }
2969 else
2970 {
2971 *ab = 3;
2972 switch (reg)
2973 {
2974 case REG_PR: *regp = 0; break;
2975 case REG_PSP: *regp = 1; break;
2976 case REG_PRIUNAT: *regp = 2; break;
2977 case REG_BR + 0: *regp = 3; break;
2978 case REG_AR + AR_BSP: *regp = 4; break;
2979 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2980 case REG_AR + AR_RNAT: *regp = 6; break;
2981 case REG_AR + AR_UNAT: *regp = 7; break;
2982 case REG_AR + AR_FPSR: *regp = 8; break;
2983 case REG_AR + AR_PFS: *regp = 9; break;
2984 case REG_AR + AR_LC: *regp = 10; break;
2985
2986 default:
2987 return 0;
2988 }
2989 }
2990 return 1;
2991 }
2992
2993 static int
2994 convert_expr_to_xy_reg (e, xy, regp)
2995 expressionS *e;
2996 unsigned int *xy;
2997 unsigned int *regp;
2998 {
2999 unsigned int reg;
3000
3001 if (e->X_op != O_register)
3002 return 0;
3003
3004 reg = e->X_add_number;
3005
3006 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
3007 {
3008 *xy = 0;
3009 *regp = reg - REG_GR;
3010 }
3011 else if (reg >= REG_FR && reg <= (REG_FR + 127))
3012 {
3013 *xy = 1;
3014 *regp = reg - REG_FR;
3015 }
3016 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3017 {
3018 *xy = 2;
3019 *regp = reg - REG_BR;
3020 }
3021 else
3022 return -1;
3023 return 1;
3024 }
3025
3026 static void
3027 dot_align (int arg)
3028 {
3029 /* The current frag is an alignment frag. */
3030 align_frag = frag_now;
3031 s_align_bytes (arg);
3032 }
3033
3034 static void
3035 dot_radix (dummy)
3036 int dummy ATTRIBUTE_UNUSED;
3037 {
3038 int radix;
3039
3040 SKIP_WHITESPACE ();
3041 radix = *input_line_pointer++;
3042
3043 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
3044 {
3045 as_bad ("Radix `%c' unsupported", radix);
3046 ignore_rest_of_line ();
3047 return;
3048 }
3049 }
3050
3051 /* Helper function for .loc directives. If the assembler is not generating
3052 line number info, then we need to remember which instructions have a .loc
3053 directive, and only call dwarf2_gen_line_info for those instructions. */
3054
3055 static void
3056 dot_loc (int x)
3057 {
3058 CURR_SLOT.loc_directive_seen = 1;
3059 dwarf2_directive_loc (x);
3060 }
3061
3062 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3063 static void
3064 dot_special_section (which)
3065 int which;
3066 {
3067 set_section ((char *) special_section_name[which]);
3068 }
3069
3070 static void
3071 add_unwind_entry (ptr)
3072 unw_rec_list *ptr;
3073 {
3074 if (unwind.tail)
3075 unwind.tail->next = ptr;
3076 else
3077 unwind.list = ptr;
3078 unwind.tail = ptr;
3079
3080 /* The current entry can in fact be a chain of unwind entries. */
3081 if (unwind.current_entry == NULL)
3082 unwind.current_entry = ptr;
3083 }
3084
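/* Handle ".fframe size".  A typical use is ".fframe 16", declaring a
   fixed-size memory stack frame of 16 bytes (recorded as mem_stack_f).  */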
3085 static void
3086 dot_fframe (dummy)
3087 int dummy ATTRIBUTE_UNUSED;
3088 {
3089 expressionS e;
3090
3091 parse_operand (&e);
3092
3093 if (e.X_op != O_constant)
3094 as_bad ("Operand to .fframe must be a constant");
3095 else
3096 add_unwind_entry (output_mem_stack_f (e.X_add_number));
3097 }
3098
3099 static void
3100 dot_vframe (dummy)
3101 int dummy ATTRIBUTE_UNUSED;
3102 {
3103 expressionS e;
3104 unsigned reg;
3105
3106 parse_operand (&e);
3107 reg = e.X_add_number - REG_GR;
3108 if (e.X_op == O_register && reg < 128)
3109 {
3110 add_unwind_entry (output_mem_stack_v ());
3111 if (! (unwind.prologue_mask & 2))
3112 add_unwind_entry (output_psp_gr (reg));
3113 }
3114 else
3115 as_bad ("First operand to .vframe must be a general register");
3116 }
3117
3118 static void
3119 dot_vframesp (dummy)
3120 int dummy ATTRIBUTE_UNUSED;
3121 {
3122 expressionS e;
3123
3124 parse_operand (&e);
3125 if (e.X_op == O_constant)
3126 {
3127 add_unwind_entry (output_mem_stack_v ());
3128 add_unwind_entry (output_psp_sprel (e.X_add_number));
3129 }
3130 else
3131 as_bad ("Operand to .vframesp must be a constant (sp-relative offset)");
3132 }
3133
3134 static void
3135 dot_vframepsp (dummy)
3136 int dummy ATTRIBUTE_UNUSED;
3137 {
3138 expressionS e;
3139
3140 parse_operand (&e);
3141 if (e.X_op == O_constant)
3142 {
3143 add_unwind_entry (output_mem_stack_v ());
3144 add_unwind_entry (output_psp_sprel (e.X_add_number));
3145 }
3146 else
3147 as_bad ("Operand to .vframepsp must be a constant (psp-relative offset)");
3148 }
3149
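/* Handle ".save reg, gr".  Typical uses are ".save ar.pfs, r35" or
   ".save rp, r36" (illustrative examples): the named resource is preserved
   in the given general register, recorded as a "when" entry for the current
   slot plus a "gr" entry naming the destination, unless the .prologue mask
   already announced that save.  */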
3150 static void
3151 dot_save (dummy)
3152 int dummy ATTRIBUTE_UNUSED;
3153 {
3154 expressionS e1, e2;
3155 int sep;
3156 int reg1, reg2;
3157
3158 sep = parse_operand (&e1);
3159 if (sep != ',')
3160 as_bad ("No second operand to .save");
3161 sep = parse_operand (&e2);
3162
3163 reg1 = e1.X_add_number;
3164 reg2 = e2.X_add_number - REG_GR;
3165
3166 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3167 if (e1.X_op == O_register)
3168 {
3169 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3170 {
3171 switch (reg1)
3172 {
3173 case REG_AR + AR_BSP:
3174 add_unwind_entry (output_bsp_when ());
3175 add_unwind_entry (output_bsp_gr (reg2));
3176 break;
3177 case REG_AR + AR_BSPSTORE:
3178 add_unwind_entry (output_bspstore_when ());
3179 add_unwind_entry (output_bspstore_gr (reg2));
3180 break;
3181 case REG_AR + AR_RNAT:
3182 add_unwind_entry (output_rnat_when ());
3183 add_unwind_entry (output_rnat_gr (reg2));
3184 break;
3185 case REG_AR + AR_UNAT:
3186 add_unwind_entry (output_unat_when ());
3187 add_unwind_entry (output_unat_gr (reg2));
3188 break;
3189 case REG_AR + AR_FPSR:
3190 add_unwind_entry (output_fpsr_when ());
3191 add_unwind_entry (output_fpsr_gr (reg2));
3192 break;
3193 case REG_AR + AR_PFS:
3194 add_unwind_entry (output_pfs_when ());
3195 if (! (unwind.prologue_mask & 4))
3196 add_unwind_entry (output_pfs_gr (reg2));
3197 break;
3198 case REG_AR + AR_LC:
3199 add_unwind_entry (output_lc_when ());
3200 add_unwind_entry (output_lc_gr (reg2));
3201 break;
3202 case REG_BR:
3203 add_unwind_entry (output_rp_when ());
3204 if (! (unwind.prologue_mask & 8))
3205 add_unwind_entry (output_rp_gr (reg2));
3206 break;
3207 case REG_PR:
3208 add_unwind_entry (output_preds_when ());
3209 if (! (unwind.prologue_mask & 1))
3210 add_unwind_entry (output_preds_gr (reg2));
3211 break;
3212 case REG_PRIUNAT:
3213 add_unwind_entry (output_priunat_when_gr ());
3214 add_unwind_entry (output_priunat_gr (reg2));
3215 break;
3216 default:
3217 as_bad ("First operand not a valid register");
3218 }
3219 }
3220 else
3221 as_bad ("Second operand not a valid register");
3222 }
3223 else
3224 as_bad ("First operand not a register");
3225 }
3226
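/* Handle ".restore sp" and ".restore sp, count".  This marks the slot where
   the frame state is popped; the optional count gives the number of
   additional nested prologue regions popped at the same time.  */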
3227 static void
3228 dot_restore (dummy)
3229 int dummy ATTRIBUTE_UNUSED;
3230 {
3231 expressionS e1, e2;
3232 unsigned long ecount; /* # of _additional_ regions to pop */
3233 int sep;
3234
3235 sep = parse_operand (&e1);
3236 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3237 {
3238 as_bad ("First operand to .restore must be stack pointer (sp)");
3239 return;
3240 }
3241
3242 if (sep == ',')
3243 {
3244 parse_operand (&e2);
3245 if (e2.X_op != O_constant || e2.X_add_number < 0)
3246 {
3247 as_bad ("Second operand to .restore must be a constant >= 0");
3248 return;
3249 }
3250 ecount = e2.X_add_number;
3251 }
3252 else
3253 ecount = unwind.prologue_count - 1;
3254
3255 if (ecount >= unwind.prologue_count)
3256 {
3257 as_bad ("Epilogue count of %lu exceeds number of nested prologues (%u)",
3258 ecount + 1, unwind.prologue_count);
3259 return;
3260 }
3261
3262 add_unwind_entry (output_epilogue (ecount));
3263
3264 if (ecount < unwind.prologue_count)
3265 unwind.prologue_count -= ecount + 1;
3266 else
3267 unwind.prologue_count = 0;
3268 }
3269
3270 static void
3271 dot_restorereg (dummy)
3272 int dummy ATTRIBUTE_UNUSED;
3273 {
3274 unsigned int ab, reg;
3275 expressionS e;
3276
3277 parse_operand (&e);
3278
3279 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3280 {
3281 as_bad ("First operand to .restorereg must be a preserved register");
3282 return;
3283 }
3284 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3285 }
3286
3287 static void
3288 dot_restorereg_p (dummy)
3289 int dummy ATTRIBUTE_UNUSED;
3290 {
3291 unsigned int qp, ab, reg;
3292 expressionS e1, e2;
3293 int sep;
3294
3295 sep = parse_operand (&e1);
3296 if (sep != ',')
3297 {
3298 as_bad ("No second operand to .restorereg.p");
3299 return;
3300 }
3301
3302 parse_operand (&e2);
3303
3304 qp = e1.X_add_number - REG_P;
3305 if (e1.X_op != O_register || qp > 63)
3306 {
3307 as_bad ("First operand to .restorereg.p must be a predicate");
3308 return;
3309 }
3310
3311 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3312 {
3313 as_bad ("Second operand to .restorereg.p must be a preserved register");
3314 return;
3315 }
3316 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3317 }
3318
3319 static char *special_linkonce_name[] =
3320 {
3321 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3322 };
3323
3324 static void
3325 start_unwind_section (const segT text_seg, int sec_index, int linkonce_empty)
3326 {
3327 /*
3328 Use a slightly ugly scheme to derive the unwind section names from
3329 the text section name:
3330
3331 text sect. unwind table sect.
3332 name: name: comments:
3333 ---------- ----------------- --------------------------------
3334 .text .IA_64.unwind
3335 .text.foo .IA_64.unwind.text.foo
3336 .foo .IA_64.unwind.foo
3337 .gnu.linkonce.t.foo
3338 .gnu.linkonce.ia64unw.foo
3339 _info .IA_64.unwind_info gas issues error message (ditto)
3340 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3341
3342 This mapping is done so that:
3343
3344 (a) An object file with unwind info only in .text will use
3345 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3346 This follows the letter of the ABI and also ensures backwards
3347 compatibility with older toolchains.
3348
3349 (b) An object file with unwind info in multiple text sections
3350 will use separate unwind sections for each text section.
3351 This allows us to properly set the "sh_info" and "sh_link"
3352 fields in SHT_IA_64_UNWIND as required by the ABI and also
3353 lets GNU ld support programs with multiple segments
3354 containing unwind info (as might be the case for certain
3355 embedded applications).
3356
3357 (c) An error is issued if there would be a name clash.
3358 */
3359
3360 const char *text_name, *sec_text_name;
3361 char *sec_name;
3362 const char *prefix = special_section_name [sec_index];
3363 const char *suffix;
3364 size_t prefix_len, suffix_len, sec_name_len;
3365
3366 sec_text_name = segment_name (text_seg);
3367 text_name = sec_text_name;
3368 if (strncmp (text_name, "_info", 5) == 0)
3369 {
3370 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3371 text_name);
3372 ignore_rest_of_line ();
3373 return;
3374 }
3375 if (strcmp (text_name, ".text") == 0)
3376 text_name = "";
3377
3378 /* Build the unwind section name by appending the (possibly stripped)
3379 text section name to the unwind prefix. */
3380 suffix = text_name;
3381 if (strncmp (text_name, ".gnu.linkonce.t.",
3382 sizeof (".gnu.linkonce.t.") - 1) == 0)
3383 {
3384 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3385 suffix += sizeof (".gnu.linkonce.t.") - 1;
3386 }
3387 else if (linkonce_empty)
3388 return;
3389
3390 prefix_len = strlen (prefix);
3391 suffix_len = strlen (suffix);
3392 sec_name_len = prefix_len + suffix_len;
3393 sec_name = alloca (sec_name_len + 1);
3394 memcpy (sec_name, prefix, prefix_len);
3395 memcpy (sec_name + prefix_len, suffix, suffix_len);
3396 sec_name [sec_name_len] = '\0';
3397
3398 /* Handle COMDAT group. */
3399 if (suffix == text_name && (text_seg->flags & SEC_LINK_ONCE) != 0)
3400 {
3401 char *section;
3402 size_t len, group_name_len;
3403 const char *group_name = elf_group_name (text_seg);
3404
3405 if (group_name == NULL)
3406 {
3407 as_bad ("Group section `%s' has no group signature",
3408 sec_text_name);
3409 ignore_rest_of_line ();
3410 return;
3411 }
3412 /* We have to construct a fake section directive. */
3413 group_name_len = strlen (group_name);
3414 len = (sec_name_len
3415 + 16 /* ,"aG",@progbits, */
3416 + group_name_len /* ,group_name */
3417 + 7); /* ,comdat */
3418
3419 section = alloca (len + 1);
3420 memcpy (section, sec_name, sec_name_len);
3421 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3422 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3423 memcpy (section + len - 7, ",comdat", 7);
3424 section [len] = '\0';
3425 set_section (section);
3426 }
3427 else
3428 {
3429 set_section (sec_name);
3430 bfd_set_section_flags (stdoutput, now_seg,
3431 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3432 }
3433
3434 elf_linked_to_section (now_seg) = text_seg;
3435 }
3436
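/* Emit the unwind info image for the current procedure: an 8-byte header,
   the unwind descriptor bytes padded to a pointer-size boundary and, when a
   personality routine was given, one trailing pointer-sized slot carrying a
   relocation against that routine.  */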
3437 static void
3438 generate_unwind_image (const segT text_seg)
3439 {
3440 int size, pad;
3441 unw_rec_list *list;
3442
3443 /* Mark the end of the unwind info, so that we can compute the size of the
3444 last unwind region. */
3445 add_unwind_entry (output_endp ());
3446
3447 /* Force out pending instructions, to make sure all unwind records have
3448 a valid slot_number field. */
3449 ia64_flush_insns ();
3450
3451 /* Generate the unwind record. */
3452 list = optimize_unw_records (unwind.list);
3453 fixup_unw_records (list, 1);
3454 size = calc_record_size (list);
3455
3456 if (size > 0 || unwind.force_unwind_entry)
3457 {
3458 unwind.force_unwind_entry = 0;
3459 /* pad to pointer-size boundary. */
3460 pad = size % md.pointer_size;
3461 if (pad != 0)
3462 size += md.pointer_size - pad;
3463 /* Add 8 for the header. */
3464 size += 8;
3465 /* Add a pointer for the personality offset. */
3466 if (unwind.personality_routine)
3467 size += md.pointer_size;
3468 }
3469
3470 /* If there are unwind records, switch sections, and output the info. */
3471 if (size != 0)
3472 {
3473 expressionS exp;
3474 bfd_reloc_code_real_type reloc;
3475
3476 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 0);
3477
3478 /* Make sure the section has 4 byte alignment for ILP32 and
3479 8 byte alignment for LP64. */
3480 frag_align (md.pointer_size_shift, 0, 0);
3481 record_alignment (now_seg, md.pointer_size_shift);
3482
3483 /* Set expression which points to start of unwind descriptor area. */
3484 unwind.info = expr_build_dot ();
3485
3486 frag_var (rs_machine_dependent, size, size, 0, 0,
3487 (offsetT) (long) unwind.personality_routine,
3488 (char *) list);
3489
3490 /* Add the personality address to the image. */
3491 if (unwind.personality_routine != 0)
3492 {
3493 exp.X_op = O_symbol;
3494 exp.X_add_symbol = unwind.personality_routine;
3495 exp.X_add_number = 0;
3496
3497 if (md.flags & EF_IA_64_BE)
3498 {
3499 if (md.flags & EF_IA_64_ABI64)
3500 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3501 else
3502 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3503 }
3504 else
3505 {
3506 if (md.flags & EF_IA_64_ABI64)
3507 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3508 else
3509 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3510 }
3511
3512 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3513 md.pointer_size, &exp, 0, reloc);
3514 unwind.personality_routine = 0;
3515 }
3516 }
3517 else
3518 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 1);
3519
3520 free_saved_prologue_counts ();
3521 unwind.list = unwind.tail = unwind.current_entry = NULL;
3522 }
3523
3524 static void
3525 dot_handlerdata (dummy)
3526 int dummy ATTRIBUTE_UNUSED;
3527 {
3528 unwind.force_unwind_entry = 1;
3529
3530 /* Remember which segment we're in so we can switch back after .endp */
3531 unwind.saved_text_seg = now_seg;
3532 unwind.saved_text_subseg = now_subseg;
3533
3534 /* Generate unwind info into unwind-info section and then leave that
3535 section as the currently active one so dataXX directives go into
3536 the language specific data area of the unwind info block. */
3537 generate_unwind_image (now_seg);
3538 demand_empty_rest_of_line ();
3539 }
3540
3541 static void
3542 dot_unwentry (dummy)
3543 int dummy ATTRIBUTE_UNUSED;
3544 {
3545 unwind.force_unwind_entry = 1;
3546 demand_empty_rest_of_line ();
3547 }
3548
3549 static void
3550 dot_altrp (dummy)
3551 int dummy ATTRIBUTE_UNUSED;
3552 {
3553 expressionS e;
3554 unsigned reg;
3555
3556 parse_operand (&e);
3557 reg = e.X_add_number - REG_BR;
3558 if (e.X_op == O_register && reg < 8)
3559 add_unwind_entry (output_rp_br (reg));
3560 else
3561 as_bad ("First operand not a valid branch register");
3562 }
3563
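/* Handle ".savesp reg, offset" and ".savepsp reg, offset", e.g.
   ".savesp ar.unat, 16" (illustrative): the register is saved in memory at
   the given sp-relative (respectively psp-relative) offset.  */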
3564 static void
3565 dot_savemem (psprel)
3566 int psprel;
3567 {
3568 expressionS e1, e2;
3569 int sep;
3570 int reg1, val;
3571
3572 sep = parse_operand (&e1);
3573 if (sep != ',')
3574 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3575 sep = parse_operand (&e2);
3576
3577 reg1 = e1.X_add_number;
3578 val = e2.X_add_number;
3579
3580 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3581 if (e1.X_op == O_register)
3582 {
3583 if (e2.X_op == O_constant)
3584 {
3585 switch (reg1)
3586 {
3587 case REG_AR + AR_BSP:
3588 add_unwind_entry (output_bsp_when ());
3589 add_unwind_entry ((psprel
3590 ? output_bsp_psprel
3591 : output_bsp_sprel) (val));
3592 break;
3593 case REG_AR + AR_BSPSTORE:
3594 add_unwind_entry (output_bspstore_when ());
3595 add_unwind_entry ((psprel
3596 ? output_bspstore_psprel
3597 : output_bspstore_sprel) (val));
3598 break;
3599 case REG_AR + AR_RNAT:
3600 add_unwind_entry (output_rnat_when ());
3601 add_unwind_entry ((psprel
3602 ? output_rnat_psprel
3603 : output_rnat_sprel) (val));
3604 break;
3605 case REG_AR + AR_UNAT:
3606 add_unwind_entry (output_unat_when ());
3607 add_unwind_entry ((psprel
3608 ? output_unat_psprel
3609 : output_unat_sprel) (val));
3610 break;
3611 case REG_AR + AR_FPSR:
3612 add_unwind_entry (output_fpsr_when ());
3613 add_unwind_entry ((psprel
3614 ? output_fpsr_psprel
3615 : output_fpsr_sprel) (val));
3616 break;
3617 case REG_AR + AR_PFS:
3618 add_unwind_entry (output_pfs_when ());
3619 add_unwind_entry ((psprel
3620 ? output_pfs_psprel
3621 : output_pfs_sprel) (val));
3622 break;
3623 case REG_AR + AR_LC:
3624 add_unwind_entry (output_lc_when ());
3625 add_unwind_entry ((psprel
3626 ? output_lc_psprel
3627 : output_lc_sprel) (val));
3628 break;
3629 case REG_BR:
3630 add_unwind_entry (output_rp_when ());
3631 add_unwind_entry ((psprel
3632 ? output_rp_psprel
3633 : output_rp_sprel) (val));
3634 break;
3635 case REG_PR:
3636 add_unwind_entry (output_preds_when ());
3637 add_unwind_entry ((psprel
3638 ? output_preds_psprel
3639 : output_preds_sprel) (val));
3640 break;
3641 case REG_PRIUNAT:
3642 add_unwind_entry (output_priunat_when_mem ());
3643 add_unwind_entry ((psprel
3644 ? output_priunat_psprel
3645 : output_priunat_sprel) (val));
3646 break;
3647 default:
3648 as_bad ("First operand not a valid register");
3649 }
3650 }
3651 else
3652 as_bad ("Second operand not a valid constant");
3653 }
3654 else
3655 as_bad ("First operand not a register");
3656 }
3657
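/* Handlers for the mask-style save directives, e.g. ".save.g 0x3",
   ".save.f 0x1", ".save.b 0x1, r40" and ".save.gf 0x3, 0x6" (illustrative
   examples): each mask selects which preserved general, floating-point or
   branch registers the following instructions save.  */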
3658 static void
3659 dot_saveg (dummy)
3660 int dummy ATTRIBUTE_UNUSED;
3661 {
3662 expressionS e1, e2;
3663 int sep;
3664 sep = parse_operand (&e1);
3665 if (sep == ',')
3666 parse_operand (&e2);
3667
3668 if (e1.X_op != O_constant)
3669 as_bad ("First operand to .save.g must be a constant.");
3670 else
3671 {
3672 int grmask = e1.X_add_number;
3673 if (sep != ',')
3674 add_unwind_entry (output_gr_mem (grmask));
3675 else
3676 {
3677 int reg = e2.X_add_number - REG_GR;
3678 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3679 add_unwind_entry (output_gr_gr (grmask, reg));
3680 else
3681 as_bad ("Second operand is an invalid register.");
3682 }
3683 }
3684 }
3685
3686 static void
3687 dot_savef (dummy)
3688 int dummy ATTRIBUTE_UNUSED;
3689 {
3690 expressionS e1;
3691 int sep;
3692 sep = parse_operand (&e1);
3693
3694 if (e1.X_op != O_constant)
3695 as_bad ("Operand to .save.f must be a constant.");
3696 else
3697 add_unwind_entry (output_fr_mem (e1.X_add_number));
3698 }
3699
3700 static void
3701 dot_saveb (dummy)
3702 int dummy ATTRIBUTE_UNUSED;
3703 {
3704 expressionS e1, e2;
3705 unsigned int reg;
3706 unsigned char sep;
3707 int brmask;
3708
3709 sep = parse_operand (&e1);
3710 if (e1.X_op != O_constant)
3711 {
3712 as_bad ("First operand to .save.b must be a constant.");
3713 return;
3714 }
3715 brmask = e1.X_add_number;
3716
3717 if (sep == ',')
3718 {
3719 sep = parse_operand (&e2);
3720 reg = e2.X_add_number - REG_GR;
3721 if (e2.X_op != O_register || reg > 127)
3722 {
3723 as_bad ("Second operand to .save.b must be a general register.");
3724 return;
3725 }
3726 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3727 }
3728 else
3729 add_unwind_entry (output_br_mem (brmask));
3730
3731 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3732 demand_empty_rest_of_line ();
3733 }
3734
3735 static void
3736 dot_savegf (dummy)
3737 int dummy ATTRIBUTE_UNUSED;
3738 {
3739 expressionS e1, e2;
3740 int sep;
3741 sep = parse_operand (&e1);
3742 if (sep == ',')
3743 parse_operand (&e2);
3744
3745 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3746 as_bad ("Both operands of .save.gf must be constants.");
3747 else
3748 {
3749 int grmask = e1.X_add_number;
3750 int frmask = e2.X_add_number;
3751 add_unwind_entry (output_frgr_mem (grmask, frmask));
3752 }
3753 }
3754
3755 static void
3756 dot_spill (dummy)
3757 int dummy ATTRIBUTE_UNUSED;
3758 {
3759 expressionS e;
3760 unsigned char sep;
3761
3762 sep = parse_operand (&e);
3763 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3764 demand_empty_rest_of_line ();
3765
3766 if (e.X_op != O_constant)
3767 as_bad ("Operand to .spill must be a constant");
3768 else
3769 add_unwind_entry (output_spill_base (e.X_add_number));
3770 }
3771
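/* Handle ".spillreg reg, treg", e.g. ".spillreg b2, r40" (illustrative):
   the preserved register is spilled to another register instead of memory.
   The .spillsp/.spillpsp variants below take an sp- or psp-relative offset,
   and the ".p" forms add a qualifying predicate as the first operand.  */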
3772 static void
3773 dot_spillreg (dummy)
3774 int dummy ATTRIBUTE_UNUSED;
3775 {
3776 int sep, ab, xy, reg, treg;
3777 expressionS e1, e2;
3778
3779 sep = parse_operand (&e1);
3780 if (sep != ',')
3781 {
3782 as_bad ("No second operand to .spillreg");
3783 return;
3784 }
3785
3786 parse_operand (&e2);
3787
3788 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3789 {
3790 as_bad ("First operand to .spillreg must be a preserved register");
3791 return;
3792 }
3793
3794 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3795 {
3796 as_bad ("Second operand to .spillreg must be a register");
3797 return;
3798 }
3799
3800 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3801 }
3802
3803 static void
3804 dot_spillmem (psprel)
3805 int psprel;
3806 {
3807 expressionS e1, e2;
3808 int sep, ab, reg;
3809
3810 sep = parse_operand (&e1);
3811 if (sep != ',')
3812 {
3813 as_bad ("Second operand missing");
3814 return;
3815 }
3816
3817 parse_operand (&e2);
3818
3819 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3820 {
3821 as_bad ("First operand to .spill%s must be a preserved register",
3822 psprel ? "psp" : "sp");
3823 return;
3824 }
3825
3826 if (e2.X_op != O_constant)
3827 {
3828 as_bad ("Second operand to .spill%s must be a constant",
3829 psprel ? "psp" : "sp");
3830 return;
3831 }
3832
3833 if (psprel)
3834 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3835 else
3836 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3837 }
3838
3839 static void
3840 dot_spillreg_p (dummy)
3841 int dummy ATTRIBUTE_UNUSED;
3842 {
3843 int sep, ab, xy, reg, treg;
3844 expressionS e1, e2, e3;
3845 unsigned int qp;
3846
3847 sep = parse_operand (&e1);
3848 if (sep != ',')
3849 {
3850 as_bad ("No second and third operand to .spillreg.p");
3851 return;
3852 }
3853
3854 sep = parse_operand (&e2);
3855 if (sep != ',')
3856 {
3857 as_bad ("No third operand to .spillreg.p");
3858 return;
3859 }
3860
3861 parse_operand (&e3);
3862
3863 qp = e1.X_add_number - REG_P;
3864
3865 if (e1.X_op != O_register || qp > 63)
3866 {
3867 as_bad ("First operand to .spillreg.p must be a predicate");
3868 return;
3869 }
3870
3871 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3872 {
3873 as_bad ("Second operand to .spillreg.p must be a preserved register");
3874 return;
3875 }
3876
3877 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3878 {
3879 as_bad ("Third operand to .spillreg.p must be a register");
3880 return;
3881 }
3882
3883 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3884 }
3885
3886 static void
3887 dot_spillmem_p (psprel)
3888 int psprel;
3889 {
3890 expressionS e1, e2, e3;
3891 int sep, ab, reg;
3892 unsigned int qp;
3893
3894 sep = parse_operand (&e1);
3895 if (sep != ',')
3896 {
3897 as_bad ("Second operand missing");
3898 return;
3899 }
3900
3901 sep = parse_operand (&e2);
3902 if (sep != ',')
3903 {
3904 as_bad ("Third operand missing");
3905 return;
3906 }
3907
3908 parse_operand (&e3);
3909
3910 qp = e1.X_add_number - REG_P;
3911 if (e1.X_op != O_register || qp > 63)
3912 {
3913 as_bad ("First operand to .spill%s_p must be a predicate",
3914 psprel ? "psp" : "sp");
3915 return;
3916 }
3917
3918 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3919 {
3920 as_bad ("Second operand to .spill%s_p must be a preserved register",
3921 psprel ? "psp" : "sp");
3922 return;
3923 }
3924
3925 if (e3.X_op != O_constant)
3926 {
3927 as_bad ("Third operand to .spill%s_p must be a constant",
3928 psprel ? "psp" : "sp");
3929 return;
3930 }
3931
3932 if (psprel)
3933 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3934 else
3935 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3936 }
3937
3938 static unsigned int
3939 get_saved_prologue_count (lbl)
3940 unsigned long lbl;
3941 {
3942 label_prologue_count *lpc = unwind.saved_prologue_counts;
3943
3944 while (lpc != NULL && lpc->label_number != lbl)
3945 lpc = lpc->next;
3946
3947 if (lpc != NULL)
3948 return lpc->prologue_count;
3949
3950 as_bad ("Missing .label_state %ld", lbl);
3951 return 1;
3952 }
3953
3954 static void
3955 save_prologue_count (lbl, count)
3956 unsigned long lbl;
3957 unsigned int count;
3958 {
3959 label_prologue_count *lpc = unwind.saved_prologue_counts;
3960
3961 while (lpc != NULL && lpc->label_number != lbl)
3962 lpc = lpc->next;
3963
3964 if (lpc != NULL)
3965 lpc->prologue_count = count;
3966 else
3967 {
3968 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
3969
3970 new_lpc->next = unwind.saved_prologue_counts;
3971 new_lpc->label_number = lbl;
3972 new_lpc->prologue_count = count;
3973 unwind.saved_prologue_counts = new_lpc;
3974 }
3975 }
3976
3977 static void
3978 free_saved_prologue_counts ()
3979 {
3980 label_prologue_count *lpc = unwind.saved_prologue_counts;
3981 label_prologue_count *next;
3982
3983 while (lpc != NULL)
3984 {
3985 next = lpc->next;
3986 free (lpc);
3987 lpc = next;
3988 }
3989
3990 unwind.saved_prologue_counts = NULL;
3991 }
3992
3993 static void
3994 dot_label_state (dummy)
3995 int dummy ATTRIBUTE_UNUSED;
3996 {
3997 expressionS e;
3998
3999 parse_operand (&e);
4000 if (e.X_op != O_constant)
4001 {
4002 as_bad ("Operand to .label_state must be a constant");
4003 return;
4004 }
4005 add_unwind_entry (output_label_state (e.X_add_number));
4006 save_prologue_count (e.X_add_number, unwind.prologue_count);
4007 }
4008
4009 static void
4010 dot_copy_state (dummy)
4011 int dummy ATTRIBUTE_UNUSED;
4012 {
4013 expressionS e;
4014
4015 parse_operand (&e);
4016 if (e.X_op != O_constant)
4017 {
4018 as_bad ("Operand to .copy_state must be a constant");
4019 return;
4020 }
4021 add_unwind_entry (output_copy_state (e.X_add_number));
4022 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4023 }
4024
4025 static void
4026 dot_unwabi (dummy)
4027 int dummy ATTRIBUTE_UNUSED;
4028 {
4029 expressionS e1, e2;
4030 unsigned char sep;
4031
4032 sep = parse_operand (&e1);
4033 if (sep != ',')
4034 {
4035 as_bad ("Second operand to .unwabi missing");
4036 return;
4037 }
4038 sep = parse_operand (&e2);
4039 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4040 demand_empty_rest_of_line ();
4041
4042 if (e1.X_op != O_constant)
4043 {
4044 as_bad ("First operand to .unwabi must be a constant");
4045 return;
4046 }
4047
4048 if (e2.X_op != O_constant)
4049 {
4050 as_bad ("Second operand to .unwabi must be a constant");
4051 return;
4052 }
4053
4054 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
4055 }
4056
4057 static void
4058 dot_personality (dummy)
4059 int dummy ATTRIBUTE_UNUSED;
4060 {
4061 char *name, *p, c;
4062 SKIP_WHITESPACE ();
4063 name = input_line_pointer;
4064 c = get_symbol_end ();
4065 p = input_line_pointer;
4066 unwind.personality_routine = symbol_find_or_make (name);
4067 unwind.force_unwind_entry = 1;
4068 *p = c;
4069 SKIP_WHITESPACE ();
4070 demand_empty_rest_of_line ();
4071 }
4072
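/* Handle ".proc name[, name...]", e.g. ".proc foo": marks the start of a
   procedure (plus any alternate entry points) whose unwind region is later
   closed by a matching ".endp".  */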
4073 static void
4074 dot_proc (dummy)
4075 int dummy ATTRIBUTE_UNUSED;
4076 {
4077 char *name, *p, c;
4078 symbolS *sym;
4079
4080 unwind.proc_start = expr_build_dot ();
4081 /* Parse names of main and alternate entry points and mark them as
4082 function symbols: */
4083 while (1)
4084 {
4085 SKIP_WHITESPACE ();
4086 name = input_line_pointer;
4087 c = get_symbol_end ();
4088 p = input_line_pointer;
4089 sym = symbol_find_or_make (name);
4090 if (unwind.proc_start == 0)
4091 {
4092 unwind.proc_start = sym;
4093 }
4094 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4095 *p = c;
4096 SKIP_WHITESPACE ();
4097 if (*input_line_pointer != ',')
4098 break;
4099 ++input_line_pointer;
4100 }
4101 demand_empty_rest_of_line ();
4102 ia64_do_align (16);
4103
4104 unwind.prologue_count = 0;
4105 unwind.list = unwind.tail = unwind.current_entry = NULL;
4106 unwind.personality_routine = 0;
4107 }
4108
4109 static void
4110 dot_body (dummy)
4111 int dummy ATTRIBUTE_UNUSED;
4112 {
4113 unwind.prologue = 0;
4114 unwind.prologue_mask = 0;
4115
4116 add_unwind_entry (output_body ());
4117 demand_empty_rest_of_line ();
4118 }
4119
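/* .prologue [mask, grsave]
   Illustrative usage: `.prologue 12, 32' opens a prologue region and
   supplies a mask of saved registers together with the first GR (here
   r32) used to hold them; with no operands a plain prologue region is
   opened.  */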
4120 static void
4121 dot_prologue (dummy)
4122 int dummy ATTRIBUTE_UNUSED;
4123 {
4124 unsigned char sep;
4125 int mask = 0, grsave = 0;
4126
4127 if (!is_it_end_of_statement ())
4128 {
4129 expressionS e1, e2;
4130 sep = parse_operand (&e1);
4131 if (sep != ',')
4132 as_bad ("No second operand to .prologue");
4133 sep = parse_operand (&e2);
4134 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4135 demand_empty_rest_of_line ();
4136
4137 if (e1.X_op == O_constant)
4138 {
4139 mask = e1.X_add_number;
4140
4141 if (e2.X_op == O_constant)
4142 grsave = e2.X_add_number;
4143 else if (e2.X_op == O_register
4144 && (grsave = e2.X_add_number - REG_GR) < 128)
4145 ;
4146 else
4147 as_bad ("Second operand not a constant or general register");
4148
4149 add_unwind_entry (output_prologue_gr (mask, grsave));
4150 }
4151 else
4152 as_bad ("First operand not a constant");
4153 }
4154 else
4155 add_unwind_entry (output_prologue ());
4156
4157 unwind.prologue = 1;
4158 unwind.prologue_mask = mask;
4159 ++unwind.prologue_count;
4160 }
4161
4162 static void
4163 dot_endp (dummy)
4164 int dummy ATTRIBUTE_UNUSED;
4165 {
4166 expressionS e;
4167 unsigned char *ptr;
4168 int bytes_per_address;
4169 long where;
4170 segT saved_seg;
4171 subsegT saved_subseg;
4172 char *name, *p, c;
4173 symbolS *sym;
4174
4175 if (unwind.saved_text_seg)
4176 {
4177 saved_seg = unwind.saved_text_seg;
4178 saved_subseg = unwind.saved_text_subseg;
4179 unwind.saved_text_seg = NULL;
4180 }
4181 else
4182 {
4183 saved_seg = now_seg;
4184 saved_subseg = now_subseg;
4185 }
4186
4187 insn_group_break (1, 0, 0);
4188
4189 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4190 if (!unwind.info)
4191 generate_unwind_image (saved_seg);
4192
4193 if (unwind.info || unwind.force_unwind_entry)
4194 {
4195 subseg_set (md.last_text_seg, 0);
4196 unwind.proc_end = expr_build_dot ();
4197
4198 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 0);
4199
4200 /* Make sure that section has 4 byte alignment for ILP32 and
4201 8 byte alignment for LP64. */
4202 record_alignment (now_seg, md.pointer_size_shift);
4203
4204 /* Need space for 3 pointers for procedure start, procedure end,
4205 and unwind info. */
4206 ptr = frag_more (3 * md.pointer_size);
4207 where = frag_now_fix () - (3 * md.pointer_size);
4208 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4209
4210 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4211 e.X_op = O_pseudo_fixup;
4212 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4213 e.X_add_number = 0;
4214 e.X_add_symbol = unwind.proc_start;
4215 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4216
4217 e.X_op = O_pseudo_fixup;
4218 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4219 e.X_add_number = 0;
4220 e.X_add_symbol = unwind.proc_end;
4221 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4222 bytes_per_address, &e);
4223
4224 if (unwind.info)
4225 {
4226 e.X_op = O_pseudo_fixup;
4227 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4228 e.X_add_number = 0;
4229 e.X_add_symbol = unwind.info;
4230 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4231 bytes_per_address, &e);
4232 }
4233 else
4234 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
4235 bytes_per_address);
4236
4237 }
4238 else
4239 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 1);
4240
4241 subseg_set (saved_seg, saved_subseg);
4242
4243 /* Parse names of main and alternate entry points and set symbol sizes. */
4244 while (1)
4245 {
4246 SKIP_WHITESPACE ();
4247 name = input_line_pointer;
4248 c = get_symbol_end ();
4249 p = input_line_pointer;
4250 sym = symbol_find (name);
4251 if (sym && unwind.proc_start
4252 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
4253 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
4254 {
4255 fragS *fr = symbol_get_frag (unwind.proc_start);
4256 fragS *frag = symbol_get_frag (sym);
4257
4258 /* Check whether the function label is at or beyond the last
4259 .proc directive. */
4260 while (fr && fr != frag)
4261 fr = fr->fr_next;
4262 if (fr)
4263 {
4264 if (frag == frag_now && SEG_NORMAL (now_seg))
4265 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4266 else
4267 {
4268 symbol_get_obj (sym)->size =
4269 (expressionS *) xmalloc (sizeof (expressionS));
4270 symbol_get_obj (sym)->size->X_op = O_subtract;
4271 symbol_get_obj (sym)->size->X_add_symbol
4272 = symbol_new (FAKE_LABEL_NAME, now_seg,
4273 frag_now_fix (), frag_now);
4274 symbol_get_obj (sym)->size->X_op_symbol = sym;
4275 symbol_get_obj (sym)->size->X_add_number = 0;
4276 }
4277 }
4278 }
4279 *p = c;
4280 SKIP_WHITESPACE ();
4281 if (*input_line_pointer != ',')
4282 break;
4283 ++input_line_pointer;
4284 }
4285 demand_empty_rest_of_line ();
4286 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4287 }
4288
4289 static void
4290 dot_template (template)
4291 int template;
4292 {
4293 CURR_SLOT.user_template = template;
4294 }
4295
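/* .regstk ins, locals, outs, rotating
   Illustrative usage: `.regstk 2, 3, 2, 0' describes a register frame
   with two input, three local and two output registers and no rotating
   registers; with no operands the frame is reset to empty.  */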
4296 static void
4297 dot_regstk (dummy)
4298 int dummy ATTRIBUTE_UNUSED;
4299 {
4300 int ins, locs, outs, rots;
4301
4302 if (is_it_end_of_statement ())
4303 ins = locs = outs = rots = 0;
4304 else
4305 {
4306 ins = get_absolute_expression ();
4307 if (*input_line_pointer++ != ',')
4308 goto err;
4309 locs = get_absolute_expression ();
4310 if (*input_line_pointer++ != ',')
4311 goto err;
4312 outs = get_absolute_expression ();
4313 if (*input_line_pointer++ != ',')
4314 goto err;
4315 rots = get_absolute_expression ();
4316 }
4317 set_regstack (ins, locs, outs, rots);
4318 return;
4319
4320 err:
4321 as_bad ("Comma expected");
4322 ignore_rest_of_line ();
4323 }
4324
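/* .rotr name1[n1] [, name2[n2] ...]   (similarly .rotf and .rotp)
   Illustrative usage: `.rotr in[4], out[4]' names two groups of four
   rotating GRs starting at r32, so in[0] refers to r32 and out[0] to
   r36.  */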
4325 static void
4326 dot_rot (type)
4327 int type;
4328 {
4329 unsigned num_regs, num_alloced = 0;
4330 struct dynreg **drpp, *dr;
4331 int ch, base_reg = 0;
4332 char *name, *start;
4333 size_t len;
4334
4335 switch (type)
4336 {
4337 case DYNREG_GR: base_reg = REG_GR + 32; break;
4338 case DYNREG_FR: base_reg = REG_FR + 32; break;
4339 case DYNREG_PR: base_reg = REG_P + 16; break;
4340 default: break;
4341 }
4342
4343 /* First, remove existing names from hash table. */
4344 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4345 {
4346 hash_delete (md.dynreg_hash, dr->name);
4347 dr->num_regs = 0;
4348 }
4349
4350 drpp = &md.dynreg[type];
4351 while (1)
4352 {
4353 start = input_line_pointer;
4354 ch = get_symbol_end ();
4355 *input_line_pointer = ch;
4356 len = (input_line_pointer - start);
4357
4358 SKIP_WHITESPACE ();
4359 if (*input_line_pointer != '[')
4360 {
4361 as_bad ("Expected '['");
4362 goto err;
4363 }
4364 ++input_line_pointer; /* skip '[' */
4365
4366 num_regs = get_absolute_expression ();
4367
4368 if (*input_line_pointer++ != ']')
4369 {
4370 as_bad ("Expected ']'");
4371 goto err;
4372 }
4373 SKIP_WHITESPACE ();
4374
4375 num_alloced += num_regs;
4376 switch (type)
4377 {
4378 case DYNREG_GR:
4379 if (num_alloced > md.rot.num_regs)
4380 {
4381 as_bad ("Used more than the declared %d rotating registers",
4382 md.rot.num_regs);
4383 goto err;
4384 }
4385 break;
4386 case DYNREG_FR:
4387 if (num_alloced > 96)
4388 {
4389 as_bad ("Used more than the available 96 rotating registers");
4390 goto err;
4391 }
4392 break;
4393 case DYNREG_PR:
4394 if (num_alloced > 48)
4395 {
4396 as_bad ("Used more than the available 48 rotating registers");
4397 goto err;
4398 }
4399 break;
4400
4401 default:
4402 break;
4403 }
4404
4405 name = obstack_alloc (&notes, len + 1);
4406 memcpy (name, start, len);
4407 name[len] = '\0';
4408
4409 if (!*drpp)
4410 {
4411 *drpp = obstack_alloc (&notes, sizeof (*dr));
4412 memset (*drpp, 0, sizeof (*dr));
4413 }
4414
4415 dr = *drpp;
4416 dr->name = name;
4417 dr->num_regs = num_regs;
4418 dr->base = base_reg;
4419 drpp = &dr->next;
4420 base_reg += num_regs;
4421
4422 if (hash_insert (md.dynreg_hash, name, dr))
4423 {
4424 as_bad ("Attempt to redefine register set `%s'", name);
4425 goto err;
4426 }
4427
4428 if (*input_line_pointer != ',')
4429 break;
4430 ++input_line_pointer; /* skip comma */
4431 SKIP_WHITESPACE ();
4432 }
4433 demand_empty_rest_of_line ();
4434 return;
4435
4436 err:
4437 ignore_rest_of_line ();
4438 }
4439
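/* .lsb / .msb
   Select the byte order used for data emitted into the current section;
   e.g. (illustrative) after `.msb', subsequent data directives are
   emitted big-endian.  */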
4440 static void
4441 dot_byteorder (byteorder)
4442 int byteorder;
4443 {
4444 segment_info_type *seginfo = seg_info (now_seg);
4445
4446 if (byteorder == -1)
4447 {
4448 if (seginfo->tc_segment_info_data.endian == 0)
4449 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4450 byteorder = seginfo->tc_segment_info_data.endian == 1;
4451 }
4452 else
4453 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4454
4455 if (target_big_endian != byteorder)
4456 {
4457 target_big_endian = byteorder;
4458 if (target_big_endian)
4459 {
4460 ia64_number_to_chars = number_to_chars_bigendian;
4461 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4462 }
4463 else
4464 {
4465 ia64_number_to_chars = number_to_chars_littleendian;
4466 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4467 }
4468 }
4469 }
4470
4471 static void
4472 dot_psr (dummy)
4473 int dummy ATTRIBUTE_UNUSED;
4474 {
4475 char *option;
4476 int ch;
4477
4478 while (1)
4479 {
4480 option = input_line_pointer;
4481 ch = get_symbol_end ();
4482 if (strcmp (option, "lsb") == 0)
4483 md.flags &= ~EF_IA_64_BE;
4484 else if (strcmp (option, "msb") == 0)
4485 md.flags |= EF_IA_64_BE;
4486 else if (strcmp (option, "abi32") == 0)
4487 md.flags &= ~EF_IA_64_ABI64;
4488 else if (strcmp (option, "abi64") == 0)
4489 md.flags |= EF_IA_64_ABI64;
4490 else
4491 as_bad ("Unknown psr option `%s'", option);
4492 *input_line_pointer = ch;
4493
4494 SKIP_WHITESPACE ();
4495 if (*input_line_pointer != ',')
4496 break;
4497
4498 ++input_line_pointer;
4499 SKIP_WHITESPACE ();
4500 }
4501 demand_empty_rest_of_line ();
4502 }
4503
4504 static void
4505 dot_ln (dummy)
4506 int dummy ATTRIBUTE_UNUSED;
4507 {
4508 new_logical_line (0, get_absolute_expression ());
4509 demand_empty_rest_of_line ();
4510 }
4511
4512 static char *
4513 parse_section_name ()
4514 {
4515 char *name;
4516 int len;
4517
4518 SKIP_WHITESPACE ();
4519 if (*input_line_pointer != '"')
4520 {
4521 as_bad ("Missing section name");
4522 ignore_rest_of_line ();
4523 return 0;
4524 }
4525 name = demand_copy_C_string (&len);
4526 if (!name)
4527 {
4528 ignore_rest_of_line ();
4529 return 0;
4530 }
4531 SKIP_WHITESPACE ();
4532 if (*input_line_pointer != ',')
4533 {
4534 as_bad ("Comma expected after section name");
4535 ignore_rest_of_line ();
4536 return 0;
4537 }
4538 ++input_line_pointer; /* skip comma */
4539 return name;
4540 }
4541
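/* .xdata<n> "section", value [, value ...]
   Illustrative usage: `.xdata4 ".my_sec", 0x12' emits a 4-byte datum into
   section ".my_sec" and then returns to the current section.  */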
4542 static void
4543 dot_xdata (size)
4544 int size;
4545 {
4546 char *name = parse_section_name ();
4547 if (!name)
4548 return;
4549
4550 md.keep_pending_output = 1;
4551 set_section (name);
4552 cons (size);
4553 obj_elf_previous (0);
4554 md.keep_pending_output = 0;
4555 }
4556
4557 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4558
4559 static void
4560 stmt_float_cons (kind)
4561 int kind;
4562 {
4563 size_t alignment;
4564
4565 switch (kind)
4566 {
4567 case 'd':
4568 alignment = 8;
4569 break;
4570
4571 case 'x':
4572 case 'X':
4573 alignment = 16;
4574 break;
4575
4576 case 'f':
4577 default:
4578 alignment = 4;
4579 break;
4580 }
4581 ia64_do_align (alignment);
4582 float_cons (kind);
4583 }
4584
4585 static void
4586 stmt_cons_ua (size)
4587 int size;
4588 {
4589 int saved_auto_align = md.auto_align;
4590
4591 md.auto_align = 0;
4592 cons (size);
4593 md.auto_align = saved_auto_align;
4594 }
4595
4596 static void
4597 dot_xfloat_cons (kind)
4598 int kind;
4599 {
4600 char *name = parse_section_name ();
4601 if (!name)
4602 return;
4603
4604 md.keep_pending_output = 1;
4605 set_section (name);
4606 stmt_float_cons (kind);
4607 obj_elf_previous (0);
4608 md.keep_pending_output = 0;
4609 }
4610
4611 static void
4612 dot_xstringer (zero)
4613 int zero;
4614 {
4615 char *name = parse_section_name ();
4616 if (!name)
4617 return;
4618
4619 md.keep_pending_output = 1;
4620 set_section (name);
4621 stringer (zero);
4622 obj_elf_previous (0);
4623 md.keep_pending_output = 0;
4624 }
4625
4626 static void
4627 dot_xdata_ua (size)
4628 int size;
4629 {
4630 int saved_auto_align = md.auto_align;
4631 char *name = parse_section_name ();
4632 if (!name)
4633 return;
4634
4635 md.keep_pending_output = 1;
4636 set_section (name);
4637 md.auto_align = 0;
4638 cons (size);
4639 md.auto_align = saved_auto_align;
4640 obj_elf_previous (0);
4641 md.keep_pending_output = 0;
4642 }
4643
4644 static void
4645 dot_xfloat_cons_ua (kind)
4646 int kind;
4647 {
4648 int saved_auto_align = md.auto_align;
4649 char *name = parse_section_name ();
4650 if (!name)
4651 return;
4652
4653 md.keep_pending_output = 1;
4654 set_section (name);
4655 md.auto_align = 0;
4656 stmt_float_cons (kind);
4657 md.auto_align = saved_auto_align;
4658 obj_elf_previous (0);
4659 md.keep_pending_output = 0;
4660 }
4661
4662 /* .reg.val <regname>,value */
4663
4664 static void
4665 dot_reg_val (dummy)
4666 int dummy ATTRIBUTE_UNUSED;
4667 {
4668 expressionS reg;
4669
4670 expression (&reg);
4671 if (reg.X_op != O_register)
4672 {
4673 as_bad (_("Register name expected"));
4674 ignore_rest_of_line ();
4675 }
4676 else if (*input_line_pointer++ != ',')
4677 {
4678 as_bad (_("Comma expected"));
4679 ignore_rest_of_line ();
4680 }
4681 else
4682 {
4683 valueT value = get_absolute_expression ();
4684 int regno = reg.X_add_number;
4685 if (regno < REG_GR || regno >= REG_GR + 128)
4686 as_warn (_("Register value annotation ignored"));
4687 else
4688 {
4689 gr_values[regno - REG_GR].known = 1;
4690 gr_values[regno - REG_GR].value = value;
4691 gr_values[regno - REG_GR].path = md.path;
4692 }
4693 }
4694 demand_empty_rest_of_line ();
4695 }
4696
4697 /*
4698 .serialize.data
4699 .serialize.instruction
4700 */
4701 static void
4702 dot_serialize (type)
4703 int type;
4704 {
4705 insn_group_break (0, 0, 0);
4706 if (type)
4707 instruction_serialization ();
4708 else
4709 data_serialization ();
4710 insn_group_break (0, 0, 0);
4711 demand_empty_rest_of_line ();
4712 }
4713
4714 /* select dv checking mode
4715 .auto
4716 .explicit
4717 .default
4718
4719 A stop is inserted when changing modes
4720 */
4721
4722 static void
4723 dot_dv_mode (type)
4724 int type;
4725 {
4726 if (md.manual_bundling)
4727 as_warn (_("Directive invalid within a bundle"));
4728
4729 if (type == 'E' || type == 'A')
4730 md.mode_explicitly_set = 0;
4731 else
4732 md.mode_explicitly_set = 1;
4733
4734 md.detect_dv = 1;
4735 switch (type)
4736 {
4737 case 'A':
4738 case 'a':
4739 if (md.explicit_mode)
4740 insn_group_break (1, 0, 0);
4741 md.explicit_mode = 0;
4742 break;
4743 case 'E':
4744 case 'e':
4745 if (!md.explicit_mode)
4746 insn_group_break (1, 0, 0);
4747 md.explicit_mode = 1;
4748 break;
4749 default:
4750 case 'd':
4751 if (md.explicit_mode != md.default_explicit_mode)
4752 insn_group_break (1, 0, 0);
4753 md.explicit_mode = md.default_explicit_mode;
4754 md.mode_explicitly_set = 0;
4755 break;
4756 }
4757 }
4758
4759 static void
4760 print_prmask (mask)
4761 valueT mask;
4762 {
4763 int regno;
4764 char *comma = "";
4765 for (regno = 0; regno < 64; regno++)
4766 {
4767 if (mask & ((valueT) 1 << regno))
4768 {
4769 fprintf (stderr, "%s p%d", comma, regno);
4770 comma = ",";
4771 }
4772 }
4773 }
4774
4775 /*
4776 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4777 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4778 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4779 .pred.safe_across_calls p1 [, p2 [,...]]
4780 */
4781
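/* Illustrative usage: `.pred.rel "mutex", p6, p7' (equivalently
   `.pred.rel.mutex p6, p7') tells the DV checker that p6 and p7 are never
   both true, so instructions predicated on them cannot conflict.  */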
4782 static void
4783 dot_pred_rel (type)
4784 int type;
4785 {
4786 valueT mask = 0;
4787 int count = 0;
4788 int p1 = -1, p2 = -1;
4789
4790 if (type == 0)
4791 {
4792 if (*input_line_pointer != '"')
4793 {
4794 as_bad (_("Missing predicate relation type"));
4795 ignore_rest_of_line ();
4796 return;
4797 }
4798 else
4799 {
4800 int len;
4801 char *form = demand_copy_C_string (&len);
4802 if (strcmp (form, "mutex") == 0)
4803 type = 'm';
4804 else if (strcmp (form, "clear") == 0)
4805 type = 'c';
4806 else if (strcmp (form, "imply") == 0)
4807 type = 'i';
4808 else
4809 {
4810 as_bad (_("Unrecognized predicate relation type"));
4811 ignore_rest_of_line ();
4812 return;
4813 }
4814 }
4815 if (*input_line_pointer == ',')
4816 ++input_line_pointer;
4817 SKIP_WHITESPACE ();
4818 }
4819
4820 SKIP_WHITESPACE ();
4821 while (1)
4822 {
4823 valueT bit = 1;
4824 int regno;
4825
4826 if (TOUPPER (*input_line_pointer) != 'P'
4827 || (regno = atoi (++input_line_pointer)) < 0
4828 || regno > 63)
4829 {
4830 as_bad (_("Predicate register expected"));
4831 ignore_rest_of_line ();
4832 return;
4833 }
4834 while (ISDIGIT (*input_line_pointer))
4835 ++input_line_pointer;
4836 if (p1 == -1)
4837 p1 = regno;
4838 else if (p2 == -1)
4839 p2 = regno;
4840 bit <<= regno;
4841 if (mask & bit)
4842 as_warn (_("Duplicate predicate register ignored"));
4843 mask |= bit;
4844 count++;
4845 /* See if it's a range. */
4846 if (*input_line_pointer == '-')
4847 {
4848 valueT stop = 1;
4849 ++input_line_pointer;
4850
4851 if (TOUPPER (*input_line_pointer) != 'P'
4852 || (regno = atoi (++input_line_pointer)) < 0
4853 || regno > 63)
4854 {
4855 as_bad (_("Predicate register expected"));
4856 ignore_rest_of_line ();
4857 return;
4858 }
4859 while (ISDIGIT (*input_line_pointer))
4860 ++input_line_pointer;
4861 stop <<= regno;
4862 if (bit >= stop)
4863 {
4864 as_bad (_("Bad register range"));
4865 ignore_rest_of_line ();
4866 return;
4867 }
4868 while (bit < stop)
4869 {
4870 bit <<= 1;
4871 mask |= bit;
4872 count++;
4873 }
4874 SKIP_WHITESPACE ();
4875 }
4876 if (*input_line_pointer != ',')
4877 break;
4878 ++input_line_pointer;
4879 SKIP_WHITESPACE ();
4880 }
4881
4882 switch (type)
4883 {
4884 case 'c':
4885 if (count == 0)
4886 mask = ~(valueT) 0;
4887 clear_qp_mutex (mask);
4888 clear_qp_implies (mask, (valueT) 0);
4889 break;
4890 case 'i':
4891 if (count != 2 || p1 == -1 || p2 == -1)
4892 as_bad (_("Predicate source and target required"));
4893 else if (p1 == 0 || p2 == 0)
4894 as_bad (_("Use of p0 is not valid in this context"));
4895 else
4896 add_qp_imply (p1, p2);
4897 break;
4898 case 'm':
4899 if (count < 2)
4900 {
4901 as_bad (_("At least two PR arguments expected"));
4902 break;
4903 }
4904 else if (mask & 1)
4905 {
4906 as_bad (_("Use of p0 is not valid in this context"));
4907 break;
4908 }
4909 add_qp_mutex (mask);
4910 break;
4911 case 's':
4912 /* note that we don't override any existing relations */
4913 if (count == 0)
4914 {
4915 as_bad (_("At least one PR argument expected"));
4916 break;
4917 }
4918 if (md.debug_dv)
4919 {
4920 fprintf (stderr, "Safe across calls: ");
4921 print_prmask (mask);
4922 fprintf (stderr, "\n");
4923 }
4924 qp_safe_across_calls = mask;
4925 break;
4926 }
4927 demand_empty_rest_of_line ();
4928 }
4929
4930 /* .entry label [, label [, ...]]
4931 Hint to DV code that the given labels are to be considered entry points.
4932 Otherwise, only global labels are considered entry points. */
4933
4934 static void
4935 dot_entry (dummy)
4936 int dummy ATTRIBUTE_UNUSED;
4937 {
4938 const char *err;
4939 char *name;
4940 int c;
4941 symbolS *symbolP;
4942
4943 do
4944 {
4945 name = input_line_pointer;
4946 c = get_symbol_end ();
4947 symbolP = symbol_find_or_make (name);
4948
4949 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4950 if (err)
4951 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4952 name, err);
4953
4954 *input_line_pointer = c;
4955 SKIP_WHITESPACE ();
4956 c = *input_line_pointer;
4957 if (c == ',')
4958 {
4959 input_line_pointer++;
4960 SKIP_WHITESPACE ();
4961 if (*input_line_pointer == '\n')
4962 c = '\n';
4963 }
4964 }
4965 while (c == ',');
4966
4967 demand_empty_rest_of_line ();
4968 }
4969
4970 /* .mem.offset offset, base
4971 "base" is used to distinguish between offsets from a different base. */
4972
4973 static void
4974 dot_mem_offset (dummy)
4975 int dummy ATTRIBUTE_UNUSED;
4976 {
4977 md.mem_offset.hint = 1;
4978 md.mem_offset.offset = get_absolute_expression ();
4979 if (*input_line_pointer != ',')
4980 {
4981 as_bad (_("Comma expected"));
4982 ignore_rest_of_line ();
4983 return;
4984 }
4985 ++input_line_pointer;
4986 md.mem_offset.base = get_absolute_expression ();
4987 demand_empty_rest_of_line ();
4988 }
4989
4990 /* ia64-specific pseudo-ops: */
4991 const pseudo_typeS md_pseudo_table[] =
4992 {
4993 { "radix", dot_radix, 0 },
4994 { "lcomm", s_lcomm_bytes, 1 },
4995 { "loc", dot_loc, 0 },
4996 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4997 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4998 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4999 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5000 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5001 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5002 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5003 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5004 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5005 { "proc", dot_proc, 0 },
5006 { "body", dot_body, 0 },
5007 { "prologue", dot_prologue, 0 },
5008 { "endp", dot_endp, 0 },
5009
5010 { "fframe", dot_fframe, 0 },
5011 { "vframe", dot_vframe, 0 },
5012 { "vframesp", dot_vframesp, 0 },
5013 { "vframepsp", dot_vframepsp, 0 },
5014 { "save", dot_save, 0 },
5015 { "restore", dot_restore, 0 },
5016 { "restorereg", dot_restorereg, 0 },
5017 { "restorereg.p", dot_restorereg_p, 0 },
5018 { "handlerdata", dot_handlerdata, 0 },
5019 { "unwentry", dot_unwentry, 0 },
5020 { "altrp", dot_altrp, 0 },
5021 { "savesp", dot_savemem, 0 },
5022 { "savepsp", dot_savemem, 1 },
5023 { "save.g", dot_saveg, 0 },
5024 { "save.f", dot_savef, 0 },
5025 { "save.b", dot_saveb, 0 },
5026 { "save.gf", dot_savegf, 0 },
5027 { "spill", dot_spill, 0 },
5028 { "spillreg", dot_spillreg, 0 },
5029 { "spillsp", dot_spillmem, 0 },
5030 { "spillpsp", dot_spillmem, 1 },
5031 { "spillreg.p", dot_spillreg_p, 0 },
5032 { "spillsp.p", dot_spillmem_p, 0 },
5033 { "spillpsp.p", dot_spillmem_p, 1 },
5034 { "label_state", dot_label_state, 0 },
5035 { "copy_state", dot_copy_state, 0 },
5036 { "unwabi", dot_unwabi, 0 },
5037 { "personality", dot_personality, 0 },
5038 #if 0
5039 { "estate", dot_estate, 0 },
5040 #endif
5041 { "mii", dot_template, 0x0 },
5042 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5043 { "mlx", dot_template, 0x2 },
5044 { "mmi", dot_template, 0x4 },
5045 { "mfi", dot_template, 0x6 },
5046 { "mmf", dot_template, 0x7 },
5047 { "mib", dot_template, 0x8 },
5048 { "mbb", dot_template, 0x9 },
5049 { "bbb", dot_template, 0xb },
5050 { "mmb", dot_template, 0xc },
5051 { "mfb", dot_template, 0xe },
5052 #if 0
5053 { "lb", dot_scope, 0 },
5054 { "le", dot_scope, 1 },
5055 #endif
5056 { "align", dot_align, 0 },
5057 { "regstk", dot_regstk, 0 },
5058 { "rotr", dot_rot, DYNREG_GR },
5059 { "rotf", dot_rot, DYNREG_FR },
5060 { "rotp", dot_rot, DYNREG_PR },
5061 { "lsb", dot_byteorder, 0 },
5062 { "msb", dot_byteorder, 1 },
5063 { "psr", dot_psr, 0 },
5064 { "alias", dot_alias, 0 },
5065 { "secalias", dot_alias, 1 },
5066 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5067
5068 { "xdata1", dot_xdata, 1 },
5069 { "xdata2", dot_xdata, 2 },
5070 { "xdata4", dot_xdata, 4 },
5071 { "xdata8", dot_xdata, 8 },
5072 { "xreal4", dot_xfloat_cons, 'f' },
5073 { "xreal8", dot_xfloat_cons, 'd' },
5074 { "xreal10", dot_xfloat_cons, 'x' },
5075 { "xreal16", dot_xfloat_cons, 'X' },
5076 { "xstring", dot_xstringer, 0 },
5077 { "xstringz", dot_xstringer, 1 },
5078
5079 /* unaligned versions: */
5080 { "xdata2.ua", dot_xdata_ua, 2 },
5081 { "xdata4.ua", dot_xdata_ua, 4 },
5082 { "xdata8.ua", dot_xdata_ua, 8 },
5083 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5084 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5085 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5086 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5087
5088 /* annotations/DV checking support */
5089 { "entry", dot_entry, 0 },
5090 { "mem.offset", dot_mem_offset, 0 },
5091 { "pred.rel", dot_pred_rel, 0 },
5092 { "pred.rel.clear", dot_pred_rel, 'c' },
5093 { "pred.rel.imply", dot_pred_rel, 'i' },
5094 { "pred.rel.mutex", dot_pred_rel, 'm' },
5095 { "pred.safe_across_calls", dot_pred_rel, 's' },
5096 { "reg.val", dot_reg_val, 0 },
5097 { "serialize.data", dot_serialize, 0 },
5098 { "serialize.instruction", dot_serialize, 1 },
5099 { "auto", dot_dv_mode, 'a' },
5100 { "explicit", dot_dv_mode, 'e' },
5101 { "default", dot_dv_mode, 'd' },
5102
5103 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5104 IA-64 aligns data allocation pseudo-ops by default, so we have to
5105 tell it that these ones are supposed to be unaligned. Long term,
5106 should rewrite so that only IA-64 specific data allocation pseudo-ops
5107 are aligned by default. */
5108 {"2byte", stmt_cons_ua, 2},
5109 {"4byte", stmt_cons_ua, 4},
5110 {"8byte", stmt_cons_ua, 8},
5111
5112 { NULL, 0, 0 }
5113 };
5114
5115 static const struct pseudo_opcode
5116 {
5117 const char *name;
5118 void (*handler) (int);
5119 int arg;
5120 }
5121 pseudo_opcode[] =
5122 {
5123 /* these are more like pseudo-ops, but don't start with a dot */
5124 { "data1", cons, 1 },
5125 { "data2", cons, 2 },
5126 { "data4", cons, 4 },
5127 { "data8", cons, 8 },
5128 { "data16", cons, 16 },
5129 { "real4", stmt_float_cons, 'f' },
5130 { "real8", stmt_float_cons, 'd' },
5131 { "real10", stmt_float_cons, 'x' },
5132 { "real16", stmt_float_cons, 'X' },
5133 { "string", stringer, 0 },
5134 { "stringz", stringer, 1 },
5135
5136 /* unaligned versions: */
5137 { "data2.ua", stmt_cons_ua, 2 },
5138 { "data4.ua", stmt_cons_ua, 4 },
5139 { "data8.ua", stmt_cons_ua, 8 },
5140 { "data16.ua", stmt_cons_ua, 16 },
5141 { "real4.ua", float_cons, 'f' },
5142 { "real8.ua", float_cons, 'd' },
5143 { "real10.ua", float_cons, 'x' },
5144 { "real16.ua", float_cons, 'X' },
5145 };
5146
5147 /* Declare a register by creating a symbol for it and entering it in
5148 the symbol table. */
5149
5150 static symbolS *
5151 declare_register (name, regnum)
5152 const char *name;
5153 int regnum;
5154 {
5155 const char *err;
5156 symbolS *sym;
5157
5158 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
5159
5160 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
5161 if (err)
5162 as_fatal ("Inserting \"%s\" into register table failed: %s",
5163 name, err);
5164
5165 return sym;
5166 }
5167
5168 static void
5169 declare_register_set (prefix, num_regs, base_regnum)
5170 const char *prefix;
5171 int num_regs;
5172 int base_regnum;
5173 {
5174 char name[8];
5175 int i;
5176
5177 for (i = 0; i < num_regs; ++i)
5178 {
5179 sprintf (name, "%s%u", prefix, i);
5180 declare_register (name, base_regnum + i);
5181 }
5182 }
5183
5184 static unsigned int
5185 operand_width (opnd)
5186 enum ia64_opnd opnd;
5187 {
5188 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5189 unsigned int bits = 0;
5190 int i;
5191
5192 bits = 0;
5193 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5194 bits += odesc->field[i].bits;
5195
5196 return bits;
5197 }
5198
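/* Check whether expression E is compatible with operand INDEX of the
   instruction variant IDESC.  Returns OPERAND_MATCH on success,
   OPERAND_OUT_OF_RANGE if the operand is of the right kind but its value
   is out of range (useful for error messages), and OPERAND_MISMATCH
   otherwise.  */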
5199 static enum operand_match_result
5200 operand_match (idesc, index, e)
5201 const struct ia64_opcode *idesc;
5202 int index;
5203 expressionS *e;
5204 {
5205 enum ia64_opnd opnd = idesc->operands[index];
5206 int bits, relocatable = 0;
5207 struct insn_fix *fix;
5208 bfd_signed_vma val;
5209
5210 switch (opnd)
5211 {
5212 /* constants: */
5213
5214 case IA64_OPND_AR_CCV:
5215 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5216 return OPERAND_MATCH;
5217 break;
5218
5219 case IA64_OPND_AR_CSD:
5220 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5221 return OPERAND_MATCH;
5222 break;
5223
5224 case IA64_OPND_AR_PFS:
5225 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5226 return OPERAND_MATCH;
5227 break;
5228
5229 case IA64_OPND_GR0:
5230 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5231 return OPERAND_MATCH;
5232 break;
5233
5234 case IA64_OPND_IP:
5235 if (e->X_op == O_register && e->X_add_number == REG_IP)
5236 return OPERAND_MATCH;
5237 break;
5238
5239 case IA64_OPND_PR:
5240 if (e->X_op == O_register && e->X_add_number == REG_PR)
5241 return OPERAND_MATCH;
5242 break;
5243
5244 case IA64_OPND_PR_ROT:
5245 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5246 return OPERAND_MATCH;
5247 break;
5248
5249 case IA64_OPND_PSR:
5250 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5251 return OPERAND_MATCH;
5252 break;
5253
5254 case IA64_OPND_PSR_L:
5255 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5256 return OPERAND_MATCH;
5257 break;
5258
5259 case IA64_OPND_PSR_UM:
5260 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5261 return OPERAND_MATCH;
5262 break;
5263
5264 case IA64_OPND_C1:
5265 if (e->X_op == O_constant)
5266 {
5267 if (e->X_add_number == 1)
5268 return OPERAND_MATCH;
5269 else
5270 return OPERAND_OUT_OF_RANGE;
5271 }
5272 break;
5273
5274 case IA64_OPND_C8:
5275 if (e->X_op == O_constant)
5276 {
5277 if (e->X_add_number == 8)
5278 return OPERAND_MATCH;
5279 else
5280 return OPERAND_OUT_OF_RANGE;
5281 }
5282 break;
5283
5284 case IA64_OPND_C16:
5285 if (e->X_op == O_constant)
5286 {
5287 if (e->X_add_number == 16)
5288 return OPERAND_MATCH;
5289 else
5290 return OPERAND_OUT_OF_RANGE;
5291 }
5292 break;
5293
5294 /* register operands: */
5295
5296 case IA64_OPND_AR3:
5297 if (e->X_op == O_register && e->X_add_number >= REG_AR
5298 && e->X_add_number < REG_AR + 128)
5299 return OPERAND_MATCH;
5300 break;
5301
5302 case IA64_OPND_B1:
5303 case IA64_OPND_B2:
5304 if (e->X_op == O_register && e->X_add_number >= REG_BR
5305 && e->X_add_number < REG_BR + 8)
5306 return OPERAND_MATCH;
5307 break;
5308
5309 case IA64_OPND_CR3:
5310 if (e->X_op == O_register && e->X_add_number >= REG_CR
5311 && e->X_add_number < REG_CR + 128)
5312 return OPERAND_MATCH;
5313 break;
5314
5315 case IA64_OPND_F1:
5316 case IA64_OPND_F2:
5317 case IA64_OPND_F3:
5318 case IA64_OPND_F4:
5319 if (e->X_op == O_register && e->X_add_number >= REG_FR
5320 && e->X_add_number < REG_FR + 128)
5321 return OPERAND_MATCH;
5322 break;
5323
5324 case IA64_OPND_P1:
5325 case IA64_OPND_P2:
5326 if (e->X_op == O_register && e->X_add_number >= REG_P
5327 && e->X_add_number < REG_P + 64)
5328 return OPERAND_MATCH;
5329 break;
5330
5331 case IA64_OPND_R1:
5332 case IA64_OPND_R2:
5333 case IA64_OPND_R3:
5334 if (e->X_op == O_register && e->X_add_number >= REG_GR
5335 && e->X_add_number < REG_GR + 128)
5336 return OPERAND_MATCH;
5337 break;
5338
5339 case IA64_OPND_R3_2:
5340 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5341 {
5342 if (e->X_add_number < REG_GR + 4)
5343 return OPERAND_MATCH;
5344 else if (e->X_add_number < REG_GR + 128)
5345 return OPERAND_OUT_OF_RANGE;
5346 }
5347 break;
5348
5349 /* indirect operands: */
5350 case IA64_OPND_CPUID_R3:
5351 case IA64_OPND_DBR_R3:
5352 case IA64_OPND_DTR_R3:
5353 case IA64_OPND_ITR_R3:
5354 case IA64_OPND_IBR_R3:
5355 case IA64_OPND_MSR_R3:
5356 case IA64_OPND_PKR_R3:
5357 case IA64_OPND_PMC_R3:
5358 case IA64_OPND_PMD_R3:
5359 case IA64_OPND_RR_R3:
5360 if (e->X_op == O_index && e->X_op_symbol
5361 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5362 == opnd - IA64_OPND_CPUID_R3))
5363 return OPERAND_MATCH;
5364 break;
5365
5366 case IA64_OPND_MR3:
5367 if (e->X_op == O_index && !e->X_op_symbol)
5368 return OPERAND_MATCH;
5369 break;
5370
5371 /* immediate operands: */
5372 case IA64_OPND_CNT2a:
5373 case IA64_OPND_LEN4:
5374 case IA64_OPND_LEN6:
5375 bits = operand_width (idesc->operands[index]);
5376 if (e->X_op == O_constant)
5377 {
5378 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5379 return OPERAND_MATCH;
5380 else
5381 return OPERAND_OUT_OF_RANGE;
5382 }
5383 break;
5384
5385 case IA64_OPND_CNT2b:
5386 if (e->X_op == O_constant)
5387 {
5388 if ((bfd_vma) (e->X_add_number - 1) < 3)
5389 return OPERAND_MATCH;
5390 else
5391 return OPERAND_OUT_OF_RANGE;
5392 }
5393 break;
5394
5395 case IA64_OPND_CNT2c:
5396 val = e->X_add_number;
5397 if (e->X_op == O_constant)
5398 {
5399 if ((val == 0 || val == 7 || val == 15 || val == 16))
5400 return OPERAND_MATCH;
5401 else
5402 return OPERAND_OUT_OF_RANGE;
5403 }
5404 break;
5405
5406 case IA64_OPND_SOR:
5407 /* SOR must be an integer multiple of 8 */
5408 if (e->X_op == O_constant && e->X_add_number & 0x7)
5409 return OPERAND_OUT_OF_RANGE;
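/* Fall through -- SOR shares the 0..96 range check below.  */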
5410 case IA64_OPND_SOF:
5411 case IA64_OPND_SOL:
5412 if (e->X_op == O_constant)
5413 {
5414 if ((bfd_vma) e->X_add_number <= 96)
5415 return OPERAND_MATCH;
5416 else
5417 return OPERAND_OUT_OF_RANGE;
5418 }
5419 break;
5420
5421 case IA64_OPND_IMMU62:
5422 if (e->X_op == O_constant)
5423 {
5424 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5425 return OPERAND_MATCH;
5426 else
5427 return OPERAND_OUT_OF_RANGE;
5428 }
5429 else
5430 {
5431 /* FIXME -- need 62-bit relocation type */
5432 as_bad (_("62-bit relocation not yet implemented"));
5433 }
5434 break;
5435
5436 case IA64_OPND_IMMU64:
5437 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5438 || e->X_op == O_subtract)
5439 {
5440 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5441 fix->code = BFD_RELOC_IA64_IMM64;
5442 if (e->X_op != O_subtract)
5443 {
5444 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5445 if (e->X_op == O_pseudo_fixup)
5446 e->X_op = O_symbol;
5447 }
5448
5449 fix->opnd = idesc->operands[index];
5450 fix->expr = *e;
5451 fix->is_pcrel = 0;
5452 ++CURR_SLOT.num_fixups;
5453 return OPERAND_MATCH;
5454 }
5455 else if (e->X_op == O_constant)
5456 return OPERAND_MATCH;
5457 break;
5458
5459 case IA64_OPND_CCNT5:
5460 case IA64_OPND_CNT5:
5461 case IA64_OPND_CNT6:
5462 case IA64_OPND_CPOS6a:
5463 case IA64_OPND_CPOS6b:
5464 case IA64_OPND_CPOS6c:
5465 case IA64_OPND_IMMU2:
5466 case IA64_OPND_IMMU7a:
5467 case IA64_OPND_IMMU7b:
5468 case IA64_OPND_IMMU21:
5469 case IA64_OPND_IMMU24:
5470 case IA64_OPND_MBTYPE4:
5471 case IA64_OPND_MHTYPE8:
5472 case IA64_OPND_POS6:
5473 bits = operand_width (idesc->operands[index]);
5474 if (e->X_op == O_constant)
5475 {
5476 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5477 return OPERAND_MATCH;
5478 else
5479 return OPERAND_OUT_OF_RANGE;
5480 }
5481 break;
5482
5483 case IA64_OPND_IMMU9:
5484 bits = operand_width (idesc->operands[index]);
5485 if (e->X_op == O_constant)
5486 {
5487 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5488 {
5489 int lobits = e->X_add_number & 0x3;
5490 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5491 e->X_add_number |= (bfd_vma) 0x3;
5492 return OPERAND_MATCH;
5493 }
5494 else
5495 return OPERAND_OUT_OF_RANGE;
5496 }
5497 break;
5498
5499 case IA64_OPND_IMM44:
5500 /* The least significant 16 bits must be zero.  */
5501 if ((e->X_add_number & 0xffff) != 0)
5502 /* XXX technically, this is wrong: we should not be issuing warning
5503 messages until we're sure this instruction pattern is going to
5504 be used! */
5505 as_warn (_("lower 16 bits of mask ignored"));
5506
5507 if (e->X_op == O_constant)
5508 {
5509 if (((e->X_add_number >= 0
5510 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5511 || (e->X_add_number < 0
5512 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5513 {
5514 /* sign-extend */
5515 if (e->X_add_number >= 0
5516 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5517 {
5518 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5519 }
5520 return OPERAND_MATCH;
5521 }
5522 else
5523 return OPERAND_OUT_OF_RANGE;
5524 }
5525 break;
5526
5527 case IA64_OPND_IMM17:
5528 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5529 if (e->X_op == O_constant)
5530 {
5531 if (((e->X_add_number >= 0
5532 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5533 || (e->X_add_number < 0
5534 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5535 {
5536 /* sign-extend */
5537 if (e->X_add_number >= 0
5538 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5539 {
5540 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5541 }
5542 return OPERAND_MATCH;
5543 }
5544 else
5545 return OPERAND_OUT_OF_RANGE;
5546 }
5547 break;
5548
5549 case IA64_OPND_IMM14:
5550 case IA64_OPND_IMM22:
5551 relocatable = 1;
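/* Fall through.  */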
5552 case IA64_OPND_IMM1:
5553 case IA64_OPND_IMM8:
5554 case IA64_OPND_IMM8U4:
5555 case IA64_OPND_IMM8M1:
5556 case IA64_OPND_IMM8M1U4:
5557 case IA64_OPND_IMM8M1U8:
5558 case IA64_OPND_IMM9a:
5559 case IA64_OPND_IMM9b:
5560 bits = operand_width (idesc->operands[index]);
5561 if (relocatable && (e->X_op == O_symbol
5562 || e->X_op == O_subtract
5563 || e->X_op == O_pseudo_fixup))
5564 {
5565 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5566
5567 if (idesc->operands[index] == IA64_OPND_IMM14)
5568 fix->code = BFD_RELOC_IA64_IMM14;
5569 else
5570 fix->code = BFD_RELOC_IA64_IMM22;
5571
5572 if (e->X_op != O_subtract)
5573 {
5574 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5575 if (e->X_op == O_pseudo_fixup)
5576 e->X_op = O_symbol;
5577 }
5578
5579 fix->opnd = idesc->operands[index];
5580 fix->expr = *e;
5581 fix->is_pcrel = 0;
5582 ++CURR_SLOT.num_fixups;
5583 return OPERAND_MATCH;
5584 }
5585 else if (e->X_op != O_constant
5586 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5587 return OPERAND_MISMATCH;
5588
5589 if (opnd == IA64_OPND_IMM8M1U4)
5590 {
5591 /* Zero is not valid for unsigned compares that take an adjusted
5592 constant immediate range. */
5593 if (e->X_add_number == 0)
5594 return OPERAND_OUT_OF_RANGE;
5595
5596 /* Sign-extend 32-bit unsigned numbers, so that the following range
5597 checks will work. */
5598 val = e->X_add_number;
5599 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5600 && ((val & ((bfd_vma) 1 << 31)) != 0))
5601 val = ((val << 32) >> 32);
5602
5603 /* Check for 0x100000000. This is valid because
5604 0x100000000-1 is the same as ((uint32_t) -1). */
5605 if (val == ((bfd_signed_vma) 1 << 32))
5606 return OPERAND_MATCH;
5607
5608 val = val - 1;
5609 }
5610 else if (opnd == IA64_OPND_IMM8M1U8)
5611 {
5612 /* Zero is not valid for unsigned compares that take an adjusted
5613 constant immediate range. */
5614 if (e->X_add_number == 0)
5615 return OPERAND_OUT_OF_RANGE;
5616
5617 /* Check for 0x10000000000000000. */
5618 if (e->X_op == O_big)
5619 {
5620 if (generic_bignum[0] == 0
5621 && generic_bignum[1] == 0
5622 && generic_bignum[2] == 0
5623 && generic_bignum[3] == 0
5624 && generic_bignum[4] == 1)
5625 return OPERAND_MATCH;
5626 else
5627 return OPERAND_OUT_OF_RANGE;
5628 }
5629 else
5630 val = e->X_add_number - 1;
5631 }
5632 else if (opnd == IA64_OPND_IMM8M1)
5633 val = e->X_add_number - 1;
5634 else if (opnd == IA64_OPND_IMM8U4)
5635 {
5636 /* Sign-extend 32-bit unsigned numbers, so that the following range
5637 checks will work. */
5638 val = e->X_add_number;
5639 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5640 && ((val & ((bfd_vma) 1 << 31)) != 0))
5641 val = ((val << 32) >> 32);
5642 }
5643 else
5644 val = e->X_add_number;
5645
5646 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5647 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5648 return OPERAND_MATCH;
5649 else
5650 return OPERAND_OUT_OF_RANGE;
5651
5652 case IA64_OPND_INC3:
5653 /* +/- 1, 4, 8, 16 */
5654 val = e->X_add_number;
5655 if (val < 0)
5656 val = -val;
5657 if (e->X_op == O_constant)
5658 {
5659 if ((val == 1 || val == 4 || val == 8 || val == 16))
5660 return OPERAND_MATCH;
5661 else
5662 return OPERAND_OUT_OF_RANGE;
5663 }
5664 break;
5665
5666 case IA64_OPND_TGT25:
5667 case IA64_OPND_TGT25b:
5668 case IA64_OPND_TGT25c:
5669 case IA64_OPND_TGT64:
5670 if (e->X_op == O_symbol)
5671 {
5672 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5673 if (opnd == IA64_OPND_TGT25)
5674 fix->code = BFD_RELOC_IA64_PCREL21F;
5675 else if (opnd == IA64_OPND_TGT25b)
5676 fix->code = BFD_RELOC_IA64_PCREL21M;
5677 else if (opnd == IA64_OPND_TGT25c)
5678 fix->code = BFD_RELOC_IA64_PCREL21B;
5679 else if (opnd == IA64_OPND_TGT64)
5680 fix->code = BFD_RELOC_IA64_PCREL60B;
5681 else
5682 abort ();
5683
5684 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5685 fix->opnd = idesc->operands[index];
5686 fix->expr = *e;
5687 fix->is_pcrel = 1;
5688 ++CURR_SLOT.num_fixups;
5689 return OPERAND_MATCH;
5690 }
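/* Otherwise fall through to the constant/symbol handling shared with
   the TAG13 operands below.  */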
5691 case IA64_OPND_TAG13:
5692 case IA64_OPND_TAG13b:
5693 switch (e->X_op)
5694 {
5695 case O_constant:
5696 return OPERAND_MATCH;
5697
5698 case O_symbol:
5699 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5700 /* There are no external relocs for TAG13/TAG13b fields, so we
5701 create a dummy reloc. This will not live past md_apply_fix3. */
5702 fix->code = BFD_RELOC_UNUSED;
5703 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5704 fix->opnd = idesc->operands[index];
5705 fix->expr = *e;
5706 fix->is_pcrel = 1;
5707 ++CURR_SLOT.num_fixups;
5708 return OPERAND_MATCH;
5709
5710 default:
5711 break;
5712 }
5713 break;
5714
5715 case IA64_OPND_LDXMOV:
5716 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5717 fix->code = BFD_RELOC_IA64_LDXMOV;
5718 fix->opnd = idesc->operands[index];
5719 fix->expr = *e;
5720 fix->is_pcrel = 0;
5721 ++CURR_SLOT.num_fixups;
5722 return OPERAND_MATCH;
5723
5724 default:
5725 break;
5726 }
5727 return OPERAND_MISMATCH;
5728 }
5729
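/* Parse a single instruction operand into E and return the character that
   terminated it (typically ',' or '=', or '\0' at end of statement).  A
   closing '}' ends manual bundling and is reported as '\0'.  */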
5730 static int
5731 parse_operand (e)
5732 expressionS *e;
5733 {
5734 int sep = '\0';
5735
5736 memset (e, 0, sizeof (*e));
5737 e->X_op = O_absent;
5738 SKIP_WHITESPACE ();
5739 if (*input_line_pointer != '}')
5740 expression (e);
5741 sep = *input_line_pointer++;
5742
5743 if (sep == '}')
5744 {
5745 if (!md.manual_bundling)
5746 as_warn ("Found '}' when manual bundling is off");
5747 else
5748 CURR_SLOT.manual_bundling_off = 1;
5749 md.manual_bundling = 0;
5750 sep = '\0';
5751 }
5752 return sep;
5753 }
5754
5755 /* Returns the next entry in the opcode table that matches the one in
5756 IDESC, and frees the entry in IDESC. If no matching entry is
5757 found, NULL is returned instead. */
5758
5759 static struct ia64_opcode *
5760 get_next_opcode (struct ia64_opcode *idesc)
5761 {
5762 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5763 ia64_free_opcode (idesc);
5764 return next;
5765 }
5766
5767 /* Parse the operands for the opcode and find the opcode variant that
5768 matches the specified operands, or NULL if no match is possible. */
5769
5770 static struct ia64_opcode *
5771 parse_operands (idesc)
5772 struct ia64_opcode *idesc;
5773 {
5774 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5775 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5776 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5777 enum operand_match_result result;
5778 char mnemonic[129];
5779 char *first_arg = 0, *end, *saved_input_pointer;
5780 unsigned int sof;
5781
5782 assert (strlen (idesc->name) <= 128);
5783
5784 strcpy (mnemonic, idesc->name);
5785 if (idesc->operands[2] == IA64_OPND_SOF)
5786 {
5787 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5788 can't parse the first operand until we have parsed the
5789 remaining operands of the "alloc" instruction. */
5790 SKIP_WHITESPACE ();
5791 first_arg = input_line_pointer;
5792 end = strchr (input_line_pointer, '=');
5793 if (!end)
5794 {
5795 as_bad ("Expected separator `='");
5796 return 0;
5797 }
5798 input_line_pointer = end + 1;
5799 ++i;
5800 ++num_outputs;
5801 }
5802
5803 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5804 {
5805 sep = parse_operand (CURR_SLOT.opnd + i);
5806 if (CURR_SLOT.opnd[i].X_op == O_absent)
5807 break;
5808
5809 ++num_operands;
5810
5811 if (sep != '=' && sep != ',')
5812 break;
5813
5814 if (sep == '=')
5815 {
5816 if (num_outputs > 0)
5817 as_bad ("Duplicate equal sign (=) in instruction");
5818 else
5819 num_outputs = i + 1;
5820 }
5821 }
5822 if (sep != '\0')
5823 {
5824 as_bad ("Illegal operand separator `%c'", sep);
5825 return 0;
5826 }
5827
5828 if (idesc->operands[2] == IA64_OPND_SOF)
5829 {
5830 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
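/* Illustrative example: "alloc loc0=ar.pfs,2,3,2,0" is encoded with
   sof = 2+3+2 = 7, sol = 2+3 = 5, and no rotating registers.  */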
5831 know (strcmp (idesc->name, "alloc") == 0);
5832 if (num_operands == 5 /* first_arg not included in this count! */
5833 && CURR_SLOT.opnd[2].X_op == O_constant
5834 && CURR_SLOT.opnd[3].X_op == O_constant
5835 && CURR_SLOT.opnd[4].X_op == O_constant
5836 && CURR_SLOT.opnd[5].X_op == O_constant)
5837 {
5838 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5839 CURR_SLOT.opnd[3].X_add_number,
5840 CURR_SLOT.opnd[4].X_add_number,
5841 CURR_SLOT.opnd[5].X_add_number);
5842
5843 /* now we can parse the first arg: */
5844 saved_input_pointer = input_line_pointer;
5845 input_line_pointer = first_arg;
5846 sep = parse_operand (CURR_SLOT.opnd + 0);
5847 if (sep != '=')
5848 --num_outputs; /* force error */
5849 input_line_pointer = saved_input_pointer;
5850
5851 CURR_SLOT.opnd[2].X_add_number = sof;
5852 CURR_SLOT.opnd[3].X_add_number
5853 = sof - CURR_SLOT.opnd[4].X_add_number;
5854 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5855 }
5856 }
5857
5858 highest_unmatched_operand = 0;
5859 curr_out_of_range_pos = -1;
5860 error_pos = 0;
5861 expected_operand = idesc->operands[0];
5862 for (; idesc; idesc = get_next_opcode (idesc))
5863 {
5864 if (num_outputs != idesc->num_outputs)
5865 continue; /* mismatch in # of outputs */
5866
5867 CURR_SLOT.num_fixups = 0;
5868
5869 /* Try to match all operands. If we see an out-of-range operand,
5870 then continue trying to match the rest of the operands, since if
5871 the rest match, then this idesc will give the best error message. */
5872
5873 out_of_range_pos = -1;
5874 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5875 {
5876 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5877 if (result != OPERAND_MATCH)
5878 {
5879 if (result != OPERAND_OUT_OF_RANGE)
5880 break;
5881 if (out_of_range_pos < 0)
5882 /* remember position of the first out-of-range operand: */
5883 out_of_range_pos = i;
5884 }
5885 }
5886
5887 /* If we did not match all operands, or if at least one operand was
5888 out-of-range, then this idesc does not match. Keep track of which
5889 idesc matched the most operands before failing. If we have two
5890 idescs that failed at the same position, and one had an out-of-range
5891 operand, then prefer the out-of-range operand. Thus if we have
5892 "add r0=0x1000000,r1" we get an error saying the constant is out
5893 of range instead of an error saying that the constant should have been
5894 a register. */
5895
5896 if (i != num_operands || out_of_range_pos >= 0)
5897 {
5898 if (i > highest_unmatched_operand
5899 || (i == highest_unmatched_operand
5900 && out_of_range_pos > curr_out_of_range_pos))
5901 {
5902 highest_unmatched_operand = i;
5903 if (out_of_range_pos >= 0)
5904 {
5905 expected_operand = idesc->operands[out_of_range_pos];
5906 error_pos = out_of_range_pos;
5907 }
5908 else
5909 {
5910 expected_operand = idesc->operands[i];
5911 error_pos = i;
5912 }
5913 curr_out_of_range_pos = out_of_range_pos;
5914 }
5915 continue;
5916 }
5917
5918 if (num_operands < NELEMS (idesc->operands)
5919 && idesc->operands[num_operands])
5920 continue; /* mismatch in number of arguments */
5921
5922 break;
5923 }
5924 if (!idesc)
5925 {
5926 if (expected_operand)
5927 as_bad ("Operand %u of `%s' should be %s",
5928 error_pos + 1, mnemonic,
5929 elf64_ia64_operands[expected_operand].desc);
5930 else
5931 as_bad ("Operand mismatch");
5932 return 0;
5933 }
5934 return idesc;
5935 }
5936
5937 /* Keep track of state necessary to determine whether a NOP is necessary
5938 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5939 detect a case where additional NOPs may be necessary. */
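/* Roughly, the sequence tracked below is: (1) an F-unit insn that writes
   a predicate, (2) an M-unit insn predicated on that predicate which
   conditionally writes a GR (other than by add/sub/shladd or a
   post-increment), and (3) an insn that uses that GR as a memory or
   indirect register-file address.  */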
5940 static int
5941 errata_nop_necessary_p (slot, insn_unit)
5942 struct slot *slot;
5943 enum ia64_unit insn_unit;
5944 {
5945 int i;
5946 struct group *this_group = md.last_groups + md.group_idx;
5947 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5948 struct ia64_opcode *idesc = slot->idesc;
5949
5950 /* Test whether this could be the first insn in a problematic sequence. */
5951 if (insn_unit == IA64_UNIT_F)
5952 {
5953 for (i = 0; i < idesc->num_outputs; i++)
5954 if (idesc->operands[i] == IA64_OPND_P1
5955 || idesc->operands[i] == IA64_OPND_P2)
5956 {
5957 int regno = slot->opnd[i].X_add_number - REG_P;
5958 /* Ignore invalid operands; they generate errors elsewhere. */
5959 if (regno >= 64)
5960 return 0;
5961 this_group->p_reg_set[regno] = 1;
5962 }
5963 }
5964
5965 /* Test whether this could be the second insn in a problematic sequence. */
5966 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5967 && prev_group->p_reg_set[slot->qp_regno])
5968 {
5969 for (i = 0; i < idesc->num_outputs; i++)
5970 if (idesc->operands[i] == IA64_OPND_R1
5971 || idesc->operands[i] == IA64_OPND_R2
5972 || idesc->operands[i] == IA64_OPND_R3)
5973 {
5974 int regno = slot->opnd[i].X_add_number - REG_GR;
5975 /* Ignore invalid operands; they generate errors elsewhere. */
5976 if (regno >= 128)
5977 return 0;
5978 if (strncmp (idesc->name, "add", 3) != 0
5979 && strncmp (idesc->name, "sub", 3) != 0
5980 && strncmp (idesc->name, "shladd", 6) != 0
5981 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5982 this_group->g_reg_set_conditionally[regno] = 1;
5983 }
5984 }
5985
5986 /* Test whether this could be the third insn in a problematic sequence. */
5987 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5988 {
5989 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe. */
5990 idesc->operands[i] == IA64_OPND_R3
5991 /* For mov indirect. */
5992 || idesc->operands[i] == IA64_OPND_RR_R3
5993 || idesc->operands[i] == IA64_OPND_DBR_R3
5994 || idesc->operands[i] == IA64_OPND_IBR_R3
5995 || idesc->operands[i] == IA64_OPND_PKR_R3
5996 || idesc->operands[i] == IA64_OPND_PMC_R3
5997 || idesc->operands[i] == IA64_OPND_PMD_R3
5998 || idesc->operands[i] == IA64_OPND_MSR_R3
5999 || idesc->operands[i] == IA64_OPND_CPUID_R3
6000 /* For itr. */
6001 || idesc->operands[i] == IA64_OPND_ITR_R3
6002 || idesc->operands[i] == IA64_OPND_DTR_R3
6003 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
6004 || idesc->operands[i] == IA64_OPND_MR3)
6005 {
6006 int regno = slot->opnd[i].X_add_number - REG_GR;
6007 /* Ignore invalid operands; they generate errors elsewhere. */
6008 if (regno >= 128)
6009 return 0;
6010 if (idesc->operands[i] == IA64_OPND_R3)
6011 {
6012 if (strcmp (idesc->name, "fc") != 0
6013 && strcmp (idesc->name, "tak") != 0
6014 && strcmp (idesc->name, "thash") != 0
6015 && strcmp (idesc->name, "tpa") != 0
6016 && strcmp (idesc->name, "ttag") != 0
6017 && strncmp (idesc->name, "ptr", 3) != 0
6018 && strncmp (idesc->name, "ptc", 3) != 0
6019 && strncmp (idesc->name, "probe", 5) != 0)
6020 return 0;
6021 }
6022 if (prev_group->g_reg_set_conditionally[regno])
6023 return 1;
6024 }
6025 }
6026 return 0;
6027 }
6028
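/* Encode the instruction held in SLOT.  For long (L+X) forms the extra 41
   immediate bits are stored through *INSNP first, so INSNP must point at
   the L slot with the X slot following it; the instruction word itself is
   stored last.  */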
6029 static void
6030 build_insn (slot, insnp)
6031 struct slot *slot;
6032 bfd_vma *insnp;
6033 {
6034 const struct ia64_operand *odesc, *o2desc;
6035 struct ia64_opcode *idesc = slot->idesc;
6036 bfd_signed_vma insn, val;
6037 const char *err;
6038 int i;
6039
6040 insn = idesc->opcode | slot->qp_regno;
6041
6042 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6043 {
6044 if (slot->opnd[i].X_op == O_register
6045 || slot->opnd[i].X_op == O_constant
6046 || slot->opnd[i].X_op == O_index)
6047 val = slot->opnd[i].X_add_number;
6048 else if (slot->opnd[i].X_op == O_big)
6049 {
6050 /* This must be the value 0x10000000000000000. */
6051 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6052 val = 0;
6053 }
6054 else
6055 val = 0;
6056
6057 switch (idesc->operands[i])
6058 {
6059 case IA64_OPND_IMMU64:
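/* movl: bits 22..62 of the 64-bit immediate go into the L slot; the
   remaining bits are scattered into the imm7b/imm9d/imm5c/ic/i fields of
   the X-unit instruction below (field names per the IA-64 encoding).  */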
6060 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6061 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6062 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6063 | (((val >> 63) & 0x1) << 36));
6064 continue;
6065
6066 case IA64_OPND_IMMU62:
6067 val &= 0x3fffffffffffffffULL;
6068 if (val != slot->opnd[i].X_add_number)
6069 as_warn (_("Value truncated to 62 bits"));
6070 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6071 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6072 continue;
6073
6074 case IA64_OPND_TGT64:
6075 val >>= 4;
6076 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6077 insn |= ((((val >> 59) & 0x1) << 36)
6078 | (((val >> 0) & 0xfffff) << 13));
6079 continue;
6080
6081 case IA64_OPND_AR3:
6082 val -= REG_AR;
6083 break;
6084
6085 case IA64_OPND_B1:
6086 case IA64_OPND_B2:
6087 val -= REG_BR;
6088 break;
6089
6090 case IA64_OPND_CR3:
6091 val -= REG_CR;
6092 break;
6093
6094 case IA64_OPND_F1:
6095 case IA64_OPND_F2:
6096 case IA64_OPND_F3:
6097 case IA64_OPND_F4:
6098 val -= REG_FR;
6099 break;
6100
6101 case IA64_OPND_P1:
6102 case IA64_OPND_P2:
6103 val -= REG_P;
6104 break;
6105
6106 case IA64_OPND_R1:
6107 case IA64_OPND_R2:
6108 case IA64_OPND_R3:
6109 case IA64_OPND_R3_2:
6110 case IA64_OPND_CPUID_R3:
6111 case IA64_OPND_DBR_R3:
6112 case IA64_OPND_DTR_R3:
6113 case IA64_OPND_ITR_R3:
6114 case IA64_OPND_IBR_R3:
6115 case IA64_OPND_MR3:
6116 case IA64_OPND_MSR_R3:
6117 case IA64_OPND_PKR_R3:
6118 case IA64_OPND_PMC_R3:
6119 case IA64_OPND_PMD_R3:
6120 case IA64_OPND_RR_R3:
6121 val -= REG_GR;
6122 break;
6123
6124 default:
6125 break;
6126 }
6127
6128 odesc = elf64_ia64_operands + idesc->operands[i];
6129 err = (*odesc->insert) (odesc, val, &insn);
6130 if (err)
6131 as_bad_where (slot->src_file, slot->src_line,
6132 "Bad operand value: %s", err);
6133 if (idesc->flags & IA64_OPCODE_PSEUDO)
6134 {
6135 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6136 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6137 {
6138 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6139 (*o2desc->insert) (o2desc, val, &insn);
6140 }
6141 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6142 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6143 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6144 {
6145 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6146 (*o2desc->insert) (o2desc, 64 - val, &insn);
6147 }
6148 }
6149 }
6150 *insnp = insn;
6151 }
6152
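/* Emit the 16-byte bundle currently being built: a 5-bit template field
   plus three 41-bit instruction slots, packed into the two 64-bit words
   t0/t1 below.  Slots that cannot be filled are padded with
   unit-appropriate nops.  */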
6153 static void
6154 emit_one_bundle ()
6155 {
6156 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
6157 unsigned int manual_bundling = 0;
6158 enum ia64_unit required_unit, insn_unit = 0;
6159 enum ia64_insn_type type[3], insn_type;
6160 unsigned int template, orig_template;
6161 bfd_vma insn[3] = { -1, -1, -1 };
6162 struct ia64_opcode *idesc;
6163 int end_of_insn_group = 0, user_template = -1;
6164 int n, i, j, first, curr;
6165 unw_rec_list *ptr, *last_ptr, *end_ptr;
6166 bfd_vma t0 = 0, t1 = 0;
6167 struct label_fix *lfix;
6168 struct insn_fix *ifix;
6169 char mnemonic[16];
6170 fixS *fix;
6171 char *f;
6172 int addr_mod;
6173
6174 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6175 know (first >= 0 && first < NUM_SLOTS);
6176 n = MIN (3, md.num_slots_in_use);
6177
6178 /* Determine template: use user_template if specified, best match
6179 otherwise: */
6180
6181 if (md.slot[first].user_template >= 0)
6182 user_template = template = md.slot[first].user_template;
6183 else
6184 {
6185 /* Auto select appropriate template. */
6186 memset (type, 0, sizeof (type));
6187 curr = first;
6188 for (i = 0; i < n; ++i)
6189 {
6190 if (md.slot[curr].label_fixups && i != 0)
6191 break;
6192 type[i] = md.slot[curr].idesc->type;
6193 curr = (curr + 1) % NUM_SLOTS;
6194 }
6195 template = best_template[type[0]][type[1]][type[2]];
6196 }
6197
6198 /* initialize instructions with appropriate nops: */
6199 for (i = 0; i < 3; ++i)
6200 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6201
6202 f = frag_more (16);
6203
6204 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6205 from the start of the frag. */
6206 addr_mod = frag_now_fix () & 15;
6207 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6208 as_bad (_("instruction address is not a multiple of 16"));
6209 frag_now->insn_addr = addr_mod;
6210 frag_now->has_code = 1;
6211
6212 /* now fill in slots with as many insns as possible: */
6213 curr = first;
6214 idesc = md.slot[curr].idesc;
6215 end_of_insn_group = 0;
6216 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6217 {
6218 /* If we have unwind records, we may need to update some now. */
6219 ptr = md.slot[curr].unwind_record;
6220 if (ptr)
6221 {
6222 /* Find the last prologue/body record in the list for the current
6223 insn, and set the slot number for all records up to that point.
6224 This needs to be done now, because prologue/body records refer to
6225 the current point, not the point after the instruction has been
6226 issued. This matters because there may have been nops emitted
6227 meanwhile. Any non-prologue non-body record followed by a
6228 prologue/body record must also refer to the current point. */
6229 last_ptr = NULL;
6230 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6231 for (; ptr != end_ptr; ptr = ptr->next)
6232 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6233 || ptr->r.type == body)
6234 last_ptr = ptr;
6235 if (last_ptr)
6236 {
6237 /* Make last_ptr point one after the last prologue/body
6238 record. */
6239 last_ptr = last_ptr->next;
6240 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6241 ptr = ptr->next)
6242 {
6243 ptr->slot_number = (unsigned long) f + i;
6244 ptr->slot_frag = frag_now;
6245 }
6246 /* Remove the initialized records, so that we won't accidentally
6247 update them again if we insert a nop and continue. */
6248 md.slot[curr].unwind_record = last_ptr;
6249 }
6250 }
6251
6252 if (idesc->flags & IA64_OPCODE_SLOT2)
6253 {
6254 if (manual_bundling && i != 2)
6255 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6256 "`%s' must be last in bundle", idesc->name);
6257 else
6258 i = 2;
6259 }
6260 if (idesc->flags & IA64_OPCODE_LAST)
6261 {
6262 int required_slot;
6263 unsigned int required_template;
6264
6265 /* If we need a stop bit after an M slot, our only choice is
6266 template 5 (M;;MI). If we need a stop bit after a B
6267 slot, our only choice is to place it at the end of the
6268 bundle, because the only available templates are MIB,
6269 MBB, BBB, MMB, and MFB. We don't handle anything other
6270 than M and B slots because these are the only kind of
6271 instructions that can have the IA64_OPCODE_LAST bit set. */
6272 required_template = template;
6273 switch (idesc->type)
6274 {
6275 case IA64_TYPE_M:
6276 required_slot = 0;
6277 required_template = 5;
6278 break;
6279
6280 case IA64_TYPE_B:
6281 required_slot = 2;
6282 break;
6283
6284 default:
6285 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6286 "Internal error: don't know how to force %s to end"
6287 "of instruction group", idesc->name);
6288 required_slot = i;
6289 break;
6290 }
6291 if (manual_bundling && i != required_slot)
6292 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6293 "`%s' must be last in instruction group",
6294 idesc->name);
6295 if (required_slot < i)
6296 /* Can't fit this instruction. */
6297 break;
6298
6299 i = required_slot;
6300 if (required_template != template)
6301 {
6302 /* If we switch the template, we need to reset the NOPs
6303 after slot i. The slot-types of the instructions ahead
6304 of i never change, so we don't need to worry about
6305 changing NOPs in front of this slot. */
6306 for (j = i; j < 3; ++j)
6307 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6308 }
6309 template = required_template;
6310 }
6311 if (curr != first && md.slot[curr].label_fixups)
6312 {
6313 if (manual_bundling_on)
6314 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6315 "Label must be first in a bundle");
6316 /* This insn must go into the first slot of a bundle. */
6317 break;
6318 }
6319
6320 manual_bundling_on = md.slot[curr].manual_bundling_on;
6321 manual_bundling_off = md.slot[curr].manual_bundling_off;
6322
6323 if (manual_bundling_on)
6324 {
6325 if (curr == first)
6326 manual_bundling = 1;
6327 else
6328 break; /* need to start a new bundle */
6329 }
6330
6331 if (end_of_insn_group && md.num_slots_in_use >= 1)
6332 {
6333 /* We need an instruction group boundary in the middle of a
6334 bundle. See if we can switch to another template with
6335 an appropriate boundary. */
6336
6337 orig_template = template;
6338 if (i == 1 && (user_template == 4
6339 || (user_template < 0
6340 && (ia64_templ_desc[template].exec_unit[0]
6341 == IA64_UNIT_M))))
6342 {
6343 template = 5;
6344 end_of_insn_group = 0;
6345 }
6346 else if (i == 2 && (user_template == 0
6347 || (user_template < 0
6348 && (ia64_templ_desc[template].exec_unit[1]
6349 == IA64_UNIT_I)))
6350 /* This test makes sure we don't switch the template if
6351 the next instruction is one that needs to be first in
6352 an instruction group. Since all those instructions are
6353 in the M group, there is no way such an instruction can
6354 fit in this bundle even if we switch the template. The
6355 reason we have to check for this is that otherwise we
6356 may end up generating "MI;;I M.." which has the deadly
6357 effect that the second M instruction is no longer the
6358 first in the bundle! --davidm 99/12/16 */
6359 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6360 {
6361 template = 1;
6362 end_of_insn_group = 0;
6363 }
6364 else if (curr != first)
6365 /* can't fit this insn */
6366 break;
6367
6368 if (template != orig_template)
6369 /* if we switch the template, we need to reset the NOPs
6370 after slot i. The slot-types of the instructions ahead
6371 of i never change, so we don't need to worry about
6372 changing NOPs in front of this slot. */
6373 for (j = i; j < 3; ++j)
6374 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6375 }
6376 required_unit = ia64_templ_desc[template].exec_unit[i];
6377
6378 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6379 if (idesc->type == IA64_TYPE_DYN)
6380 {
6381 enum ia64_opnd opnd1, opnd2;
6382
6383 if ((strcmp (idesc->name, "nop") == 0)
6384 || (strcmp (idesc->name, "hint") == 0)
6385 || (strcmp (idesc->name, "break") == 0))
6386 insn_unit = required_unit;
6387 else if (strcmp (idesc->name, "chk.s") == 0
6388 || strcmp (idesc->name, "mov") == 0)
6389 {
6390 insn_unit = IA64_UNIT_M;
6391 if (required_unit == IA64_UNIT_I
6392 || (required_unit == IA64_UNIT_F && template == 6))
6393 insn_unit = IA64_UNIT_I;
6394 }
6395 else
6396 as_fatal ("emit_one_bundle: unexpected dynamic op");
6397
6398 sprintf (mnemonic, "%s.%c", idesc->name, "?imbfxx"[insn_unit]);
6399 opnd1 = idesc->operands[0];
6400 opnd2 = idesc->operands[1];
6401 ia64_free_opcode (idesc);
6402 idesc = ia64_find_opcode (mnemonic);
6403 /* moves to/from ARs have collisions */
6404 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6405 {
6406 while (idesc != NULL
6407 && (idesc->operands[0] != opnd1
6408 || idesc->operands[1] != opnd2))
6409 idesc = get_next_opcode (idesc);
6410 }
6411 #if 0
6412 else
6413 /* no other resolved dynamic ops have collisions */
6414 know (!get_next_opcode (idesc));
6415 #endif
6416 md.slot[curr].idesc = idesc;
6417 }
6418 else
6419 {
6420 insn_type = idesc->type;
6421 insn_unit = IA64_UNIT_NIL;
6422 switch (insn_type)
6423 {
6424 case IA64_TYPE_A:
6425 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6426 insn_unit = required_unit;
6427 break;
6428 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6429 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6430 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6431 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6432 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6433 default: break;
6434 }
6435 }
6436
6437 if (insn_unit != required_unit)
6438 {
6439 if (required_unit == IA64_UNIT_L
6440 && insn_unit == IA64_UNIT_I
6441 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6442 {
6443 /* We got ourselves an MLX template, but the current
6444 instruction is neither an X-unit instruction nor an I-unit
6445 instruction that can go into the X slot of an MLX template. Duh. */
6446 if (md.num_slots_in_use >= NUM_SLOTS)
6447 {
6448 as_bad_where (md.slot[curr].src_file,
6449 md.slot[curr].src_line,
6450 "`%s' can't go in X slot of "
6451 "MLX template", idesc->name);
6452 /* drop this insn so we don't livelock: */
6453 --md.num_slots_in_use;
6454 }
6455 break;
6456 }
6457 continue; /* try next slot */
6458 }
6459
6460 if (debug_type == DEBUG_DWARF2 || md.slot[curr].loc_directive_seen)
6461 {
6462 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6463
6464 md.slot[curr].loc_directive_seen = 0;
6465 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6466 }
6467
6468 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6469 as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
6470
6471 build_insn (md.slot + curr, insn + i);
6472
6473 ptr = md.slot[curr].unwind_record;
6474 if (ptr)
6475 {
6476 /* Set slot numbers for all remaining unwind records belonging to the
6477 current insn. There can not be any prologue/body unwind records
6478 here. */
6479 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6480 for (; ptr != end_ptr; ptr = ptr->next)
6481 {
6482 ptr->slot_number = (unsigned long) f + i;
6483 ptr->slot_frag = frag_now;
6484 }
6485 md.slot[curr].unwind_record = NULL;
6486 }
6487
6488 if (required_unit == IA64_UNIT_L)
6489 {
6490 know (i == 1);
6491 /* skip one slot for long/X-unit instructions */
6492 ++i;
6493 }
6494 --md.num_slots_in_use;
6495
6496 /* now is a good time to fix up the labels for this insn: */
6497 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6498 {
6499 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6500 symbol_set_frag (lfix->sym, frag_now);
6501 }
6502 /* and fix up the tags also. */
6503 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6504 {
6505 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6506 symbol_set_frag (lfix->sym, frag_now);
6507 }
6508
6509 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6510 {
6511 ifix = md.slot[curr].fixup + j;
6512 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6513 &ifix->expr, ifix->is_pcrel, ifix->code);
6514 fix->tc_fix_data.opnd = ifix->opnd;
6515 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6516 fix->fx_file = md.slot[curr].src_file;
6517 fix->fx_line = md.slot[curr].src_line;
6518 }
6519
6520 end_of_insn_group = md.slot[curr].end_of_insn_group;
6521
6522 if (end_of_insn_group)
6523 {
6524 md.group_idx = (md.group_idx + 1) % 3;
6525 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6526 }
6527
6528 /* clear slot: */
6529 ia64_free_opcode (md.slot[curr].idesc);
6530 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6531 md.slot[curr].user_template = -1;
6532
6533 if (manual_bundling_off)
6534 {
6535 manual_bundling = 0;
6536 break;
6537 }
6538 curr = (curr + 1) % NUM_SLOTS;
6539 idesc = md.slot[curr].idesc;
6540 }
6541 if (manual_bundling)
6542 {
6543 if (md.num_slots_in_use > 0)
6544 {
6545 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6546 "`%s' does not fit into %s template",
6547 idesc->name, ia64_templ_desc[template].name);
6548 --md.num_slots_in_use;
6549 }
6550 else
6551 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6552 "Missing '}' at end of file");
6553 }
6554 know (md.num_slots_in_use < NUM_SLOTS);
6555
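/* Pack the 128-bit bundle.  Bits 0..4 hold the template field (the
   descriptor index shifted left by one, with the low bit marking a stop at
   the end of the bundle); bits 5..45 hold slot 0, bits 46..86 slot 1, and
   bits 87..127 slot 2.  t0 and t1 are the low and high 64-bit halves.  */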
6556 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6557 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6558
6559 number_to_chars_littleendian (f + 0, t0, 8);
6560 number_to_chars_littleendian (f + 8, t1, 8);
6561
6562 if (unwind.list)
6563 {
6564 unwind.list->next_slot_number = (unsigned long) f + 16;
6565 unwind.list->next_slot_frag = frag_now;
6566 }
6567 }
6568
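/* Handle a machine-dependent command-line option.  Returns nonzero if the
   option was recognized, zero otherwise.  */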
6569 int
6570 md_parse_option (c, arg)
6571 int c;
6572 char *arg;
6573 {
6574
6575 switch (c)
6576 {
6577 /* Switches from the Intel assembler. */
6578 case 'm':
6579 if (strcmp (arg, "ilp64") == 0
6580 || strcmp (arg, "lp64") == 0
6581 || strcmp (arg, "p64") == 0)
6582 {
6583 md.flags |= EF_IA_64_ABI64;
6584 }
6585 else if (strcmp (arg, "ilp32") == 0)
6586 {
6587 md.flags &= ~EF_IA_64_ABI64;
6588 }
6589 else if (strcmp (arg, "le") == 0)
6590 {
6591 md.flags &= ~EF_IA_64_BE;
6592 default_big_endian = 0;
6593 }
6594 else if (strcmp (arg, "be") == 0)
6595 {
6596 md.flags |= EF_IA_64_BE;
6597 default_big_endian = 1;
6598 }
6599 else
6600 return 0;
6601 break;
6602
6603 case 'N':
6604 if (strcmp (arg, "so") == 0)
6605 {
6606 /* Suppress signon message. */
6607 }
6608 else if (strcmp (arg, "pi") == 0)
6609 {
6610 /* Reject privileged instructions. FIXME */
6611 }
6612 else if (strcmp (arg, "us") == 0)
6613 {
6614 /* Allow union of signed and unsigned range. FIXME */
6615 }
6616 else if (strcmp (arg, "close_fcalls") == 0)
6617 {
6618 /* Do not resolve global function calls. */
6619 }
6620 else
6621 return 0;
6622 break;
6623
6624 case 'C':
6625 /* temp[="prefix"] Insert temporary labels into the object file
6626 symbol table prefixed by "prefix".
6627 Default prefix is ":temp:".
6628 */
6629 break;
6630
6631 case 'a':
6632 /* indirect=<tgt> Assume unannotated indirect branches behavior
6633 according to <tgt> --
6634 exit: branch out from the current context (default)
6635 labels: all labels in context may be branch targets
6636 */
6637 if (strncmp (arg, "indirect=", 9) != 0)
6638 return 0;
6639 break;
6640
6641 case 'x':
6642 /* -X conflicts with an ignored option, use -x instead */
6643 md.detect_dv = 1;
6644 if (!arg || strcmp (arg, "explicit") == 0)
6645 {
6646 /* set default mode to explicit */
6647 md.default_explicit_mode = 1;
6648 break;
6649 }
6650 else if (strcmp (arg, "auto") == 0)
6651 {
6652 md.default_explicit_mode = 0;
6653 }
6654 else if (strcmp (arg, "debug") == 0)
6655 {
6656 md.debug_dv = 1;
6657 }
6658 else if (strcmp (arg, "debugx") == 0)
6659 {
6660 md.default_explicit_mode = 1;
6661 md.debug_dv = 1;
6662 }
6663 else
6664 {
6665 as_bad (_("Unrecognized option '-x%s'"), arg);
6666 }
6667 break;
6668
6669 case 'S':
6670 /* nops Print nops statistics. */
6671 break;
6672
6673 /* GNU specific switches for gcc. */
6674 case OPTION_MCONSTANT_GP:
6675 md.flags |= EF_IA_64_CONS_GP;
6676 break;
6677
6678 case OPTION_MAUTO_PIC:
6679 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6680 break;
6681
6682 default:
6683 return 0;
6684 }
6685
6686 return 1;
6687 }
6688
6689 void
6690 md_show_usage (stream)
6691 FILE *stream;
6692 {
6693 fputs (_("\
6694 IA-64 options:\n\
6695 --mconstant-gp mark output file as using the constant-GP model\n\
6696 (sets ELF header flag EF_IA_64_CONS_GP)\n\
6697 --mauto-pic mark output file as using the constant-GP model\n\
6698 without function descriptors (sets ELF header flag\n\
6699 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
6700 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6701 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6702 -x | -xexplicit turn on dependency violation checking (default)\n\
6703 -xauto automagically remove dependency violations\n\
6704 -xdebug debug dependency violation checker\n"),
6705 stream);
6706 }
6707
6708 void
6709 ia64_after_parse_args ()
6710 {
6711 if (debug_type == DEBUG_STABS)
6712 as_fatal (_("--gstabs is not supported for ia64"));
6713 }
6714
6715 /* Return true if TYPE fits in TEMPL at SLOT. */
6716
6717 static int
6718 match (int templ, int type, int slot)
6719 {
6720 enum ia64_unit unit;
6721 int result;
6722
6723 unit = ia64_templ_desc[templ].exec_unit[slot];
6724 switch (type)
6725 {
6726 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6727 case IA64_TYPE_A:
6728 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6729 break;
6730 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6731 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6732 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6733 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6734 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6735 default: result = 0; break;
6736 }
6737 return result;
6738 }
6739
6740 /* Add a bit of extra goodness if a nop of type F or B would fit
6741 in TEMPL at SLOT. */
6742
6743 static inline int
6744 extra_goodness (int templ, int slot)
6745 {
6746 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6747 return 2;
6748 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6749 return 1;
6750 return 0;
6751 }
6752
6753 /* This function is called once, at assembler startup time. It sets
6754 up all the tables, etc. that the MD part of the assembler will need
6755 that can be determined before arguments are parsed. */
6756 void
6757 md_begin ()
6758 {
6759 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6760 const char *err;
6761 char name[8];
6762
6763 md.auto_align = 1;
6764 md.explicit_mode = md.default_explicit_mode;
6765
6766 bfd_set_section_alignment (stdoutput, text_section, 4);
6767
6768 /* Make sure function pointers get initialized. */
6769 target_big_endian = -1;
6770 dot_byteorder (default_big_endian);
6771
6772 alias_hash = hash_new ();
6773 alias_name_hash = hash_new ();
6774 secalias_hash = hash_new ();
6775 secalias_name_hash = hash_new ();
6776
6777 pseudo_func[FUNC_DTP_MODULE].u.sym =
6778 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
6779 &zero_address_frag);
6780
6781 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
6782 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
6783 &zero_address_frag);
6784
6785 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6786 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6787 &zero_address_frag);
6788
6789 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6790 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6791 &zero_address_frag);
6792
6793 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6794 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6795 &zero_address_frag);
6796
6797 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
6798 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
6799 &zero_address_frag);
6800
6801 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6802 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6803 &zero_address_frag);
6804
6805 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6806 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6807 &zero_address_frag);
6808
6809 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6810 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6811 &zero_address_frag);
6812
6813 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6814 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6815 &zero_address_frag);
6816
6817 pseudo_func[FUNC_TP_RELATIVE].u.sym =
6818 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
6819 &zero_address_frag);
6820
6821 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6822 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6823 &zero_address_frag);
6824
6825 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6826 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6827 &zero_address_frag);
6828
6829 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
6830 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
6831 &zero_address_frag);
6832
6833 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
6834 symbol_new (".<ltoff.dtprel>", undefined_section, FUNC_LT_DTP_RELATIVE,
6835 &zero_address_frag);
6836
6837 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
6838 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
6839 &zero_address_frag);
6840
6841 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6842 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6843 &zero_address_frag);
6844
6845 /* Compute the table of best templates. We compute goodness as a
6846 base 4 value, in which each match counts for 3, each F counts
6847 for 2, each B counts for 1. This should maximize the number of
6848 F and B nops in the chosen bundles, which is good because these
6849 pipelines are least likely to be overcommitted. */
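/* As a rough illustration: for the slot-type triple (M, M, B), an MMB
   template matches all three slots and scores 3 + 3 + 3 = 9, whereas an
   MIB template matches only slot 0 and scores 3 plus a bonus of 1 for the
   B nop that fits in slot 2; MMB is therefore recorded as the best
   template for that triple.  */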
6850 for (i = 0; i < IA64_NUM_TYPES; ++i)
6851 for (j = 0; j < IA64_NUM_TYPES; ++j)
6852 for (k = 0; k < IA64_NUM_TYPES; ++k)
6853 {
6854 best = 0;
6855 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6856 {
6857 goodness = 0;
6858 if (match (t, i, 0))
6859 {
6860 if (match (t, j, 1))
6861 {
6862 if (match (t, k, 2))
6863 goodness = 3 + 3 + 3;
6864 else
6865 goodness = 3 + 3 + extra_goodness (t, 2);
6866 }
6867 else if (match (t, j, 2))
6868 goodness = 3 + 3 + extra_goodness (t, 1);
6869 else
6870 {
6871 goodness = 3;
6872 goodness += extra_goodness (t, 1);
6873 goodness += extra_goodness (t, 2);
6874 }
6875 }
6876 else if (match (t, i, 1))
6877 {
6878 if (match (t, j, 2))
6879 goodness = 3 + 3;
6880 else
6881 goodness = 3 + extra_goodness (t, 2);
6882 }
6883 else if (match (t, i, 2))
6884 goodness = 3 + extra_goodness (t, 1);
6885
6886 if (goodness > best)
6887 {
6888 best = goodness;
6889 best_template[i][j][k] = t;
6890 }
6891 }
6892 }
6893
6894 for (i = 0; i < NUM_SLOTS; ++i)
6895 md.slot[i].user_template = -1;
6896
6897 md.pseudo_hash = hash_new ();
6898 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6899 {
6900 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6901 (void *) (pseudo_opcode + i));
6902 if (err)
6903 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6904 pseudo_opcode[i].name, err);
6905 }
6906
6907 md.reg_hash = hash_new ();
6908 md.dynreg_hash = hash_new ();
6909 md.const_hash = hash_new ();
6910 md.entry_hash = hash_new ();
6911
6912 /* general registers: */
6913
6914 total = 128;
6915 for (i = 0; i < total; ++i)
6916 {
6917 sprintf (name, "r%d", i - REG_GR);
6918 md.regsym[i] = declare_register (name, i);
6919 }
6920
6921 /* floating point registers: */
6922 total += 128;
6923 for (; i < total; ++i)
6924 {
6925 sprintf (name, "f%d", i - REG_FR);
6926 md.regsym[i] = declare_register (name, i);
6927 }
6928
6929 /* application registers: */
6930 total += 128;
6931 ar_base = i;
6932 for (; i < total; ++i)
6933 {
6934 sprintf (name, "ar%d", i - REG_AR);
6935 md.regsym[i] = declare_register (name, i);
6936 }
6937
6938 /* control registers: */
6939 total += 128;
6940 cr_base = i;
6941 for (; i < total; ++i)
6942 {
6943 sprintf (name, "cr%d", i - REG_CR);
6944 md.regsym[i] = declare_register (name, i);
6945 }
6946
6947 /* predicate registers: */
6948 total += 64;
6949 for (; i < total; ++i)
6950 {
6951 sprintf (name, "p%d", i - REG_P);
6952 md.regsym[i] = declare_register (name, i);
6953 }
6954
6955 /* branch registers: */
6956 total += 8;
6957 for (; i < total; ++i)
6958 {
6959 sprintf (name, "b%d", i - REG_BR);
6960 md.regsym[i] = declare_register (name, i);
6961 }
6962
6963 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6964 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6965 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6966 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6967 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6968 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6969 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6970
6971 for (i = 0; i < NELEMS (indirect_reg); ++i)
6972 {
6973 regnum = indirect_reg[i].regnum;
6974 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6975 }
6976
6977 /* define synonyms for application registers: */
6978 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6979 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6980 REG_AR + ar[i - REG_AR].regnum);
6981
6982 /* define synonyms for control registers: */
6983 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6984 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6985 REG_CR + cr[i - REG_CR].regnum);
6986
6987 declare_register ("gp", REG_GR + 1);
6988 declare_register ("sp", REG_GR + 12);
6989 declare_register ("rp", REG_BR + 0);
6990
6991 /* pseudo-registers used to specify unwind info: */
6992 declare_register ("psp", REG_PSP);
6993
6994 declare_register_set ("ret", 4, REG_GR + 8);
6995 declare_register_set ("farg", 8, REG_FR + 8);
6996 declare_register_set ("fret", 8, REG_FR + 8);
6997
6998 for (i = 0; i < NELEMS (const_bits); ++i)
6999 {
7000 err = hash_insert (md.const_hash, const_bits[i].name,
7001 (PTR) (const_bits + i));
7002 if (err)
7003 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
7004 const_bits[i].name, err);
7005 }
7006
7007 /* Set the architecture and machine depending on defaults and command line
7008 options. */
7009 if (md.flags & EF_IA_64_ABI64)
7010 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7011 else
7012 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7013
7014 if (! ok)
7015 as_warn (_("Could not set architecture and machine"));
7016
7017 /* Set the pointer size and pointer shift size depending on md.flags */
7018
7019 if (md.flags & EF_IA_64_ABI64)
7020 {
7021 md.pointer_size = 8; /* pointers are 8 bytes */
7022 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7023 }
7024 else
7025 {
7026 md.pointer_size = 4; /* pointers are 4 bytes */
7027 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7028 }
7029
7030 md.mem_offset.hint = 0;
7031 md.path = 0;
7032 md.maxpaths = 0;
7033 md.entry_labels = NULL;
7034 }
7035
7036 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
7037 because that is called after md_parse_option which is where we do the
7038 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
7039 default endianness. */
7040
7041 void
7042 ia64_init (argc, argv)
7043 int argc ATTRIBUTE_UNUSED;
7044 char **argv ATTRIBUTE_UNUSED;
7045 {
7046 md.flags = MD_FLAGS_DEFAULT;
7047 }
7048
7049 /* Return a string for the target object file format. */
7050
7051 const char *
7052 ia64_target_format ()
7053 {
7054 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7055 {
7056 if (md.flags & EF_IA_64_BE)
7057 {
7058 if (md.flags & EF_IA_64_ABI64)
7059 #if defined(TE_AIX50)
7060 return "elf64-ia64-aix-big";
7061 #elif defined(TE_HPUX)
7062 return "elf64-ia64-hpux-big";
7063 #else
7064 return "elf64-ia64-big";
7065 #endif
7066 else
7067 #if defined(TE_AIX50)
7068 return "elf32-ia64-aix-big";
7069 #elif defined(TE_HPUX)
7070 return "elf32-ia64-hpux-big";
7071 #else
7072 return "elf32-ia64-big";
7073 #endif
7074 }
7075 else
7076 {
7077 if (md.flags & EF_IA_64_ABI64)
7078 #ifdef TE_AIX50
7079 return "elf64-ia64-aix-little";
7080 #else
7081 return "elf64-ia64-little";
7082 #endif
7083 else
7084 #ifdef TE_AIX50
7085 return "elf32-ia64-aix-little";
7086 #else
7087 return "elf32-ia64-little";
7088 #endif
7089 }
7090 }
7091 else
7092 return "unknown-format";
7093 }
7094
7095 void
7096 ia64_end_of_source ()
7097 {
7098 /* terminate insn group upon reaching end of file: */
7099 insn_group_break (1, 0, 0);
7100
7101 /* emits slots we haven't written yet: */
7102 ia64_flush_insns ();
7103
7104 bfd_set_private_flags (stdoutput, md.flags);
7105
7106 md.mem_offset.hint = 0;
7107 }
7108
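/* Called at the start of every input line.  Diagnose a qualifying
   predicate that was not followed by an instruction, and handle an
   explicit ";;" stop (which is ignored, with a warning, when
   dependency-violation checking is in automatic mode).  */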
7109 void
7110 ia64_start_line ()
7111 {
7112 if (md.qp.X_op == O_register)
7113 as_bad ("qualifying predicate not followed by instruction");
7114 md.qp.X_op = O_absent;
7115
7116 if (ignore_input ())
7117 return;
7118
7119 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7120 {
7121 if (md.detect_dv && !md.explicit_mode)
7122 as_warn (_("Explicit stops are ignored in auto mode"));
7123 else
7124 insn_group_break (1, 0, 0);
7125 }
7126 }
7127
7128 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7129 labels. */
7130 static int defining_tag = 0;
7131
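/* Handle ia64-specific line syntax that the generic reader does not know
   about: "(qp)" qualifying predicates, "{" and "}" for manual bundling,
   and "[tag:]" tag definitions.  Returns nonzero if the character was
   handled.  */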
7132 int
7133 ia64_unrecognized_line (ch)
7134 int ch;
7135 {
7136 switch (ch)
7137 {
7138 case '(':
7139 expression (&md.qp);
7140 if (*input_line_pointer++ != ')')
7141 {
7142 as_bad ("Expected ')'");
7143 return 0;
7144 }
7145 if (md.qp.X_op != O_register)
7146 {
7147 as_bad ("Qualifying predicate expected");
7148 return 0;
7149 }
7150 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7151 {
7152 as_bad ("Predicate register expected");
7153 return 0;
7154 }
7155 return 1;
7156
7157 case '{':
7158 if (md.manual_bundling)
7159 as_warn ("Found '{' when manual bundling is already turned on");
7160 else
7161 CURR_SLOT.manual_bundling_on = 1;
7162 md.manual_bundling = 1;
7163
7164 /* Bundling is only acceptable in explicit mode
7165 or when in default automatic mode. */
7166 if (md.detect_dv && !md.explicit_mode)
7167 {
7168 if (!md.mode_explicitly_set
7169 && !md.default_explicit_mode)
7170 dot_dv_mode ('E');
7171 else
7172 as_warn (_("Found '{' after explicit switch to automatic mode"));
7173 }
7174 return 1;
7175
7176 case '}':
7177 if (!md.manual_bundling)
7178 as_warn ("Found '}' when manual bundling is off");
7179 else
7180 PREV_SLOT.manual_bundling_off = 1;
7181 md.manual_bundling = 0;
7182
7183 /* switch back to automatic mode, if applicable */
7184 if (md.detect_dv
7185 && md.explicit_mode
7186 && !md.mode_explicitly_set
7187 && !md.default_explicit_mode)
7188 dot_dv_mode ('A');
7189
7190 /* Allow '{' to follow on the same line. We also allow ";;", but that
7191 happens automatically because ';' is an end of line marker. */
7192 SKIP_WHITESPACE ();
7193 if (input_line_pointer[0] == '{')
7194 {
7195 input_line_pointer++;
7196 return ia64_unrecognized_line ('{');
7197 }
7198
7199 demand_empty_rest_of_line ();
7200 return 1;
7201
7202 case '[':
7203 {
7204 char *s;
7205 char c;
7206 symbolS *tag;
7207 int temp;
7208
7209 if (md.qp.X_op == O_register)
7210 {
7211 as_bad ("Tag must come before qualifying predicate.");
7212 return 0;
7213 }
7214
7215 /* This implements just enough of read_a_source_file in read.c to
7216 recognize labels. */
7217 if (is_name_beginner (*input_line_pointer))
7218 {
7219 s = input_line_pointer;
7220 c = get_symbol_end ();
7221 }
7222 else if (LOCAL_LABELS_FB
7223 && ISDIGIT (*input_line_pointer))
7224 {
7225 temp = 0;
7226 while (ISDIGIT (*input_line_pointer))
7227 temp = (temp * 10) + *input_line_pointer++ - '0';
7228 fb_label_instance_inc (temp);
7229 s = fb_label_name (temp, 0);
7230 c = *input_line_pointer;
7231 }
7232 else
7233 {
7234 s = NULL;
7235 c = '\0';
7236 }
7237 if (c != ':')
7238 {
7239 /* Put ':' back for error messages' sake. */
7240 *input_line_pointer++ = ':';
7241 as_bad ("Expected ':'");
7242 return 0;
7243 }
7244
7245 defining_tag = 1;
7246 tag = colon (s);
7247 defining_tag = 0;
7248 /* Put ':' back for error messages' sake. */
7249 *input_line_pointer++ = ':';
7250 if (*input_line_pointer++ != ']')
7251 {
7252 as_bad ("Expected ']'");
7253 return 0;
7254 }
7255 if (! tag)
7256 {
7257 as_bad ("Tag name expected");
7258 return 0;
7259 }
7260 return 1;
7261 }
7262
7263 default:
7264 break;
7265 }
7266
7267 /* Not a valid line. */
7268 return 0;
7269 }
7270
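/* Record a newly defined label or tag.  Tags only receive a tag fixup;
   code-section labels additionally force the next instruction into a new
   bundle and are remembered as entry points for dependency-violation
   analysis.  */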
7271 void
7272 ia64_frob_label (sym)
7273 struct symbol *sym;
7274 {
7275 struct label_fix *fix;
7276
7277 /* Tags need special handling since they are not bundle breaks like
7278 labels. */
7279 if (defining_tag)
7280 {
7281 fix = obstack_alloc (&notes, sizeof (*fix));
7282 fix->sym = sym;
7283 fix->next = CURR_SLOT.tag_fixups;
7284 CURR_SLOT.tag_fixups = fix;
7285
7286 return;
7287 }
7288
7289 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7290 {
7291 md.last_text_seg = now_seg;
7292 fix = obstack_alloc (&notes, sizeof (*fix));
7293 fix->sym = sym;
7294 fix->next = CURR_SLOT.label_fixups;
7295 CURR_SLOT.label_fixups = fix;
7296
7297 /* Keep track of how many code entry points we've seen. */
7298 if (md.path == md.maxpaths)
7299 {
7300 md.maxpaths += 20;
7301 md.entry_labels = (const char **)
7302 xrealloc ((void *) md.entry_labels,
7303 md.maxpaths * sizeof (char *));
7304 }
7305 md.entry_labels[md.path++] = S_GET_NAME (sym);
7306 }
7307 }
7308
7309 #ifdef TE_HPUX
7310 /* The HP-UX linker will give unresolved symbol errors for symbols
7311 that are declared but unused. This routine removes declared,
7312 unused symbols from an object. */
7313 int
7314 ia64_frob_symbol (sym)
7315 struct symbol *sym;
7316 {
7317 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) &&
7318 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7319 || (S_GET_SEGMENT (sym) == &bfd_abs_section
7320 && ! S_IS_EXTERNAL (sym)))
7321 return 1;
7322 return 0;
7323 }
7324 #endif
7325
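/* Flush any partially built bundle when we are in a code section, so that
   whatever the caller emits next does not end up in the middle of it.  */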
7326 void
7327 ia64_flush_pending_output ()
7328 {
7329 if (!md.keep_pending_output
7330 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7331 {
7332 /* ??? This causes many unnecessary stop bits to be emitted.
7333 Unfortunately, it isn't clear if it is safe to remove this. */
7334 insn_group_break (1, 0, 0);
7335 ia64_flush_insns ();
7336 }
7337 }
7338
7339 /* Do ia64-specific expression optimization. All that's done here is
7340 to transform index expressions that are either due to the indexing
7341 of rotating registers or due to the indexing of indirect register
7342 sets. */
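/* For example, after a directive like ".rotr sum[4]" an operand written
   "sum[2]" is folded here into the single register sum+2, while an
   expression such as "dbr[r3]" becomes an O_index expression naming the
   indirect register file and its index register.  */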
7343 int
7344 ia64_optimize_expr (l, op, r)
7345 expressionS *l;
7346 operatorT op;
7347 expressionS *r;
7348 {
7349 unsigned num_regs;
7350
7351 if (op == O_index)
7352 {
7353 if (l->X_op == O_register && r->X_op == O_constant)
7354 {
7355 num_regs = (l->X_add_number >> 16);
7356 if ((unsigned) r->X_add_number >= num_regs)
7357 {
7358 if (!num_regs)
7359 as_bad ("No current frame");
7360 else
7361 as_bad ("Index out of range 0..%u", num_regs - 1);
7362 r->X_add_number = 0;
7363 }
7364 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7365 return 1;
7366 }
7367 else if (l->X_op == O_register && r->X_op == O_register)
7368 {
7369 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
7370 || l->X_add_number == IND_MEM)
7371 {
7372 as_bad ("Indirect register set name expected");
7373 l->X_add_number = IND_CPUID;
7374 }
7375 l->X_op = O_index;
7376 l->X_op_symbol = md.regsym[l->X_add_number];
7377 l->X_add_number = r->X_add_number;
7378 return 1;
7379 }
7380 }
7381 return 0;
7382 }
7383
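/* Recognize ia64-specific names for the expression parser: register
   names, known constant names, the stacked-register aliases inN, locN,
   and outN, and rotating register sets declared with the .rot*
   directives.  Returns nonzero and fills in *e on success.  */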
7384 int
7385 ia64_parse_name (name, e)
7386 char *name;
7387 expressionS *e;
7388 {
7389 struct const_desc *cdesc;
7390 struct dynreg *dr = 0;
7391 unsigned int regnum;
7392 struct symbol *sym;
7393 char *end;
7394
7395 /* first see if NAME is a known register name: */
7396 sym = hash_find (md.reg_hash, name);
7397 if (sym)
7398 {
7399 e->X_op = O_register;
7400 e->X_add_number = S_GET_VALUE (sym);
7401 return 1;
7402 }
7403
7404 cdesc = hash_find (md.const_hash, name);
7405 if (cdesc)
7406 {
7407 e->X_op = O_constant;
7408 e->X_add_number = cdesc->value;
7409 return 1;
7410 }
7411
7412 /* check for inN, locN, or outN: */
7413 switch (name[0])
7414 {
7415 case 'i':
7416 if (name[1] == 'n' && ISDIGIT (name[2]))
7417 {
7418 dr = &md.in;
7419 name += 2;
7420 }
7421 break;
7422
7423 case 'l':
7424 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7425 {
7426 dr = &md.loc;
7427 name += 3;
7428 }
7429 break;
7430
7431 case 'o':
7432 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7433 {
7434 dr = &md.out;
7435 name += 3;
7436 }
7437 break;
7438
7439 default:
7440 break;
7441 }
7442
7443 if (dr)
7444 {
7445 /* The name is inN, locN, or outN; parse the register number. */
7446 regnum = strtoul (name, &end, 10);
7447 if (end > name && *end == '\0')
7448 {
7449 if ((unsigned) regnum >= dr->num_regs)
7450 {
7451 if (!dr->num_regs)
7452 as_bad ("No current frame");
7453 else
7454 as_bad ("Register number out of range 0..%u",
7455 dr->num_regs - 1);
7456 regnum = 0;
7457 }
7458 e->X_op = O_register;
7459 e->X_add_number = dr->base + regnum;
7460 return 1;
7461 }
7462 }
7463
7464 if ((dr = hash_find (md.dynreg_hash, name)))
7465 {
7466 /* We've got ourselves the name of a rotating register set.
7467 Store the base register number in the low 16 bits of
7468 X_add_number and the size of the register set in the top 16
7469 bits. */
7470 e->X_op = O_register;
7471 e->X_add_number = dr->base | (dr->num_regs << 16);
7472 return 1;
7473 }
7474 return 0;
7475 }
7476
7477 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7478
7479 char *
7480 ia64_canonicalize_symbol_name (name)
7481 char *name;
7482 {
7483 size_t len = strlen (name);
7484 if (len > 1 && name[len - 1] == '#')
7485 name[len - 1] = '\0';
7486 return name;
7487 }
7488
7489 /* Return true if idesc is a conditional branch instruction. This excludes
7490 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7491 because they always read/write resources regardless of the value of the
7492 qualifying predicate. br.ia must always use p0, and hence is always
7493 taken. Thus this function returns true for branches which can fall
7494 through, and which use no resources if they do fall through. */
7495
7496 static int
7497 is_conditional_branch (idesc)
7498 struct ia64_opcode *idesc;
7499 {
7500 /* br is a conditional branch. Everything that starts with br. except
7501 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7502 Everything that starts with brl is a conditional branch. */
7503 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7504 && (idesc->name[2] == '\0'
7505 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7506 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7507 || idesc->name[2] == 'l'
7508 /* br.cond, br.call, br.clr */
7509 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7510 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7511 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7512 }
7513
7514 /* Return whether the given opcode is a taken branch. If there's any doubt,
7515 returns zero. */
7516
7517 static int
7518 is_taken_branch (idesc)
7519 struct ia64_opcode *idesc;
7520 {
7521 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7522 || strncmp (idesc->name, "br.ia", 5) == 0);
7523 }
7524
7525 /* Return whether the given opcode is an interruption or rfi. If there's any
7526 doubt, returns zero. */
7527
7528 static int
7529 is_interruption_or_rfi (idesc)
7530 struct ia64_opcode *idesc;
7531 {
7532 if (strcmp (idesc->name, "rfi") == 0)
7533 return 1;
7534 return 0;
7535 }
7536
7537 /* Returns the index of the given dependency in the opcode's list of chks, or
7538 -1 if there is no dependency. */
7539
7540 static int
7541 depends_on (depind, idesc)
7542 int depind;
7543 struct ia64_opcode *idesc;
7544 {
7545 int i;
7546 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7547 for (i = 0; i < dep->nchks; i++)
7548 {
7549 if (depind == DEP (dep->chks[i]))
7550 return i;
7551 }
7552 return -1;
7553 }
7554
7555 /* Determine a set of specific resources used for a particular resource
7556 class. Returns the number of specific resources identified. For those
7557 cases which are not determinable statically, the resource returned is
7558 marked nonspecific.
7559
7560 Meanings of value in 'NOTE':
7561 1) only read/write when the register number is explicitly encoded in the
7562 insn.
7563 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7564 accesses CFM when qualifying predicate is in the rotating region.
7565 3) general register value is used to specify an indirect register; not
7566 determinable statically.
7567 4) only read the given resource when bits 7:0 of the indirect index
7568 register value do not match the register number of the resource; not
7569 determinable statically.
7570 5) all rules are implementation specific.
7571 6) only when both the index specified by the reader and the index specified
7572 by the writer have the same value in bits 63:61; not determinable
7573 statically.
7574 7) only access the specified resource when the corresponding mask bit is
7575 set
7576 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7577 only read when these insns reference FR2-31
7578 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7579 written when these insns write FR32-127
7580 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7581 instruction
7582 11) The target predicates are written independently of PR[qp], but source
7583 registers are only read if PR[qp] is true. Since the state of PR[qp]
7584 cannot statically be determined, all source registers are marked used.
7585 12) This insn only reads the specified predicate register when that
7586 register is the PR[qp].
7587 13) This reference to ld-c only applies to the GR whose value is loaded
7588 with data returned from memory, not the post-incremented address register.
7589 14) The RSE resource includes the implementation-specific RSE internal
7590 state resources. At least one (and possibly more) of these resources are
7591 read by each instruction listed in IC:rse-readers. At least one (and
7592 possibly more) of these resources are written by each insn listed in
7593 IC:rse-writers.
7594 15+16) Represents reserved instructions, which the assembler does not
7595 generate.
7596
7597 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7598 this code; there are no dependency violations based on memory access.
7599 */
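
/* For example, under note 1 an instruction such as "mov ar.k3 = r2" names
   its target AR explicitly, so a single specific resource is recorded;
   where the affected register cannot be determined statically, an entry is
   recorded with specific == 0, which conflicts with every register in the
   class.  */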
7600
7601 #define MAX_SPECS 256
7602 #define DV_CHK 1
7603 #define DV_REG 0
7604
7605 static int
7606 specify_resource (dep, idesc, type, specs, note, path)
7607 const struct ia64_dependency *dep;
7608 struct ia64_opcode *idesc;
7609 int type; /* is this a DV chk or a DV reg? */
7610 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7611 int note; /* resource note for this insn's usage */
7612 int path; /* which execution path to examine */
7613 {
7614 int count = 0;
7615 int i;
7616 int rsrc_write = 0;
7617 struct rsrc tmpl;
7618
7619 if (dep->mode == IA64_DV_WAW
7620 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7621 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7622 rsrc_write = 1;
7623
7624 /* template for any resources we identify */
7625 tmpl.dependency = dep;
7626 tmpl.note = note;
7627 tmpl.insn_srlz = tmpl.data_srlz = 0;
7628 tmpl.qp_regno = CURR_SLOT.qp_regno;
7629 tmpl.link_to_qp_branch = 1;
7630 tmpl.mem_offset.hint = 0;
7631 tmpl.specific = 1;
7632 tmpl.index = 0;
7633 tmpl.cmp_type = CMP_NONE;
7634
7635 #define UNHANDLED \
7636 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7637 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7638 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7639
7640 /* we don't need to track these */
7641 if (dep->semantics == IA64_DVS_NONE)
7642 return 0;
7643
7644 switch (dep->specifier)
7645 {
7646 case IA64_RS_AR_K:
7647 if (note == 1)
7648 {
7649 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7650 {
7651 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7652 if (regno >= 0 && regno <= 7)
7653 {
7654 specs[count] = tmpl;
7655 specs[count++].index = regno;
7656 }
7657 }
7658 }
7659 else if (note == 0)
7660 {
7661 for (i = 0; i < 8; i++)
7662 {
7663 specs[count] = tmpl;
7664 specs[count++].index = i;
7665 }
7666 }
7667 else
7668 {
7669 UNHANDLED;
7670 }
7671 break;
7672
7673 case IA64_RS_AR_UNAT:
7674 /* This is a mov =AR or mov AR= instruction. */
7675 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7676 {
7677 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7678 if (regno == AR_UNAT)
7679 {
7680 specs[count++] = tmpl;
7681 }
7682 }
7683 else
7684 {
7685 /* This is a spill/fill, or other instruction that modifies the
7686 unat register. */
7687
7688 /* Unless we can determine the specific bits used, mark the whole
7689 thing; bits 8:3 of the memory address indicate the bit used in
7690 UNAT. The .mem.offset hint may be used to eliminate a small
7691 subset of conflicts. */
7692 specs[count] = tmpl;
7693 if (md.mem_offset.hint)
7694 {
7695 if (md.debug_dv)
7696 fprintf (stderr, " Using hint for spill/fill\n");
7697 /* The index isn't actually used, just set it to something
7698 approximating the bit index. */
7699 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7700 specs[count].mem_offset.hint = 1;
7701 specs[count].mem_offset.offset = md.mem_offset.offset;
7702 specs[count++].mem_offset.base = md.mem_offset.base;
7703 }
7704 else
7705 {
7706 specs[count++].specific = 0;
7707 }
7708 }
7709 break;
7710
7711 case IA64_RS_AR:
7712 if (note == 1)
7713 {
7714 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7715 {
7716 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7717 if ((regno >= 8 && regno <= 15)
7718 || (regno >= 20 && regno <= 23)
7719 || (regno >= 31 && regno <= 39)
7720 || (regno >= 41 && regno <= 47)
7721 || (regno >= 67 && regno <= 111))
7722 {
7723 specs[count] = tmpl;
7724 specs[count++].index = regno;
7725 }
7726 }
7727 }
7728 else
7729 {
7730 UNHANDLED;
7731 }
7732 break;
7733
7734 case IA64_RS_ARb:
7735 if (note == 1)
7736 {
7737 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7738 {
7739 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7740 if ((regno >= 48 && regno <= 63)
7741 || (regno >= 112 && regno <= 127))
7742 {
7743 specs[count] = tmpl;
7744 specs[count++].index = regno;
7745 }
7746 }
7747 }
7748 else if (note == 0)
7749 {
7750 for (i = 48; i < 64; i++)
7751 {
7752 specs[count] = tmpl;
7753 specs[count++].index = i;
7754 }
7755 for (i = 112; i < 128; i++)
7756 {
7757 specs[count] = tmpl;
7758 specs[count++].index = i;
7759 }
7760 }
7761 else
7762 {
7763 UNHANDLED;
7764 }
7765 break;
7766
7767 case IA64_RS_BR:
7768 if (note != 1)
7769 {
7770 UNHANDLED;
7771 }
7772 else
7773 {
7774 if (rsrc_write)
7775 {
7776 for (i = 0; i < idesc->num_outputs; i++)
7777 if (idesc->operands[i] == IA64_OPND_B1
7778 || idesc->operands[i] == IA64_OPND_B2)
7779 {
7780 specs[count] = tmpl;
7781 specs[count++].index =
7782 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7783 }
7784 }
7785 else
7786 {
7787 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7788 if (idesc->operands[i] == IA64_OPND_B1
7789 || idesc->operands[i] == IA64_OPND_B2)
7790 {
7791 specs[count] = tmpl;
7792 specs[count++].index =
7793 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7794 }
7795 }
7796 }
7797 break;
7798
7799 case IA64_RS_CPUID: /* four or more registers */
7800 if (note == 3)
7801 {
7802 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7803 {
7804 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7805 if (regno >= 0 && regno < NELEMS (gr_values)
7806 && KNOWN (regno))
7807 {
7808 specs[count] = tmpl;
7809 specs[count++].index = gr_values[regno].value & 0xFF;
7810 }
7811 else
7812 {
7813 specs[count] = tmpl;
7814 specs[count++].specific = 0;
7815 }
7816 }
7817 }
7818 else
7819 {
7820 UNHANDLED;
7821 }
7822 break;
7823
7824 case IA64_RS_DBR: /* four or more registers */
7825 if (note == 3)
7826 {
7827 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7828 {
7829 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7830 if (regno >= 0 && regno < NELEMS (gr_values)
7831 && KNOWN (regno))
7832 {
7833 specs[count] = tmpl;
7834 specs[count++].index = gr_values[regno].value & 0xFF;
7835 }
7836 else
7837 {
7838 specs[count] = tmpl;
7839 specs[count++].specific = 0;
7840 }
7841 }
7842 }
7843 else if (note == 0 && !rsrc_write)
7844 {
7845 specs[count] = tmpl;
7846 specs[count++].specific = 0;
7847 }
7848 else
7849 {
7850 UNHANDLED;
7851 }
7852 break;
7853
7854 case IA64_RS_IBR: /* four or more registers */
7855 if (note == 3)
7856 {
7857 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7858 {
7859 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7860 if (regno >= 0 && regno < NELEMS (gr_values)
7861 && KNOWN (regno))
7862 {
7863 specs[count] = tmpl;
7864 specs[count++].index = gr_values[regno].value & 0xFF;
7865 }
7866 else
7867 {
7868 specs[count] = tmpl;
7869 specs[count++].specific = 0;
7870 }
7871 }
7872 }
7873 else
7874 {
7875 UNHANDLED;
7876 }
7877 break;
7878
7879 case IA64_RS_MSR:
7880 if (note == 5)
7881 {
7882 /* These are implementation specific. Force all references to
7883 conflict with all other references. */
7884 specs[count] = tmpl;
7885 specs[count++].specific = 0;
7886 }
7887 else
7888 {
7889 UNHANDLED;
7890 }
7891 break;
7892
7893 case IA64_RS_PKR: /* 16 or more registers */
7894 if (note == 3 || note == 4)
7895 {
7896 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7897 {
7898 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7899 if (regno >= 0 && regno < NELEMS (gr_values)
7900 && KNOWN (regno))
7901 {
7902 if (note == 3)
7903 {
7904 specs[count] = tmpl;
7905 specs[count++].index = gr_values[regno].value & 0xFF;
7906 }
7907 else
7908 for (i = 0; i < NELEMS (gr_values); i++)
7909 {
7910 /* Uses all registers *except* the one in R3. */
7911 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7912 {
7913 specs[count] = tmpl;
7914 specs[count++].index = i;
7915 }
7916 }
7917 }
7918 else
7919 {
7920 specs[count] = tmpl;
7921 specs[count++].specific = 0;
7922 }
7923 }
7924 }
7925 else if (note == 0)
7926 {
7927 /* probe et al. */
7928 specs[count] = tmpl;
7929 specs[count++].specific = 0;
7930 }
7931 break;
7932
7933 case IA64_RS_PMC: /* four or more registers */
7934 if (note == 3)
7935 {
7936 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7937 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7938
7939 {
7940 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7941 ? 1 : !rsrc_write);
7942 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7943 if (regno >= 0 && regno < NELEMS (gr_values)
7944 && KNOWN (regno))
7945 {
7946 specs[count] = tmpl;
7947 specs[count++].index = gr_values[regno].value & 0xFF;
7948 }
7949 else
7950 {
7951 specs[count] = tmpl;
7952 specs[count++].specific = 0;
7953 }
7954 }
7955 }
7956 else
7957 {
7958 UNHANDLED;
7959 }
7960 break;
7961
7962 case IA64_RS_PMD: /* four or more registers */
7963 if (note == 3)
7964 {
7965 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7966 {
7967 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7968 if (regno >= 0 && regno < NELEMS (gr_values)
7969 && KNOWN (regno))
7970 {
7971 specs[count] = tmpl;
7972 specs[count++].index = gr_values[regno].value & 0xFF;
7973 }
7974 else
7975 {
7976 specs[count] = tmpl;
7977 specs[count++].specific = 0;
7978 }
7979 }
7980 }
7981 else
7982 {
7983 UNHANDLED;
7984 }
7985 break;
7986
7987 case IA64_RS_RR: /* eight registers */
7988 if (note == 6)
7989 {
7990 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7991 {
7992 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7993 if (regno >= 0 && regno < NELEMS (gr_values)
7994 && KNOWN (regno))
7995 {
7996 specs[count] = tmpl;
7997 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7998 }
7999 else
8000 {
8001 specs[count] = tmpl;
8002 specs[count++].specific = 0;
8003 }
8004 }
8005 }
8006 else if (note == 0 && !rsrc_write)
8007 {
8008 specs[count] = tmpl;
8009 specs[count++].specific = 0;
8010 }
8011 else
8012 {
8013 UNHANDLED;
8014 }
8015 break;
8016
8017 case IA64_RS_CR_IRR:
8018 if (note == 0)
8019 {
8020 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8021 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8022 if (rsrc_write
8023 && idesc->operands[1] == IA64_OPND_CR3
8024 && regno == CR_IVR)
8025 {
8026 for (i = 0; i < 4; i++)
8027 {
8028 specs[count] = tmpl;
8029 specs[count++].index = CR_IRR0 + i;
8030 }
8031 }
8032 }
8033 else if (note == 1)
8034 {
8035 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8036 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8037 && regno >= CR_IRR0
8038 && regno <= CR_IRR3)
8039 {
8040 specs[count] = tmpl;
8041 specs[count++].index = regno;
8042 }
8043 }
8044 else
8045 {
8046 UNHANDLED;
8047 }
8048 break;
8049
8050 case IA64_RS_CR_LRR:
8051 if (note != 1)
8052 {
8053 UNHANDLED;
8054 }
8055 else
8056 {
8057 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8058 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8059 && (regno == CR_LRR0 || regno == CR_LRR1))
8060 {
8061 specs[count] = tmpl;
8062 specs[count++].index = regno;
8063 }
8064 }
8065 break;
8066
8067 case IA64_RS_CR:
8068 if (note == 1)
8069 {
8070 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8071 {
8072 specs[count] = tmpl;
8073 specs[count++].index =
8074 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8075 }
8076 }
8077 else
8078 {
8079 UNHANDLED;
8080 }
8081 break;
8082
8083 case IA64_RS_FR:
8084 case IA64_RS_FRb:
8085 if (note != 1)
8086 {
8087 UNHANDLED;
8088 }
8089 else if (rsrc_write)
8090 {
8091 if (dep->specifier == IA64_RS_FRb
8092 && idesc->operands[0] == IA64_OPND_F1)
8093 {
8094 specs[count] = tmpl;
8095 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8096 }
8097 }
8098 else
8099 {
8100 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8101 {
8102 if (idesc->operands[i] == IA64_OPND_F2
8103 || idesc->operands[i] == IA64_OPND_F3
8104 || idesc->operands[i] == IA64_OPND_F4)
8105 {
8106 specs[count] = tmpl;
8107 specs[count++].index =
8108 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8109 }
8110 }
8111 }
8112 break;
8113
8114 case IA64_RS_GR:
8115 if (note == 13)
8116 {
8117 /* This reference applies only to the GR whose value is loaded with
8118 data returned from memory. */
8119 specs[count] = tmpl;
8120 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8121 }
8122 else if (note == 1)
8123 {
8124 if (rsrc_write)
8125 {
8126 for (i = 0; i < idesc->num_outputs; i++)
8127 if (idesc->operands[i] == IA64_OPND_R1
8128 || idesc->operands[i] == IA64_OPND_R2
8129 || idesc->operands[i] == IA64_OPND_R3)
8130 {
8131 specs[count] = tmpl;
8132 specs[count++].index =
8133 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8134 }
8135 if (idesc->flags & IA64_OPCODE_POSTINC)
8136 for (i = 0; i < NELEMS (idesc->operands); i++)
8137 if (idesc->operands[i] == IA64_OPND_MR3)
8138 {
8139 specs[count] = tmpl;
8140 specs[count++].index =
8141 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8142 }
8143 }
8144 else
8145 {
8146 /* Look for anything that reads a GR. */
8147 for (i = 0; i < NELEMS (idesc->operands); i++)
8148 {
8149 if (idesc->operands[i] == IA64_OPND_MR3
8150 || idesc->operands[i] == IA64_OPND_CPUID_R3
8151 || idesc->operands[i] == IA64_OPND_DBR_R3
8152 || idesc->operands[i] == IA64_OPND_IBR_R3
8153 || idesc->operands[i] == IA64_OPND_MSR_R3
8154 || idesc->operands[i] == IA64_OPND_PKR_R3
8155 || idesc->operands[i] == IA64_OPND_PMC_R3
8156 || idesc->operands[i] == IA64_OPND_PMD_R3
8157 || idesc->operands[i] == IA64_OPND_RR_R3
8158 || ((i >= idesc->num_outputs)
8159 && (idesc->operands[i] == IA64_OPND_R1
8160 || idesc->operands[i] == IA64_OPND_R2
8161 || idesc->operands[i] == IA64_OPND_R3
8162 /* addl source register. */
8163 || idesc->operands[i] == IA64_OPND_R3_2)))
8164 {
8165 specs[count] = tmpl;
8166 specs[count++].index =
8167 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8168 }
8169 }
8170 }
8171 }
8172 else
8173 {
8174 UNHANDLED;
8175 }
8176 break;
8177
8178 /* This is the same as IA64_RS_PRr, except that the register range is
8179 from 1 - 15, and there are no rotating register reads/writes here. */
8180 case IA64_RS_PR:
8181 if (note == 0)
8182 {
8183 for (i = 1; i < 16; i++)
8184 {
8185 specs[count] = tmpl;
8186 specs[count++].index = i;
8187 }
8188 }
8189 else if (note == 7)
8190 {
8191 valueT mask = 0;
8192 /* Mark only those registers indicated by the mask. */
8193 if (rsrc_write)
8194 {
8195 mask = CURR_SLOT.opnd[2].X_add_number;
8196 for (i = 1; i < 16; i++)
8197 if (mask & ((valueT) 1 << i))
8198 {
8199 specs[count] = tmpl;
8200 specs[count++].index = i;
8201 }
8202 }
8203 else
8204 {
8205 UNHANDLED;
8206 }
8207 }
8208 else if (note == 11) /* note 11 implies note 1 as well */
8209 {
8210 if (rsrc_write)
8211 {
8212 for (i = 0; i < idesc->num_outputs; i++)
8213 {
8214 if (idesc->operands[i] == IA64_OPND_P1
8215 || idesc->operands[i] == IA64_OPND_P2)
8216 {
8217 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8218 if (regno >= 1 && regno < 16)
8219 {
8220 specs[count] = tmpl;
8221 specs[count++].index = regno;
8222 }
8223 }
8224 }
8225 }
8226 else
8227 {
8228 UNHANDLED;
8229 }
8230 }
8231 else if (note == 12)
8232 {
8233 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8234 {
8235 specs[count] = tmpl;
8236 specs[count++].index = CURR_SLOT.qp_regno;
8237 }
8238 }
8239 else if (note == 1)
8240 {
8241 if (rsrc_write)
8242 {
8243 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8244 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8245 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8246 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8247
8248 if ((idesc->operands[0] == IA64_OPND_P1
8249 || idesc->operands[0] == IA64_OPND_P2)
8250 && p1 >= 1 && p1 < 16)
8251 {
8252 specs[count] = tmpl;
8253 specs[count].cmp_type =
8254 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8255 specs[count++].index = p1;
8256 }
8257 if ((idesc->operands[1] == IA64_OPND_P1
8258 || idesc->operands[1] == IA64_OPND_P2)
8259 && p2 >= 1 && p2 < 16)
8260 {
8261 specs[count] = tmpl;
8262 specs[count].cmp_type =
8263 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8264 specs[count++].index = p2;
8265 }
8266 }
8267 else
8268 {
8269 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8270 {
8271 specs[count] = tmpl;
8272 specs[count++].index = CURR_SLOT.qp_regno;
8273 }
8274 if (idesc->operands[1] == IA64_OPND_PR)
8275 {
8276 for (i = 1; i < 16; i++)
8277 {
8278 specs[count] = tmpl;
8279 specs[count++].index = i;
8280 }
8281 }
8282 }
8283 }
8284 else
8285 {
8286 UNHANDLED;
8287 }
8288 break;
8289
8290 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8291 simplified cases of this. */
8292 case IA64_RS_PRr:
8293 if (note == 0)
8294 {
8295 for (i = 16; i < 63; i++)
8296 {
8297 specs[count] = tmpl;
8298 specs[count++].index = i;
8299 }
8300 }
8301 else if (note == 7)
8302 {
8303 valueT mask = 0;
8304 /* Mark only those registers indicated by the mask. */
8305 if (rsrc_write
8306 && idesc->operands[0] == IA64_OPND_PR)
8307 {
8308 mask = CURR_SLOT.opnd[2].X_add_number;
8309 if (mask & ((valueT) 1 << 16))
8310 for (i = 16; i < 63; i++)
8311 {
8312 specs[count] = tmpl;
8313 specs[count++].index = i;
8314 }
8315 }
8316 else if (rsrc_write
8317 && idesc->operands[0] == IA64_OPND_PR_ROT)
8318 {
8319 for (i = 16; i < 63; i++)
8320 {
8321 specs[count] = tmpl;
8322 specs[count++].index = i;
8323 }
8324 }
8325 else
8326 {
8327 UNHANDLED;
8328 }
8329 }
8330 else if (note == 11) /* note 11 implies note 1 as well */
8331 {
8332 if (rsrc_write)
8333 {
8334 for (i = 0; i < idesc->num_outputs; i++)
8335 {
8336 if (idesc->operands[i] == IA64_OPND_P1
8337 || idesc->operands[i] == IA64_OPND_P2)
8338 {
8339 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8340 if (regno >= 16 && regno < 63)
8341 {
8342 specs[count] = tmpl;
8343 specs[count++].index = regno;
8344 }
8345 }
8346 }
8347 }
8348 else
8349 {
8350 UNHANDLED;
8351 }
8352 }
8353 else if (note == 12)
8354 {
8355 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8356 {
8357 specs[count] = tmpl;
8358 specs[count++].index = CURR_SLOT.qp_regno;
8359 }
8360 }
8361 else if (note == 1)
8362 {
8363 if (rsrc_write)
8364 {
8365 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8366 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8367 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8368 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8369
8370 if ((idesc->operands[0] == IA64_OPND_P1
8371 || idesc->operands[0] == IA64_OPND_P2)
8372 && p1 >= 16 && p1 < 63)
8373 {
8374 specs[count] = tmpl;
8375 specs[count].cmp_type =
8376 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8377 specs[count++].index = p1;
8378 }
8379 if ((idesc->operands[1] == IA64_OPND_P1
8380 || idesc->operands[1] == IA64_OPND_P2)
8381 && p2 >= 16 && p2 < 63)
8382 {
8383 specs[count] = tmpl;
8384 specs[count].cmp_type =
8385 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8386 specs[count++].index = p2;
8387 }
8388 }
8389 else
8390 {
8391 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8392 {
8393 specs[count] = tmpl;
8394 specs[count++].index = CURR_SLOT.qp_regno;
8395 }
8396 if (idesc->operands[1] == IA64_OPND_PR)
8397 {
8398 for (i = 16; i < 63; i++)
8399 {
8400 specs[count] = tmpl;
8401 specs[count++].index = i;
8402 }
8403 }
8404 }
8405 }
8406 else
8407 {
8408 UNHANDLED;
8409 }
8410 break;
8411
8412 case IA64_RS_PSR:
8413 /* Verify that the instruction is using the PSR bit indicated in
8414 dep->regindex. */
8415 if (note == 0)
8416 {
8417 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8418 {
8419 if (dep->regindex < 6)
8420 {
8421 specs[count++] = tmpl;
8422 }
8423 }
8424 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8425 {
8426 if (dep->regindex < 32
8427 || dep->regindex == 35
8428 || dep->regindex == 36
8429 || (!rsrc_write && dep->regindex == PSR_CPL))
8430 {
8431 specs[count++] = tmpl;
8432 }
8433 }
8434 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8435 {
8436 if (dep->regindex < 32
8437 || dep->regindex == 35
8438 || dep->regindex == 36
8439 || (rsrc_write && dep->regindex == PSR_CPL))
8440 {
8441 specs[count++] = tmpl;
8442 }
8443 }
8444 else
8445 {
8446 /* Several PSR bits have very specific dependencies. */
8447 switch (dep->regindex)
8448 {
8449 default:
8450 specs[count++] = tmpl;
8451 break;
8452 case PSR_IC:
8453 if (rsrc_write)
8454 {
8455 specs[count++] = tmpl;
8456 }
8457 else
8458 {
8459 /* Only certain CR accesses use PSR.ic */
8460 if (idesc->operands[0] == IA64_OPND_CR3
8461 || idesc->operands[1] == IA64_OPND_CR3)
8462 {
8463 int index =
8464 ((idesc->operands[0] == IA64_OPND_CR3)
8465 ? 0 : 1);
8466 int regno =
8467 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8468
8469 switch (regno)
8470 {
8471 default:
8472 break;
8473 case CR_ITIR:
8474 case CR_IFS:
8475 case CR_IIM:
8476 case CR_IIP:
8477 case CR_IPSR:
8478 case CR_ISR:
8479 case CR_IFA:
8480 case CR_IHA:
8481 case CR_IIPA:
8482 specs[count++] = tmpl;
8483 break;
8484 }
8485 }
8486 }
8487 break;
8488 case PSR_CPL:
8489 if (rsrc_write)
8490 {
8491 specs[count++] = tmpl;
8492 }
8493 else
8494 {
8495 /* Only some AR accesses use cpl */
8496 if (idesc->operands[0] == IA64_OPND_AR3
8497 || idesc->operands[1] == IA64_OPND_AR3)
8498 {
8499 int index =
8500 ((idesc->operands[0] == IA64_OPND_AR3)
8501 ? 0 : 1);
8502 int regno =
8503 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8504
8505 if (regno == AR_ITC
8506 || (index == 0
8507 && (regno == AR_ITC
8508 || regno == AR_RSC
8509 || (regno >= AR_K0
8510 && regno <= AR_K7))))
8511 {
8512 specs[count++] = tmpl;
8513 }
8514 }
8515 else
8516 {
8517 specs[count++] = tmpl;
8518 }
8519 break;
8520 }
8521 }
8522 }
8523 }
8524 else if (note == 7)
8525 {
8526 valueT mask = 0;
8527 if (idesc->operands[0] == IA64_OPND_IMMU24)
8528 {
8529 mask = CURR_SLOT.opnd[0].X_add_number;
8530 }
8531 else
8532 {
8533 UNHANDLED;
8534 }
8535 if (mask & ((valueT) 1 << dep->regindex))
8536 {
8537 specs[count++] = tmpl;
8538 }
8539 }
8540 else if (note == 8)
8541 {
8542 int min = dep->regindex == PSR_DFL ? 2 : 32;
8543 int max = dep->regindex == PSR_DFL ? 31 : 127;
8544 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8545 for (i = 0; i < NELEMS (idesc->operands); i++)
8546 {
8547 if (idesc->operands[i] == IA64_OPND_F1
8548 || idesc->operands[i] == IA64_OPND_F2
8549 || idesc->operands[i] == IA64_OPND_F3
8550 || idesc->operands[i] == IA64_OPND_F4)
8551 {
8552 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8553 if (reg >= min && reg <= max)
8554 {
8555 specs[count++] = tmpl;
8556 }
8557 }
8558 }
8559 }
8560 else if (note == 9)
8561 {
8562 int min = dep->regindex == PSR_MFL ? 2 : 32;
8563 int max = dep->regindex == PSR_MFL ? 31 : 127;
8564 /* mfh is read on writes to FR32-127; mfl is read on writes to
8565 FR2-31 */
8566 for (i = 0; i < idesc->num_outputs; i++)
8567 {
8568 if (idesc->operands[i] == IA64_OPND_F1)
8569 {
8570 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8571 if (reg >= min && reg <= max)
8572 {
8573 specs[count++] = tmpl;
8574 }
8575 }
8576 }
8577 }
8578 else if (note == 10)
8579 {
8580 for (i = 0; i < NELEMS (idesc->operands); i++)
8581 {
8582 if (idesc->operands[i] == IA64_OPND_R1
8583 || idesc->operands[i] == IA64_OPND_R2
8584 || idesc->operands[i] == IA64_OPND_R3)
8585 {
8586 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8587 if (regno >= 16 && regno <= 31)
8588 {
8589 specs[count++] = tmpl;
8590 }
8591 }
8592 }
8593 }
8594 else
8595 {
8596 UNHANDLED;
8597 }
8598 break;
8599
8600 case IA64_RS_AR_FPSR:
8601 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8602 {
8603 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8604 if (regno == AR_FPSR)
8605 {
8606 specs[count++] = tmpl;
8607 }
8608 }
8609 else
8610 {
8611 specs[count++] = tmpl;
8612 }
8613 break;
8614
8615 case IA64_RS_ARX:
8616 /* Handle all AR[REG] resources */
8617 if (note == 0 || note == 1)
8618 {
8619 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8620 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8621 && regno == dep->regindex)
8622 {
8623 specs[count++] = tmpl;
8624 }
8625 /* other AR[REG] resources may be affected by AR accesses */
8626 else if (idesc->operands[0] == IA64_OPND_AR3)
8627 {
8628 /* AR[] writes */
8629 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8630 switch (dep->regindex)
8631 {
8632 default:
8633 break;
8634 case AR_BSP:
8635 case AR_RNAT:
8636 if (regno == AR_BSPSTORE)
8637 {
8638 specs[count++] = tmpl;
8639 }
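/* FALLTHRU */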
8640 case AR_RSC:
8641 if (!rsrc_write &&
8642 (regno == AR_BSPSTORE
8643 || regno == AR_RNAT))
8644 {
8645 specs[count++] = tmpl;
8646 }
8647 break;
8648 }
8649 }
8650 else if (idesc->operands[1] == IA64_OPND_AR3)
8651 {
8652 /* AR[] reads */
8653 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8654 switch (dep->regindex)
8655 {
8656 default:
8657 break;
8658 case AR_RSC:
8659 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8660 {
8661 specs[count++] = tmpl;
8662 }
8663 break;
8664 }
8665 }
8666 else
8667 {
8668 specs[count++] = tmpl;
8669 }
8670 }
8671 else
8672 {
8673 UNHANDLED;
8674 }
8675 break;
8676
8677 case IA64_RS_CRX:
8678 /* Handle all CR[REG] resources */
8679 if (note == 0 || note == 1)
8680 {
8681 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8682 {
8683 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8684 if (regno == dep->regindex)
8685 {
8686 specs[count++] = tmpl;
8687 }
8688 else if (!rsrc_write)
8689 {
8690 /* Reads from CR[IVR] affect other resources. */
8691 if (regno == CR_IVR)
8692 {
8693 if ((dep->regindex >= CR_IRR0
8694 && dep->regindex <= CR_IRR3)
8695 || dep->regindex == CR_TPR)
8696 {
8697 specs[count++] = tmpl;
8698 }
8699 }
8700 }
8701 }
8702 else
8703 {
8704 specs[count++] = tmpl;
8705 }
8706 }
8707 else
8708 {
8709 UNHANDLED;
8710 }
8711 break;
8712
8713 case IA64_RS_INSERVICE:
8714 /* look for write of EOI (67) or read of IVR (65) */
8715 if ((idesc->operands[0] == IA64_OPND_CR3
8716 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8717 || (idesc->operands[1] == IA64_OPND_CR3
8718 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8719 {
8720 specs[count++] = tmpl;
8721 }
8722 break;
8723
8724 case IA64_RS_GR0:
8725 if (note == 1)
8726 {
8727 specs[count++] = tmpl;
8728 }
8729 else
8730 {
8731 UNHANDLED;
8732 }
8733 break;
8734
8735 case IA64_RS_CFM:
8736 if (note != 2)
8737 {
8738 specs[count++] = tmpl;
8739 }
8740 else
8741 {
8742 /* Check if any of the registers accessed are in the rotating region.
8743 mov to/from pr accesses CFM only when qp_regno is in the rotating
8744 region */
8745 for (i = 0; i < NELEMS (idesc->operands); i++)
8746 {
8747 if (idesc->operands[i] == IA64_OPND_R1
8748 || idesc->operands[i] == IA64_OPND_R2
8749 || idesc->operands[i] == IA64_OPND_R3)
8750 {
8751 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8752 /* Assumes that md.rot.num_regs is always valid */
8753 if (md.rot.num_regs > 0
8754 && num > 31
8755 && num < 31 + md.rot.num_regs)
8756 {
8757 specs[count] = tmpl;
8758 specs[count++].specific = 0;
8759 }
8760 }
8761 else if (idesc->operands[i] == IA64_OPND_F1
8762 || idesc->operands[i] == IA64_OPND_F2
8763 || idesc->operands[i] == IA64_OPND_F3
8764 || idesc->operands[i] == IA64_OPND_F4)
8765 {
8766 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8767 if (num > 31)
8768 {
8769 specs[count] = tmpl;
8770 specs[count++].specific = 0;
8771 }
8772 }
8773 else if (idesc->operands[i] == IA64_OPND_P1
8774 || idesc->operands[i] == IA64_OPND_P2)
8775 {
8776 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8777 if (num > 15)
8778 {
8779 specs[count] = tmpl;
8780 specs[count++].specific = 0;
8781 }
8782 }
8783 }
8784 if (CURR_SLOT.qp_regno > 15)
8785 {
8786 specs[count] = tmpl;
8787 specs[count++].specific = 0;
8788 }
8789 }
8790 break;
8791
8792 /* This is the same as IA64_RS_PRr, except simplified to account for
8793 the fact that there is only one register. */
8794 case IA64_RS_PR63:
8795 if (note == 0)
8796 {
8797 specs[count++] = tmpl;
8798 }
8799 else if (note == 7)
8800 {
8801 valueT mask = 0;
8802 if (idesc->operands[2] == IA64_OPND_IMM17)
8803 mask = CURR_SLOT.opnd[2].X_add_number;
8804 if (mask & ((valueT) 1 << 63))
8805 specs[count++] = tmpl;
8806 }
8807 else if (note == 11)
8808 {
8809 if ((idesc->operands[0] == IA64_OPND_P1
8810 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8811 || (idesc->operands[1] == IA64_OPND_P2
8812 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8813 {
8814 specs[count++] = tmpl;
8815 }
8816 }
8817 else if (note == 12)
8818 {
8819 if (CURR_SLOT.qp_regno == 63)
8820 {
8821 specs[count++] = tmpl;
8822 }
8823 }
8824 else if (note == 1)
8825 {
8826 if (rsrc_write)
8827 {
8828 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8829 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8830 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8831 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8832
8833 if (p1 == 63
8834 && (idesc->operands[0] == IA64_OPND_P1
8835 || idesc->operands[0] == IA64_OPND_P2))
8836 {
8837 specs[count] = tmpl;
8838 specs[count++].cmp_type =
8839 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8840 }
8841 if (p2 == 63
8842 && (idesc->operands[1] == IA64_OPND_P1
8843 || idesc->operands[1] == IA64_OPND_P2))
8844 {
8845 specs[count] = tmpl;
8846 specs[count++].cmp_type =
8847 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8848 }
8849 }
8850 else
8851 {
8852 if (CURR_SLOT.qp_regno == 63)
8853 {
8854 specs[count++] = tmpl;
8855 }
8856 }
8857 }
8858 else
8859 {
8860 UNHANDLED;
8861 }
8862 break;
8863
8864 case IA64_RS_RSE:
8865 /* FIXME we can identify some individual RSE written resources, but RSE
8866 read resources have not yet been completely identified, so for now
8867 treat RSE as a single resource */
8868 if (strncmp (idesc->name, "mov", 3) == 0)
8869 {
8870 if (rsrc_write)
8871 {
8872 if (idesc->operands[0] == IA64_OPND_AR3
8873 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8874 {
8875 specs[count] = tmpl;
8876 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8877 }
8878 }
8879 else
8880 {
8881 if (idesc->operands[0] == IA64_OPND_AR3)
8882 {
8883 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8884 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8885 {
8886 specs[count++] = tmpl;
8887 }
8888 }
8889 else if (idesc->operands[1] == IA64_OPND_AR3)
8890 {
8891 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8892 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8893 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8894 {
8895 specs[count++] = tmpl;
8896 }
8897 }
8898 }
8899 }
8900 else
8901 {
8902 specs[count++] = tmpl;
8903 }
8904 break;
8905
8906 case IA64_RS_ANY:
8907 /* FIXME -- do any of these need to be non-specific? */
8908 specs[count++] = tmpl;
8909 break;
8910
8911 default:
8912 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8913 break;
8914 }
8915
8916 return count;
8917 }
8918
8919 /* Clear branch flags on marked resources. This breaks the link between the
8920 QP of the marking instruction and a subsequent branch on the same QP. */
8921
8922 static void
8923 clear_qp_branch_flag (mask)
8924 valueT mask;
8925 {
8926 int i;
8927 for (i = 0; i < regdepslen; i++)
8928 {
8929 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8930 if ((bit & mask) != 0)
8931 {
8932 regdeps[i].link_to_qp_branch = 0;
8933 }
8934 }
8935 }
8936
8937 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
8938 any mutexes which contain one of the PRs and create new ones when
8939 needed. */
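/* For example, if {p1,p2,p3} is currently mutex on this path and a new
   compare makes {p1,p2} mutex, the {p1,p2} subset is registered as a new
   mutex and the old relation is reduced to just {p3}, which is then
   dropped since a single PR is not a meaningful mutex.  */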
8940
8941 static int
8942 update_qp_mutex (valueT mask)
8943 {
8944 int i;
8945 int add = 0;
8946
8947 i = 0;
8948 while (i < qp_mutexeslen)
8949 {
8950 if ((qp_mutexes[i].prmask & mask) != 0)
8951 {
8952 /* If it destroys and creates the same mutex, do nothing. */
8953 if (qp_mutexes[i].prmask == mask
8954 && qp_mutexes[i].path == md.path)
8955 {
8956 i++;
8957 add = -1;
8958 }
8959 else
8960 {
8961 int keep = 0;
8962
8963 if (md.debug_dv)
8964 {
8965 fprintf (stderr, " Clearing mutex relation");
8966 print_prmask (qp_mutexes[i].prmask);
8967 fprintf (stderr, "\n");
8968 }
8969
8970 /* Deal with an old mutex containing 3 or more PRs only if
8971 the new mutex is on the same execution path as it.
8972
8973 FIXME: The 3+ mutex support is incomplete.
8974 dot_pred_rel () may be a better place to fix it. */
8975 if (qp_mutexes[i].path == md.path)
8976 {
8977 /* If it is a proper subset of the mutex, create a
8978 new mutex. */
8979 if (add == 0
8980 && (qp_mutexes[i].prmask & mask) == mask)
8981 add = 1;
8982
8983 qp_mutexes[i].prmask &= ~mask;
8984 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
8985 {
8986 /* Modify the mutex if there is more than one
8987 PR left. */
8988 keep = 1;
8989 i++;
8990 }
8991 }
8992
8993 if (keep == 0)
8994 /* Remove the mutex. */
8995 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8996 }
8997 }
8998 else
8999 ++i;
9000 }
9001
9002 if (add == 1)
9003 add_qp_mutex (mask);
9004
9005 return add;
9006 }
9007
9008 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9009
9010 Any change to a PR clears the mutex relations which include that PR. */
9011
9012 static void
9013 clear_qp_mutex (mask)
9014 valueT mask;
9015 {
9016 int i;
9017
9018 i = 0;
9019 while (i < qp_mutexeslen)
9020 {
9021 if ((qp_mutexes[i].prmask & mask) != 0)
9022 {
9023 if (md.debug_dv)
9024 {
9025 fprintf (stderr, " Clearing mutex relation");
9026 print_prmask (qp_mutexes[i].prmask);
9027 fprintf (stderr, "\n");
9028 }
9029 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9030 }
9031 else
9032 ++i;
9033 }
9034 }
9035
9036 /* Clear implies relations which contain PRs in the given masks.
9037 P1_MASK indicates the source of the implies relation, while P2_MASK
9038 indicates the implied PR. */
9039
9040 static void
9041 clear_qp_implies (p1_mask, p2_mask)
9042 valueT p1_mask;
9043 valueT p2_mask;
9044 {
9045 int i;
9046
9047 i = 0;
9048 while (i < qp_implieslen)
9049 {
9050 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9051 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9052 {
9053 if (md.debug_dv)
9054 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9055 qp_implies[i].p1, qp_implies[i].p2);
9056 qp_implies[i] = qp_implies[--qp_implieslen];
9057 }
9058 else
9059 ++i;
9060 }
9061 }
9062
9063 /* Add the PRs specified to the list of implied relations. */
9064
9065 static void
9066 add_qp_imply (p1, p2)
9067 int p1, p2;
9068 {
9069 valueT mask;
9070 valueT bit;
9071 int i;
9072
9073 /* p0 is not meaningful here. */
9074 if (p1 == 0 || p2 == 0)
9075 abort ();
9076
9077 if (p1 == p2)
9078 return;
9079
9080 /* If it exists already, ignore it. */
9081 for (i = 0; i < qp_implieslen; i++)
9082 {
9083 if (qp_implies[i].p1 == p1
9084 && qp_implies[i].p2 == p2
9085 && qp_implies[i].path == md.path
9086 && !qp_implies[i].p2_branched)
9087 return;
9088 }
9089
9090 if (qp_implieslen == qp_impliestotlen)
9091 {
9092 qp_impliestotlen += 20;
9093 qp_implies = (struct qp_imply *)
9094 xrealloc ((void *) qp_implies,
9095 qp_impliestotlen * sizeof (struct qp_imply));
9096 }
9097 if (md.debug_dv)
9098 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9099 qp_implies[qp_implieslen].p1 = p1;
9100 qp_implies[qp_implieslen].p2 = p2;
9101 qp_implies[qp_implieslen].path = md.path;
9102 qp_implies[qp_implieslen++].p2_branched = 0;
9103
9104 /* Add in the implied transitive relations; for everything that p2 implies,
9105 make p1 imply that, too; for everything that implies p1, make it imply p2
9106 as well. */
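/* For example, if PR3 already implies PR4, then registering PR2 -> PR3
   here also registers PR2 -> PR4 via the recursive calls below.  */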
9107 for (i = 0; i < qp_implieslen; i++)
9108 {
9109 if (qp_implies[i].p1 == p2)
9110 add_qp_imply (p1, qp_implies[i].p2);
9111 if (qp_implies[i].p2 == p1)
9112 add_qp_imply (qp_implies[i].p1, p2);
9113 }
9114 /* Add in mutex relations implied by this implies relation; for each mutex
9115 relation containing p2, duplicate it and replace p2 with p1. */
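/* For example, if PR4 and PR5 are mutex and we are adding "PR3 implies
   PR4", then PR3 and PR5 become mutex too: PR3 being true forces PR4
   true, which in turn forces PR5 false.  */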
9116 bit = (valueT) 1 << p1;
9117 mask = (valueT) 1 << p2;
9118 for (i = 0; i < qp_mutexeslen; i++)
9119 {
9120 if (qp_mutexes[i].prmask & mask)
9121 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9122 }
9123 }
9124
9125 /* Add the PRs specified in the mask to the mutex list; this means that only
9126 one of the PRs can be true at any time. PR0 should never be included in
9127 the mask. */
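/* For example, an unconditional 'cmp.eq p1, p2 = r2, r3' writes exactly
   one of p1/p2 with 1 and the other with 0, so {p1, p2} can be registered
   as mutex.  */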
9128
9129 static void
9130 add_qp_mutex (mask)
9131 valueT mask;
9132 {
9133 if (mask & 0x1)
9134 abort ();
9135
9136 if (qp_mutexeslen == qp_mutexestotlen)
9137 {
9138 qp_mutexestotlen += 20;
9139 qp_mutexes = (struct qpmutex *)
9140 xrealloc ((void *) qp_mutexes,
9141 qp_mutexestotlen * sizeof (struct qpmutex));
9142 }
9143 if (md.debug_dv)
9144 {
9145 fprintf (stderr, " Registering mutex on");
9146 print_prmask (mask);
9147 fprintf (stderr, "\n");
9148 }
9149 qp_mutexes[qp_mutexeslen].path = md.path;
9150 qp_mutexes[qp_mutexeslen++].prmask = mask;
9151 }
9152
9153 static int
9154 has_suffix_p (name, suffix)
9155 const char *name;
9156 const char *suffix;
9157 {
9158 size_t namelen = strlen (name);
9159 size_t sufflen = strlen (suffix);
9160
9161 if (namelen <= sufflen)
9162 return 0;
9163 return strcmp (name + namelen - sufflen, suffix) == 0;
9164 }
9165
9166 static void
9167 clear_register_values ()
9168 {
9169 int i;
9170 if (md.debug_dv)
9171 fprintf (stderr, " Clearing register values\n");
9172 for (i = 1; i < NELEMS (gr_values); i++)
9173 gr_values[i].known = 0;
9174 }
9175
9176 /* Keep track of register values/changes which affect DV tracking.
9177
9178 optimization note: should add a flag to classes of insns where otherwise we
9179 have to examine a group of strings to identify them. */
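/* For example, once 'mov r4 = 7' is seen, gr4 is known to be 7, so a later
   indirect access such as 'mov r2 = pmc[r4]' can (while that value is still
   known) be checked against PMC[7] specifically rather than conservatively
   against every PMC register.  */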
9180
9181 static void
9182 note_register_values (idesc)
9183 struct ia64_opcode *idesc;
9184 {
9185 valueT qp_changemask = 0;
9186 int i;
9187
9188 /* Invalidate values for registers being written to. */
9189 for (i = 0; i < idesc->num_outputs; i++)
9190 {
9191 if (idesc->operands[i] == IA64_OPND_R1
9192 || idesc->operands[i] == IA64_OPND_R2
9193 || idesc->operands[i] == IA64_OPND_R3)
9194 {
9195 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9196 if (regno > 0 && regno < NELEMS (gr_values))
9197 gr_values[regno].known = 0;
9198 }
9199 else if (idesc->operands[i] == IA64_OPND_R3_2)
9200 {
9201 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9202 if (regno > 0 && regno < 4)
9203 gr_values[regno].known = 0;
9204 }
9205 else if (idesc->operands[i] == IA64_OPND_P1
9206 || idesc->operands[i] == IA64_OPND_P2)
9207 {
9208 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9209 qp_changemask |= (valueT) 1 << regno;
9210 }
9211 else if (idesc->operands[i] == IA64_OPND_PR)
9212 {
9213 if (idesc->operands[2] & (valueT) 0x10000)
9214 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9215 else
9216 qp_changemask = idesc->operands[2];
9217 break;
9218 }
9219 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9220 {
9221 if (idesc->operands[1] & ((valueT) 1 << 43))
9222 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9223 else
9224 qp_changemask = idesc->operands[1];
9225 qp_changemask &= ~(valueT) 0xFFFF;
9226 break;
9227 }
9228 }
9229
9230 /* Always clear qp branch flags on any PR change. */
9231 /* FIXME there may be exceptions for certain compares. */
9232 clear_qp_branch_flag (qp_changemask);
9233
9234 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9235 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9236 {
9237 qp_changemask |= ~(valueT) 0xFFFF;
9238 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9239 {
9240 for (i = 32; i < 32 + md.rot.num_regs; i++)
9241 gr_values[i].known = 0;
9242 }
9243 clear_qp_mutex (qp_changemask);
9244 clear_qp_implies (qp_changemask, qp_changemask);
9245 }
9246 /* After a call, all register values are undefined, except those marked
9247 as "safe". */
9248 else if (strncmp (idesc->name, "br.call", 7) == 0
9249 || strncmp (idesc->name, "brl.call", 8) == 0)
9250 {
9251 /* FIXME keep GR values which are marked as "safe_across_calls" */
9252 clear_register_values ();
9253 clear_qp_mutex (~qp_safe_across_calls);
9254 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9255 clear_qp_branch_flag (~qp_safe_across_calls);
9256 }
9257 else if (is_interruption_or_rfi (idesc)
9258 || is_taken_branch (idesc))
9259 {
9260 clear_register_values ();
9261 clear_qp_mutex (~(valueT) 0);
9262 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9263 }
9264 /* Look for mutex and implies relations. */
9265 else if ((idesc->operands[0] == IA64_OPND_P1
9266 || idesc->operands[0] == IA64_OPND_P2)
9267 && (idesc->operands[1] == IA64_OPND_P1
9268 || idesc->operands[1] == IA64_OPND_P2))
9269 {
9270 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9271 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9272 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9273 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9274
9275 /* If both PRs are PR0, we can't really do anything. */
9276 if (p1 == 0 && p2 == 0)
9277 {
9278 if (md.debug_dv)
9279 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9280 }
9281 /* In general, clear mutexes and implies which include P1 or P2,
9282 with the following exceptions. */
9283 else if (has_suffix_p (idesc->name, ".or.andcm")
9284 || has_suffix_p (idesc->name, ".and.orcm"))
9285 {
9286 clear_qp_implies (p2mask, p1mask);
9287 }
9288 else if (has_suffix_p (idesc->name, ".andcm")
9289 || has_suffix_p (idesc->name, ".and"))
9290 {
9291 clear_qp_implies (0, p1mask | p2mask);
9292 }
9293 else if (has_suffix_p (idesc->name, ".orcm")
9294 || has_suffix_p (idesc->name, ".or"))
9295 {
9296 clear_qp_mutex (p1mask | p2mask);
9297 clear_qp_implies (p1mask | p2mask, 0);
9298 }
9299 else
9300 {
9301 int added = 0;
9302
9303 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9304
9305 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9306 if (p1 == 0 || p2 == 0)
9307 clear_qp_mutex (p1mask | p2mask);
9308 else
9309 added = update_qp_mutex (p1mask | p2mask);
9310
9311 if (CURR_SLOT.qp_regno == 0
9312 || has_suffix_p (idesc->name, ".unc"))
9313 {
9314 if (added == 0 && p1 && p2)
9315 add_qp_mutex (p1mask | p2mask);
9316 if (CURR_SLOT.qp_regno != 0)
9317 {
9318 if (p1)
9319 add_qp_imply (p1, CURR_SLOT.qp_regno);
9320 if (p2)
9321 add_qp_imply (p2, CURR_SLOT.qp_regno);
9322 }
9323 }
9324 }
9325 }
9326 /* Look for mov imm insns into GRs. */
9327 else if (idesc->operands[0] == IA64_OPND_R1
9328 && (idesc->operands[1] == IA64_OPND_IMM22
9329 || idesc->operands[1] == IA64_OPND_IMMU64)
9330 && (strcmp (idesc->name, "mov") == 0
9331 || strcmp (idesc->name, "movl") == 0))
9332 {
9333 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9334 if (regno > 0 && regno < NELEMS (gr_values))
9335 {
9336 gr_values[regno].known = 1;
9337 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9338 gr_values[regno].path = md.path;
9339 if (md.debug_dv)
9340 {
9341 fprintf (stderr, " Know gr%d = ", regno);
9342 fprintf_vma (stderr, gr_values[regno].value);
9343 fputs ("\n", stderr);
9344 }
9345 }
9346 }
9347 else
9348 {
9349 clear_qp_mutex (qp_changemask);
9350 clear_qp_implies (qp_changemask, qp_changemask);
9351 }
9352 }
9353
9354 /* Return whether the given predicate registers are currently mutex. */
9355
9356 static int
9357 qp_mutex (p1, p2, path)
9358 int p1;
9359 int p2;
9360 int path;
9361 {
9362 int i;
9363 valueT mask;
9364
9365 if (p1 != p2)
9366 {
9367 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9368 for (i = 0; i < qp_mutexeslen; i++)
9369 {
9370 if (qp_mutexes[i].path >= path
9371 && (qp_mutexes[i].prmask & mask) == mask)
9372 return 1;
9373 }
9374 }
9375 return 0;
9376 }
9377
9378 /* Return whether the given resource is in the given insn's list of chks.
9379 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9380 conflict. */
9381
9382 static int
9383 resources_match (rs, idesc, note, qp_regno, path)
9384 struct rsrc *rs;
9385 struct ia64_opcode *idesc;
9386 int note;
9387 int qp_regno;
9388 int path;
9389 {
9390 struct rsrc specs[MAX_SPECS];
9391 int count;
9392
9393 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9394 we don't need to check. One exception is note 11, which indicates that
9395 target predicates are written regardless of PR[qp]. */
9396 if (qp_mutex (rs->qp_regno, qp_regno, path)
9397 && note != 11)
9398 return 0;
9399
9400 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9401 while (count-- > 0)
9402 {
9403 /* UNAT checking is a bit more specific than other resources */
9404 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9405 && specs[count].mem_offset.hint
9406 && rs->mem_offset.hint)
9407 {
9408 if (rs->mem_offset.base == specs[count].mem_offset.base)
9409 {
9410 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9411 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9412 return 1;
9413 else
9414 continue;
9415 }
9416 }
9417
9418 /* Skip apparent PR write conflicts where both writes are an AND or both
9419 writes are an OR. */
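/* For example, '(p1) cmp.eq.or p5, p0 = r2, r3' and '(p2) cmp.eq.or
   p5, p0 = r4, r5' may both write p5 in the same instruction group,
   because both writes are OR-form parallel compares.  */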
9420 if (rs->dependency->specifier == IA64_RS_PR
9421 || rs->dependency->specifier == IA64_RS_PRr
9422 || rs->dependency->specifier == IA64_RS_PR63)
9423 {
9424 if (specs[count].cmp_type != CMP_NONE
9425 && specs[count].cmp_type == rs->cmp_type)
9426 {
9427 if (md.debug_dv)
9428 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9429 dv_mode[rs->dependency->mode],
9430 rs->dependency->specifier != IA64_RS_PR63 ?
9431 specs[count].index : 63);
9432 continue;
9433 }
9434 if (md.debug_dv)
9435 fprintf (stderr,
9436 " %s on parallel compare conflict %s vs %s on PR%d\n",
9437 dv_mode[rs->dependency->mode],
9438 dv_cmp_type[rs->cmp_type],
9439 dv_cmp_type[specs[count].cmp_type],
9440 rs->dependency->specifier != IA64_RS_PR63 ?
9441 specs[count].index : 63);
9442
9443 }
9444
9445 /* If either resource is not specific, conservatively assume a conflict
9446 */
9447 if (!specs[count].specific || !rs->specific)
9448 return 2;
9449 else if (specs[count].index == rs->index)
9450 return 1;
9451 }
9452 #if 0
9453 if (md.debug_dv)
9454 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
9455 #endif
9456
9457 return 0;
9458 }
9459
9460 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
9461 insert a stop to create the break. Update all resource dependencies
9462 appropriately. If QP_REGNO is non-zero, only apply the break to resources
9463 which use the same QP_REGNO and have the link_to_qp_branch flag set.
9464 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
9465 instruction. */
9466
9467 static void
9468 insn_group_break (insert_stop, qp_regno, save_current)
9469 int insert_stop;
9470 int qp_regno;
9471 int save_current;
9472 {
9473 int i;
9474
9475 if (insert_stop && md.num_slots_in_use > 0)
9476 PREV_SLOT.end_of_insn_group = 1;
9477
9478 if (md.debug_dv)
9479 {
9480 fprintf (stderr, " Insn group break%s",
9481 (insert_stop ? " (w/stop)" : ""));
9482 if (qp_regno != 0)
9483 fprintf (stderr, " effective for QP=%d", qp_regno);
9484 fprintf (stderr, "\n");
9485 }
9486
9487 i = 0;
9488 while (i < regdepslen)
9489 {
9490 const struct ia64_dependency *dep = regdeps[i].dependency;
9491
9492 if (qp_regno != 0
9493 && regdeps[i].qp_regno != qp_regno)
9494 {
9495 ++i;
9496 continue;
9497 }
9498
9499 if (save_current
9500 && CURR_SLOT.src_file == regdeps[i].file
9501 && CURR_SLOT.src_line == regdeps[i].line)
9502 {
9503 ++i;
9504 continue;
9505 }
9506
9507 /* Clear dependencies which are automatically cleared by a stop, or
9508 those that have reached the appropriate state of insn serialization. */
9509 if (dep->semantics == IA64_DVS_IMPLIED
9510 || dep->semantics == IA64_DVS_IMPLIEDF
9511 || regdeps[i].insn_srlz == STATE_SRLZ)
9512 {
9513 print_dependency ("Removing", i);
9514 regdeps[i] = regdeps[--regdepslen];
9515 }
9516 else
9517 {
9518 if (dep->semantics == IA64_DVS_DATA
9519 || dep->semantics == IA64_DVS_INSTR
9520 || dep->semantics == IA64_DVS_SPECIFIC)
9521 {
9522 if (regdeps[i].insn_srlz == STATE_NONE)
9523 regdeps[i].insn_srlz = STATE_STOP;
9524 if (regdeps[i].data_srlz == STATE_NONE)
9525 regdeps[i].data_srlz = STATE_STOP;
9526 }
9527 ++i;
9528 }
9529 }
9530 }
9531
9532 /* Add the given resource usage spec to the list of active dependencies. */
9533
9534 static void
9535 mark_resource (idesc, dep, spec, depind, path)
9536 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
9537 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
9538 struct rsrc *spec;
9539 int depind;
9540 int path;
9541 {
9542 if (regdepslen == regdepstotlen)
9543 {
9544 regdepstotlen += 20;
9545 regdeps = (struct rsrc *)
9546 xrealloc ((void *) regdeps,
9547 regdepstotlen * sizeof (struct rsrc));
9548 }
9549
9550 regdeps[regdepslen] = *spec;
9551 regdeps[regdepslen].depind = depind;
9552 regdeps[regdepslen].path = path;
9553 regdeps[regdepslen].file = CURR_SLOT.src_file;
9554 regdeps[regdepslen].line = CURR_SLOT.src_line;
9555
9556 print_dependency ("Adding", regdepslen);
9557
9558 ++regdepslen;
9559 }
9560
9561 static void
9562 print_dependency (action, depind)
9563 const char *action;
9564 int depind;
9565 {
9566 if (md.debug_dv)
9567 {
9568 fprintf (stderr, " %s %s '%s'",
9569 action, dv_mode[(regdeps[depind].dependency)->mode],
9570 (regdeps[depind].dependency)->name);
9571 if (regdeps[depind].specific && regdeps[depind].index != 0)
9572 fprintf (stderr, " (%d)", regdeps[depind].index);
9573 if (regdeps[depind].mem_offset.hint)
9574 {
9575 fputs (" ", stderr);
9576 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9577 fputs ("+", stderr);
9578 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9579 }
9580 fprintf (stderr, "\n");
9581 }
9582 }
9583
9584 static void
9585 instruction_serialization ()
9586 {
9587 int i;
9588 if (md.debug_dv)
9589 fprintf (stderr, " Instruction serialization\n");
9590 for (i = 0; i < regdepslen; i++)
9591 if (regdeps[i].insn_srlz == STATE_STOP)
9592 regdeps[i].insn_srlz = STATE_SRLZ;
9593 }
9594
9595 static void
9596 data_serialization ()
9597 {
9598 int i = 0;
9599 if (md.debug_dv)
9600 fprintf (stderr, " Data serialization\n");
9601 while (i < regdepslen)
9602 {
9603 if (regdeps[i].data_srlz == STATE_STOP
9604 /* Note: as of 991210, all "other" dependencies are cleared by a
9605 data serialization. This might change with new tables */
9606 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9607 {
9608 print_dependency ("Removing", i);
9609 regdeps[i] = regdeps[--regdepslen];
9610 }
9611 else
9612 ++i;
9613 }
9614 }
9615
9616 /* Insert stops and serializations as needed to avoid DVs. */
9617
9618 static void
9619 remove_marked_resource (rs)
9620 struct rsrc *rs;
9621 {
9622 switch (rs->dependency->semantics)
9623 {
9624 case IA64_DVS_SPECIFIC:
9625 if (md.debug_dv)
9626 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9627 /* ...fall through... */
9628 case IA64_DVS_INSTR:
9629 if (md.debug_dv)
9630 fprintf (stderr, "Inserting instr serialization\n");
9631 if (rs->insn_srlz < STATE_STOP)
9632 insn_group_break (1, 0, 0);
9633 if (rs->insn_srlz < STATE_SRLZ)
9634 {
9635 struct slot oldslot = CURR_SLOT;
9636 /* Manually jam a srlz.i insn into the stream */
9637 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9638 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9639 instruction_serialization ();
9640 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9641 if (++md.num_slots_in_use >= NUM_SLOTS)
9642 emit_one_bundle ();
9643 CURR_SLOT = oldslot;
9644 }
9645 insn_group_break (1, 0, 0);
9646 break;
9647 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9648 "other" types of DV are eliminated
9649 by a data serialization */
9650 case IA64_DVS_DATA:
9651 if (md.debug_dv)
9652 fprintf (stderr, "Inserting data serialization\n");
9653 if (rs->data_srlz < STATE_STOP)
9654 insn_group_break (1, 0, 0);
9655 {
9656 struct slot oldslot = CURR_SLOT;
9657 /* Manually jam a srlz.d insn into the stream */
9658 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9659 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9660 data_serialization ();
9661 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9662 if (++md.num_slots_in_use >= NUM_SLOTS)
9663 emit_one_bundle ();
9664 CURR_SLOT = oldslot;
9665 }
9666 break;
9667 case IA64_DVS_IMPLIED:
9668 case IA64_DVS_IMPLIEDF:
9669 if (md.debug_dv)
9670 fprintf (stderr, "Inserting stop\n");
9671 insn_group_break (1, 0, 0);
9672 break;
9673 default:
9674 break;
9675 }
9676 }
9677
9678 /* Check the resources used by the given opcode against the current dependency
9679 list.
9680
9681 The check is run once for each execution path encountered. In this case,
9682 a unique execution path is the sequence of instructions following a code
9683 entry point, e.g. the following has three execution paths, one starting
9684 at L0, one at L1, and one at L2.
9685
9686 L0: nop
9687 L1: add
9688 L2: add
9689 br.ret
9690 */
9691
9692 static void
9693 check_dependencies (idesc)
9694 struct ia64_opcode *idesc;
9695 {
9696 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9697 int path;
9698 int i;
9699
9700 /* Note that the number of marked resources may change within the
9701 loop if in auto mode. */
9702 i = 0;
9703 while (i < regdepslen)
9704 {
9705 struct rsrc *rs = &regdeps[i];
9706 const struct ia64_dependency *dep = rs->dependency;
9707 int chkind;
9708 int note;
9709 int start_over = 0;
9710
9711 if (dep->semantics == IA64_DVS_NONE
9712 || (chkind = depends_on (rs->depind, idesc)) == -1)
9713 {
9714 ++i;
9715 continue;
9716 }
9717
9718 note = NOTE (opdeps->chks[chkind]);
9719
9720 /* Check this resource against each execution path seen thus far. */
9721 for (path = 0; path <= md.path; path++)
9722 {
9723 int matchtype;
9724
9725 /* If the dependency wasn't on the path being checked, ignore it. */
9726 if (rs->path < path)
9727 continue;
9728
9729 /* If the QP for this insn implies a QP which has branched, don't
9730 bother checking. Ed. NOTE: I don't think this check is terribly
9731 useful; what's the point of generating code which will only be
9732 reached if its QP is zero?
9733 This code was specifically inserted to handle the following code,
9734 based on notes from Intel's DV checking code, where p1 implies p2.
9735
9736 mov r4 = 2
9737 (p2) br.cond L
9738 (p1) mov r4 = 7
9739 */
9740 if (CURR_SLOT.qp_regno != 0)
9741 {
9742 int skip = 0;
9743 int implies;
9744 for (implies = 0; implies < qp_implieslen; implies++)
9745 {
9746 if (qp_implies[implies].path >= path
9747 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9748 && qp_implies[implies].p2_branched)
9749 {
9750 skip = 1;
9751 break;
9752 }
9753 }
9754 if (skip)
9755 continue;
9756 }
9757
9758 if ((matchtype = resources_match (rs, idesc, note,
9759 CURR_SLOT.qp_regno, path)) != 0)
9760 {
9761 char msg[1024];
9762 char pathmsg[256] = "";
9763 char indexmsg[256] = "";
9764 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9765
9766 if (path != 0)
9767 sprintf (pathmsg, " when entry is at label '%s'",
9768 md.entry_labels[path - 1]);
9769 if (rs->specific && rs->index != 0)
9770 sprintf (indexmsg, ", specific resource number is %d",
9771 rs->index);
9772 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9773 idesc->name,
9774 (certain ? "violates" : "may violate"),
9775 dv_mode[dep->mode], dep->name,
9776 dv_sem[dep->semantics],
9777 pathmsg, indexmsg);
9778
9779 if (md.explicit_mode)
9780 {
9781 as_warn ("%s", msg);
9782 if (path < md.path)
9783 as_warn (_("Only the first path encountering the conflict "
9784 "is reported"));
9785 as_warn_where (rs->file, rs->line,
9786 _("This is the location of the "
9787 "conflicting usage"));
9788 /* Don't bother checking other paths, to avoid duplicating
9789 the same warning */
9790 break;
9791 }
9792 else
9793 {
9794 if (md.debug_dv)
9795 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9796
9797 remove_marked_resource (rs);
9798
9799 /* since the set of dependencies has changed, start over */
9800 /* FIXME -- since we're removing dvs as we go, we
9801 probably don't really need to start over... */
9802 start_over = 1;
9803 break;
9804 }
9805 }
9806 }
9807 if (start_over)
9808 i = 0;
9809 else
9810 ++i;
9811 }
9812 }
9813
9814 /* Register new dependencies based on the given opcode. */
9815
9816 static void
9817 mark_resources (idesc)
9818 struct ia64_opcode *idesc;
9819 {
9820 int i;
9821 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9822 int add_only_qp_reads = 0;
9823
9824 /* A conditional branch only uses its resources if it is taken; if it is
9825 taken, we stop following that path. The other branch types effectively
9826 *always* write their resources. If it's not taken, register only QP
9827 reads. */
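/* For example, '(p6) br.cond L' only registers the read of PR6 here; its
   other resource accesses matter only on the taken path, which we do not
   follow.  */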
9828 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9829 {
9830 add_only_qp_reads = 1;
9831 }
9832
9833 if (md.debug_dv)
9834 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9835
9836 for (i = 0; i < opdeps->nregs; i++)
9837 {
9838 const struct ia64_dependency *dep;
9839 struct rsrc specs[MAX_SPECS];
9840 int note;
9841 int path;
9842 int count;
9843
9844 dep = ia64_find_dependency (opdeps->regs[i]);
9845 note = NOTE (opdeps->regs[i]);
9846
9847 if (add_only_qp_reads
9848 && !(dep->mode == IA64_DV_WAR
9849 && (dep->specifier == IA64_RS_PR
9850 || dep->specifier == IA64_RS_PRr
9851 || dep->specifier == IA64_RS_PR63)))
9852 continue;
9853
9854 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9855
9856 #if 0
9857 if (md.debug_dv && !count)
9858 fprintf (stderr, " No %s %s usage found (path %d)\n",
9859 dv_mode[dep->mode], dep->name, md.path);
9860 #endif
9861
9862 while (count-- > 0)
9863 {
9864 mark_resource (idesc, dep, &specs[count],
9865 DEP (opdeps->regs[i]), md.path);
9866 }
9867
9868 /* The execution path may affect register values, which may in turn
9869 affect which indirect-access resources are accessed. */
9870 switch (dep->specifier)
9871 {
9872 default:
9873 break;
9874 case IA64_RS_CPUID:
9875 case IA64_RS_DBR:
9876 case IA64_RS_IBR:
9877 case IA64_RS_MSR:
9878 case IA64_RS_PKR:
9879 case IA64_RS_PMC:
9880 case IA64_RS_PMD:
9881 case IA64_RS_RR:
9882 for (path = 0; path < md.path; path++)
9883 {
9884 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9885 while (count-- > 0)
9886 mark_resource (idesc, dep, &specs[count],
9887 DEP (opdeps->regs[i]), path);
9888 }
9889 break;
9890 }
9891 }
9892 }
9893
9894 /* Remove dependencies when they no longer apply. */
9895
9896 static void
9897 update_dependencies (idesc)
9898 struct ia64_opcode *idesc;
9899 {
9900 int i;
9901
9902 if (strcmp (idesc->name, "srlz.i") == 0)
9903 {
9904 instruction_serialization ();
9905 }
9906 else if (strcmp (idesc->name, "srlz.d") == 0)
9907 {
9908 data_serialization ();
9909 }
9910 else if (is_interruption_or_rfi (idesc)
9911 || is_taken_branch (idesc))
9912 {
9913 /* Although technically the taken branch doesn't clear dependencies
9914 which require a srlz.[id], we don't follow the branch; the next
9915 instruction is assumed to start with a clean slate. */
9916 regdepslen = 0;
9917 md.path = 0;
9918 }
9919 else if (is_conditional_branch (idesc)
9920 && CURR_SLOT.qp_regno != 0)
9921 {
9922 int is_call = strstr (idesc->name, ".call") != NULL;
9923
9924 for (i = 0; i < qp_implieslen; i++)
9925 {
9926 /* If the conditional branch's predicate is implied by the predicate
9927 in an existing dependency, remove that dependency. */
9928 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9929 {
9930 int depind = 0;
9931 /* Note that this implied predicate takes a branch so that if
9932 a later insn generates a DV but its predicate implies this
9933 one, we can avoid the false DV warning. */
9934 qp_implies[i].p2_branched = 1;
9935 while (depind < regdepslen)
9936 {
9937 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9938 {
9939 print_dependency ("Removing", depind);
9940 regdeps[depind] = regdeps[--regdepslen];
9941 }
9942 else
9943 ++depind;
9944 }
9945 }
9946 }
9947 /* Any marked resources which have this same predicate should be
9948 cleared, provided that the QP hasn't been modified between the
9949 marking instruction and the branch. */
9950 if (is_call)
9951 {
9952 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9953 }
9954 else
9955 {
9956 i = 0;
9957 while (i < regdepslen)
9958 {
9959 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9960 && regdeps[i].link_to_qp_branch
9961 && (regdeps[i].file != CURR_SLOT.src_file
9962 || regdeps[i].line != CURR_SLOT.src_line))
9963 {
9964 /* Treat like a taken branch */
9965 print_dependency ("Removing", i);
9966 regdeps[i] = regdeps[--regdepslen];
9967 }
9968 else
9969 ++i;
9970 }
9971 }
9972 }
9973 }
9974
9975 /* Examine the current instruction for dependency violations. */
9976
9977 static int
9978 check_dv (idesc)
9979 struct ia64_opcode *idesc;
9980 {
9981 if (md.debug_dv)
9982 {
9983 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9984 idesc->name, CURR_SLOT.src_line,
9985 idesc->dependencies->nchks,
9986 idesc->dependencies->nregs);
9987 }
9988
9989 /* Look through the list of currently marked resources; if the current
9990 instruction has the dependency in its chks list which uses that resource,
9991 check against the specific resources used. */
9992 check_dependencies (idesc);
9993
9994 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9995 then add them to the list of marked resources. */
9996 mark_resources (idesc);
9997
9998 /* There are several types of dependency semantics, and each has its own
9999 requirements for being cleared
10000
10001 Instruction serialization (insns separated by interruption, rfi, or
10002 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10003
10004 Data serialization (instruction serialization, or writer + srlz.d +
10005 reader, where writer and srlz.d are in separate groups) clears
10006 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10007 always be the case).
10008
10009 Instruction group break (groups separated by stop, taken branch,
10010 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10011 */
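/* A rough sketch of the data-serialization case:
        <writer> ;;  srlz.d ;;  <reader>
   with the writer, the srlz.d, and the reader each in their own instruction
   group; this clears a DVS_DATA dependency between writer and reader.  */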
10012 update_dependencies (idesc);
10013
10014 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10015 warning. Keep track of as many as possible that are useful. */
10016 note_register_values (idesc);
10017
10018 /* We don't need or want this anymore. */
10019 md.mem_offset.hint = 0;
10020
10021 return 0;
10022 }
10023
10024 /* Translate one line of assembly. Pseudo ops and labels do not show
10025 here. */
10026 void
10027 md_assemble (str)
10028 char *str;
10029 {
10030 char *saved_input_line_pointer, *mnemonic;
10031 const struct pseudo_opcode *pdesc;
10032 struct ia64_opcode *idesc;
10033 unsigned char qp_regno;
10034 unsigned int flags;
10035 int ch;
10036
10037 saved_input_line_pointer = input_line_pointer;
10038 input_line_pointer = str;
10039
10040 /* extract the opcode (mnemonic): */
10041
10042 mnemonic = input_line_pointer;
10043 ch = get_symbol_end ();
10044 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10045 if (pdesc)
10046 {
10047 *input_line_pointer = ch;
10048 (*pdesc->handler) (pdesc->arg);
10049 goto done;
10050 }
10051
10052 /* Find the instruction descriptor matching the arguments. */
10053
10054 idesc = ia64_find_opcode (mnemonic);
10055 *input_line_pointer = ch;
10056 if (!idesc)
10057 {
10058 as_bad ("Unknown opcode `%s'", mnemonic);
10059 goto done;
10060 }
10061
10062 idesc = parse_operands (idesc);
10063 if (!idesc)
10064 goto done;
10065
10066 /* Handle the dynamic ops we can handle now: */
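/* For example, 'add r8 = 200000, r3' becomes 'addl' (22-bit immediate,
   but the GR source must be r0-r3), while 'add r8 = 100, r9' becomes
   'adds' with its 14-bit immediate.  */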
10067 if (idesc->type == IA64_TYPE_DYN)
10068 {
10069 if (strcmp (idesc->name, "add") == 0)
10070 {
10071 if (CURR_SLOT.opnd[2].X_op == O_register
10072 && CURR_SLOT.opnd[2].X_add_number < 4)
10073 mnemonic = "addl";
10074 else
10075 mnemonic = "adds";
10076 ia64_free_opcode (idesc);
10077 idesc = ia64_find_opcode (mnemonic);
10078 #if 0
10079 know (!idesc->next);
10080 #endif
10081 }
10082 else if (strcmp (idesc->name, "mov") == 0)
10083 {
10084 enum ia64_opnd opnd1, opnd2;
10085 int rop;
10086
10087 opnd1 = idesc->operands[0];
10088 opnd2 = idesc->operands[1];
10089 if (opnd1 == IA64_OPND_AR3)
10090 rop = 0;
10091 else if (opnd2 == IA64_OPND_AR3)
10092 rop = 1;
10093 else
10094 abort ();
10095 if (CURR_SLOT.opnd[rop].X_op == O_register)
10096 {
10097 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10098 mnemonic = "mov.i";
10099 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10100 mnemonic = "mov.m";
10101 else
10102 rop = -1;
10103 }
10104 else
10105 abort ();
10106 if (rop >= 0)
10107 {
10108 ia64_free_opcode (idesc);
10109 idesc = ia64_find_opcode (mnemonic);
10110 while (idesc != NULL
10111 && (idesc->operands[0] != opnd1
10112 || idesc->operands[1] != opnd2))
10113 idesc = get_next_opcode (idesc);
10114 }
10115 }
10116 }
10117 else if (strcmp (idesc->name, "mov.i") == 0
10118 || strcmp (idesc->name, "mov.m") == 0)
10119 {
10120 enum ia64_opnd opnd1, opnd2;
10121 int rop;
10122
10123 opnd1 = idesc->operands[0];
10124 opnd2 = idesc->operands[1];
10125 if (opnd1 == IA64_OPND_AR3)
10126 rop = 0;
10127 else if (opnd2 == IA64_OPND_AR3)
10128 rop = 1;
10129 else
10130 abort ();
10131 if (CURR_SLOT.opnd[rop].X_op == O_register)
10132 {
10133 char unit = 'a';
10134 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10135 unit = 'i';
10136 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10137 unit = 'm';
10138 if (unit != 'a' && unit != idesc->name [4])
10139 as_bad ("AR %d cannot be accessed by %c-unit",
10140 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10141 TOUPPER (unit));
10142 }
10143 }
10144
10145 qp_regno = 0;
10146 if (md.qp.X_op == O_register)
10147 {
10148 qp_regno = md.qp.X_add_number - REG_P;
10149 md.qp.X_op = O_absent;
10150 }
10151
10152 flags = idesc->flags;
10153
10154 if ((flags & IA64_OPCODE_FIRST) != 0)
10155 {
10156 /* The alignment frag has to end with a stop bit only if the
10157 next instruction after the alignment directive has to be
10158 the first instruction in an instruction group. */
10159 if (align_frag)
10160 {
10161 while (align_frag->fr_type != rs_align_code)
10162 {
10163 align_frag = align_frag->fr_next;
10164 if (!align_frag)
10165 break;
10166 }
10167 /* align_frag can be NULL if there are directives in
10168 between. */
10169 if (align_frag && align_frag->fr_next == frag_now)
10170 align_frag->tc_frag_data = 1;
10171 }
10172
10173 insn_group_break (1, 0, 0);
10174 }
10175 align_frag = NULL;
10176
10177 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10178 {
10179 as_bad ("`%s' cannot be predicated", idesc->name);
10180 goto done;
10181 }
10182
10183 /* Build the instruction. */
10184 CURR_SLOT.qp_regno = qp_regno;
10185 CURR_SLOT.idesc = idesc;
10186 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10187 dwarf2_where (&CURR_SLOT.debug_line);
10188
10189 /* Add unwind entry, if there is one. */
10190 if (unwind.current_entry)
10191 {
10192 CURR_SLOT.unwind_record = unwind.current_entry;
10193 unwind.current_entry = NULL;
10194 }
10195
10196 /* Check for dependency violations. */
10197 if (md.detect_dv)
10198 check_dv (idesc);
10199
10200 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10201 if (++md.num_slots_in_use >= NUM_SLOTS)
10202 emit_one_bundle ();
10203
10204 if ((flags & IA64_OPCODE_LAST) != 0)
10205 insn_group_break (1, 0, 0);
10206
10207 md.last_text_seg = now_seg;
10208
10209 done:
10210 input_line_pointer = saved_input_line_pointer;
10211 }
10212
10213 /* Called when symbol NAME cannot be found in the symbol table.
10214 Should be used for dynamic valued symbols only. */
10215
10216 symbolS *
10217 md_undefined_symbol (name)
10218 char *name ATTRIBUTE_UNUSED;
10219 {
10220 return 0;
10221 }
10222
10223 /* Called for any expression that can not be recognized. When the
10224 function is called, `input_line_pointer' will point to the start of
10225 the expression. */
10226
10227 void
10228 md_operand (e)
10229 expressionS *e;
10230 {
10231 enum pseudo_type pseudo_type;
10232 const char *name;
10233 size_t len;
10234 int ch, i;
10235
10236 switch (*input_line_pointer)
10237 {
10238 case '@':
10239 /* Find what relocation pseudo-function we're dealing with. */
10240 pseudo_type = 0;
10241 ch = *++input_line_pointer;
10242 for (i = 0; i < NELEMS (pseudo_func); ++i)
10243 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
10244 {
10245 len = strlen (pseudo_func[i].name);
10246 if (strncmp (pseudo_func[i].name + 1,
10247 input_line_pointer + 1, len - 1) == 0
10248 && !is_part_of_name (input_line_pointer[len]))
10249 {
10250 input_line_pointer += len;
10251 pseudo_type = pseudo_func[i].type;
10252 break;
10253 }
10254 }
10255 switch (pseudo_type)
10256 {
10257 case PSEUDO_FUNC_RELOC:
10258 SKIP_WHITESPACE ();
10259 if (*input_line_pointer != '(')
10260 {
10261 as_bad ("Expected '('");
10262 goto err;
10263 }
10264 /* Skip '('. */
10265 ++input_line_pointer;
10266 expression (e);
10267 if (*input_line_pointer++ != ')')
10268 {
10269 as_bad ("Missing ')'");
10270 goto err;
10271 }
10272 if (e->X_op != O_symbol)
10273 {
10274 if (e->X_op != O_pseudo_fixup)
10275 {
10276 as_bad ("Not a symbolic expression");
10277 goto err;
10278 }
10279 if (i != FUNC_LT_RELATIVE)
10280 {
10281 as_bad ("Illegal combination of relocation functions");
10282 goto err;
10283 }
10284 switch (S_GET_VALUE (e->X_op_symbol))
10285 {
10286 case FUNC_FPTR_RELATIVE:
10287 i = FUNC_LT_FPTR_RELATIVE; break;
10288 case FUNC_DTP_MODULE:
10289 i = FUNC_LT_DTP_MODULE; break;
10290 case FUNC_DTP_RELATIVE:
10291 i = FUNC_LT_DTP_RELATIVE; break;
10292 case FUNC_TP_RELATIVE:
10293 i = FUNC_LT_TP_RELATIVE; break;
10294 default:
10295 as_bad ("Illegal combination of relocation functions");
10296 goto err;
10297 }
10298 }
10299 /* Make sure gas doesn't get rid of local symbols that are used
10300 in relocs. */
10301 e->X_op = O_pseudo_fixup;
10302 e->X_op_symbol = pseudo_func[i].u.sym;
10303 break;
10304
10305 case PSEUDO_FUNC_CONST:
10306 e->X_op = O_constant;
10307 e->X_add_number = pseudo_func[i].u.ival;
10308 break;
10309
10310 case PSEUDO_FUNC_REG:
10311 e->X_op = O_register;
10312 e->X_add_number = pseudo_func[i].u.ival;
10313 break;
10314
10315 default:
10316 name = input_line_pointer - 1;
10317 get_symbol_end ();
10318 as_bad ("Unknown pseudo function `%s'", name);
10319 goto err;
10320 }
10321 break;
10322
10323 case '[':
10324 ++input_line_pointer;
10325 expression (e);
10326 if (*input_line_pointer != ']')
10327 {
10328 as_bad ("Closing bracket missing");
10329 goto err;
10330 }
10331 else
10332 {
10333 if (e->X_op != O_register)
10334 as_bad ("Register expected as index");
10335
10336 ++input_line_pointer;
10337 e->X_op = O_index;
10338 }
10339 break;
10340
10341 default:
10342 break;
10343 }
10344 return;
10345
10346 err:
10347 ignore_rest_of_line ();
10348 }
10349
10350 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10351 a section symbol plus some offset. For relocs involving @fptr()
10352 directives, we don't want such adjustments since we need to have the
10353 original symbol's name in the reloc. */
10354 int
10355 ia64_fix_adjustable (fix)
10356 fixS *fix;
10357 {
10358 /* Prevent all adjustments to global symbols */
10359 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10360 return 0;
10361
10362 switch (fix->fx_r_type)
10363 {
10364 case BFD_RELOC_IA64_FPTR64I:
10365 case BFD_RELOC_IA64_FPTR32MSB:
10366 case BFD_RELOC_IA64_FPTR32LSB:
10367 case BFD_RELOC_IA64_FPTR64MSB:
10368 case BFD_RELOC_IA64_FPTR64LSB:
10369 case BFD_RELOC_IA64_LTOFF_FPTR22:
10370 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10371 return 0;
10372 default:
10373 break;
10374 }
10375
10376 return 1;
10377 }
10378
10379 int
10380 ia64_force_relocation (fix)
10381 fixS *fix;
10382 {
10383 switch (fix->fx_r_type)
10384 {
10385 case BFD_RELOC_IA64_FPTR64I:
10386 case BFD_RELOC_IA64_FPTR32MSB:
10387 case BFD_RELOC_IA64_FPTR32LSB:
10388 case BFD_RELOC_IA64_FPTR64MSB:
10389 case BFD_RELOC_IA64_FPTR64LSB:
10390
10391 case BFD_RELOC_IA64_LTOFF22:
10392 case BFD_RELOC_IA64_LTOFF64I:
10393 case BFD_RELOC_IA64_LTOFF_FPTR22:
10394 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10395 case BFD_RELOC_IA64_PLTOFF22:
10396 case BFD_RELOC_IA64_PLTOFF64I:
10397 case BFD_RELOC_IA64_PLTOFF64MSB:
10398 case BFD_RELOC_IA64_PLTOFF64LSB:
10399
10400 case BFD_RELOC_IA64_LTOFF22X:
10401 case BFD_RELOC_IA64_LDXMOV:
10402 return 1;
10403
10404 default:
10405 break;
10406 }
10407
10408 return generic_force_reloc (fix);
10409 }
10410
10411 /* Decide from what point a pc-relative relocation is relative to,
10412 relative to the pc-relative fixup. Er, relatively speaking. */
10413 long
10414 ia64_pcrel_from_section (fix, sec)
10415 fixS *fix;
10416 segT sec;
10417 {
10418 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10419
10420 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10421 off &= ~0xfUL;
10422
10423 return off;
10424 }
10425
10426
10427 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10428 void
10429 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10430 {
10431 expressionS expr;
10432
10433 expr.X_op = O_pseudo_fixup;
10434 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10435 expr.X_add_number = 0;
10436 expr.X_add_symbol = symbol;
10437 emit_expr (&expr, size);
10438 }
10439
10440 /* This is called whenever some data item (not an instruction) needs a
10441 fixup. We pick the right reloc code depending on the byteorder
10442 currently in effect. */
10443 void
10444 ia64_cons_fix_new (f, where, nbytes, exp)
10445 fragS *f;
10446 int where;
10447 int nbytes;
10448 expressionS *exp;
10449 {
10450 bfd_reloc_code_real_type code;
10451 fixS *fix;
10452
10453 switch (nbytes)
10454 {
10455 /* There are no relocs for 8 and 16 bit quantities, but we allow
10456 them here since they will work fine as long as the expression
10457 is fully defined at the end of the pass over the source file. */
10458 case 1: code = BFD_RELOC_8; break;
10459 case 2: code = BFD_RELOC_16; break;
10460 case 4:
10461 if (target_big_endian)
10462 code = BFD_RELOC_IA64_DIR32MSB;
10463 else
10464 code = BFD_RELOC_IA64_DIR32LSB;
10465 break;
10466
10467 case 8:
10468 /* In 32-bit mode, data8 could mean function descriptors too. */
10469 if (exp->X_op == O_pseudo_fixup
10470 && exp->X_op_symbol
10471 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10472 && !(md.flags & EF_IA_64_ABI64))
10473 {
10474 if (target_big_endian)
10475 code = BFD_RELOC_IA64_IPLTMSB;
10476 else
10477 code = BFD_RELOC_IA64_IPLTLSB;
10478 exp->X_op = O_symbol;
10479 break;
10480 }
10481 else
10482 {
10483 if (target_big_endian)
10484 code = BFD_RELOC_IA64_DIR64MSB;
10485 else
10486 code = BFD_RELOC_IA64_DIR64LSB;
10487 break;
10488 }
10489
10490 case 16:
10491 if (exp->X_op == O_pseudo_fixup
10492 && exp->X_op_symbol
10493 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10494 {
10495 if (target_big_endian)
10496 code = BFD_RELOC_IA64_IPLTMSB;
10497 else
10498 code = BFD_RELOC_IA64_IPLTLSB;
10499 exp->X_op = O_symbol;
10500 break;
10501 }
10502 /* FALLTHRU */
10503
10504 default:
10505 as_bad ("Unsupported fixup size %d", nbytes);
10506 ignore_rest_of_line ();
10507 return;
10508 }
10509
10510 if (exp->X_op == O_pseudo_fixup)
10511 {
10512 exp->X_op = O_symbol;
10513 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10514 /* ??? If code unchanged, unsupported. */
10515 }
10516
10517 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
10518 /* We need to store the byte order in effect in case we're going
10519 to fix an 8 or 16 bit relocation (for which there are no real
10520 relocs available). See md_apply_fix3(). */
10521 fix->tc_fix_data.bigendian = target_big_endian;
10522 }
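/* Typical inputs that reach this path (illustrative only):
     data4  sym           -> DIR32MSB/LSB, depending on byte order
     data8  sym           -> DIR64MSB/LSB
     data8  @iplt(func)   -> IPLTMSB/LSB (ILP32 mode only)
     data16 @iplt(func)   -> IPLTMSB/LSB  */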
10523
10524 /* Return the actual relocation we wish to associate with the pseudo
10525 reloc described by SYM and R_TYPE. SYM should be one of the
10526 symbols in the pseudo_func array, or NULL. */
10527
10528 static bfd_reloc_code_real_type
10529 ia64_gen_real_reloc_type (sym, r_type)
10530 struct symbol *sym;
10531 bfd_reloc_code_real_type r_type;
10532 {
10533 bfd_reloc_code_real_type new = 0;
10534
10535 if (sym == NULL)
10536 {
10537 return r_type;
10538 }
10539
10540 switch (S_GET_VALUE (sym))
10541 {
10542 case FUNC_FPTR_RELATIVE:
10543 switch (r_type)
10544 {
10545 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
10546 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
10547 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
10548 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
10549 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
10550 default: break;
10551 }
10552 break;
10553
10554 case FUNC_GP_RELATIVE:
10555 switch (r_type)
10556 {
10557 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
10558 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
10559 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
10560 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
10561 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
10562 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
10563 default: break;
10564 }
10565 break;
10566
10567 case FUNC_LT_RELATIVE:
10568 switch (r_type)
10569 {
10570 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
10571 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
10572 default: break;
10573 }
10574 break;
10575
10576 case FUNC_LT_RELATIVE_X:
10577 switch (r_type)
10578 {
10579 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
10580 default: break;
10581 }
10582 break;
10583
10584 case FUNC_PC_RELATIVE:
10585 switch (r_type)
10586 {
10587 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
10588 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
10589 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
10590 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
10591 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
10592 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
10593 default: break;
10594 }
10595 break;
10596
10597 case FUNC_PLT_RELATIVE:
10598 switch (r_type)
10599 {
10600 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
10601 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
10602 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
10603 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
10604 default: break;
10605 }
10606 break;
10607
10608 case FUNC_SEC_RELATIVE:
10609 switch (r_type)
10610 {
10611 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
10612 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
10613 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
10614 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
10615 default: break;
10616 }
10617 break;
10618
10619 case FUNC_SEG_RELATIVE:
10620 switch (r_type)
10621 {
10622 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
10623 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
10624 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
10625 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
10626 default: break;
10627 }
10628 break;
10629
10630 case FUNC_LTV_RELATIVE:
10631 switch (r_type)
10632 {
10633 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
10634 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
10635 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
10636 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
10637 default: break;
10638 }
10639 break;
10640
10641 case FUNC_LT_FPTR_RELATIVE:
10642 switch (r_type)
10643 {
10644 case BFD_RELOC_IA64_IMM22:
10645 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10646 case BFD_RELOC_IA64_IMM64:
10647 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10648 default:
10649 break;
10650 }
10651 break;
10652
10653 case FUNC_TP_RELATIVE:
10654 switch (r_type)
10655 {
10656 case BFD_RELOC_IA64_IMM14:
10657 new = BFD_RELOC_IA64_TPREL14; break;
10658 case BFD_RELOC_IA64_IMM22:
10659 new = BFD_RELOC_IA64_TPREL22; break;
10660 case BFD_RELOC_IA64_IMM64:
10661 new = BFD_RELOC_IA64_TPREL64I; break;
10662 default:
10663 break;
10664 }
10665 break;
10666
10667 case FUNC_LT_TP_RELATIVE:
10668 switch (r_type)
10669 {
10670 case BFD_RELOC_IA64_IMM22:
10671 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
10672 default:
10673 break;
10674 }
10675 break;
10676
10677 case FUNC_LT_DTP_MODULE:
10678 switch (r_type)
10679 {
10680 case BFD_RELOC_IA64_IMM22:
10681 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
10682 default:
10683 break;
10684 }
10685 break;
10686
10687 case FUNC_DTP_RELATIVE:
10688 switch (r_type)
10689 {
10690 case BFD_RELOC_IA64_DIR64MSB:
10691 new = BFD_RELOC_IA64_DTPREL64MSB; break;
10692 case BFD_RELOC_IA64_DIR64LSB:
10693 new = BFD_RELOC_IA64_DTPREL64LSB; break;
10694 case BFD_RELOC_IA64_IMM14:
10695 new = BFD_RELOC_IA64_DTPREL14; break;
10696 case BFD_RELOC_IA64_IMM22:
10697 new = BFD_RELOC_IA64_DTPREL22; break;
10698 case BFD_RELOC_IA64_IMM64:
10699 new = BFD_RELOC_IA64_DTPREL64I; break;
10700 default:
10701 break;
10702 }
10703 break;
10704
10705 case FUNC_LT_DTP_RELATIVE:
10706 switch (r_type)
10707 {
10708 case BFD_RELOC_IA64_IMM22:
10709 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
10710 default:
10711 break;
10712 }
10713 break;
10714
10715 case FUNC_IPLT_RELOC:
10716 break;
10717
10718 default:
10719 abort ();
10720 }
10721
10722 /* Hmmmm. Should this ever occur? */
10723 if (new)
10724 return new;
10725 else
10726 return r_type;
10727 }
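/* For instance (illustrative), "addl r2 = @gprel(sym), gp" produces an
   IMM22 fixup whose pseudo function is FUNC_GP_RELATIVE, which the switch
   above turns into BFD_RELOC_IA64_GPREL22.  */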
10728
10729 /* Here is where we generate the appropriate reloc for pseudo relocation
10730 functions. */
10731 void
10732 ia64_validate_fix (fix)
10733 fixS *fix;
10734 {
10735 switch (fix->fx_r_type)
10736 {
10737 case BFD_RELOC_IA64_FPTR64I:
10738 case BFD_RELOC_IA64_FPTR32MSB:
10739 case BFD_RELOC_IA64_FPTR64LSB:
10740 case BFD_RELOC_IA64_LTOFF_FPTR22:
10741 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10742 if (fix->fx_offset != 0)
10743 as_bad_where (fix->fx_file, fix->fx_line,
10744 "No addend allowed in @fptr() relocation");
10745 break;
10746 default:
10747 break;
10748 }
10749 }
10750
10751 static void
10752 fix_insn (fix, odesc, value)
10753 fixS *fix;
10754 const struct ia64_operand *odesc;
10755 valueT value;
10756 {
10757 bfd_vma insn[3], t0, t1, control_bits;
10758 const char *err;
10759 char *fixpos;
10760 long slot;
10761
10762 slot = fix->fx_where & 0x3;
10763 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10764
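  /* An IA-64 bundle is 128 bits: a 5-bit template in bits 0-4 followed by
     three 41-bit instruction slots (bits 5-45, 46-86, 87-127).  The two
     little-endian 64-bit words read below therefore split slot 1 across
     t0 and t1, which the shifting and masking reconstructs.  The low bits
     of fx_where give the slot number within the bundle.  */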
10765 /* Bundles are always in little-endian byte order */
10766 t0 = bfd_getl64 (fixpos);
10767 t1 = bfd_getl64 (fixpos + 8);
10768 control_bits = t0 & 0x1f;
10769 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10770 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10771 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
10772
10773 err = NULL;
10774 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10775 {
10776 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10777 insn[2] |= (((value & 0x7f) << 13)
10778 | (((value >> 7) & 0x1ff) << 27)
10779 | (((value >> 16) & 0x1f) << 22)
10780 | (((value >> 21) & 0x1) << 21)
10781 | (((value >> 63) & 0x1) << 36));
10782 }
10783 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10784 {
10785 if (value & ~0x3fffffffffffffffULL)
10786 err = "integer operand out of range";
10787 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10788 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10789 }
10790 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10791 {
10792 value >>= 4;
10793 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10794 insn[2] |= ((((value >> 59) & 0x1) << 36)
10795 | (((value >> 0) & 0xfffff) << 13));
10796 }
10797 else
10798 err = (*odesc->insert) (odesc, value, insn + slot);
10799
10800 if (err)
10801 as_bad_where (fix->fx_file, fix->fx_line, err);
10802
10803 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
10804 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
10805 number_to_chars_littleendian (fixpos + 0, t0, 8);
10806 number_to_chars_littleendian (fixpos + 8, t1, 8);
10807 }
10808
10809 /* Attempt to simplify or even eliminate a fixup. The return value is
10810 ignored; perhaps it was once meaningful, but now it is historical.
10811 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
10812
10813 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
10814 (if possible). */
10815
10816 void
10817 md_apply_fix3 (fix, valP, seg)
10818 fixS *fix;
10819 valueT *valP;
10820 segT seg ATTRIBUTE_UNUSED;
10821 {
10822 char *fixpos;
10823 valueT value = *valP;
10824
10825 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
10826
10827 if (fix->fx_pcrel)
10828 {
10829 switch (fix->fx_r_type)
10830 {
10831 case BFD_RELOC_IA64_DIR32MSB:
10832 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10833 break;
10834
10835 case BFD_RELOC_IA64_DIR32LSB:
10836 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10837 break;
10838
10839 case BFD_RELOC_IA64_DIR64MSB:
10840 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10841 break;
10842
10843 case BFD_RELOC_IA64_DIR64LSB:
10844 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10845 break;
10846
10847 default:
10848 break;
10849 }
10850 }
10851 if (fix->fx_addsy)
10852 {
10853 switch (fix->fx_r_type)
10854 {
10855 case BFD_RELOC_UNUSED:
10856 /* This must be a TAG13 or TAG13b operand. There are no external
10857 relocs defined for them, so we must give an error. */
10858 as_bad_where (fix->fx_file, fix->fx_line,
10859 "%s must have a constant value",
10860 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10861 fix->fx_done = 1;
10862 return;
10863
10864 case BFD_RELOC_IA64_TPREL14:
10865 case BFD_RELOC_IA64_TPREL22:
10866 case BFD_RELOC_IA64_TPREL64I:
10867 case BFD_RELOC_IA64_LTOFF_TPREL22:
10868 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
10869 case BFD_RELOC_IA64_DTPREL14:
10870 case BFD_RELOC_IA64_DTPREL22:
10871 case BFD_RELOC_IA64_DTPREL64I:
10872 case BFD_RELOC_IA64_LTOFF_DTPREL22:
10873 S_SET_THREAD_LOCAL (fix->fx_addsy);
10874 break;
10875
10876 default:
10877 break;
10878 }
10879 }
10880 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10881 {
10882 if (fix->tc_fix_data.bigendian)
10883 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10884 else
10885 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10886 fix->fx_done = 1;
10887 }
10888 else
10889 {
10890 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10891 fix->fx_done = 1;
10892 }
10893 }
10894
10895 /* Generate the BFD reloc to be stuck in the object file from the
10896 fixup used internally in the assembler. */
10897
10898 arelent *
10899 tc_gen_reloc (sec, fixp)
10900 asection *sec ATTRIBUTE_UNUSED;
10901 fixS *fixp;
10902 {
10903 arelent *reloc;
10904
10905 reloc = xmalloc (sizeof (*reloc));
10906 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10907 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10908 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10909 reloc->addend = fixp->fx_offset;
10910 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10911
10912 if (!reloc->howto)
10913 {
10914 as_bad_where (fixp->fx_file, fixp->fx_line,
10915 "Cannot represent %s relocation in object file",
10916 bfd_get_reloc_code_name (fixp->fx_r_type));
10917 }
10918 return reloc;
10919 }
10920
10921 /* Turn a string in input_line_pointer into a floating point constant
10922 of type TYPE, and store the appropriate bytes in *LIT. The number
10923 of LITTLENUMS emitted is stored in *SIZE. An error message is
10924 returned, or NULL on OK. */
10925
10926 #define MAX_LITTLENUMS 5
10927
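/* Each LITTLENUM_TYPE is 2 bytes, so the precisions selected below yield
   4-byte (single), 8-byte (double) and 10-byte (extended) images; the
   'X' case additionally pads the 10-byte image out to 16 bytes.  */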
10928 char *
10929 md_atof (type, lit, size)
10930 int type;
10931 char *lit;
10932 int *size;
10933 {
10934 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10935 char *t;
10936 int prec;
10937
10938 switch (type)
10939 {
10940 /* IEEE floats */
10941 case 'f':
10942 case 'F':
10943 case 's':
10944 case 'S':
10945 prec = 2;
10946 break;
10947
10948 case 'd':
10949 case 'D':
10950 case 'r':
10951 case 'R':
10952 prec = 4;
10953 break;
10954
10955 case 'x':
10956 case 'X':
10957 case 'p':
10958 case 'P':
10959 prec = 5;
10960 break;
10961
10962 default:
10963 *size = 0;
10964 return "Bad call to MD_ATOF()";
10965 }
10966 t = atof_ieee (input_line_pointer, type, words);
10967 if (t)
10968 input_line_pointer = t;
10969
10970 (*ia64_float_to_chars) (lit, words, prec);
10971
10972 if (type == 'X')
10973 {
10974 /* It is 10 byte floating point with 6 byte padding. */
10975 memset (&lit [10], 0, 6);
10976 *size = 8 * sizeof (LITTLENUM_TYPE);
10977 }
10978 else
10979 *size = prec * sizeof (LITTLENUM_TYPE);
10980
10981 return 0;
10982 }
10983
10984 /* Handle ia64 specific semantics of the align directive. */
10985
10986 void
10987 ia64_md_do_align (n, fill, len, max)
10988 int n ATTRIBUTE_UNUSED;
10989 const char *fill ATTRIBUTE_UNUSED;
10990 int len ATTRIBUTE_UNUSED;
10991 int max ATTRIBUTE_UNUSED;
10992 {
10993 if (subseg_text_p (now_seg))
10994 ia64_flush_insns ();
10995 }
10996
10997 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10998 of an rs_align_code fragment. */
10999
11000 void
11001 ia64_handle_align (fragp)
11002 fragS *fragp;
11003 {
11004 /* Use an MFI bundle of nops with no stop bits. */
11005 static const unsigned char le_nop[]
11006 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
11007 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
11008 static const unsigned char le_nop_stop[]
11009 = { 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
11010 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
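  /* The low five bits of the first byte hold the template: 0x0c is MFI,
     0x0d is MFI with a stop bit at the end of the bundle; the remaining
     bytes encode a nop in each of the M, F and I slots.  Setting the low
     template bit is also how the "*(p - 16) |= 1" below adds a trailing
     stop to an already-emitted bundle.  */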
11011
11012 int bytes;
11013 char *p;
11014 const unsigned char *nop;
11015
11016 if (fragp->fr_type != rs_align_code)
11017 return;
11018
11019 /* Check if this frag has to end with a stop bit. */
11020 nop = fragp->tc_frag_data ? le_nop_stop : le_nop;
11021
11022 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11023 p = fragp->fr_literal + fragp->fr_fix;
11024
11025 /* If no padding is needed, we check if we need a stop bit. */
11026 if (!bytes && fragp->tc_frag_data)
11027 {
11028 if (fragp->fr_fix < 16)
11029 #if 1
11030 /* FIXME: It won't work with
11031 .align 16
11032 alloc r32=ar.pfs,1,2,4,0
11033 */
11034 ;
11035 #else
11036 as_bad_where (fragp->fr_file, fragp->fr_line,
11037 _("Can't add stop bit to mark end of instruction group"));
11038 #endif
11039 else
11040 /* Bundles are always in little-endian byte order. Make sure
11041 the previous bundle has the stop bit. */
11042 *(p - 16) |= 1;
11043 }
11044
11045 /* Make sure we are on a 16-byte boundary, in case someone has been
11046 putting data into a text section. */
11047 if (bytes & 15)
11048 {
11049 int fix = bytes & 15;
11050 memset (p, 0, fix);
11051 p += fix;
11052 bytes -= fix;
11053 fragp->fr_fix += fix;
11054 }
11055
11056 /* Instruction bundles are always little-endian. */
11057 memcpy (p, nop, 16);
11058 fragp->fr_var = 16;
11059 }
11060
11061 static void
11062 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11063 int prec)
11064 {
11065 while (prec--)
11066 {
11067 number_to_chars_bigendian (lit, (long) (*words++),
11068 sizeof (LITTLENUM_TYPE));
11069 lit += sizeof (LITTLENUM_TYPE);
11070 }
11071 }
11072
11073 static void
11074 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11075 int prec)
11076 {
11077 while (prec--)
11078 {
11079 number_to_chars_littleendian (lit, (long) (words[prec]),
11080 sizeof (LITTLENUM_TYPE));
11081 lit += sizeof (LITTLENUM_TYPE);
11082 }
11083 }
11084
11085 void
11086 ia64_elf_section_change_hook (void)
11087 {
11088 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11089 && elf_linked_to_section (now_seg) == NULL)
11090 elf_linked_to_section (now_seg) = text_section;
11091 dot_byteorder (-1);
11092 }
11093
11094 /* Check if a label should be made global. */
11095 void
11096 ia64_check_label (symbolS *label)
11097 {
11098 if (*input_line_pointer == ':')
11099 {
11100 S_SET_EXTERNAL (label);
11101 input_line_pointer++;
11102 }
11103 }
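/* E.g. (illustrative) a label written "foo::" is both defined and
   exported: the second ':' seen here makes the symbol external.  */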
11104
11105 /* Used to remember where .alias and .secalias directives are seen. We
11106 will rename symbol and section names when we are about to output
11107 the relocatable file. */
11108 struct alias
11109 {
11110 char *file; /* The file where the directive is seen. */
11111 unsigned int line; /* The line number the directive is at. */
11112 const char *name; /* The original name of the symbol. */
11113 };
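/* Illustrative usage of the directives recorded here:
     .alias    foo, "external$name"        // symbol alias
     .secalias .mysect, "external$section" // section alias
   The quoted string may contain characters that are not legal in an
   assembler identifier.  */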
11114
11115 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11116 .secalias. Otherwise, it is .alias. */
11117 static void
11118 dot_alias (int section)
11119 {
11120 char *name, *alias;
11121 char delim;
11122 char *end_name;
11123 int len;
11124 const char *error_string;
11125 struct alias *h;
11126 const char *a;
11127 struct hash_control *ahash, *nhash;
11128 const char *kind;
11129
11130 name = input_line_pointer;
11131 delim = get_symbol_end ();
11132 end_name = input_line_pointer;
11133 *end_name = delim;
11134
11135 if (name == end_name)
11136 {
11137 as_bad (_("expected symbol name"));
11138 discard_rest_of_line ();
11139 return;
11140 }
11141
11142 SKIP_WHITESPACE ();
11143
11144 if (*input_line_pointer != ',')
11145 {
11146 *end_name = 0;
11147 as_bad (_("expected comma after \"%s\""), name);
11148 *end_name = delim;
11149 ignore_rest_of_line ();
11150 return;
11151 }
11152
11153 input_line_pointer++;
11154 *end_name = 0;
11155
11156 /* We call demand_copy_C_string to check if the alias string is valid.
11157 There should be a closing `"' and no `\0' in the string. */
11158 alias = demand_copy_C_string (&len);
11159 if (alias == NULL)
11160 {
11161 ignore_rest_of_line ();
11162 return;
11163 }
11164
11165 /* Make a copy of name string. */
11166 len = strlen (name) + 1;
11167 obstack_grow (&notes, name, len);
11168 name = obstack_finish (&notes);
11169
11170 if (section)
11171 {
11172 kind = "section";
11173 ahash = secalias_hash;
11174 nhash = secalias_name_hash;
11175 }
11176 else
11177 {
11178 kind = "symbol";
11179 ahash = alias_hash;
11180 nhash = alias_name_hash;
11181 }
11182
11183 /* Check if alias has been used before. */
11184 h = (struct alias *) hash_find (ahash, alias);
11185 if (h)
11186 {
11187 if (strcmp (h->name, name))
11188 as_bad (_("`%s' is already the alias of %s `%s'"),
11189 alias, kind, h->name);
11190 goto out;
11191 }
11192
11193 /* Check if name already has an alias. */
11194 a = (const char *) hash_find (nhash, name);
11195 if (a)
11196 {
11197 if (strcmp (a, alias))
11198 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11199 goto out;
11200 }
11201
11202 h = (struct alias *) xmalloc (sizeof (struct alias));
11203 as_where (&h->file, &h->line);
11204 h->name = name;
11205
11206 error_string = hash_jam (ahash, alias, (PTR) h);
11207 if (error_string)
11208 {
11209 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11210 alias, kind, error_string);
11211 goto out;
11212 }
11213
11214 error_string = hash_jam (nhash, name, (PTR) alias);
11215 if (error_string)
11216 {
11217 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11218 alias, kind, error_string);
11219 out:
11220 obstack_free (&notes, name);
11221 obstack_free (&notes, alias);
11222 }
11223
11224 demand_empty_rest_of_line ();
11225 }
11226
11227 /* Rename the original symbol name to its alias. */
11228 static void
11229 do_alias (const char *alias, PTR value)
11230 {
11231 struct alias *h = (struct alias *) value;
11232 symbolS *sym = symbol_find (h->name);
11233
11234 if (sym == NULL)
11235 as_warn_where (h->file, h->line,
11236 _("symbol `%s' aliased to `%s' is not used"),
11237 h->name, alias);
11238 else
11239 S_SET_NAME (sym, (char *) alias);
11240 }
11241
11242 /* Called from write_object_file. */
11243 void
11244 ia64_adjust_symtab (void)
11245 {
11246 hash_traverse (alias_hash, do_alias);
11247 }
11248
11249 /* Rename the original section name to its alias. */
11250 static void
11251 do_secalias (const char *alias, PTR value)
11252 {
11253 struct alias *h = (struct alias *) value;
11254 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11255
11256 if (sec == NULL)
11257 as_warn_where (h->file, h->line,
11258 _("section `%s' aliased to `%s' is not used"),
11259 h->name, alias);
11260 else
11261 sec->name = alias;
11262 }
11263
11264 /* Called from write_object_file. */
11265 void
11266 ia64_frob_file (void)
11267 {
11268 hash_traverse (secalias_hash, do_secalias);
11269 }