1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2024 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35         (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54 #include <limits.h>
55
56 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
57
58 /* Some systems define MIN in, e.g., param.h. */
59 #undef MIN
60 #define MIN(a,b) ((a) < (b) ? (a) : (b))
61
62 #define NUM_SLOTS 4
63 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
64 #define CURR_SLOT md.slot[md.curr_slot]
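/* Editorial illustration (not part of the original source): the slot array
   acts as a ring buffer, so with md.curr_slot == 0, CURR_SLOT is md.slot[0]
   and PREV_SLOT wraps around to md.slot[3].  */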
65
66 #define O_pseudo_fixup (O_max + 1)
67
68 enum special_section
69 {
70 /* IA-64 ABI section pseudo-ops. */
71 SPECIAL_SECTION_BSS = 0,
72 SPECIAL_SECTION_SBSS,
73 SPECIAL_SECTION_SDATA,
74 SPECIAL_SECTION_RODATA,
75 SPECIAL_SECTION_COMMENT,
76 SPECIAL_SECTION_UNWIND,
77 SPECIAL_SECTION_UNWIND_INFO,
78 /* HPUX specific section pseudo-ops. */
79 SPECIAL_SECTION_INIT_ARRAY,
80 SPECIAL_SECTION_FINI_ARRAY,
81 };
82
83 enum reloc_func
84 {
85 FUNC_DTP_MODULE,
86 FUNC_DTP_RELATIVE,
87 FUNC_FPTR_RELATIVE,
88 FUNC_GP_RELATIVE,
89 FUNC_LT_RELATIVE,
90 FUNC_LT_RELATIVE_X,
91 FUNC_PC_RELATIVE,
92 FUNC_PLT_RELATIVE,
93 FUNC_SEC_RELATIVE,
94 FUNC_SEG_RELATIVE,
95 FUNC_TP_RELATIVE,
96 FUNC_LTV_RELATIVE,
97 FUNC_LT_FPTR_RELATIVE,
98 FUNC_LT_DTP_MODULE,
99 FUNC_LT_DTP_RELATIVE,
100 FUNC_LT_TP_RELATIVE,
101 FUNC_IPLT_RELOC,
102 #ifdef TE_VMS
103 FUNC_SLOTCOUNT_RELOC,
104 #endif
105 };
106
107 enum reg_symbol
108 {
109 REG_GR = 0,
110 REG_FR = (REG_GR + 128),
111 REG_AR = (REG_FR + 128),
112 REG_CR = (REG_AR + 128),
113 REG_DAHR = (REG_CR + 128),
114 REG_P = (REG_DAHR + 8),
115 REG_BR = (REG_P + 64),
116 REG_IP = (REG_BR + 8),
117 REG_CFM,
118 REG_PR,
119 REG_PR_ROT,
120 REG_PSR,
121 REG_PSR_L,
122 REG_PSR_UM,
123 /* The following are pseudo-registers for use by gas only. */
124 IND_CPUID,
125 IND_DBR,
126 IND_DTR,
127 IND_ITR,
128 IND_IBR,
129 IND_MSR,
130 IND_PKR,
131 IND_PMC,
132 IND_PMD,
133 IND_DAHR,
134 IND_RR,
135 /* The following pseudo-registers are used for unwind directives only: */
136 REG_PSP,
137 REG_PRIUNAT,
138 REG_NUM
139 };
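/* Editorial illustration (not part of the original source): a register symbol
   is encoded as its class base plus the register number, e.g. f5 is
   REG_FR + 5 == 133 and p6 is REG_P + 6, per the offsets above.  */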
140
141 enum dynreg_type
142 {
143 DYNREG_GR = 0, /* dynamic general purpose register */
144 DYNREG_FR, /* dynamic floating point register */
145 DYNREG_PR, /* dynamic predicate register */
146 DYNREG_NUM_TYPES
147 };
148
149 enum operand_match_result
150 {
151 OPERAND_MATCH,
152 OPERAND_OUT_OF_RANGE,
153 OPERAND_MISMATCH
154 };
155
156 /* On the ia64, we can't know the address of a text label until the
157 instructions are packed into a bundle. To handle this, we keep
158 track of the list of labels that appear in front of each
159 instruction. */
160 struct label_fix
161 {
162 struct label_fix *next;
163 struct symbol *sym;
164 bool dw2_mark_labels;
165 };
166
167 #ifdef TE_VMS
168 /* An internally used relocation. */
169 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
170 #endif
171
172 /* This is the endianness of the current section. */
173 extern int target_big_endian;
174
175 /* This is the default endianness. */
176 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
177
178 void (*ia64_number_to_chars) (char *, valueT, int);
179
180 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
181 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
182
183 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
184
185 static htab_t alias_hash;
186 static htab_t alias_name_hash;
187 static htab_t secalias_hash;
188 static htab_t secalias_name_hash;
189
190 /* List of chars besides those in app.c:symbol_chars that can start an
191 operand. Used to prevent the scrubber eating vital white-space. */
192 const char ia64_symbol_chars[] = "@?";
193
194 /* Characters which always start a comment. */
195 const char comment_chars[] = "";
196
197 /* Characters which start a comment at the beginning of a line. */
198 const char line_comment_chars[] = "#";
199
200 /* Characters which may be used to separate multiple commands on a
201 single line. */
202 const char line_separator_chars[] = ";{}";
203
204 /* Characters which are used to indicate an exponent in a floating
205 point number. */
206 const char EXP_CHARS[] = "eE";
207
208 /* Characters which mean that a number is a floating point constant,
209 as in 0d1.0. */
210 const char FLT_CHARS[] = "rRsSfFdDxXpP";
211
212 /* ia64-specific option processing: */
213
214 const char *md_shortopts = "m:N:x::";
215
216 struct option md_longopts[] =
217 {
218 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
219 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
220 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
221 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
222 };
223
224 size_t md_longopts_size = sizeof (md_longopts);
225
226 static struct
227 {
228 htab_t pseudo_hash; /* pseudo opcode hash table */
229 htab_t reg_hash; /* register name hash table */
230 htab_t dynreg_hash; /* dynamic register hash table */
231 htab_t const_hash; /* constant hash table */
232 htab_t entry_hash; /* code entry hint hash table */
233
234     /* If X_op != O_absent, the register name for the instruction's
235        qualifying predicate.  If NULL, p0 is assumed for instructions
236        that can be predicated.  */
237 expressionS qp;
238
239 /* Optimize for which CPU. */
240 enum
241 {
242 itanium1,
243 itanium2
244 } tune;
245
246 /* What to do when hint.b is used. */
247 enum
248 {
249 hint_b_error,
250 hint_b_warning,
251 hint_b_ok
252 } hint_b;
253
254 unsigned int
255 manual_bundling : 1,
256 debug_dv: 1,
257 detect_dv: 1,
258 explicit_mode : 1, /* which mode we're in */
259 default_explicit_mode : 1, /* which mode is the default */
260 mode_explicitly_set : 1, /* was the current mode explicitly set? */
261 auto_align : 1,
262 keep_pending_output : 1;
263
264 /* What to do when something is wrong with unwind directives. */
265 enum
266 {
267 unwind_check_warning,
268 unwind_check_error
269 } unwind_check;
270
271 /* Each bundle consists of up to three instructions. We keep
272        track of the four most recent instructions so we can correctly set
273 the end_of_insn_group for the last instruction in a bundle. */
274 int curr_slot;
275 int num_slots_in_use;
276 struct slot
277 {
278 unsigned int
279 end_of_insn_group : 1,
280 manual_bundling_on : 1,
281 manual_bundling_off : 1,
282 loc_directive_seen : 1;
283 signed char user_template; /* user-selected template, if any */
284 unsigned char qp_regno; /* qualifying predicate */
285 /* This duplicates a good fraction of "struct fix" but we
286 can't use a "struct fix" instead since we can't call
287 fix_new_exp() until we know the address of the instruction. */
288 int num_fixups;
289 struct insn_fix
290 {
291 bfd_reloc_code_real_type code;
292 enum ia64_opnd opnd; /* type of operand in need of fix */
293 unsigned int is_pcrel : 1; /* is operand pc-relative? */
294 expressionS expr; /* the value to be inserted */
295 }
296 fixup[2]; /* at most two fixups per insn */
297 struct ia64_opcode *idesc;
298 struct label_fix *label_fixups;
299 struct label_fix *tag_fixups;
300 struct unw_rec_list *unwind_record; /* Unwind directive. */
301 expressionS opnd[6];
302 const char *src_file;
303 unsigned int src_line;
304 struct dwarf2_line_info debug_line;
305 }
306 slot[NUM_SLOTS];
307
308 segT last_text_seg;
309 subsegT last_text_subseg;
310
311 struct dynreg
312 {
313 struct dynreg *next; /* next dynamic register */
314 const char *name;
315 unsigned short base; /* the base register number */
316 unsigned short num_regs; /* # of registers in this set */
317 }
318 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
319
320 flagword flags; /* ELF-header flags */
321
322 struct mem_offset {
323 unsigned hint:1; /* is this hint currently valid? */
324 bfd_vma offset; /* mem.offset offset */
325 bfd_vma base; /* mem.offset base */
326 } mem_offset;
327
328 int path; /* number of alt. entry points seen */
329 const char **entry_labels; /* labels of all alternate paths in
330 the current DV-checking block. */
331 int maxpaths; /* size currently allocated for
332 entry_labels */
333
334 int pointer_size; /* size in bytes of a pointer */
335 int pointer_size_shift; /* shift size of a pointer for alignment */
336
337 symbolS *indregsym[IND_RR - IND_CPUID + 1];
338 }
339 md;
340
341 /* These are not const, because they are modified to MMI for non-itanium1
342 targets below. */
343 /* MFI bundle of nops. */
344 static unsigned char le_nop[16] =
345 {
346 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
347 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
348 };
349 /* MFI bundle of nops with stop-bit. */
350 static unsigned char le_nop_stop[16] =
351 {
352 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
353 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
354 };
355
356 /* application registers: */
357
358 #define AR_K0 0
359 #define AR_K7 7
360 #define AR_RSC 16
361 #define AR_BSP 17
362 #define AR_BSPSTORE 18
363 #define AR_RNAT 19
364 #define AR_FCR 21
365 #define AR_EFLAG 24
366 #define AR_CSD 25
367 #define AR_SSD 26
368 #define AR_CFLG 27
369 #define AR_FSR 28
370 #define AR_FIR 29
371 #define AR_FDR 30
372 #define AR_CCV 32
373 #define AR_UNAT 36
374 #define AR_FPSR 40
375 #define AR_ITC 44
376 #define AR_RUC 45
377 #define AR_PFS 64
378 #define AR_LC 65
379 #define AR_EC 66
380
381 static const struct
382 {
383 const char *name;
384 unsigned int regnum;
385 }
386 ar[] =
387 {
388 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
389 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
390 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
391 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
392 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
393 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
394 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
395 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
396 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
397 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
398 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
399 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
400 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
401 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
402 };
403
404 /* control registers: */
405
406 #define CR_DCR 0
407 #define CR_ITM 1
408 #define CR_IVA 2
409 #define CR_PTA 8
410 #define CR_GPTA 9
411 #define CR_IPSR 16
412 #define CR_ISR 17
413 #define CR_IIP 19
414 #define CR_IFA 20
415 #define CR_ITIR 21
416 #define CR_IIPA 22
417 #define CR_IFS 23
418 #define CR_IIM 24
419 #define CR_IHA 25
420 #define CR_IIB0 26
421 #define CR_IIB1 27
422 #define CR_LID 64
423 #define CR_IVR 65
424 #define CR_TPR 66
425 #define CR_EOI 67
426 #define CR_IRR0 68
427 #define CR_IRR3 71
428 #define CR_ITV 72
429 #define CR_PMV 73
430 #define CR_CMCV 74
431 #define CR_LRR0 80
432 #define CR_LRR1 81
433
434 static const struct
435 {
436 const char *name;
437 unsigned int regnum;
438 }
439 cr[] =
440 {
441 {"cr.dcr", CR_DCR},
442 {"cr.itm", CR_ITM},
443 {"cr.iva", CR_IVA},
444 {"cr.pta", CR_PTA},
445 {"cr.gpta", CR_GPTA},
446 {"cr.ipsr", CR_IPSR},
447 {"cr.isr", CR_ISR},
448 {"cr.iip", CR_IIP},
449 {"cr.ifa", CR_IFA},
450 {"cr.itir", CR_ITIR},
451 {"cr.iipa", CR_IIPA},
452 {"cr.ifs", CR_IFS},
453 {"cr.iim", CR_IIM},
454 {"cr.iha", CR_IHA},
455 {"cr.iib0", CR_IIB0},
456 {"cr.iib1", CR_IIB1},
457 {"cr.lid", CR_LID},
458 {"cr.ivr", CR_IVR},
459 {"cr.tpr", CR_TPR},
460 {"cr.eoi", CR_EOI},
461 {"cr.irr0", CR_IRR0},
462 {"cr.irr1", CR_IRR0 + 1},
463 {"cr.irr2", CR_IRR0 + 2},
464 {"cr.irr3", CR_IRR3},
465 {"cr.itv", CR_ITV},
466 {"cr.pmv", CR_PMV},
467 {"cr.cmcv", CR_CMCV},
468 {"cr.lrr0", CR_LRR0},
469 {"cr.lrr1", CR_LRR1}
470 };
471
472 #define PSR_MFL 4
473 #define PSR_IC 13
474 #define PSR_DFL 18
475 #define PSR_CPL 32
476
477 static const struct const_desc
478 {
479 const char *name;
480 valueT value;
481 }
482 const_bits[] =
483 {
484 /* PSR constant masks: */
485
486 /* 0: reserved */
487 {"psr.be", ((valueT) 1) << 1},
488 {"psr.up", ((valueT) 1) << 2},
489 {"psr.ac", ((valueT) 1) << 3},
490 {"psr.mfl", ((valueT) 1) << 4},
491 {"psr.mfh", ((valueT) 1) << 5},
492 /* 6-12: reserved */
493 {"psr.ic", ((valueT) 1) << 13},
494 {"psr.i", ((valueT) 1) << 14},
495 {"psr.pk", ((valueT) 1) << 15},
496 /* 16: reserved */
497 {"psr.dt", ((valueT) 1) << 17},
498 {"psr.dfl", ((valueT) 1) << 18},
499 {"psr.dfh", ((valueT) 1) << 19},
500 {"psr.sp", ((valueT) 1) << 20},
501 {"psr.pp", ((valueT) 1) << 21},
502 {"psr.di", ((valueT) 1) << 22},
503 {"psr.si", ((valueT) 1) << 23},
504 {"psr.db", ((valueT) 1) << 24},
505 {"psr.lp", ((valueT) 1) << 25},
506 {"psr.tb", ((valueT) 1) << 26},
507 {"psr.rt", ((valueT) 1) << 27},
508 /* 28-31: reserved */
509 /* 32-33: cpl (current privilege level) */
510 {"psr.is", ((valueT) 1) << 34},
511 {"psr.mc", ((valueT) 1) << 35},
512 {"psr.it", ((valueT) 1) << 36},
513 {"psr.id", ((valueT) 1) << 37},
514 {"psr.da", ((valueT) 1) << 38},
515 {"psr.dd", ((valueT) 1) << 39},
516 {"psr.ss", ((valueT) 1) << 40},
517 /* 41-42: ri (restart instruction) */
518 {"psr.ed", ((valueT) 1) << 43},
519 {"psr.bn", ((valueT) 1) << 44},
520 };
521
522 /* indirect register-sets/memory: */
523
524 static const struct
525 {
526 const char *name;
527 unsigned int regnum;
528 }
529 indirect_reg[] =
530 {
531 { "CPUID", IND_CPUID },
532 { "cpuid", IND_CPUID },
533 { "dbr", IND_DBR },
534 { "dtr", IND_DTR },
535 { "itr", IND_ITR },
536 { "ibr", IND_IBR },
537 { "msr", IND_MSR },
538 { "pkr", IND_PKR },
539 { "pmc", IND_PMC },
540 { "pmd", IND_PMD },
541 { "dahr", IND_DAHR },
542 { "rr", IND_RR },
543 };
544
545 /* Pseudo functions used to indicate relocation types (these functions
546    start with an at sign (@)).  */
547 static struct
548 {
549 const char *name;
550 enum pseudo_type
551 {
552 PSEUDO_FUNC_NONE,
553 PSEUDO_FUNC_RELOC,
554 PSEUDO_FUNC_CONST,
555 PSEUDO_FUNC_REG,
556 PSEUDO_FUNC_FLOAT
557 }
558 type;
559 union
560 {
561 unsigned long ival;
562 symbolS *sym;
563 }
564 u;
565 }
566 pseudo_func[] =
567 {
568 /* reloc pseudo functions (these must come first!): */
569 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
570 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
571 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
572 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
574 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
575 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
576 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
577 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
579 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
581 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
582 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
585 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
586 #ifdef TE_VMS
587 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
588 #endif
589
590 /* mbtype4 constants: */
591 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
592 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
593 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
594 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
595 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
596
597 /* fclass constants: */
598 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
599 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
600 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
601 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
602 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
603 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
604 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
605 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
606 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
607
608 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
609
610 /* hint constants: */
611 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
612 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
613
614 /* tf constants: */
615 { "clz", PSEUDO_FUNC_CONST, { 32 } },
616 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
617 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
618
619 /* unwind-related constants: */
620 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
621 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
622 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
623 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
624 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
625 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
626 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
627
628 /* unwind-related registers: */
629 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
630 };
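/* Editorial illustration (not part of the original source): the reloc pseudo
   functions above match the @-prefixed operators written in assembly source;
   e.g. an instruction such as
       addl r2 = @ltoff(sym), gp
   is handled via the "ltoff" entry, which pairs with FUNC_LT_RELATIVE by
   position in enum reloc_func.  */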
631
632 /* 41-bit nop opcodes (one per unit): */
633 static const bfd_vma nop[IA64_NUM_UNITS] =
634 {
635 0x0000000000LL, /* NIL => break 0 */
636 0x0008000000LL, /* I-unit nop */
637 0x0008000000LL, /* M-unit nop */
638 0x4000000000LL, /* B-unit nop */
639 0x0008000000LL, /* F-unit nop */
640 0x0000000000LL, /* L-"unit" nop immediate */
641 0x0008000000LL, /* X-unit nop */
642 };
643
644 /* Can't be `const' as it's passed to input routines (which have the
645    habit of setting temporary sentinels).  */
646 static char special_section_name[][20] =
647 {
648 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
649 {".IA_64.unwind"}, {".IA_64.unwind_info"},
650 {".init_array"}, {".fini_array"}
651 };
652
653 /* The best template for a particular sequence of up to three
654 instructions: */
655 #define N IA64_NUM_TYPES
656 static unsigned char best_template[N][N][N];
657 #undef N
658
659 /* Resource dependencies currently in effect */
660 static struct rsrc {
661 int depind; /* dependency index */
662 const struct ia64_dependency *dependency; /* actual dependency */
663 unsigned specific:1, /* is this a specific bit/regno? */
664 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
665 int index; /* specific regno/bit within dependency */
666 int note; /* optional qualifying note (0 if none) */
667 #define STATE_NONE 0
668 #define STATE_STOP 1
669 #define STATE_SRLZ 2
670 int insn_srlz; /* current insn serialization state */
671 int data_srlz; /* current data serialization state */
672 int qp_regno; /* qualifying predicate for this usage */
673 const char *file; /* what file marked this dependency */
674 unsigned int line; /* what line marked this dependency */
675 struct mem_offset mem_offset; /* optional memory offset hint */
676 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
677 int path; /* corresponding code entry index */
678 } *regdeps = NULL;
679 static int regdepslen = 0;
680 static int regdepstotlen = 0;
681 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
682 static const char *dv_sem[] = { "none", "implied", "impliedf",
683 "data", "instr", "specific", "stop", "other" };
684 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
685
686 /* Current state of PR mutual exclusion.  */
687 static struct qpmutex {
688 valueT prmask;
689 int path;
690 } *qp_mutexes = NULL; /* QP mutex bitmasks */
691 static int qp_mutexeslen = 0;
692 static int qp_mutexestotlen = 0;
693 static valueT qp_safe_across_calls = 0;
694
695 /* Current state of PR implications */
696 static struct qp_imply {
697 unsigned p1:6;
698 unsigned p2:6;
699 unsigned p2_branched:1;
700 int path;
701 } *qp_implies = NULL;
702 static int qp_implieslen = 0;
703 static int qp_impliestotlen = 0;
704
705 /* Keep track of static GR values so that indirect register usage can
706 sometimes be tracked. */
707 static struct gr {
708 unsigned known:1;
709 int path;
710 valueT value;
711 } gr_values[128] = {
712 {
713 1,
714 #ifdef INT_MAX
715 INT_MAX,
716 #else
717 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
718 #endif
719 0
720 }
721 };
722
723 /* Remember the alignment frag. */
724 static fragS *align_frag;
725
726 /* These are the routines required to output the various types of
727 unwind records. */
728
729 /* A slot_number is a frag address plus the slot index (0-2). We use the
730 frag address here so that if there is a section switch in the middle of
731 a function, then instructions emitted to a different section are not
732 counted. Since there may be more than one frag for a function, this
733 means we also need to keep track of which frag this address belongs to
734 so we can compute inter-frag distances. This also nicely solves the
735 problem with nops emitted for align directives, which can't easily be
736 counted, but can easily be derived from frag sizes. */
737
738 typedef struct unw_rec_list {
739 unwind_record r;
740 unsigned long slot_number;
741 fragS *slot_frag;
742 struct unw_rec_list *next;
743 } unw_rec_list;
744
745 #define SLOT_NUM_NOT_SET (unsigned)-1
746
747 /* Linked list of saved prologue counts. A very poor
748 implementation of a map from label numbers to prologue counts. */
749 typedef struct label_prologue_count
750 {
751 struct label_prologue_count *next;
752 unsigned long label_number;
753 unsigned int prologue_count;
754 } label_prologue_count;
755
756 typedef struct proc_pending
757 {
758 symbolS *sym;
759 struct proc_pending *next;
760 } proc_pending;
761
762 static struct
763 {
764 /* Maintain a list of unwind entries for the current function. */
765 unw_rec_list *list;
766 unw_rec_list *tail;
767
768 /* Any unwind entries that should be attached to the current slot
769 that an insn is being constructed for. */
770 unw_rec_list *current_entry;
771
772 /* These are used to create the unwind table entry for this function. */
773 proc_pending proc_pending;
774 symbolS *info; /* pointer to unwind info */
775 symbolS *personality_routine;
776 segT saved_text_seg;
777 subsegT saved_text_subseg;
778 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
779
780 /* TRUE if processing unwind directives in a prologue region. */
781 unsigned int prologue : 1;
782 unsigned int prologue_mask : 4;
783 unsigned int prologue_gr : 7;
784 unsigned int body : 1;
785 unsigned int insn : 1;
786 unsigned int prologue_count; /* number of .prologues seen so far */
787 /* Prologue counts at previous .label_state directives. */
788 struct label_prologue_count * saved_prologue_counts;
789
790 /* List of split up .save-s. */
791 unw_p_record *pending_saves;
792 } unwind;
793
794 /* The input value is a negated offset from psp, i.e. it specifies the address
795    psp - offset.  An encoded value E denotes the address psp + 16 - 4 * E, so
796    we must add 16 and divide by 4 to obtain the encoded value.  */
797
798 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
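/* Editorial example (not part of the original source): a save location 16
   bytes below psp has OFFSET == 16, so ENCODED_PSP_OFFSET (16) == (16 + 16) / 4
   == 8, and decoding gives psp + 16 - 4 * 8 == psp - 16 again.  */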
799
800 typedef void (*vbyte_func) (int, char *, char *);
801
802 /* Forward declarations: */
803 static void dot_alias (int);
804 static int parse_operand_and_eval (expressionS *, int);
805 static void emit_one_bundle (void);
806 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
807 bfd_reloc_code_real_type);
808 static void insn_group_break (int, int, int);
809 static void add_qp_mutex (valueT);
810 static void add_qp_imply (int, int);
811 static void clear_qp_mutex (valueT);
812 static void clear_qp_implies (valueT, valueT);
813 static void print_dependency (const char *, int);
814 static void instruction_serialization (void);
815 static void data_serialization (void);
816 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
817 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
818 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
819 static void free_saved_prologue_counts (void);
820
821 /* Determine if application register REGNUM resides only in the integer
822 unit (as opposed to the memory unit). */
823 static int
824 ar_is_only_in_integer_unit (int reg)
825 {
826 reg -= REG_AR;
827 return reg >= 64 && reg <= 111;
828 }
829
830 /* Determine if application register REGNUM resides only in the memory
831 unit (as opposed to the integer unit). */
832 static int
833 ar_is_only_in_memory_unit (int reg)
834 {
835 reg -= REG_AR;
836 return reg >= 0 && reg <= 47;
837 }
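/* Editorial illustration (not part of the original source): with the AR
   numbers defined above, ar.pfs (AR_PFS == 64) is integer-unit-only and
   ar.rsc (AR_RSC == 16) is memory-unit-only according to these helpers.  */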
838
839 /* Switch to section NAME and create section if necessary. It's
840 rather ugly that we have to manipulate input_line_pointer but I
841 don't see any other way to accomplish the same thing without
842 changing obj-elf.c (which may be the Right Thing, in the end). */
843 static void
844 set_section (char *name)
845 {
846 char *saved_input_line_pointer;
847
848 saved_input_line_pointer = input_line_pointer;
849 input_line_pointer = name;
850 obj_elf_section (0);
851 input_line_pointer = saved_input_line_pointer;
852 }
853
854 /* Map 's' to SHF_IA_64_SHORT. */
855
856 bfd_vma
857 ia64_elf_section_letter (int letter, const char **ptr_msg)
858 {
859 if (letter == 's')
860 return SHF_IA_64_SHORT;
861 else if (letter == 'o')
862 return SHF_LINK_ORDER;
863 #ifdef TE_VMS
864 else if (letter == 'O')
865 return SHF_IA_64_VMS_OVERLAID;
866 else if (letter == 'g')
867 return SHF_IA_64_VMS_GLOBAL;
868 #endif
869
870 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
871 return -1;
872 }
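/* Editorial illustration (not part of the original source): the 's' flag is
   used in .section directives for short-data sections, e.g.
       .section .sdata, "aws", @progbits
   which this function maps to SHF_IA_64_SHORT.  */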
873
874 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
875
876 flagword
877 ia64_elf_section_flags (flagword flags,
878 bfd_vma attr,
879 int type ATTRIBUTE_UNUSED)
880 {
881 if (attr & SHF_IA_64_SHORT)
882 flags |= SEC_SMALL_DATA;
883 return flags;
884 }
885
886 int
887 ia64_elf_section_type (const char *str, size_t len)
888 {
889 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
890
891 if (STREQ (ELF_STRING_ia64_unwind_info))
892 return SHT_PROGBITS;
893
894 if (STREQ (ELF_STRING_ia64_unwind_info_once))
895 return SHT_PROGBITS;
896
897 if (STREQ (ELF_STRING_ia64_unwind))
898 return SHT_IA_64_UNWIND;
899
900 if (STREQ (ELF_STRING_ia64_unwind_once))
901 return SHT_IA_64_UNWIND;
902
903 if (STREQ ("unwind"))
904 return SHT_IA_64_UNWIND;
905
906 return -1;
907 #undef STREQ
908 }
909
910 static unsigned int
911 set_regstack (unsigned int ins,
912 unsigned int locs,
913 unsigned int outs,
914 unsigned int rots)
915 {
916 /* Size of frame. */
917 unsigned int sof;
918
919 sof = ins + locs + outs;
920 if (sof > 96)
921 {
922 as_bad (_("Size of frame exceeds maximum of 96 registers"));
923 return 0;
924 }
925 if (rots > sof)
926 {
927 as_warn (_("Size of rotating registers exceeds frame size"));
928 return 0;
929 }
930 md.in.base = REG_GR + 32;
931 md.loc.base = md.in.base + ins;
932 md.out.base = md.loc.base + locs;
933
934 md.in.num_regs = ins;
935 md.loc.num_regs = locs;
936 md.out.num_regs = outs;
937 md.rot.num_regs = rots;
938 return sof;
939 }
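/* Editorial example (not part of the original source): a frame with ins == 2,
   locs == 3, outs == 1, rots == 0 yields sof == 6, with the input registers
   starting at r32, locals at r34 and outputs at r37.  */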
940
941 void
942 ia64_flush_insns (void)
943 {
944 struct label_fix *lfix;
945 segT saved_seg;
946 subsegT saved_subseg;
947 unw_rec_list *ptr;
948 bool mark;
949
950 if (!md.last_text_seg)
951 return;
952
953 saved_seg = now_seg;
954 saved_subseg = now_subseg;
955
956 subseg_set (md.last_text_seg, md.last_text_subseg);
957
958 while (md.num_slots_in_use > 0)
959 emit_one_bundle (); /* force out queued instructions */
960
961 /* In case there are labels following the last instruction, resolve
962 those now. */
963 mark = false;
964 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
965 {
966 symbol_set_value_now (lfix->sym);
967 mark |= lfix->dw2_mark_labels;
968 }
969 if (mark)
970 {
971 dwarf2_where (&CURR_SLOT.debug_line);
972 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
973 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
974 dwarf2_consume_line_info ();
975 }
976 CURR_SLOT.label_fixups = 0;
977
978 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
979 symbol_set_value_now (lfix->sym);
980 CURR_SLOT.tag_fixups = 0;
981
982 /* In case there are unwind directives following the last instruction,
983 resolve those now. We only handle prologue, body, and endp directives
984 here. Give an error for others. */
985 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
986 {
987 switch (ptr->r.type)
988 {
989 case prologue:
990 case prologue_gr:
991 case body:
992 case endp:
993 ptr->slot_number = (unsigned long) frag_more (0);
994 ptr->slot_frag = frag_now;
995 break;
996
997 /* Allow any record which doesn't have a "t" field (i.e.,
998 doesn't relate to a particular instruction). */
999 case unwabi:
1000 case br_gr:
1001 case copy_state:
1002 case fr_mem:
1003 case frgr_mem:
1004 case gr_gr:
1005 case gr_mem:
1006 case label_state:
1007 case rp_br:
1008 case spill_base:
1009 case spill_mask:
1010 /* nothing */
1011 break;
1012
1013 default:
1014 as_bad (_("Unwind directive not followed by an instruction."));
1015 break;
1016 }
1017 }
1018 unwind.current_entry = NULL;
1019
1020 subseg_set (saved_seg, saved_subseg);
1021
1022 if (md.qp.X_op == O_register)
1023 as_bad (_("qualifying predicate not followed by instruction"));
1024 }
1025
1026 void
1027 ia64_cons_align (int nbytes)
1028 {
1029 if (md.auto_align)
1030 {
1031 int log;
1032 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1033 log++;
1034
1035 do_align (log, NULL, 0, 0);
1036 }
1037 }
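/* Editorial example (not part of the original source): when md.auto_align is
   set and nbytes == 8, the loop above computes log == 3 and do_align aligns
   the location counter to a 2^3 == 8 byte boundary.  */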
1038
1039 #ifdef TE_VMS
1040
1041 /* .vms_common section, symbol, size, alignment */
1042
1043 static void
1044 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1045 {
1046 const char *sec_name;
1047 char *sym_name;
1048 char c;
1049 offsetT size;
1050 offsetT cur_size;
1051 offsetT temp;
1052 symbolS *symbolP;
1053 segT current_seg = now_seg;
1054 subsegT current_subseg = now_subseg;
1055 offsetT log_align;
1056
1057 /* Section name. */
1058 sec_name = obj_elf_section_name ();
1059 if (sec_name == NULL)
1060 return;
1061
1062 /* Symbol name. */
1063 SKIP_WHITESPACE ();
1064 if (*input_line_pointer == ',')
1065 {
1066 input_line_pointer++;
1067 SKIP_WHITESPACE ();
1068 }
1069 else
1070 {
1071 as_bad (_("expected ',' after section name"));
1072 ignore_rest_of_line ();
1073 return;
1074 }
1075
1076 c = get_symbol_name (&sym_name);
1077
1078 if (input_line_pointer == sym_name)
1079 {
1080 (void) restore_line_pointer (c);
1081 as_bad (_("expected symbol name"));
1082 ignore_rest_of_line ();
1083 return;
1084 }
1085
1086 symbolP = symbol_find_or_make (sym_name);
1087 (void) restore_line_pointer (c);
1088
1089 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1090 && !S_IS_COMMON (symbolP))
1091 {
1092 as_bad (_("Ignoring attempt to re-define symbol"));
1093 ignore_rest_of_line ();
1094 return;
1095 }
1096
1097 /* Symbol size. */
1098 SKIP_WHITESPACE ();
1099 if (*input_line_pointer == ',')
1100 {
1101 input_line_pointer++;
1102 SKIP_WHITESPACE ();
1103 }
1104 else
1105 {
1106 as_bad (_("expected ',' after symbol name"));
1107 ignore_rest_of_line ();
1108 return;
1109 }
1110
1111 temp = get_absolute_expression ();
1112 size = temp;
1113 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1114 if (temp != size)
1115 {
1116 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1117 ignore_rest_of_line ();
1118 return;
1119 }
1120
1121 /* Alignment. */
1122 SKIP_WHITESPACE ();
1123 if (*input_line_pointer == ',')
1124 {
1125 input_line_pointer++;
1126 SKIP_WHITESPACE ();
1127 }
1128 else
1129 {
1130 as_bad (_("expected ',' after symbol size"));
1131 ignore_rest_of_line ();
1132 return;
1133 }
1134
1135 log_align = get_absolute_expression ();
1136
1137 demand_empty_rest_of_line ();
1138
1139 obj_elf_change_section
1140 (sec_name, SHT_NOBITS,
1141 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1142 0, NULL, true);
1143
1144 S_SET_VALUE (symbolP, 0);
1145 S_SET_SIZE (symbolP, size);
1146 S_SET_EXTERNAL (symbolP);
1147 S_SET_SEGMENT (symbolP, now_seg);
1148
1149 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1150
1151 record_alignment (now_seg, log_align);
1152
1153 cur_size = bfd_section_size (now_seg);
1154 if ((int) size > cur_size)
1155 {
1156 char *pfrag
1157 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1158 (valueT)size - (valueT)cur_size, NULL);
1159 *pfrag = 0;
1160 bfd_set_section_size (now_seg, size);
1161 }
1162
1163 /* Switch back to current segment. */
1164 subseg_set (current_seg, current_subseg);
1165
1166 #ifdef md_elf_section_change_hook
1167 md_elf_section_change_hook ();
1168 #endif
1169 }
1170
1171 #endif /* TE_VMS */
1172
1173 /* Output COUNT bytes to a memory location. */
1174 static char *vbyte_mem_ptr = NULL;
1175
1176 static void
1177 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1178 {
1179 int x;
1180 if (vbyte_mem_ptr == NULL)
1181 abort ();
1182
1183 if (count == 0)
1184 return;
1185 for (x = 0; x < count; x++)
1186 *(vbyte_mem_ptr++) = ptr[x];
1187 }
1188
1189 /* Count the number of bytes required for records. */
1190 static int vbyte_count = 0;
1191 static void
1192 count_output (int count,
1193 char *ptr ATTRIBUTE_UNUSED,
1194 char *comment ATTRIBUTE_UNUSED)
1195 {
1196 vbyte_count += count;
1197 }
1198
1199 static void
1200 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1201 {
1202 int r = 0;
1203 char byte;
1204 if (rlen > 0x1f)
1205 {
1206 output_R3_format (f, rtype, rlen);
1207 return;
1208 }
1209
1210 if (rtype == body)
1211 r = 1;
1212 else if (rtype != prologue)
1213 as_bad (_("record type is not valid"));
1214
1215 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1216 (*f) (1, &byte, NULL);
1217 }
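/* Editorial example (not part of the original source): a prologue region of
   5 slots (r == 0, rlen == 5) is emitted as the single byte
   UNW_R1 | (0 << 5) | 5; region lengths above 0x1f are delegated to the R3
   format instead.  */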
1218
1219 static void
1220 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1221 {
1222 char bytes[20];
1223 int count = 2;
1224 mask = (mask & 0x0f);
1225 grsave = (grsave & 0x7f);
1226
1227 bytes[0] = (UNW_R2 | (mask >> 1));
1228 bytes[1] = (((mask & 0x01) << 7) | grsave);
1229 count += output_leb128 (bytes + 2, rlen, 0);
1230 (*f) (count, bytes, NULL);
1231 }
1232
1233 static void
1234 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1235 {
1236 int r = 0, count;
1237 char bytes[20];
1238 if (rlen <= 0x1f)
1239 {
1240 output_R1_format (f, rtype, rlen);
1241 return;
1242 }
1243
1244 if (rtype == body)
1245 r = 1;
1246 else if (rtype != prologue)
1247 as_bad (_("record type is not valid"));
1248 bytes[0] = (UNW_R3 | r);
1249 count = output_leb128 (bytes + 1, rlen, 0);
1250 (*f) (count + 1, bytes, NULL);
1251 }
1252
1253 static void
1254 output_P1_format (vbyte_func f, int brmask)
1255 {
1256 char byte;
1257 byte = UNW_P1 | (brmask & 0x1f);
1258 (*f) (1, &byte, NULL);
1259 }
1260
1261 static void
1262 output_P2_format (vbyte_func f, int brmask, int gr)
1263 {
1264 char bytes[2];
1265 brmask = (brmask & 0x1f);
1266 bytes[0] = UNW_P2 | (brmask >> 1);
1267 bytes[1] = (((brmask & 1) << 7) | gr);
1268 (*f) (2, bytes, NULL);
1269 }
1270
1271 static void
1272 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1273 {
1274 char bytes[2];
1275 int r = 0;
1276 reg = (reg & 0x7f);
1277 switch (rtype)
1278 {
1279 case psp_gr:
1280 r = 0;
1281 break;
1282 case rp_gr:
1283 r = 1;
1284 break;
1285 case pfs_gr:
1286 r = 2;
1287 break;
1288 case preds_gr:
1289 r = 3;
1290 break;
1291 case unat_gr:
1292 r = 4;
1293 break;
1294 case lc_gr:
1295 r = 5;
1296 break;
1297 case rp_br:
1298 r = 6;
1299 break;
1300 case rnat_gr:
1301 r = 7;
1302 break;
1303 case bsp_gr:
1304 r = 8;
1305 break;
1306 case bspstore_gr:
1307 r = 9;
1308 break;
1309 case fpsr_gr:
1310 r = 10;
1311 break;
1312 case priunat_gr:
1313 r = 11;
1314 break;
1315 default:
1316 as_bad (_("Invalid record type for P3 format."));
1317 }
1318 bytes[0] = (UNW_P3 | (r >> 1));
1319 bytes[1] = (((r & 1) << 7) | reg);
1320 (*f) (2, bytes, NULL);
1321 }
1322
1323 static void
1324 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1325 {
1326 imask[0] = UNW_P4;
1327 (*f) (imask_size, (char *) imask, NULL);
1328 }
1329
1330 static void
1331 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1332 {
1333 char bytes[4];
1334 grmask = (grmask & 0x0f);
1335
1336 bytes[0] = UNW_P5;
1337 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1338 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1339 bytes[3] = (frmask & 0x000000ff);
1340 (*f) (4, bytes, NULL);
1341 }
1342
1343 static void
1344 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1345 {
1346 char byte;
1347 int r = 0;
1348
1349 if (rtype == gr_mem)
1350 r = 1;
1351 else if (rtype != fr_mem)
1352 as_bad (_("Invalid record type for format P6"));
1353 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1354 (*f) (1, &byte, NULL);
1355 }
1356
1357 static void
1358 output_P7_format (vbyte_func f,
1359 unw_record_type rtype,
1360 unsigned long w1,
1361 unsigned long w2)
1362 {
1363 char bytes[20];
1364 int count = 1;
1365 int r = 0;
1366 count += output_leb128 (bytes + 1, w1, 0);
1367 switch (rtype)
1368 {
1369 case mem_stack_f:
1370 r = 0;
1371 count += output_leb128 (bytes + count, w2 >> 4, 0);
1372 break;
1373 case mem_stack_v:
1374 r = 1;
1375 break;
1376 case spill_base:
1377 r = 2;
1378 break;
1379 case psp_sprel:
1380 r = 3;
1381 break;
1382 case rp_when:
1383 r = 4;
1384 break;
1385 case rp_psprel:
1386 r = 5;
1387 break;
1388 case pfs_when:
1389 r = 6;
1390 break;
1391 case pfs_psprel:
1392 r = 7;
1393 break;
1394 case preds_when:
1395 r = 8;
1396 break;
1397 case preds_psprel:
1398 r = 9;
1399 break;
1400 case lc_when:
1401 r = 10;
1402 break;
1403 case lc_psprel:
1404 r = 11;
1405 break;
1406 case unat_when:
1407 r = 12;
1408 break;
1409 case unat_psprel:
1410 r = 13;
1411 break;
1412 case fpsr_when:
1413 r = 14;
1414 break;
1415 case fpsr_psprel:
1416 r = 15;
1417 break;
1418 default:
1419 break;
1420 }
1421 bytes[0] = (UNW_P7 | r);
1422 (*f) (count, bytes, NULL);
1423 }
1424
1425 static void
1426 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1427 {
1428 char bytes[20];
1429 int r = 0;
1430 int count = 2;
1431 bytes[0] = UNW_P8;
1432 switch (rtype)
1433 {
1434 case rp_sprel:
1435 r = 1;
1436 break;
1437 case pfs_sprel:
1438 r = 2;
1439 break;
1440 case preds_sprel:
1441 r = 3;
1442 break;
1443 case lc_sprel:
1444 r = 4;
1445 break;
1446 case unat_sprel:
1447 r = 5;
1448 break;
1449 case fpsr_sprel:
1450 r = 6;
1451 break;
1452 case bsp_when:
1453 r = 7;
1454 break;
1455 case bsp_psprel:
1456 r = 8;
1457 break;
1458 case bsp_sprel:
1459 r = 9;
1460 break;
1461 case bspstore_when:
1462 r = 10;
1463 break;
1464 case bspstore_psprel:
1465 r = 11;
1466 break;
1467 case bspstore_sprel:
1468 r = 12;
1469 break;
1470 case rnat_when:
1471 r = 13;
1472 break;
1473 case rnat_psprel:
1474 r = 14;
1475 break;
1476 case rnat_sprel:
1477 r = 15;
1478 break;
1479 case priunat_when_gr:
1480 r = 16;
1481 break;
1482 case priunat_psprel:
1483 r = 17;
1484 break;
1485 case priunat_sprel:
1486 r = 18;
1487 break;
1488 case priunat_when_mem:
1489 r = 19;
1490 break;
1491 default:
1492 break;
1493 }
1494 bytes[1] = r;
1495 count += output_leb128 (bytes + 2, t, 0);
1496 (*f) (count, bytes, NULL);
1497 }
1498
1499 static void
1500 output_P9_format (vbyte_func f, int grmask, int gr)
1501 {
1502 char bytes[3];
1503 bytes[0] = UNW_P9;
1504 bytes[1] = (grmask & 0x0f);
1505 bytes[2] = (gr & 0x7f);
1506 (*f) (3, bytes, NULL);
1507 }
1508
1509 static void
1510 output_P10_format (vbyte_func f, int abi, int context)
1511 {
1512 char bytes[3];
1513 bytes[0] = UNW_P10;
1514 bytes[1] = (abi & 0xff);
1515 bytes[2] = (context & 0xff);
1516 (*f) (3, bytes, NULL);
1517 }
1518
1519 static void
1520 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1521 {
1522 char byte;
1523 int r = 0;
1524 if (label > 0x1f)
1525 {
1526 output_B4_format (f, rtype, label);
1527 return;
1528 }
1529 if (rtype == copy_state)
1530 r = 1;
1531 else if (rtype != label_state)
1532 as_bad (_("Invalid record type for format B1"));
1533
1534 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1535 (*f) (1, &byte, NULL);
1536 }
1537
1538 static void
1539 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1540 {
1541 char bytes[20];
1542 int count = 1;
1543 if (ecount > 0x1f)
1544 {
1545 output_B3_format (f, ecount, t);
1546 return;
1547 }
1548 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1549 count += output_leb128 (bytes + 1, t, 0);
1550 (*f) (count, bytes, NULL);
1551 }
1552
1553 static void
1554 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1555 {
1556 char bytes[20];
1557 int count = 1;
1558 if (ecount <= 0x1f)
1559 {
1560 output_B2_format (f, ecount, t);
1561 return;
1562 }
1563 bytes[0] = UNW_B3;
1564 count += output_leb128 (bytes + 1, t, 0);
1565 count += output_leb128 (bytes + count, ecount, 0);
1566 (*f) (count, bytes, NULL);
1567 }
1568
1569 static void
1570 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1571 {
1572 char bytes[20];
1573 int r = 0;
1574 int count = 1;
1575 if (label <= 0x1f)
1576 {
1577 output_B1_format (f, rtype, label);
1578 return;
1579 }
1580
1581 if (rtype == copy_state)
1582 r = 1;
1583 else if (rtype != label_state)
1584     as_bad (_("Invalid record type for format B4"));
1585
1586 bytes[0] = (UNW_B4 | (r << 3));
1587 count += output_leb128 (bytes + 1, label, 0);
1588 (*f) (count, bytes, NULL);
1589 }
1590
1591 static char
1592 format_ab_reg (int ab, int reg)
1593 {
1594 int ret;
1595 ab = (ab & 3);
1596 reg = (reg & 0x1f);
1597 ret = (ab << 5) | reg;
1598 return ret;
1599 }
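/* Editorial example (not part of the original source): format_ab_reg (2, 3)
   packs ab into bits 5-6 and reg into bits 0-4, giving (2 << 5) | 3 == 0x43.  */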
1600
1601 static void
1602 output_X1_format (vbyte_func f,
1603 unw_record_type rtype,
1604 int ab,
1605 int reg,
1606 unsigned long t,
1607 unsigned long w1)
1608 {
1609 char bytes[20];
1610 int r = 0;
1611 int count = 2;
1612 bytes[0] = UNW_X1;
1613
1614 if (rtype == spill_sprel)
1615 r = 1;
1616 else if (rtype != spill_psprel)
1617 as_bad (_("Invalid record type for format X1"));
1618 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1619 count += output_leb128 (bytes + 2, t, 0);
1620 count += output_leb128 (bytes + count, w1, 0);
1621 (*f) (count, bytes, NULL);
1622 }
1623
1624 static void
1625 output_X2_format (vbyte_func f,
1626 int ab,
1627 int reg,
1628 int x,
1629 int y,
1630 int treg,
1631 unsigned long t)
1632 {
1633 char bytes[20];
1634 int count = 3;
1635 bytes[0] = UNW_X2;
1636 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1637 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1638 count += output_leb128 (bytes + 3, t, 0);
1639 (*f) (count, bytes, NULL);
1640 }
1641
1642 static void
1643 output_X3_format (vbyte_func f,
1644 unw_record_type rtype,
1645 int qp,
1646 int ab,
1647 int reg,
1648 unsigned long t,
1649 unsigned long w1)
1650 {
1651 char bytes[20];
1652 int r = 0;
1653 int count = 3;
1654 bytes[0] = UNW_X3;
1655
1656 if (rtype == spill_sprel_p)
1657 r = 1;
1658 else if (rtype != spill_psprel_p)
1659 as_bad (_("Invalid record type for format X3"));
1660 bytes[1] = ((r << 7) | (qp & 0x3f));
1661 bytes[2] = format_ab_reg (ab, reg);
1662 count += output_leb128 (bytes + 3, t, 0);
1663 count += output_leb128 (bytes + count, w1, 0);
1664 (*f) (count, bytes, NULL);
1665 }
1666
1667 static void
1668 output_X4_format (vbyte_func f,
1669 int qp,
1670 int ab,
1671 int reg,
1672 int x,
1673 int y,
1674 int treg,
1675 unsigned long t)
1676 {
1677 char bytes[20];
1678 int count = 4;
1679 bytes[0] = UNW_X4;
1680 bytes[1] = (qp & 0x3f);
1681 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1682 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1683 count += output_leb128 (bytes + 4, t, 0);
1684 (*f) (count, bytes, NULL);
1685 }
1686
1687 /* This function checks whether there are any outstanding .save-s and
1688 discards them if so. */
1689
1690 static void
1691 check_pending_save (void)
1692 {
1693 if (unwind.pending_saves)
1694 {
1695 unw_rec_list *cur, *prev;
1696
1697 as_warn (_("Previous .save incomplete"));
1698 for (cur = unwind.list, prev = NULL; cur; )
1699 if (&cur->r.record.p == unwind.pending_saves)
1700 {
1701 if (prev)
1702 prev->next = cur->next;
1703 else
1704 unwind.list = cur->next;
1705 if (cur == unwind.tail)
1706 unwind.tail = prev;
1707 if (cur == unwind.current_entry)
1708 unwind.current_entry = cur->next;
1709           /* Don't free the first discarded record; it's being used as a
1710              terminator for (currently) br_gr and gr_gr processing, and keeping
1711              it also prevents leaving a dangling pointer to it in its
1712              predecessor.  */
1713 cur->r.record.p.grmask = 0;
1714 cur->r.record.p.brmask = 0;
1715 cur->r.record.p.frmask = 0;
1716 prev = cur->r.record.p.next;
1717 cur->r.record.p.next = NULL;
1718 cur = prev;
1719 break;
1720 }
1721 else
1722 {
1723 prev = cur;
1724 cur = cur->next;
1725 }
1726 while (cur)
1727 {
1728 prev = cur;
1729 cur = cur->r.record.p.next;
1730 free (prev);
1731 }
1732 unwind.pending_saves = NULL;
1733 }
1734 }
1735
1736 /* This function allocates a record list structure, and initializes fields. */
1737
1738 static unw_rec_list *
1739 alloc_record (unw_record_type t)
1740 {
1741 unw_rec_list *ptr;
1742 ptr = XNEW (unw_rec_list);
1743 memset (ptr, 0, sizeof (*ptr));
1744 ptr->slot_number = SLOT_NUM_NOT_SET;
1745 ptr->r.type = t;
1746 return ptr;
1747 }
1748
1749 /* Dummy unwind record used for calculating the length of the last prologue or
1750 body region. */
1751
1752 static unw_rec_list *
1753 output_endp (void)
1754 {
1755 unw_rec_list *ptr = alloc_record (endp);
1756 return ptr;
1757 }
1758
1759 static unw_rec_list *
1760 output_prologue (void)
1761 {
1762 unw_rec_list *ptr = alloc_record (prologue);
1763 return ptr;
1764 }
1765
1766 static unw_rec_list *
1767 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1768 {
1769 unw_rec_list *ptr = alloc_record (prologue_gr);
1770 ptr->r.record.r.grmask = saved_mask;
1771 ptr->r.record.r.grsave = reg;
1772 return ptr;
1773 }
1774
1775 static unw_rec_list *
1776 output_body (void)
1777 {
1778 unw_rec_list *ptr = alloc_record (body);
1779 return ptr;
1780 }
1781
1782 static unw_rec_list *
1783 output_mem_stack_f (unsigned int size)
1784 {
1785 unw_rec_list *ptr = alloc_record (mem_stack_f);
1786 ptr->r.record.p.size = size;
1787 return ptr;
1788 }
1789
1790 static unw_rec_list *
1791 output_mem_stack_v (void)
1792 {
1793 unw_rec_list *ptr = alloc_record (mem_stack_v);
1794 return ptr;
1795 }
1796
1797 static unw_rec_list *
1798 output_psp_gr (unsigned int gr)
1799 {
1800 unw_rec_list *ptr = alloc_record (psp_gr);
1801 ptr->r.record.p.r.gr = gr;
1802 return ptr;
1803 }
1804
1805 static unw_rec_list *
1806 output_psp_sprel (unsigned int offset)
1807 {
1808 unw_rec_list *ptr = alloc_record (psp_sprel);
1809 ptr->r.record.p.off.sp = offset / 4;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_rp_when (void)
1815 {
1816 unw_rec_list *ptr = alloc_record (rp_when);
1817 return ptr;
1818 }
1819
1820 static unw_rec_list *
1821 output_rp_gr (unsigned int gr)
1822 {
1823 unw_rec_list *ptr = alloc_record (rp_gr);
1824 ptr->r.record.p.r.gr = gr;
1825 return ptr;
1826 }
1827
1828 static unw_rec_list *
1829 output_rp_br (unsigned int br)
1830 {
1831 unw_rec_list *ptr = alloc_record (rp_br);
1832 ptr->r.record.p.r.br = br;
1833 return ptr;
1834 }
1835
1836 static unw_rec_list *
1837 output_rp_psprel (unsigned int offset)
1838 {
1839 unw_rec_list *ptr = alloc_record (rp_psprel);
1840 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1841 return ptr;
1842 }
1843
1844 static unw_rec_list *
1845 output_rp_sprel (unsigned int offset)
1846 {
1847 unw_rec_list *ptr = alloc_record (rp_sprel);
1848 ptr->r.record.p.off.sp = offset / 4;
1849 return ptr;
1850 }
1851
1852 static unw_rec_list *
1853 output_pfs_when (void)
1854 {
1855 unw_rec_list *ptr = alloc_record (pfs_when);
1856 return ptr;
1857 }
1858
1859 static unw_rec_list *
1860 output_pfs_gr (unsigned int gr)
1861 {
1862 unw_rec_list *ptr = alloc_record (pfs_gr);
1863 ptr->r.record.p.r.gr = gr;
1864 return ptr;
1865 }
1866
1867 static unw_rec_list *
1868 output_pfs_psprel (unsigned int offset)
1869 {
1870 unw_rec_list *ptr = alloc_record (pfs_psprel);
1871 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1872 return ptr;
1873 }
1874
1875 static unw_rec_list *
1876 output_pfs_sprel (unsigned int offset)
1877 {
1878 unw_rec_list *ptr = alloc_record (pfs_sprel);
1879 ptr->r.record.p.off.sp = offset / 4;
1880 return ptr;
1881 }
1882
1883 static unw_rec_list *
1884 output_preds_when (void)
1885 {
1886 unw_rec_list *ptr = alloc_record (preds_when);
1887 return ptr;
1888 }
1889
1890 static unw_rec_list *
1891 output_preds_gr (unsigned int gr)
1892 {
1893 unw_rec_list *ptr = alloc_record (preds_gr);
1894 ptr->r.record.p.r.gr = gr;
1895 return ptr;
1896 }
1897
1898 static unw_rec_list *
1899 output_preds_psprel (unsigned int offset)
1900 {
1901 unw_rec_list *ptr = alloc_record (preds_psprel);
1902 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1903 return ptr;
1904 }
1905
1906 static unw_rec_list *
1907 output_preds_sprel (unsigned int offset)
1908 {
1909 unw_rec_list *ptr = alloc_record (preds_sprel);
1910 ptr->r.record.p.off.sp = offset / 4;
1911 return ptr;
1912 }
1913
1914 static unw_rec_list *
1915 output_fr_mem (unsigned int mask)
1916 {
1917 unw_rec_list *ptr = alloc_record (fr_mem);
1918 unw_rec_list *cur = ptr;
1919
1920 ptr->r.record.p.frmask = mask;
1921 unwind.pending_saves = &ptr->r.record.p;
1922 for (;;)
1923 {
1924 unw_rec_list *prev = cur;
1925
1926 /* Clear least significant set bit. */
1927 mask &= ~(mask & (~mask + 1));
1928 if (!mask)
1929 return ptr;
1930 cur = alloc_record (fr_mem);
1931 cur->r.record.p.frmask = mask;
1932 /* Retain only least significant bit. */
1933 prev->r.record.p.frmask ^= mask;
1934 prev->r.record.p.next = cur;
1935 }
1936 }
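/* Editorial example (not part of the original source): output_fr_mem (0xa),
   i.e. frmask 0b1010, is split by the loop above into a chain of two records
   each naming a single register: the head keeps 0b0010 and its successor
   gets 0b1000.  */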
1937
1938 static unw_rec_list *
1939 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1940 {
1941 unw_rec_list *ptr = alloc_record (frgr_mem);
1942 unw_rec_list *cur = ptr;
1943
1944 unwind.pending_saves = &cur->r.record.p;
1945 cur->r.record.p.frmask = fr_mask;
1946 while (fr_mask)
1947 {
1948 unw_rec_list *prev = cur;
1949
1950 /* Clear least significant set bit. */
1951 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1952 if (!gr_mask && !fr_mask)
1953 return ptr;
1954 cur = alloc_record (frgr_mem);
1955 cur->r.record.p.frmask = fr_mask;
1956 /* Retain only least significant bit. */
1957 prev->r.record.p.frmask ^= fr_mask;
1958 prev->r.record.p.next = cur;
1959 }
1960 cur->r.record.p.grmask = gr_mask;
1961 for (;;)
1962 {
1963 unw_rec_list *prev = cur;
1964
1965 /* Clear least significant set bit. */
1966 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1967 if (!gr_mask)
1968 return ptr;
1969 cur = alloc_record (frgr_mem);
1970 cur->r.record.p.grmask = gr_mask;
1971 /* Retain only least significant bit. */
1972 prev->r.record.p.grmask ^= gr_mask;
1973 prev->r.record.p.next = cur;
1974 }
1975 }
1976
1977 static unw_rec_list *
1978 output_gr_gr (unsigned int mask, unsigned int reg)
1979 {
1980 unw_rec_list *ptr = alloc_record (gr_gr);
1981 unw_rec_list *cur = ptr;
1982
1983 ptr->r.record.p.grmask = mask;
1984 ptr->r.record.p.r.gr = reg;
1985 unwind.pending_saves = &ptr->r.record.p;
1986 for (;;)
1987 {
1988 unw_rec_list *prev = cur;
1989
1990 /* Clear least significant set bit. */
1991 mask &= ~(mask & (~mask + 1));
1992 if (!mask)
1993 return ptr;
1994 cur = alloc_record (gr_gr);
1995 cur->r.record.p.grmask = mask;
1996 /* Indicate this record shouldn't be output. */
1997 cur->r.record.p.r.gr = REG_NUM;
1998 /* Retain only least significant bit. */
1999 prev->r.record.p.grmask ^= mask;
2000 prev->r.record.p.next = cur;
2001 }
2002 }
2003
2004 static unw_rec_list *
2005 output_gr_mem (unsigned int mask)
2006 {
2007 unw_rec_list *ptr = alloc_record (gr_mem);
2008 unw_rec_list *cur = ptr;
2009
2010 ptr->r.record.p.grmask = mask;
2011 unwind.pending_saves = &ptr->r.record.p;
2012 for (;;)
2013 {
2014 unw_rec_list *prev = cur;
2015
2016 /* Clear least significant set bit. */
2017 mask &= ~(mask & (~mask + 1));
2018 if (!mask)
2019 return ptr;
2020 cur = alloc_record (gr_mem);
2021 cur->r.record.p.grmask = mask;
2022 /* Retain only least significant bit. */
2023 prev->r.record.p.grmask ^= mask;
2024 prev->r.record.p.next = cur;
2025 }
2026 }
2027
2028 static unw_rec_list *
2029 output_br_mem (unsigned int mask)
2030 {
2031 unw_rec_list *ptr = alloc_record (br_mem);
2032 unw_rec_list *cur = ptr;
2033
2034 ptr->r.record.p.brmask = mask;
2035 unwind.pending_saves = &ptr->r.record.p;
2036 for (;;)
2037 {
2038 unw_rec_list *prev = cur;
2039
2040 /* Clear least significant set bit. */
2041 mask &= ~(mask & (~mask + 1));
2042 if (!mask)
2043 return ptr;
2044 cur = alloc_record (br_mem);
2045 cur->r.record.p.brmask = mask;
2046 /* Retain only least significant bit. */
2047 prev->r.record.p.brmask ^= mask;
2048 prev->r.record.p.next = cur;
2049 }
2050 }
2051
2052 static unw_rec_list *
2053 output_br_gr (unsigned int mask, unsigned int reg)
2054 {
2055 unw_rec_list *ptr = alloc_record (br_gr);
2056 unw_rec_list *cur = ptr;
2057
2058 ptr->r.record.p.brmask = mask;
2059 ptr->r.record.p.r.gr = reg;
2060 unwind.pending_saves = &ptr->r.record.p;
2061 for (;;)
2062 {
2063 unw_rec_list *prev = cur;
2064
2065 /* Clear least significant set bit. */
2066 mask &= ~(mask & (~mask + 1));
2067 if (!mask)
2068 return ptr;
2069 cur = alloc_record (br_gr);
2070 cur->r.record.p.brmask = mask;
2071 /* Indicate this record shouldn't be output. */
2072 cur->r.record.p.r.gr = REG_NUM;
2073 /* Retain only least significant bit. */
2074 prev->r.record.p.brmask ^= mask;
2075 prev->r.record.p.next = cur;
2076 }
2077 }
2078
2079 static unw_rec_list *
2080 output_spill_base (unsigned int offset)
2081 {
2082 unw_rec_list *ptr = alloc_record (spill_base);
2083 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2084 return ptr;
2085 }
2086
2087 static unw_rec_list *
2088 output_unat_when (void)
2089 {
2090 unw_rec_list *ptr = alloc_record (unat_when);
2091 return ptr;
2092 }
2093
2094 static unw_rec_list *
2095 output_unat_gr (unsigned int gr)
2096 {
2097 unw_rec_list *ptr = alloc_record (unat_gr);
2098 ptr->r.record.p.r.gr = gr;
2099 return ptr;
2100 }
2101
2102 static unw_rec_list *
2103 output_unat_psprel (unsigned int offset)
2104 {
2105 unw_rec_list *ptr = alloc_record (unat_psprel);
2106 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2107 return ptr;
2108 }
2109
2110 static unw_rec_list *
2111 output_unat_sprel (unsigned int offset)
2112 {
2113 unw_rec_list *ptr = alloc_record (unat_sprel);
2114 ptr->r.record.p.off.sp = offset / 4;
2115 return ptr;
2116 }
2117
2118 static unw_rec_list *
2119 output_lc_when (void)
2120 {
2121 unw_rec_list *ptr = alloc_record (lc_when);
2122 return ptr;
2123 }
2124
2125 static unw_rec_list *
2126 output_lc_gr (unsigned int gr)
2127 {
2128 unw_rec_list *ptr = alloc_record (lc_gr);
2129 ptr->r.record.p.r.gr = gr;
2130 return ptr;
2131 }
2132
2133 static unw_rec_list *
2134 output_lc_psprel (unsigned int offset)
2135 {
2136 unw_rec_list *ptr = alloc_record (lc_psprel);
2137 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2138 return ptr;
2139 }
2140
2141 static unw_rec_list *
2142 output_lc_sprel (unsigned int offset)
2143 {
2144 unw_rec_list *ptr = alloc_record (lc_sprel);
2145 ptr->r.record.p.off.sp = offset / 4;
2146 return ptr;
2147 }
2148
2149 static unw_rec_list *
2150 output_fpsr_when (void)
2151 {
2152 unw_rec_list *ptr = alloc_record (fpsr_when);
2153 return ptr;
2154 }
2155
2156 static unw_rec_list *
2157 output_fpsr_gr (unsigned int gr)
2158 {
2159 unw_rec_list *ptr = alloc_record (fpsr_gr);
2160 ptr->r.record.p.r.gr = gr;
2161 return ptr;
2162 }
2163
2164 static unw_rec_list *
2165 output_fpsr_psprel (unsigned int offset)
2166 {
2167 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2168 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2169 return ptr;
2170 }
2171
2172 static unw_rec_list *
2173 output_fpsr_sprel (unsigned int offset)
2174 {
2175 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2176 ptr->r.record.p.off.sp = offset / 4;
2177 return ptr;
2178 }
2179
2180 static unw_rec_list *
2181 output_priunat_when_gr (void)
2182 {
2183 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2184 return ptr;
2185 }
2186
2187 static unw_rec_list *
2188 output_priunat_when_mem (void)
2189 {
2190 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2191 return ptr;
2192 }
2193
2194 static unw_rec_list *
2195 output_priunat_gr (unsigned int gr)
2196 {
2197 unw_rec_list *ptr = alloc_record (priunat_gr);
2198 ptr->r.record.p.r.gr = gr;
2199 return ptr;
2200 }
2201
2202 static unw_rec_list *
2203 output_priunat_psprel (unsigned int offset)
2204 {
2205 unw_rec_list *ptr = alloc_record (priunat_psprel);
2206 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2207 return ptr;
2208 }
2209
2210 static unw_rec_list *
2211 output_priunat_sprel (unsigned int offset)
2212 {
2213 unw_rec_list *ptr = alloc_record (priunat_sprel);
2214 ptr->r.record.p.off.sp = offset / 4;
2215 return ptr;
2216 }
2217
2218 static unw_rec_list *
2219 output_bsp_when (void)
2220 {
2221 unw_rec_list *ptr = alloc_record (bsp_when);
2222 return ptr;
2223 }
2224
2225 static unw_rec_list *
2226 output_bsp_gr (unsigned int gr)
2227 {
2228 unw_rec_list *ptr = alloc_record (bsp_gr);
2229 ptr->r.record.p.r.gr = gr;
2230 return ptr;
2231 }
2232
2233 static unw_rec_list *
2234 output_bsp_psprel (unsigned int offset)
2235 {
2236 unw_rec_list *ptr = alloc_record (bsp_psprel);
2237 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2238 return ptr;
2239 }
2240
2241 static unw_rec_list *
2242 output_bsp_sprel (unsigned int offset)
2243 {
2244 unw_rec_list *ptr = alloc_record (bsp_sprel);
2245 ptr->r.record.p.off.sp = offset / 4;
2246 return ptr;
2247 }
2248
2249 static unw_rec_list *
2250 output_bspstore_when (void)
2251 {
2252 unw_rec_list *ptr = alloc_record (bspstore_when);
2253 return ptr;
2254 }
2255
2256 static unw_rec_list *
2257 output_bspstore_gr (unsigned int gr)
2258 {
2259 unw_rec_list *ptr = alloc_record (bspstore_gr);
2260 ptr->r.record.p.r.gr = gr;
2261 return ptr;
2262 }
2263
2264 static unw_rec_list *
2265 output_bspstore_psprel (unsigned int offset)
2266 {
2267 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2268 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2269 return ptr;
2270 }
2271
2272 static unw_rec_list *
2273 output_bspstore_sprel (unsigned int offset)
2274 {
2275 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2276 ptr->r.record.p.off.sp = offset / 4;
2277 return ptr;
2278 }
2279
2280 static unw_rec_list *
2281 output_rnat_when (void)
2282 {
2283 unw_rec_list *ptr = alloc_record (rnat_when);
2284 return ptr;
2285 }
2286
2287 static unw_rec_list *
2288 output_rnat_gr (unsigned int gr)
2289 {
2290 unw_rec_list *ptr = alloc_record (rnat_gr);
2291 ptr->r.record.p.r.gr = gr;
2292 return ptr;
2293 }
2294
2295 static unw_rec_list *
2296 output_rnat_psprel (unsigned int offset)
2297 {
2298 unw_rec_list *ptr = alloc_record (rnat_psprel);
2299 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2300 return ptr;
2301 }
2302
2303 static unw_rec_list *
2304 output_rnat_sprel (unsigned int offset)
2305 {
2306 unw_rec_list *ptr = alloc_record (rnat_sprel);
2307 ptr->r.record.p.off.sp = offset / 4;
2308 return ptr;
2309 }
2310
2311 static unw_rec_list *
2312 output_unwabi (unsigned long abi, unsigned long context)
2313 {
2314 unw_rec_list *ptr = alloc_record (unwabi);
2315 ptr->r.record.p.abi = abi;
2316 ptr->r.record.p.context = context;
2317 return ptr;
2318 }
2319
2320 static unw_rec_list *
2321 output_epilogue (unsigned long ecount)
2322 {
2323 unw_rec_list *ptr = alloc_record (epilogue);
2324 ptr->r.record.b.ecount = ecount;
2325 return ptr;
2326 }
2327
2328 static unw_rec_list *
2329 output_label_state (unsigned long label)
2330 {
2331 unw_rec_list *ptr = alloc_record (label_state);
2332 ptr->r.record.b.label = label;
2333 return ptr;
2334 }
2335
2336 static unw_rec_list *
2337 output_copy_state (unsigned long label)
2338 {
2339 unw_rec_list *ptr = alloc_record (copy_state);
2340 ptr->r.record.b.label = label;
2341 return ptr;
2342 }
2343
2344 static unw_rec_list *
2345 output_spill_psprel (unsigned int ab,
2346 unsigned int reg,
2347 unsigned int offset,
2348 unsigned int predicate)
2349 {
2350 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2351 ptr->r.record.x.ab = ab;
2352 ptr->r.record.x.reg = reg;
2353 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2354 ptr->r.record.x.qp = predicate;
2355 return ptr;
2356 }
2357
2358 static unw_rec_list *
2359 output_spill_sprel (unsigned int ab,
2360 unsigned int reg,
2361 unsigned int offset,
2362 unsigned int predicate)
2363 {
2364 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2365 ptr->r.record.x.ab = ab;
2366 ptr->r.record.x.reg = reg;
2367 ptr->r.record.x.where.spoff = offset / 4;
2368 ptr->r.record.x.qp = predicate;
2369 return ptr;
2370 }
2371
2372 static unw_rec_list *
2373 output_spill_reg (unsigned int ab,
2374 unsigned int reg,
2375 unsigned int targ_reg,
2376 unsigned int xy,
2377 unsigned int predicate)
2378 {
2379 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2380 ptr->r.record.x.ab = ab;
2381 ptr->r.record.x.reg = reg;
2382 ptr->r.record.x.where.reg = targ_reg;
2383 ptr->r.record.x.xy = xy;
2384 ptr->r.record.x.qp = predicate;
2385 return ptr;
2386 }
2387
2388 /* Given an unw_rec_list, output the record in the correct descriptor
2389 format with the specified function. */
2390
2391 static void
2392 process_one_record (unw_rec_list *ptr, vbyte_func f)
2393 {
2394 unsigned int fr_mask, gr_mask;
2395
2396 switch (ptr->r.type)
2397 {
2398 /* This is a dummy record that takes up no space in the output. */
2399 case endp:
2400 break;
2401
2402 case gr_mem:
2403 case fr_mem:
2404 case br_mem:
2405 case frgr_mem:
2406 /* These are taken care of by prologue/prologue_gr. */
2407 break;
2408
2409 case prologue_gr:
2410 case prologue:
2411 if (ptr->r.type == prologue_gr)
2412 output_R2_format (f, ptr->r.record.r.grmask,
2413 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2414 else
2415 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2416
2417 /* Output descriptor(s) for union of register spills (if any). */
2418 gr_mask = ptr->r.record.r.mask.gr_mem;
2419 fr_mask = ptr->r.record.r.mask.fr_mem;
2420 if (fr_mask)
2421 {
2422 if ((fr_mask & ~0xfUL) == 0)
2423 output_P6_format (f, fr_mem, fr_mask);
2424 else
2425 {
2426 output_P5_format (f, gr_mask, fr_mask);
2427 gr_mask = 0;
2428 }
2429 }
2430 if (gr_mask)
2431 output_P6_format (f, gr_mem, gr_mask);
2432 if (ptr->r.record.r.mask.br_mem)
2433 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2434
2435 /* output imask descriptor if necessary: */
2436 if (ptr->r.record.r.mask.i)
2437 output_P4_format (f, ptr->r.record.r.mask.i,
2438 ptr->r.record.r.imask_size);
2439 break;
2440
2441 case body:
2442 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2443 break;
2444 case mem_stack_f:
2445 case mem_stack_v:
2446 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2447 ptr->r.record.p.size);
2448 break;
2449 case psp_gr:
2450 case rp_gr:
2451 case pfs_gr:
2452 case preds_gr:
2453 case unat_gr:
2454 case lc_gr:
2455 case fpsr_gr:
2456 case priunat_gr:
2457 case bsp_gr:
2458 case bspstore_gr:
2459 case rnat_gr:
2460 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2461 break;
2462 case rp_br:
2463 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2464 break;
2465 case psp_sprel:
2466 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2467 break;
2468 case rp_when:
2469 case pfs_when:
2470 case preds_when:
2471 case unat_when:
2472 case lc_when:
2473 case fpsr_when:
2474 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2475 break;
2476 case rp_psprel:
2477 case pfs_psprel:
2478 case preds_psprel:
2479 case unat_psprel:
2480 case lc_psprel:
2481 case fpsr_psprel:
2482 case spill_base:
2483 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2484 break;
2485 case rp_sprel:
2486 case pfs_sprel:
2487 case preds_sprel:
2488 case unat_sprel:
2489 case lc_sprel:
2490 case fpsr_sprel:
2491 case priunat_sprel:
2492 case bsp_sprel:
2493 case bspstore_sprel:
2494 case rnat_sprel:
2495 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2496 break;
2497 case gr_gr:
2498 if (ptr->r.record.p.r.gr < REG_NUM)
2499 {
2500 const unw_rec_list *cur = ptr;
2501
2502 gr_mask = cur->r.record.p.grmask;
2503 while ((cur = cur->r.record.p.next) != NULL)
2504 gr_mask |= cur->r.record.p.grmask;
2505 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2506 }
2507 break;
2508 case br_gr:
2509 if (ptr->r.record.p.r.gr < REG_NUM)
2510 {
2511 const unw_rec_list *cur = ptr;
2512
2513 gr_mask = cur->r.record.p.brmask;
2514 while ((cur = cur->r.record.p.next) != NULL)
2515 gr_mask |= cur->r.record.p.brmask;
2516 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2517 }
2518 break;
2519 case spill_mask:
2520 as_bad (_("spill_mask record unimplemented."));
2521 break;
2522 case priunat_when_gr:
2523 case priunat_when_mem:
2524 case bsp_when:
2525 case bspstore_when:
2526 case rnat_when:
2527 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2528 break;
2529 case priunat_psprel:
2530 case bsp_psprel:
2531 case bspstore_psprel:
2532 case rnat_psprel:
2533 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2534 break;
2535 case unwabi:
2536 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2537 break;
2538 case epilogue:
2539 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2540 break;
2541 case label_state:
2542 case copy_state:
2543 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2544 break;
2545 case spill_psprel:
2546 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2547 ptr->r.record.x.reg, ptr->r.record.x.t,
2548 ptr->r.record.x.where.pspoff);
2549 break;
2550 case spill_sprel:
2551 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2552 ptr->r.record.x.reg, ptr->r.record.x.t,
2553 ptr->r.record.x.where.spoff);
2554 break;
2555 case spill_reg:
2556 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2557 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2558 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2559 break;
2560 case spill_psprel_p:
2561 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2562 ptr->r.record.x.ab, ptr->r.record.x.reg,
2563 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2564 break;
2565 case spill_sprel_p:
2566 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2567 ptr->r.record.x.ab, ptr->r.record.x.reg,
2568 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2569 break;
2570 case spill_reg_p:
2571 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2572 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2573 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2574 ptr->r.record.x.t);
2575 break;
2576 default:
2577 as_bad (_("record_type_not_valid"));
2578 break;
2579 }
2580 }
2581
2582 /* Given a unw_rec_list list, process all the records with
2583 the specified function. */
2584 static void
2585 process_unw_records (unw_rec_list *list, vbyte_func f)
2586 {
2587 unw_rec_list *ptr;
2588 for (ptr = list; ptr; ptr = ptr->next)
2589 process_one_record (ptr, f);
2590 }
2591
2592 /* Determine the size of a record list in bytes. */
2593 static int
2594 calc_record_size (unw_rec_list *list)
2595 {
2596 vbyte_count = 0;
2597 process_unw_records (list, count_output);
2598 return vbyte_count;
2599 }
2600
2601 /* Return the number of bits set in the input value.
2602 Perhaps this has a better place... */
2603 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2604 # define popcount __builtin_popcount
2605 #else
2606 static int
2607 popcount (unsigned x)
2608 {
2609 static const unsigned char popcnt[16] =
2610 {
2611 0, 1, 1, 2,
2612 1, 2, 2, 3,
2613 1, 2, 2, 3,
2614 2, 3, 3, 4
2615 };
2616
2617 if (x < NELEMS (popcnt))
2618 return popcnt[x];
2619 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2620 }
2621 #endif
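
/* Worked example of the fallback above: popcount (0x2d) recurses as
   popcnt[0x2d % 16] + popcount (0x2d / 16) = popcnt[13] + popcnt[2]
   = 3 + 1 = 4, matching the four bits set in 101101b.  */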
2622
2623 /* Update IMASK bitmask to reflect the fact that one or more registers
2624 of type TYPE are saved starting at instruction with index T. If N
2625 bits are set in REGMASK, it is assumed that instructions T through
2626 T+N-1 save these registers.
2627
2628 TYPE values:
2629 0: no save
2630 1: instruction saves next fp reg
2631 2: instruction saves next general reg
2632 3: instruction saves next branch reg */
2633 static void
2634 set_imask (unw_rec_list *region,
2635 unsigned long regmask,
2636 unsigned long t,
2637 unsigned int type)
2638 {
2639 unsigned char *imask;
2640 unsigned long imask_size;
2641 unsigned int i;
2642 int pos;
2643
2644 imask = region->r.record.r.mask.i;
2645 imask_size = region->r.record.r.imask_size;
2646 if (!imask)
2647 {
2648 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2649 imask = XCNEWVEC (unsigned char, imask_size);
2650
2651 region->r.record.r.imask_size = imask_size;
2652 region->r.record.r.mask.i = imask;
2653 }
2654
2655 i = (t / 4) + 1;
2656 pos = 2 * (3 - t % 4);
2657 while (regmask)
2658 {
2659 if (i >= imask_size)
2660 {
2661 as_bad (_("Ignoring attempt to spill beyond end of region"));
2662 return;
2663 }
2664
2665 imask[i] |= (type & 0x3) << pos;
2666
2667 regmask &= (regmask - 1);
2668 pos -= 2;
2669 if (pos < 0)
2670 {
2671 pos = 6;
2672 ++i;
2673 }
2674 }
2675 }
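
/* Worked example of the packing above (hypothetical values): the imask
   holds one 2-bit TYPE code per instruction slot, four slots per byte,
   high-order bits first.  For T = 5, i = 5 / 4 + 1 = 2 and
   pos = 2 * (3 - 5 % 4) = 4, so the code lands in bits 5:4 of imask[2];
   each further bit taken from REGMASK moves down to bits 3:2, then 1:0,
   and then wraps to bits 7:6 of the next byte.  */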
2676
2677 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2678 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2679 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2680 for frag sizes. */
2681
2682 static unsigned long
2683 slot_index (unsigned long slot_addr,
2684 fragS *slot_frag,
2685 unsigned long first_addr,
2686 fragS *first_frag,
2687 int before_relax)
2688 {
2689 unsigned long s_index = 0;
2690
2691 /* First time we are called, the initial address and frag are invalid. */
2692 if (first_addr == 0)
2693 return 0;
2694
2695 /* If the two addresses are in different frags, then we need to add in
2696 the remaining size of this frag, and then the entire size of intermediate
2697 frags. */
2698 while (slot_frag != first_frag)
2699 {
2700 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2701
2702 if (! before_relax)
2703 {
2704 /* We can get the final addresses only during and after
2705 relaxation. */
2706 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2707 s_index += 3 * ((first_frag->fr_next->fr_address
2708 - first_frag->fr_address
2709 - first_frag->fr_fix) >> 4);
2710 }
2711 else
2712 /* We don't know what the final addresses will be. We try our
2713 best to estimate. */
2714 switch (first_frag->fr_type)
2715 {
2716 default:
2717 break;
2718
2719 case rs_space:
2720 as_fatal (_("Only constant space allocation is supported"));
2721 break;
2722
2723 case rs_align:
2724 case rs_align_code:
2725 case rs_align_test:
2726 /* Take alignment into account. Assume the worst case
2727 before relaxation. */
2728 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2729 break;
2730
2731 case rs_org:
2732 if (first_frag->fr_symbol)
2733 {
2734 as_fatal (_("Only constant offsets are supported"));
2735 break;
2736 }
2737 /* Fall through. */
2738 case rs_fill:
2739 s_index += 3 * (first_frag->fr_offset >> 4);
2740 break;
2741 }
2742
2743 /* Add in the full size of the frag converted to instruction slots. */
2744 s_index += 3 * (first_frag->fr_fix >> 4);
2745 /* Subtract away the initial part before first_addr. */
2746 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2747 + ((first_addr & 0x3) - (start_addr & 0x3)));
2748
2749 /* Move to the beginning of the next frag. */
2750 first_frag = first_frag->fr_next;
2751 first_addr = (unsigned long) &first_frag->fr_literal;
2752
2753 /* This can happen if there is section switching in the middle of a
2754 function, causing the frag chain for the function to be broken.
2755 It is too difficult to recover safely from this problem, so we just
2756 exit with an error. */
2757 if (first_frag == NULL)
2758 as_fatal (_("Section switching in code is not supported."));
2759 }
2760
2761 /* Add in the used part of the last frag. */
2762 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2763 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2764 return s_index;
2765 }
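
/* Worked example for the arithmetic above: a bundle is 16 bytes and
   holds three instruction slots, and the addresses handled here carry
   the slot number in their low bits, which is why bundle differences
   are scaled by 3 and the slot difference is added on.  Going from
   slot 1 of one bundle to slot 0 of the bundle two ahead is therefore
   3 * 2 + (0 - 1) = 5 slots.  */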
2766
2767 /* Optimize unwind record directives. */
2768
2769 static unw_rec_list *
2770 optimize_unw_records (unw_rec_list *list)
2771 {
2772 if (!list)
2773 return NULL;
2774
2775 /* If the only unwind record is ".prologue" or ".prologue" followed
2776 by ".body", then we can optimize the unwind directives away. */
2777 if (list->r.type == prologue
2778 && (list->next->r.type == endp
2779 || (list->next->r.type == body && list->next->next->r.type == endp)))
2780 return NULL;
2781
2782 return list;
2783 }
2784
2785 /* Given a complete record list, process any records which have
2786 unresolved fields (i.e., length counts for a prologue). After
2787 this has been run, all necessary information should be available
2788 within each record to generate an image. */
2789
2790 static void
2791 fixup_unw_records (unw_rec_list *list, int before_relax)
2792 {
2793 unw_rec_list *ptr, *region = 0;
2794 unsigned long first_addr = 0, rlen = 0, t;
2795 fragS *first_frag = 0;
2796
2797 for (ptr = list; ptr; ptr = ptr->next)
2798 {
2799 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2800 as_bad (_("Insn slot not set in unwind record."));
2801 t = slot_index (ptr->slot_number, ptr->slot_frag,
2802 first_addr, first_frag, before_relax);
2803 switch (ptr->r.type)
2804 {
2805 case prologue:
2806 case prologue_gr:
2807 case body:
2808 {
2809 unw_rec_list *last;
2810 int size;
2811 unsigned long last_addr = 0;
2812 fragS *last_frag = NULL;
2813
2814 first_addr = ptr->slot_number;
2815 first_frag = ptr->slot_frag;
2816 /* Find either the next body/prologue start, or the end of
2817 the function, and determine the size of the region. */
2818 for (last = ptr->next; last != NULL; last = last->next)
2819 if (last->r.type == prologue || last->r.type == prologue_gr
2820 || last->r.type == body || last->r.type == endp)
2821 {
2822 last_addr = last->slot_number;
2823 last_frag = last->slot_frag;
2824 break;
2825 }
2826 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2827 before_relax);
2828 rlen = ptr->r.record.r.rlen = size;
2829 if (ptr->r.type == body)
2830 /* End of region. */
2831 region = 0;
2832 else
2833 region = ptr;
2834 break;
2835 }
2836 case epilogue:
2837 if (t < rlen)
2838 ptr->r.record.b.t = rlen - 1 - t;
2839 else
2840 /* This happens when a memory-stack-less procedure uses a
2841 ".restore sp" directive at the end of a region to pop
2842 the frame state. */
2843 ptr->r.record.b.t = 0;
2844 break;
2845
2846 case mem_stack_f:
2847 case mem_stack_v:
2848 case rp_when:
2849 case pfs_when:
2850 case preds_when:
2851 case unat_when:
2852 case lc_when:
2853 case fpsr_when:
2854 case priunat_when_gr:
2855 case priunat_when_mem:
2856 case bsp_when:
2857 case bspstore_when:
2858 case rnat_when:
2859 ptr->r.record.p.t = t;
2860 break;
2861
2862 case spill_reg:
2863 case spill_sprel:
2864 case spill_psprel:
2865 case spill_reg_p:
2866 case spill_sprel_p:
2867 case spill_psprel_p:
2868 ptr->r.record.x.t = t;
2869 break;
2870
2871 case frgr_mem:
2872 if (!region)
2873 {
2874 as_bad (_("frgr_mem record before region record!"));
2875 return;
2876 }
2877 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2878 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2879 set_imask (region, ptr->r.record.p.frmask, t, 1);
2880 set_imask (region, ptr->r.record.p.grmask, t, 2);
2881 break;
2882 case fr_mem:
2883 if (!region)
2884 {
2885 as_bad (_("fr_mem record before region record!"));
2886 return;
2887 }
2888 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2889 set_imask (region, ptr->r.record.p.frmask, t, 1);
2890 break;
2891 case gr_mem:
2892 if (!region)
2893 {
2894 as_bad (_("gr_mem record before region record!"));
2895 return;
2896 }
2897 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2898 set_imask (region, ptr->r.record.p.grmask, t, 2);
2899 break;
2900 case br_mem:
2901 if (!region)
2902 {
2903 as_bad (_("br_mem record before region record!"));
2904 return;
2905 }
2906 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2907 set_imask (region, ptr->r.record.p.brmask, t, 3);
2908 break;
2909
2910 case gr_gr:
2911 if (!region)
2912 {
2913 as_bad (_("gr_gr record before region record!"));
2914 return;
2915 }
2916 set_imask (region, ptr->r.record.p.grmask, t, 2);
2917 break;
2918 case br_gr:
2919 if (!region)
2920 {
2921 as_bad (_("br_gr record before region record!"));
2922 return;
2923 }
2924 set_imask (region, ptr->r.record.p.brmask, t, 3);
2925 break;
2926
2927 default:
2928 break;
2929 }
2930 }
2931 }
2932
2933 /* Estimate the size of a frag before relaxing. We only have one type of frag
2934 to handle here, which is the unwind info frag. */
2935
2936 int
2937 ia64_estimate_size_before_relax (fragS *frag,
2938 asection *segtype ATTRIBUTE_UNUSED)
2939 {
2940 unw_rec_list *list;
2941 int len, size, pad;
2942
2943 /* ??? This code is identical to the first part of ia64_convert_frag. */
2944 list = (unw_rec_list *) frag->fr_opcode;
2945 fixup_unw_records (list, 0);
2946
2947 len = calc_record_size (list);
2948 /* pad to pointer-size boundary. */
2949 pad = len % md.pointer_size;
2950 if (pad != 0)
2951 len += md.pointer_size - pad;
2952 /* Add 8 for the header. */
2953 size = len + 8;
2954 /* Add a pointer for the personality offset. */
2955 if (frag->fr_offset)
2956 size += md.pointer_size;
2957
2958 /* fr_var carries the max_chars that we created the fragment with.
2959 We must, of course, have allocated enough memory earlier. */
2960 gas_assert (frag->fr_var >= size);
2961
2962 return frag->fr_fix + size;
2963 }
2964
2965 /* This function converts a rs_machine_dependent variant frag into a
2966 normal fill frag with the unwind image from the record list. */
2967 void
2968 ia64_convert_frag (fragS *frag)
2969 {
2970 unw_rec_list *list;
2971 int len, size, pad;
2972 valueT flag_value;
2973
2974 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2975 list = (unw_rec_list *) frag->fr_opcode;
2976 fixup_unw_records (list, 0);
2977
2978 len = calc_record_size (list);
2979 /* pad to pointer-size boundary. */
2980 pad = len % md.pointer_size;
2981 if (pad != 0)
2982 len += md.pointer_size - pad;
2983 /* Add 8 for the header. */
2984 size = len + 8;
2985 /* Add a pointer for the personality offset. */
2986 if (frag->fr_offset)
2987 size += md.pointer_size;
2988
2989 /* fr_var carries the max_chars that we created the fragment with.
2990 We must, of course, have allocated enough memory earlier. */
2991 gas_assert (frag->fr_var >= size);
2992
2993 /* Initialize the header area. fr_offset is initialized with
2994 unwind.personality_routine. */
2995 if (frag->fr_offset)
2996 {
2997 if (md.flags & EF_IA_64_ABI64)
2998 flag_value = (bfd_vma) 3 << 32;
2999 else
3000 /* 32-bit unwind info block. */
3001 flag_value = (bfd_vma) 0x1003 << 32;
3002 }
3003 else
3004 flag_value = 0;
3005
3006 md_number_to_chars (frag->fr_literal,
3007 (((bfd_vma) 1 << 48) /* Version. */
3008 | flag_value /* U & E handler flags. */
3009 | (len / md.pointer_size)), /* Length. */
3010 8);
3011
3012 /* Skip the header. */
3013 vbyte_mem_ptr = frag->fr_literal + 8;
3014 process_unw_records (list, output_vbyte_mem);
3015
3016 /* Fill the padding bytes with zeros. */
3017 if (pad != 0)
3018 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3019 md.pointer_size - pad);
3020 /* Fill the unwind personality with zeros. */
3021 if (frag->fr_offset)
3022 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3023 md.pointer_size);
3024
3025 frag->fr_fix += size;
3026 frag->fr_type = rs_fill;
3027 frag->fr_var = 0;
3028 frag->fr_offset = 0;
3029 }
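
/* Worked example of the header layout built above, assuming LP64
   (pointer_size 8), 24 bytes of descriptors and a personality routine:
   the 8-byte header is ((bfd_vma) 1 << 48) | ((bfd_vma) 3 << 32) | 3,
   i.e. version 1, the handler flags chosen above, and a length of 3
   pointer-size words, and the frag grows by 8 (header) + 24
   (descriptors) + 8 (personality slot) = 40 bytes.  */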
3030
3031 static int
3032 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3033 {
3034 int sep = parse_operand_and_eval (e, ',');
3035
3036 *qp = e->X_add_number - REG_P;
3037 if (e->X_op != O_register || *qp > 63)
3038 {
3039 as_bad (_("First operand to .%s must be a predicate"), po);
3040 *qp = 0;
3041 }
3042 else if (*qp == 0)
3043 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3044 if (sep == ',')
3045 sep = parse_operand_and_eval (e, ',');
3046 else
3047 e->X_op = O_absent;
3048 return sep;
3049 }
3050
3051 static void
3052 convert_expr_to_ab_reg (const expressionS *e,
3053 unsigned int *ab,
3054 unsigned int *regp,
3055 const char *po,
3056 int n)
3057 {
3058 unsigned int reg = e->X_add_number;
3059
3060 *ab = *regp = 0; /* Anything valid is good here. */
3061
3062 if (e->X_op != O_register)
3063 reg = REG_GR; /* Anything invalid is good here. */
3064
3065 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3066 {
3067 *ab = 0;
3068 *regp = reg - REG_GR;
3069 }
3070 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3071 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3072 {
3073 *ab = 1;
3074 *regp = reg - REG_FR;
3075 }
3076 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3077 {
3078 *ab = 2;
3079 *regp = reg - REG_BR;
3080 }
3081 else
3082 {
3083 *ab = 3;
3084 switch (reg)
3085 {
3086 case REG_PR: *regp = 0; break;
3087 case REG_PSP: *regp = 1; break;
3088 case REG_PRIUNAT: *regp = 2; break;
3089 case REG_BR + 0: *regp = 3; break;
3090 case REG_AR + AR_BSP: *regp = 4; break;
3091 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3092 case REG_AR + AR_RNAT: *regp = 6; break;
3093 case REG_AR + AR_UNAT: *regp = 7; break;
3094 case REG_AR + AR_FPSR: *regp = 8; break;
3095 case REG_AR + AR_PFS: *regp = 9; break;
3096 case REG_AR + AR_LC: *regp = 10; break;
3097
3098 default:
3099 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3100 break;
3101 }
3102 }
3103 }
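
/* For illustration, the (ab, reg) pairs produced above: r5 maps to
   (0, 5), f16 to (1, 16), b4 to (2, 4) and ar.unat to (3, 7); anything
   outside the preserved sets draws the diagnostic instead.  These pairs
   feed the X-format spill descriptors emitted by the output_spill_reg,
   output_spill_sprel and output_spill_psprel helpers earlier in this
   file.  */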
3104
3105 static void
3106 convert_expr_to_xy_reg (const expressionS *e,
3107 unsigned int *xy,
3108 unsigned int *regp,
3109 const char *po,
3110 int n)
3111 {
3112 unsigned int reg = e->X_add_number;
3113
3114 *xy = *regp = 0; /* Anything valid is good here. */
3115
3116 if (e->X_op != O_register)
3117 reg = REG_GR; /* Anything invalid is good here. */
3118
3119 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3120 {
3121 *xy = 0;
3122 *regp = reg - REG_GR;
3123 }
3124 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3125 {
3126 *xy = 1;
3127 *regp = reg - REG_FR;
3128 }
3129 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3130 {
3131 *xy = 2;
3132 *regp = reg - REG_BR;
3133 }
3134 else
3135 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3136 }
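
/* For illustration, a hypothetical ".spillreg b0, r28" resolves its
   first operand via convert_expr_to_ab_reg (b0 is the "rp" slot:
   ab 3, reg 3) and its second via this routine (r28 gives xy 0,
   reg 28); the xy code is 0 for a GR, 1 for an FR and 2 for a BR
   target, and is what dot_spillreg below hands to output_spill_reg.  */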
3137
3138 static void
3139 dot_align (int arg)
3140 {
3141 /* The current frag is an alignment frag. */
3142 align_frag = frag_now;
3143 s_align_bytes (arg);
3144 }
3145
3146 static void
3147 dot_radix (int dummy ATTRIBUTE_UNUSED)
3148 {
3149 char *radix;
3150 int ch;
3151
3152 SKIP_WHITESPACE ();
3153
3154 if (is_it_end_of_statement ())
3155 return;
3156 ch = get_symbol_name (&radix);
3157 ia64_canonicalize_symbol_name (radix);
3158 if (strcasecmp (radix, "C"))
3159 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3160 (void) restore_line_pointer (ch);
3161 demand_empty_rest_of_line ();
3162 }
3163
3164 /* Helper function for .loc directives. If the assembler is not generating
3165 line number info, then we need to remember which instructions have a .loc
3166 directive, and only call dwarf2_gen_line_info for those instructions. */
3167
3168 static void
3169 dot_loc (int x)
3170 {
3171 CURR_SLOT.loc_directive_seen = 1;
3172 dwarf2_directive_loc (x);
3173 }
3174
3175 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3176 static void
3177 dot_special_section (int which)
3178 {
3179 set_section ((char *) special_section_name[which]);
3180 }
3181
3182 /* Return -1 for warning and 0 for error. */
3183
3184 static int
3185 unwind_diagnostic (const char * region, const char *directive)
3186 {
3187 if (md.unwind_check == unwind_check_warning)
3188 {
3189 as_warn (_(".%s outside of %s"), directive, region);
3190 return -1;
3191 }
3192 else
3193 {
3194 as_bad (_(".%s outside of %s"), directive, region);
3195 ignore_rest_of_line ();
3196 return 0;
3197 }
3198 }
3199
3200 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3201 a procedure but the unwind directive check is set to warning, 0 if
3202 a directive isn't in a procedure and the unwind directive check is set
3203 to error. */
3204
3205 static int
3206 in_procedure (const char *directive)
3207 {
3208 if (unwind.proc_pending.sym
3209 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3210 return 1;
3211 return unwind_diagnostic ("procedure", directive);
3212 }
3213
3214 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3215 a prologue but the unwind directive check is set to warning, 0 if
3216 a directive isn't in a prologue and the unwind directive check is set
3217 to error. */
3218
3219 static int
3220 in_prologue (const char *directive)
3221 {
3222 int in = in_procedure (directive);
3223
3224 if (in > 0 && !unwind.prologue)
3225 in = unwind_diagnostic ("prologue", directive);
3226 check_pending_save ();
3227 return in;
3228 }
3229
3230 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3231 a body but the unwind directive check is set to warning, 0 if
3232 a directive isn't in a body and the unwind directive check is set
3233 to error. */
3234
3235 static int
3236 in_body (const char *directive)
3237 {
3238 int in = in_procedure (directive);
3239
3240 if (in > 0 && !unwind.body)
3241 in = unwind_diagnostic ("body region", directive);
3242 return in;
3243 }
3244
3245 static void
3246 add_unwind_entry (unw_rec_list *ptr, int sep)
3247 {
3248 if (ptr)
3249 {
3250 if (unwind.tail)
3251 unwind.tail->next = ptr;
3252 else
3253 unwind.list = ptr;
3254 unwind.tail = ptr;
3255
3256 /* The current entry can in fact be a chain of unwind entries. */
3257 if (unwind.current_entry == NULL)
3258 unwind.current_entry = ptr;
3259 }
3264
3265 if (sep == ',')
3266 {
3267 char *name;
3268 /* Parse a tag permitted for the current directive. */
3269 int ch;
3270
3271 SKIP_WHITESPACE ();
3272 ch = get_symbol_name (&name);
3273 /* FIXME: For now, just issue a warning that this isn't implemented. */
3274 {
3275 static int warned;
3276
3277 if (!warned)
3278 {
3279 warned = 1;
3280 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3281 }
3282 }
3283 (void) restore_line_pointer (ch);
3284 }
3285 if (sep != NOT_A_CHAR)
3286 demand_empty_rest_of_line ();
3287 }
3288
3289 static void
3290 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3291 {
3292 expressionS e;
3293 int sep;
3294
3295 if (!in_prologue ("fframe"))
3296 return;
3297
3298 sep = parse_operand_and_eval (&e, ',');
3299
3300 if (e.X_op != O_constant)
3301 {
3302 as_bad (_("First operand to .fframe must be a constant"));
3303 e.X_add_number = 0;
3304 }
3305 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3306 }
3307
3308 static void
3309 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3310 {
3311 expressionS e;
3312 unsigned reg;
3313 int sep;
3314
3315 if (!in_prologue ("vframe"))
3316 return;
3317
3318 sep = parse_operand_and_eval (&e, ',');
3319 reg = e.X_add_number - REG_GR;
3320 if (e.X_op != O_register || reg > 127)
3321 {
3322 as_bad (_("First operand to .vframe must be a general register"));
3323 reg = 0;
3324 }
3325 add_unwind_entry (output_mem_stack_v (), sep);
3326 if (! (unwind.prologue_mask & 2))
3327 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3328 else if (reg != unwind.prologue_gr
3329 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3330 as_warn (_("Operand of .vframe contradicts .prologue"));
3331 }
3332
3333 static void
3334 dot_vframesp (int psp)
3335 {
3336 expressionS e;
3337 int sep;
3338
3339 if (psp)
3340 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3341
3342 if (!in_prologue ("vframesp"))
3343 return;
3344
3345 sep = parse_operand_and_eval (&e, ',');
3346 if (e.X_op != O_constant)
3347 {
3348 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3349 e.X_add_number = 0;
3350 }
3351 add_unwind_entry (output_mem_stack_v (), sep);
3352 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3353 }
3354
3355 static void
3356 dot_save (int dummy ATTRIBUTE_UNUSED)
3357 {
3358 expressionS e1, e2;
3359 unsigned reg1, reg2;
3360 int sep;
3361
3362 if (!in_prologue ("save"))
3363 return;
3364
3365 sep = parse_operand_and_eval (&e1, ',');
3366 if (sep == ',')
3367 sep = parse_operand_and_eval (&e2, ',');
3368 else
3369 e2.X_op = O_absent;
3370
3371 reg1 = e1.X_add_number;
3372 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3373 if (e1.X_op != O_register)
3374 {
3375 as_bad (_("First operand to .save not a register"));
3376 reg1 = REG_PR; /* Anything valid is good here. */
3377 }
3378 reg2 = e2.X_add_number - REG_GR;
3379 if (e2.X_op != O_register || reg2 > 127)
3380 {
3381 as_bad (_("Second operand to .save not a valid register"));
3382 reg2 = 0;
3383 }
3384 switch (reg1)
3385 {
3386 case REG_AR + AR_BSP:
3387 add_unwind_entry (output_bsp_when (), sep);
3388 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3389 break;
3390 case REG_AR + AR_BSPSTORE:
3391 add_unwind_entry (output_bspstore_when (), sep);
3392 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_RNAT:
3395 add_unwind_entry (output_rnat_when (), sep);
3396 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_UNAT:
3399 add_unwind_entry (output_unat_when (), sep);
3400 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_FPSR:
3403 add_unwind_entry (output_fpsr_when (), sep);
3404 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_PFS:
3407 add_unwind_entry (output_pfs_when (), sep);
3408 if (! (unwind.prologue_mask & 4))
3409 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3410 else if (reg2 != unwind.prologue_gr
3411 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3412 as_warn (_("Second operand of .save contradicts .prologue"));
3413 break;
3414 case REG_AR + AR_LC:
3415 add_unwind_entry (output_lc_when (), sep);
3416 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3417 break;
3418 case REG_BR:
3419 add_unwind_entry (output_rp_when (), sep);
3420 if (! (unwind.prologue_mask & 8))
3421 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3422 else if (reg2 != unwind.prologue_gr)
3423 as_warn (_("Second operand of .save contradicts .prologue"));
3424 break;
3425 case REG_PR:
3426 add_unwind_entry (output_preds_when (), sep);
3427 if (! (unwind.prologue_mask & 1))
3428 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3429 else if (reg2 != unwind.prologue_gr
3430 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3431 as_warn (_("Second operand of .save contradicts .prologue"));
3432 break;
3433 case REG_PRIUNAT:
3434 add_unwind_entry (output_priunat_when_gr (), sep);
3435 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3436 break;
3437 default:
3438 as_bad (_("First operand to .save not a valid register"));
3439 add_unwind_entry (NULL, sep);
3440 break;
3441 }
3442 }
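
/* Worked example of the .prologue consistency checks above
   (hypothetical register choices): the prologue_gr descriptor assigns
   consecutive GRs in the order rp, ar.pfs, psp, predicates, which is
   what the popcount (mask & -(bit << 1)) expressions count.  After
   ".prologue 0xc, r35" (rp and ar.pfs saved starting at r35), the
   matching directives are ".save rp, r35" and ".save ar.pfs, r36";
   ".save ar.pfs, r37" draws the "contradicts .prologue" warning.  */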
3443
3444 static void
3445 dot_restore (int dummy ATTRIBUTE_UNUSED)
3446 {
3447 expressionS e1;
3448 unsigned long ecount; /* # of _additional_ regions to pop */
3449 int sep;
3450
3451 if (!in_body ("restore"))
3452 return;
3453
3454 sep = parse_operand_and_eval (&e1, ',');
3455 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3456 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3457
3458 if (sep == ',')
3459 {
3460 expressionS e2;
3461
3462 sep = parse_operand_and_eval (&e2, ',');
3463 if (e2.X_op != O_constant || e2.X_add_number < 0)
3464 {
3465 as_bad (_("Second operand to .restore must be a constant >= 0"));
3466 e2.X_add_number = 0;
3467 }
3468 ecount = e2.X_add_number;
3469 }
3470 else
3471 ecount = unwind.prologue_count - 1;
3472
3473 if (ecount >= unwind.prologue_count)
3474 {
3475 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3476 ecount + 1, unwind.prologue_count);
3477 ecount = 0;
3478 }
3479
3480 add_unwind_entry (output_epilogue (ecount), sep);
3481
3482 if (ecount < unwind.prologue_count)
3483 unwind.prologue_count -= ecount + 1;
3484 else
3485 unwind.prologue_count = 0;
3486 }
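
/* Worked example: with three nested prologue regions open, a plain
   ".restore sp" emits an epilogue record with ecount 2 and closes all
   three; ".restore sp, 1" pops two and leaves one open; ".restore sp, 3"
   is rejected with "Epilogue count of 4 exceeds number of nested
   prologues (3)".  */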
3487
3488 static void
3489 dot_restorereg (int pred)
3490 {
3491 unsigned int qp, ab, reg;
3492 expressionS e;
3493 int sep;
3494 const char * const po = pred ? "restorereg.p" : "restorereg";
3495
3496 if (!in_procedure (po))
3497 return;
3498
3499 if (pred)
3500 sep = parse_predicate_and_operand (&e, &qp, po);
3501 else
3502 {
3503 sep = parse_operand_and_eval (&e, ',');
3504 qp = 0;
3505 }
3506 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3507
3508 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3509 }
3510
3511 static const char *special_linkonce_name[] =
3512 {
3513 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3514 };
3515
3516 static void
3517 start_unwind_section (const segT text_seg, int sec_index)
3518 {
3519 /*
3520 Use a slightly ugly scheme to derive the unwind section names from
3521 the text section name:
3522
3523 text sect.            unwind table sect.
3524 name:                 name:                       comments:
3525 ----------            -----------------           --------------------------------
3526 .text                 .IA_64.unwind
3527 .text.foo             .IA_64.unwind.text.foo
3528 .foo                  .IA_64.unwind.foo
3529 .gnu.linkonce.t.foo
3530                       .gnu.linkonce.ia64unw.foo
3531 _info                 .IA_64.unwind_info          gas issues error message (ditto)
3532 _infoFOO              .IA_64.unwind_infoFOO       gas issues error message (ditto)
3533
3534 This mapping is done so that:
3535
3536 (a) An object file with unwind info only in .text will use
3537 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3538 This follows the letter of the ABI and also ensures backwards
3539 compatibility with older toolchains.
3540
3541 (b) An object file with unwind info in multiple text sections
3542 will use separate unwind sections for each text section.
3543 This allows us to properly set the "sh_info" and "sh_link"
3544 fields in SHT_IA_64_UNWIND as required by the ABI and also
3545 lets GNU ld support programs with multiple segments
3546 containing unwind info (as might be the case for certain
3547 embedded applications).
3548
3549 (c) An error is issued if there would be a name clash.
3550 */
3551
3552 const char *text_name, *sec_text_name;
3553 char *sec_name;
3554 const char *prefix = special_section_name [sec_index];
3555 const char *suffix;
3556
3557 sec_text_name = segment_name (text_seg);
3558 text_name = sec_text_name;
3559 if (startswith (text_name, "_info"))
3560 {
3561 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3562 text_name);
3563 ignore_rest_of_line ();
3564 return;
3565 }
3566 if (strcmp (text_name, ".text") == 0)
3567 text_name = "";
3568
3569 /* Build the unwind section name by appending the (possibly stripped)
3570 text section name to the unwind prefix. */
3571 suffix = text_name;
3572 if (startswith (text_name, ".gnu.linkonce.t."))
3573 {
3574 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3575 suffix += sizeof (".gnu.linkonce.t.") - 1;
3576 }
3577
3578 sec_name = concat (prefix, suffix, NULL);
3579
3580 /* Handle COMDAT group. */
3581 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3582 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3583 {
3584 char *section;
3585 const char *group_name = elf_group_name (text_seg);
3586
3587 if (group_name == NULL)
3588 {
3589 as_bad (_("Group section `%s' has no group signature"),
3590 sec_text_name);
3591 ignore_rest_of_line ();
3592 free (sec_name);
3593 return;
3594 }
3595
3596 /* We have to construct a fake section directive. */
3597 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3598 set_section (section);
3599 free (section);
3600 }
3601 else
3602 {
3603 set_section (sec_name);
3604 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3605 }
3606
3607 elf_linked_to_section (now_seg) = text_seg;
3608 free (sec_name);
3609 }
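
/* For illustration, following the mapping table in the comment above:
   a function in ".text.foo" gets its unwind table in
   ".IA_64.unwind.text.foo".  If ".text.foo" is a COMDAT group member
   with signature "foo", the fake directive constructed above becomes
   .IA_64.unwind.text.foo,"aG",@progbits,foo,comdat, so the unwind
   sections join the same group as the text they describe.  */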
3610
3611 static void
3612 generate_unwind_image (const segT text_seg)
3613 {
3614 int size, pad;
3615 unw_rec_list *list;
3616
3617 /* Mark the end of the unwind info, so that we can compute the size of the
3618 last unwind region. */
3619 add_unwind_entry (output_endp (), NOT_A_CHAR);
3620
3621 /* Force out pending instructions, to make sure all unwind records have
3622 a valid slot_number field. */
3623 ia64_flush_insns ();
3624
3625 /* Generate the unwind record. */
3626 list = optimize_unw_records (unwind.list);
3627 fixup_unw_records (list, 1);
3628 size = calc_record_size (list);
3629
3630 if (size > 0 || unwind.force_unwind_entry)
3631 {
3632 unwind.force_unwind_entry = 0;
3633 /* pad to pointer-size boundary. */
3634 pad = size % md.pointer_size;
3635 if (pad != 0)
3636 size += md.pointer_size - pad;
3637 /* Add 8 for the header. */
3638 size += 8;
3639 /* Add a pointer for the personality offset. */
3640 if (unwind.personality_routine)
3641 size += md.pointer_size;
3642 }
3643
3644 /* If there are unwind records, switch sections, and output the info. */
3645 if (size != 0)
3646 {
3647 expressionS exp;
3648 bfd_reloc_code_real_type reloc;
3649
3650 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3651
3652 /* Make sure the section has 4 byte alignment for ILP32 and
3653 8 byte alignment for LP64. */
3654 frag_align (md.pointer_size_shift, 0, 0);
3655 record_alignment (now_seg, md.pointer_size_shift);
3656
3657 /* Set expression which points to start of unwind descriptor area. */
3658 unwind.info = expr_build_dot ();
3659
3660 frag_var (rs_machine_dependent, size, size, 0, 0,
3661 (offsetT) (long) unwind.personality_routine,
3662 (char *) list);
3663
3664 /* Add the personality address to the image. */
3665 if (unwind.personality_routine != 0)
3666 {
3667 exp.X_op = O_symbol;
3668 exp.X_add_symbol = unwind.personality_routine;
3669 exp.X_add_number = 0;
3670
3671 if (md.flags & EF_IA_64_BE)
3672 {
3673 if (md.flags & EF_IA_64_ABI64)
3674 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3675 else
3676 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3677 }
3678 else
3679 {
3680 if (md.flags & EF_IA_64_ABI64)
3681 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3682 else
3683 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3684 }
3685
3686 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3687 md.pointer_size, &exp, 0, reloc);
3688 unwind.personality_routine = 0;
3689 }
3690 }
3691
3692 free_saved_prologue_counts ();
3693 unwind.list = unwind.tail = unwind.current_entry = NULL;
3694 }
3695
3696 static void
3697 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3698 {
3699 if (!in_procedure ("handlerdata"))
3700 return;
3701 unwind.force_unwind_entry = 1;
3702
3703 /* Remember which segment we're in so we can switch back after .endp */
3704 unwind.saved_text_seg = now_seg;
3705 unwind.saved_text_subseg = now_subseg;
3706
3707 /* Generate unwind info into unwind-info section and then leave that
3708 section as the currently active one so dataXX directives go into
3709 the language specific data area of the unwind info block. */
3710 generate_unwind_image (now_seg);
3711 demand_empty_rest_of_line ();
3712 }
3713
3714 static void
3715 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3716 {
3717 if (!in_procedure ("unwentry"))
3718 return;
3719 unwind.force_unwind_entry = 1;
3720 demand_empty_rest_of_line ();
3721 }
3722
3723 static void
3724 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3725 {
3726 expressionS e;
3727 unsigned reg;
3728
3729 if (!in_prologue ("altrp"))
3730 return;
3731
3732 parse_operand_and_eval (&e, 0);
3733 reg = e.X_add_number - REG_BR;
3734 if (e.X_op != O_register || reg > 7)
3735 {
3736 as_bad (_("First operand to .altrp not a valid branch register"));
3737 reg = 0;
3738 }
3739 add_unwind_entry (output_rp_br (reg), 0);
3740 }
3741
3742 static void
3743 dot_savemem (int psprel)
3744 {
3745 expressionS e1, e2;
3746 int sep;
3747 int reg1, val;
3748 const char * const po = psprel ? "savepsp" : "savesp";
3749
3750 if (!in_prologue (po))
3751 return;
3752
3753 sep = parse_operand_and_eval (&e1, ',');
3754 if (sep == ',')
3755 sep = parse_operand_and_eval (&e2, ',');
3756 else
3757 e2.X_op = O_absent;
3758
3759 reg1 = e1.X_add_number;
3760 val = e2.X_add_number;
3761
3762 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3763 if (e1.X_op != O_register)
3764 {
3765 as_bad (_("First operand to .%s not a register"), po);
3766 reg1 = REG_PR; /* Anything valid is good here. */
3767 }
3768 if (e2.X_op != O_constant)
3769 {
3770 as_bad (_("Second operand to .%s not a constant"), po);
3771 val = 0;
3772 }
3773
3774 switch (reg1)
3775 {
3776 case REG_AR + AR_BSP:
3777 add_unwind_entry (output_bsp_when (), sep);
3778 add_unwind_entry ((psprel
3779 ? output_bsp_psprel
3780 : output_bsp_sprel) (val), NOT_A_CHAR);
3781 break;
3782 case REG_AR + AR_BSPSTORE:
3783 add_unwind_entry (output_bspstore_when (), sep);
3784 add_unwind_entry ((psprel
3785 ? output_bspstore_psprel
3786 : output_bspstore_sprel) (val), NOT_A_CHAR);
3787 break;
3788 case REG_AR + AR_RNAT:
3789 add_unwind_entry (output_rnat_when (), sep);
3790 add_unwind_entry ((psprel
3791 ? output_rnat_psprel
3792 : output_rnat_sprel) (val), NOT_A_CHAR);
3793 break;
3794 case REG_AR + AR_UNAT:
3795 add_unwind_entry (output_unat_when (), sep);
3796 add_unwind_entry ((psprel
3797 ? output_unat_psprel
3798 : output_unat_sprel) (val), NOT_A_CHAR);
3799 break;
3800 case REG_AR + AR_FPSR:
3801 add_unwind_entry (output_fpsr_when (), sep);
3802 add_unwind_entry ((psprel
3803 ? output_fpsr_psprel
3804 : output_fpsr_sprel) (val), NOT_A_CHAR);
3805 break;
3806 case REG_AR + AR_PFS:
3807 add_unwind_entry (output_pfs_when (), sep);
3808 add_unwind_entry ((psprel
3809 ? output_pfs_psprel
3810 : output_pfs_sprel) (val), NOT_A_CHAR);
3811 break;
3812 case REG_AR + AR_LC:
3813 add_unwind_entry (output_lc_when (), sep);
3814 add_unwind_entry ((psprel
3815 ? output_lc_psprel
3816 : output_lc_sprel) (val), NOT_A_CHAR);
3817 break;
3818 case REG_BR:
3819 add_unwind_entry (output_rp_when (), sep);
3820 add_unwind_entry ((psprel
3821 ? output_rp_psprel
3822 : output_rp_sprel) (val), NOT_A_CHAR);
3823 break;
3824 case REG_PR:
3825 add_unwind_entry (output_preds_when (), sep);
3826 add_unwind_entry ((psprel
3827 ? output_preds_psprel
3828 : output_preds_sprel) (val), NOT_A_CHAR);
3829 break;
3830 case REG_PRIUNAT:
3831 add_unwind_entry (output_priunat_when_mem (), sep);
3832 add_unwind_entry ((psprel
3833 ? output_priunat_psprel
3834 : output_priunat_sprel) (val), NOT_A_CHAR);
3835 break;
3836 default:
3837 as_bad (_("First operand to .%s not a valid register"), po);
3838 add_unwind_entry (NULL, sep);
3839 break;
3840 }
3841 }
3842
3843 static void
3844 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3845 {
3846 expressionS e;
3847 unsigned grmask;
3848 int sep;
3849
3850 if (!in_prologue ("save.g"))
3851 return;
3852
3853 sep = parse_operand_and_eval (&e, ',');
3854
3855 grmask = e.X_add_number;
3856 if (e.X_op != O_constant
3857 || e.X_add_number <= 0
3858 || e.X_add_number > 0xf)
3859 {
3860 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3861 grmask = 0;
3862 }
3863
3864 if (sep == ',')
3865 {
3866 unsigned reg;
3867 int n = popcount (grmask);
3868
3869 parse_operand_and_eval (&e, 0);
3870 reg = e.X_add_number - REG_GR;
3871 if (e.X_op != O_register || reg > 127)
3872 {
3873 as_bad (_("Second operand to .save.g must be a general register"));
3874 reg = 0;
3875 }
3876 else if (reg > 128U - n)
3877 {
3878 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3879 reg = 0;
3880 }
3881 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3882 }
3883 else
3884 add_unwind_entry (output_gr_mem (grmask), 0);
3885 }
3886
3887 static void
3888 dot_savef (int dummy ATTRIBUTE_UNUSED)
3889 {
3890 expressionS e;
3891
3892 if (!in_prologue ("save.f"))
3893 return;
3894
3895 parse_operand_and_eval (&e, 0);
3896
3897 if (e.X_op != O_constant
3898 || e.X_add_number <= 0
3899 || e.X_add_number > 0xfffff)
3900 {
3901 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3902 e.X_add_number = 0;
3903 }
3904 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3905 }
3906
3907 static void
3908 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3909 {
3910 expressionS e;
3911 unsigned brmask;
3912 int sep;
3913
3914 if (!in_prologue ("save.b"))
3915 return;
3916
3917 sep = parse_operand_and_eval (&e, ',');
3918
3919 brmask = e.X_add_number;
3920 if (e.X_op != O_constant
3921 || e.X_add_number <= 0
3922 || e.X_add_number > 0x1f)
3923 {
3924 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3925 brmask = 0;
3926 }
3927
3928 if (sep == ',')
3929 {
3930 unsigned reg;
3931 int n = popcount (brmask);
3932
3933 parse_operand_and_eval (&e, 0);
3934 reg = e.X_add_number - REG_GR;
3935 if (e.X_op != O_register || reg > 127)
3936 {
3937 as_bad (_("Second operand to .save.b must be a general register"));
3938 reg = 0;
3939 }
3940 else if (reg > 128U - n)
3941 {
3942 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3943 reg = 0;
3944 }
3945 add_unwind_entry (output_br_gr (brmask, reg), 0);
3946 }
3947 else
3948 add_unwind_entry (output_br_mem (brmask), 0);
3949 }
3950
3951 static void
3952 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3953 {
3954 expressionS e1, e2;
3955
3956 if (!in_prologue ("save.gf"))
3957 return;
3958
3959 if (parse_operand_and_eval (&e1, ',') == ',')
3960 parse_operand_and_eval (&e2, 0);
3961 else
3962 e2.X_op = O_absent;
3963
3964 if (e1.X_op != O_constant
3965 || e1.X_add_number < 0
3966 || e1.X_add_number > 0xf)
3967 {
3968 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3969 e1.X_op = O_absent;
3970 e1.X_add_number = 0;
3971 }
3972 if (e2.X_op != O_constant
3973 || e2.X_add_number < 0
3974 || e2.X_add_number > 0xfffff)
3975 {
3976 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3977 e2.X_op = O_absent;
3978 e2.X_add_number = 0;
3979 }
3980 if (e1.X_op == O_constant
3981 && e2.X_op == O_constant
3982 && e1.X_add_number == 0
3983 && e2.X_add_number == 0)
3984 as_bad (_("Operands to .save.gf may not be both zero"));
3985
3986 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3987 }
3988
3989 static void
3990 dot_spill (int dummy ATTRIBUTE_UNUSED)
3991 {
3992 expressionS e;
3993
3994 if (!in_prologue ("spill"))
3995 return;
3996
3997 parse_operand_and_eval (&e, 0);
3998
3999 if (e.X_op != O_constant)
4000 {
4001 as_bad (_("Operand to .spill must be a constant"));
4002 e.X_add_number = 0;
4003 }
4004 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4005 }
4006
4007 static void
4008 dot_spillreg (int pred)
4009 {
4010 int sep;
4011 unsigned int qp, ab, xy, reg, treg;
4012 expressionS e;
4013 const char * const po = pred ? "spillreg.p" : "spillreg";
4014
4015 if (!in_procedure (po))
4016 return;
4017
4018 if (pred)
4019 sep = parse_predicate_and_operand (&e, &qp, po);
4020 else
4021 {
4022 sep = parse_operand_and_eval (&e, ',');
4023 qp = 0;
4024 }
4025 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4026
4027 if (sep == ',')
4028 sep = parse_operand_and_eval (&e, ',');
4029 else
4030 e.X_op = O_absent;
4031 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4032
4033 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4034 }
4035
4036 static void
4037 dot_spillmem (int psprel)
4038 {
4039 expressionS e;
4040 int pred = (psprel < 0), sep;
4041 unsigned int qp, ab, reg;
4042 const char * po;
4043
4044 if (pred)
4045 {
4046 psprel = ~psprel;
4047 po = psprel ? "spillpsp.p" : "spillsp.p";
4048 }
4049 else
4050 po = psprel ? "spillpsp" : "spillsp";
4051
4052 if (!in_procedure (po))
4053 return;
4054
4055 if (pred)
4056 sep = parse_predicate_and_operand (&e, &qp, po);
4057 else
4058 {
4059 sep = parse_operand_and_eval (&e, ',');
4060 qp = 0;
4061 }
4062 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4063
4064 if (sep == ',')
4065 sep = parse_operand_and_eval (&e, ',');
4066 else
4067 e.X_op = O_absent;
4068 if (e.X_op != O_constant)
4069 {
4070 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4071 e.X_add_number = 0;
4072 }
4073
4074 if (psprel)
4075 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4076 else
4077 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4078 }
4079
4080 static unsigned int
4081 get_saved_prologue_count (unsigned long lbl)
4082 {
4083 label_prologue_count *lpc = unwind.saved_prologue_counts;
4084
4085 while (lpc != NULL && lpc->label_number != lbl)
4086 lpc = lpc->next;
4087
4088 if (lpc != NULL)
4089 return lpc->prologue_count;
4090
4091 as_bad (_("Missing .label_state %ld"), lbl);
4092 return 1;
4093 }
4094
4095 static void
4096 save_prologue_count (unsigned long lbl, unsigned int count)
4097 {
4098 label_prologue_count *lpc = unwind.saved_prologue_counts;
4099
4100 while (lpc != NULL && lpc->label_number != lbl)
4101 lpc = lpc->next;
4102
4103 if (lpc != NULL)
4104 lpc->prologue_count = count;
4105 else
4106 {
4107 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4108
4109 new_lpc->next = unwind.saved_prologue_counts;
4110 new_lpc->label_number = lbl;
4111 new_lpc->prologue_count = count;
4112 unwind.saved_prologue_counts = new_lpc;
4113 }
4114 }
4115
4116 static void
4117 free_saved_prologue_counts (void)
4118 {
4119 label_prologue_count *lpc = unwind.saved_prologue_counts;
4120 label_prologue_count *next;
4121
4122 while (lpc != NULL)
4123 {
4124 next = lpc->next;
4125 free (lpc);
4126 lpc = next;
4127 }
4128
4129 unwind.saved_prologue_counts = NULL;
4130 }
4131
4132 static void
4133 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4134 {
4135 expressionS e;
4136
4137 if (!in_body ("label_state"))
4138 return;
4139
4140 parse_operand_and_eval (&e, 0);
4141 if (e.X_op == O_constant)
4142 save_prologue_count (e.X_add_number, unwind.prologue_count);
4143 else
4144 {
4145 as_bad (_("Operand to .label_state must be a constant"));
4146 e.X_add_number = 0;
4147 }
4148 add_unwind_entry (output_label_state (e.X_add_number), 0);
4149 }
4150
4151 static void
4152 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4153 {
4154 expressionS e;
4155
4156 if (!in_body ("copy_state"))
4157 return;
4158
4159 parse_operand_and_eval (&e, 0);
4160 if (e.X_op == O_constant)
4161 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4162 else
4163 {
4164 as_bad (_("Operand to .copy_state must be a constant"));
4165 e.X_add_number = 0;
4166 }
4167 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4168 }
4169
4170 static void
4171 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4172 {
4173 expressionS e1, e2;
4174 unsigned char sep;
4175
4176 if (!in_prologue ("unwabi"))
4177 return;
4178
4179 sep = parse_operand_and_eval (&e1, ',');
4180 if (sep == ',')
4181 parse_operand_and_eval (&e2, 0);
4182 else
4183 e2.X_op = O_absent;
4184
4185 if (e1.X_op != O_constant)
4186 {
4187 as_bad (_("First operand to .unwabi must be a constant"));
4188 e1.X_add_number = 0;
4189 }
4190
4191 if (e2.X_op != O_constant)
4192 {
4193 as_bad (_("Second operand to .unwabi must be a constant"));
4194 e2.X_add_number = 0;
4195 }
4196
4197 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4198 }
4199
4200 static void
4201 dot_personality (int dummy ATTRIBUTE_UNUSED)
4202 {
4203 char *name, *p, c;
4204
4205 if (!in_procedure ("personality"))
4206 return;
4207 SKIP_WHITESPACE ();
4208 c = get_symbol_name (&name);
4209 p = input_line_pointer;
4210 unwind.personality_routine = symbol_find_or_make (name);
4211 unwind.force_unwind_entry = 1;
4212 *p = c;
4213 SKIP_WHITESPACE_AFTER_NAME ();
4214 demand_empty_rest_of_line ();
4215 }
4216
4217 static void
4218 dot_proc (int dummy ATTRIBUTE_UNUSED)
4219 {
4220 char *name, *p, c;
4221 symbolS *sym;
4222 proc_pending *pending, *last_pending;
4223
4224 if (unwind.proc_pending.sym)
4225 {
4226 (md.unwind_check == unwind_check_warning
4227 ? as_warn
4228 : as_bad) (_("Missing .endp after previous .proc"));
4229 while (unwind.proc_pending.next)
4230 {
4231 pending = unwind.proc_pending.next;
4232 unwind.proc_pending.next = pending->next;
4233 free (pending);
4234 }
4235 }
4236 last_pending = NULL;
4237
4238 /* Parse names of main and alternate entry points and mark them as
4239 function symbols: */
4240 while (1)
4241 {
4242 SKIP_WHITESPACE ();
4243 c = get_symbol_name (&name);
4244 p = input_line_pointer;
4245 if (!*name)
4246 as_bad (_("Empty argument of .proc"));
4247 else
4248 {
4249 sym = symbol_find_or_make (name);
4250 if (S_IS_DEFINED (sym))
4251 as_bad (_("`%s' was already defined"), name);
4252 else if (!last_pending)
4253 {
4254 unwind.proc_pending.sym = sym;
4255 last_pending = &unwind.proc_pending;
4256 }
4257 else
4258 {
4259 pending = XNEW (proc_pending);
4260 pending->sym = sym;
4261 last_pending = last_pending->next = pending;
4262 }
4263 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4264 }
4265 *p = c;
4266 SKIP_WHITESPACE_AFTER_NAME ();
4267 if (*input_line_pointer != ',')
4268 break;
4269 ++input_line_pointer;
4270 }
4271 if (!last_pending)
4272 {
4273 unwind.proc_pending.sym = expr_build_dot ();
4274 last_pending = &unwind.proc_pending;
4275 }
4276 last_pending->next = NULL;
4277 demand_empty_rest_of_line ();
4278 do_align (4, NULL, 0, 0);
4279
4280 unwind.prologue = 0;
4281 unwind.prologue_count = 0;
4282 unwind.body = 0;
4283 unwind.insn = 0;
4284 unwind.list = unwind.tail = unwind.current_entry = NULL;
4285 unwind.personality_routine = 0;
4286 }
4287
4288 static void
4289 dot_body (int dummy ATTRIBUTE_UNUSED)
4290 {
4291 if (!in_procedure ("body"))
4292 return;
4293 if (!unwind.prologue && !unwind.body && unwind.insn)
4294 as_warn (_("Initial .body should precede any instructions"));
4295 check_pending_save ();
4296
4297 unwind.prologue = 0;
4298 unwind.prologue_mask = 0;
4299 unwind.body = 1;
4300
4301 add_unwind_entry (output_body (), 0);
4302 }
4303
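/* .prologue [mask, grsave] -- mark the start of a prologue region.  MASK is
   a 4-bit constant describing which of the standard items (rp, ar.pfs, psp,
   pr) are saved, and GRSAVE names the first general register used to hold
   them; e.g. ".prologue 12, r33" (an illustrative, not normative, example;
   using a constant for the second operand is deprecated, see below).  */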
4304 static void
4305 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4306 {
4307 unsigned mask = 0, grsave = 0;
4308
4309 if (!in_procedure ("prologue"))
4310 return;
4311 if (unwind.prologue)
4312 {
4313 as_bad (_(".prologue within prologue"));
4314 ignore_rest_of_line ();
4315 return;
4316 }
4317 if (!unwind.body && unwind.insn)
4318 as_warn (_("Initial .prologue should precede any instructions"));
4319
4320 if (!is_it_end_of_statement ())
4321 {
4322 expressionS e;
4323 int n, sep = parse_operand_and_eval (&e, ',');
4324
4325 if (e.X_op != O_constant
4326 || e.X_add_number < 0
4327 || e.X_add_number > 0xf)
4328 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4329 else if (e.X_add_number == 0)
4330 as_warn (_("Pointless use of zero first operand to .prologue"));
4331 else
4332 mask = e.X_add_number;
4333
4334 n = popcount (mask);
4335
4336 if (sep == ',')
4337 parse_operand_and_eval (&e, 0);
4338 else
4339 e.X_op = O_absent;
4340
4341 if (e.X_op == O_constant
4342 && e.X_add_number >= 0
4343 && e.X_add_number < 128)
4344 {
4345 if (md.unwind_check == unwind_check_error)
4346 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4347 grsave = e.X_add_number;
4348 }
4349 else if (e.X_op != O_register
4350 || (grsave = e.X_add_number - REG_GR) > 127)
4351 {
4352 as_bad (_("Second operand to .prologue must be a general register"));
4353 grsave = 0;
4354 }
4355 else if (grsave > 128U - n)
4356 {
4357 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4358 grsave = 0;
4359 }
4360 }
4361
4362 if (mask)
4363 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4364 else
4365 add_unwind_entry (output_prologue (), 0);
4366
4367 unwind.prologue = 1;
4368 unwind.prologue_mask = mask;
4369 unwind.prologue_gr = grsave;
4370 unwind.body = 0;
4371 ++unwind.prologue_count;
4372 }
4373
4374 static void
4375 dot_endp (int dummy ATTRIBUTE_UNUSED)
4376 {
4377 expressionS e;
4378 int bytes_per_address;
4379 long where;
4380 segT saved_seg;
4381 subsegT saved_subseg;
4382 proc_pending *pending;
4383 int unwind_check = md.unwind_check;
4384
4385 md.unwind_check = unwind_check_error;
4386 if (!in_procedure ("endp"))
4387 return;
4388 md.unwind_check = unwind_check;
4389
4390 if (unwind.saved_text_seg)
4391 {
4392 saved_seg = unwind.saved_text_seg;
4393 saved_subseg = unwind.saved_text_subseg;
4394 unwind.saved_text_seg = NULL;
4395 }
4396 else
4397 {
4398 saved_seg = now_seg;
4399 saved_subseg = now_subseg;
4400 }
4401
4402 insn_group_break (1, 0, 0);
4403
4404 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4405 if (!unwind.info)
4406 generate_unwind_image (saved_seg);
4407
4408 if (unwind.info || unwind.force_unwind_entry)
4409 {
4410 symbolS *proc_end;
4411
4412 subseg_set (md.last_text_seg, md.last_text_subseg);
4413 proc_end = expr_build_dot ();
4414
4415 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4416
4417 /* Make sure that the section has 4-byte alignment for ILP32 and
4418 8-byte alignment for LP64. */
4419 record_alignment (now_seg, md.pointer_size_shift);
4420
4421 /* Need space for 3 pointers for procedure start, procedure end,
4422 and unwind info. */
4423 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4424 where = frag_now_fix () - (3 * md.pointer_size);
4425 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4426
4427 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4428 e.X_op = O_pseudo_fixup;
4429 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4430 e.X_add_number = 0;
4431 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4432 && S_IS_DEFINED (unwind.proc_pending.sym))
4433 e.X_add_symbol
4434 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4435 symbol_get_frag (unwind.proc_pending.sym),
4436 S_GET_VALUE (unwind.proc_pending.sym));
4437 else
4438 e.X_add_symbol = unwind.proc_pending.sym;
4439 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4440 BFD_RELOC_NONE);
4441
4442 e.X_op = O_pseudo_fixup;
4443 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4444 e.X_add_number = 0;
4445 e.X_add_symbol = proc_end;
4446 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4447 bytes_per_address, &e, BFD_RELOC_NONE);
4448
4449 if (unwind.info)
4450 {
4451 e.X_op = O_pseudo_fixup;
4452 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4453 e.X_add_number = 0;
4454 e.X_add_symbol = unwind.info;
4455 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4456 bytes_per_address, &e, BFD_RELOC_NONE);
4457 }
4458 }
4459 subseg_set (saved_seg, saved_subseg);
4460
4461 /* Set symbol sizes. */
4462 pending = &unwind.proc_pending;
4463 if (S_GET_NAME (pending->sym))
4464 {
4465 do
4466 {
4467 symbolS *sym = pending->sym;
4468
4469 if (!S_IS_DEFINED (sym))
4470 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4471 else if (S_GET_SIZE (sym) == 0
4472 && symbol_get_obj (sym)->size == NULL)
4473 {
4474 fragS *frag = symbol_get_frag (sym);
4475
4476 if (frag)
4477 {
4478 if (frag == frag_now && SEG_NORMAL (now_seg))
4479 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4480 else
4481 {
4482 symbol_get_obj (sym)->size = XNEW (expressionS);
4483 symbol_get_obj (sym)->size->X_op = O_subtract;
4484 symbol_get_obj (sym)->size->X_add_symbol
4485 = symbol_new (FAKE_LABEL_NAME, now_seg,
4486 frag_now, frag_now_fix ());
4487 symbol_get_obj (sym)->size->X_op_symbol = sym;
4488 symbol_get_obj (sym)->size->X_add_number = 0;
4489 }
4490 }
4491 }
4492 } while ((pending = pending->next) != NULL);
4493 }
4494
4495 /* Parse names of main and alternate entry points. */
4496 while (1)
4497 {
4498 char *name, *p, c;
4499
4500 SKIP_WHITESPACE ();
4501 c = get_symbol_name (&name);
4502 p = input_line_pointer;
4503 if (!*name)
4504 (md.unwind_check == unwind_check_warning
4505 ? as_warn
4506 : as_bad) (_("Empty argument of .endp"));
4507 else
4508 {
4509 symbolS *sym = symbol_find (name);
4510
4511 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4512 {
4513 if (sym == pending->sym)
4514 {
4515 pending->sym = NULL;
4516 break;
4517 }
4518 }
4519 if (!sym || !pending)
4520 as_warn (_("`%s' was not specified with previous .proc"), name);
4521 }
4522 *p = c;
4523 SKIP_WHITESPACE_AFTER_NAME ();
4524 if (*input_line_pointer != ',')
4525 break;
4526 ++input_line_pointer;
4527 }
4528 demand_empty_rest_of_line ();
4529
4530 /* Deliberately only checking for the main entry point here; the
4531 language spec even says all arguments to .endp are ignored. */
4532 if (unwind.proc_pending.sym
4533 && S_GET_NAME (unwind.proc_pending.sym)
4534 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4535 as_warn (_("`%s' should be an operand to this .endp"),
4536 S_GET_NAME (unwind.proc_pending.sym));
4537 while (unwind.proc_pending.next)
4538 {
4539 pending = unwind.proc_pending.next;
4540 unwind.proc_pending.next = pending->next;
4541 free (pending);
4542 }
4543 unwind.proc_pending.sym = unwind.info = NULL;
4544 }
4545
4546 static void
4547 dot_template (int template_val)
4548 {
4549 CURR_SLOT.user_template = template_val;
4550 }
4551
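/* .regstk ins, locals, outs, rotating -- describe the register stack frame
   to the assembler (mirroring a preceding "alloc"); with no operands the
   frame is reset to empty.  For instance, ".regstk 2, 6, 1, 8" would match
   "alloc r34 = ar.pfs, 2, 6, 1, 8" (illustrative example only).  */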
4552 static void
4553 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4554 {
4555 int ins, locs, outs, rots;
4556
4557 if (is_it_end_of_statement ())
4558 ins = locs = outs = rots = 0;
4559 else
4560 {
4561 ins = get_absolute_expression ();
4562 if (*input_line_pointer++ != ',')
4563 goto err;
4564 locs = get_absolute_expression ();
4565 if (*input_line_pointer++ != ',')
4566 goto err;
4567 outs = get_absolute_expression ();
4568 if (*input_line_pointer++ != ',')
4569 goto err;
4570 rots = get_absolute_expression ();
4571 }
4572 set_regstack (ins, locs, outs, rots);
4573 return;
4574
4575 err:
4576 as_bad (_("Comma expected"));
4577 ignore_rest_of_line ();
4578 }
4579
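/* .rotr / .rotf / .rotp name1[n1] [, name2[n2] ...] -- give symbolic names
   to groups of rotating general, floating-point or predicate registers.
   E.g. ".rotr in[4], out[4]" (illustrative) maps in[0]..in[3] and
   out[0]..out[3] onto consecutive rotating GRs starting at r32.  */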
4580 static void
4581 dot_rot (int type)
4582 {
4583 offsetT num_regs;
4584 valueT num_alloced = 0;
4585 struct dynreg **drpp, *dr;
4586 int ch, base_reg = 0;
4587 char *name, *start;
4588 size_t len;
4589
4590 switch (type)
4591 {
4592 case DYNREG_GR: base_reg = REG_GR + 32; break;
4593 case DYNREG_FR: base_reg = REG_FR + 32; break;
4594 case DYNREG_PR: base_reg = REG_P + 16; break;
4595 default: break;
4596 }
4597
4598 /* First, remove existing names from hash table. */
4599 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4600 {
4601 str_hash_delete (md.dynreg_hash, dr->name);
4602 /* FIXME: Free dr->name. */
4603 dr->num_regs = 0;
4604 }
4605
4606 drpp = &md.dynreg[type];
4607 while (1)
4608 {
4609 ch = get_symbol_name (&start);
4610 len = strlen (ia64_canonicalize_symbol_name (start));
4611 *input_line_pointer = ch;
4612
4613 SKIP_WHITESPACE_AFTER_NAME ();
4614 if (*input_line_pointer != '[')
4615 {
4616 as_bad (_("Expected '['"));
4617 goto err;
4618 }
4619 ++input_line_pointer; /* skip '[' */
4620
4621 num_regs = get_absolute_expression ();
4622
4623 if (*input_line_pointer++ != ']')
4624 {
4625 as_bad (_("Expected ']'"));
4626 goto err;
4627 }
4628 if (num_regs <= 0)
4629 {
4630 as_bad (_("Number of elements must be positive"));
4631 goto err;
4632 }
4633 SKIP_WHITESPACE ();
4634
4635 num_alloced += num_regs;
4636 switch (type)
4637 {
4638 case DYNREG_GR:
4639 if (num_alloced > md.rot.num_regs)
4640 {
4641 as_bad (_("Used more than the declared %d rotating registers"),
4642 md.rot.num_regs);
4643 goto err;
4644 }
4645 break;
4646 case DYNREG_FR:
4647 if (num_alloced > 96)
4648 {
4649 as_bad (_("Used more than the available 96 rotating registers"));
4650 goto err;
4651 }
4652 break;
4653 case DYNREG_PR:
4654 if (num_alloced > 48)
4655 {
4656 as_bad (_("Used more than the available 48 rotating registers"));
4657 goto err;
4658 }
4659 break;
4660
4661 default:
4662 break;
4663 }
4664
4665 if (!*drpp)
4666 *drpp = notes_calloc (1, sizeof (**drpp));
4667
4668 name = notes_memdup (start, len, len + 1);
4669
4670 dr = *drpp;
4671 dr->name = name;
4672 dr->num_regs = num_regs;
4673 dr->base = base_reg;
4674 drpp = &dr->next;
4675 base_reg += num_regs;
4676
4677 if (str_hash_insert (md.dynreg_hash, name, dr, 0) != NULL)
4678 {
4679 as_bad (_("Attempt to redefine register set `%s'"), name);
4680 goto err;
4681 }
4682
4683 if (*input_line_pointer != ',')
4684 break;
4685 ++input_line_pointer; /* skip comma */
4686 SKIP_WHITESPACE ();
4687 }
4688 demand_empty_rest_of_line ();
4689 return;
4690
4691 err:
4692 ignore_rest_of_line ();
4693 }
4694
4695 static void
4696 dot_byteorder (int byteorder)
4697 {
4698 segment_info_type *seginfo = seg_info (now_seg);
4699
4700 if (byteorder == -1)
4701 {
4702 if (seginfo->tc_segment_info_data.endian == 0)
4703 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4704 byteorder = seginfo->tc_segment_info_data.endian == 1;
4705 }
4706 else
4707 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4708
4709 if (target_big_endian != byteorder)
4710 {
4711 target_big_endian = byteorder;
4712 if (target_big_endian)
4713 {
4714 ia64_number_to_chars = number_to_chars_bigendian;
4715 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4716 }
4717 else
4718 {
4719 ia64_number_to_chars = number_to_chars_littleendian;
4720 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4721 }
4722 }
4723 }
4724
4725 static void
4726 dot_psr (int dummy ATTRIBUTE_UNUSED)
4727 {
4728 char *option;
4729 int ch;
4730
4731 while (1)
4732 {
4733 ch = get_symbol_name (&option);
4734 if (strcmp (option, "lsb") == 0)
4735 md.flags &= ~EF_IA_64_BE;
4736 else if (strcmp (option, "msb") == 0)
4737 md.flags |= EF_IA_64_BE;
4738 else if (strcmp (option, "abi32") == 0)
4739 md.flags &= ~EF_IA_64_ABI64;
4740 else if (strcmp (option, "abi64") == 0)
4741 md.flags |= EF_IA_64_ABI64;
4742 else
4743 as_bad (_("Unknown psr option `%s'"), option);
4744 *input_line_pointer = ch;
4745
4746 SKIP_WHITESPACE_AFTER_NAME ();
4747 if (*input_line_pointer != ',')
4748 break;
4749
4750 ++input_line_pointer;
4751 SKIP_WHITESPACE ();
4752 }
4753 demand_empty_rest_of_line ();
4754 }
4755
4756 static void
4757 dot_ln (int dummy ATTRIBUTE_UNUSED)
4758 {
4759 new_logical_line (0, get_absolute_expression ());
4760 demand_empty_rest_of_line ();
4761 }
4762
4763 static void
4764 cross_section (int ref, void (*builder) (int), int ua)
4765 {
4766 char *start, *end;
4767 int saved_auto_align;
4768 unsigned int section_count;
4769 const char *name;
4770
4771 start = input_line_pointer;
4772 name = obj_elf_section_name ();
4773 if (name == NULL)
4774 return;
4775 end = input_line_pointer;
4776 if (*input_line_pointer != ',')
4777 {
4778 as_bad (_("Comma expected after section name"));
4779 ignore_rest_of_line ();
4780 return;
4781 }
4782 *end = '\0';
4783 end = input_line_pointer + 1; /* skip comma */
4784 input_line_pointer = start;
4785 md.keep_pending_output = 1;
4786 section_count = bfd_count_sections (stdoutput);
4787 obj_elf_section (0);
4788 if (section_count != bfd_count_sections (stdoutput))
4789 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4790 input_line_pointer = end;
4791 saved_auto_align = md.auto_align;
4792 if (ua)
4793 md.auto_align = 0;
4794 (*builder) (ref);
4795 if (ua)
4796 md.auto_align = saved_auto_align;
4797 obj_elf_previous (0);
4798 md.keep_pending_output = 0;
4799 }
4800
4801 static void
4802 dot_xdata (int size)
4803 {
4804 cross_section (size, cons, 0);
4805 }
4806
4807 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
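/* Align according to the size implied by KIND before emitting the values:
   'd' (real8) to 8 bytes, 'x'/'X' (real10/real16) to 16 bytes, and anything
   else ('f', real4) to 4 bytes.  */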
4808
4809 static void
4810 stmt_float_cons (int kind)
4811 {
4812 size_t alignment;
4813
4814 switch (kind)
4815 {
4816 case 'd':
4817 alignment = 3;
4818 break;
4819
4820 case 'x':
4821 case 'X':
4822 alignment = 4;
4823 break;
4824
4825 case 'f':
4826 default:
4827 alignment = 2;
4828 break;
4829 }
4830 do_align (alignment, NULL, 0, 0);
4831 float_cons (kind);
4832 }
4833
4834 static void
4835 stmt_cons_ua (int size)
4836 {
4837 int saved_auto_align = md.auto_align;
4838
4839 md.auto_align = 0;
4840 cons (size);
4841 md.auto_align = saved_auto_align;
4842 }
4843
4844 static void
4845 dot_xfloat_cons (int kind)
4846 {
4847 cross_section (kind, stmt_float_cons, 0);
4848 }
4849
4850 static void
4851 dot_xstringer (int zero)
4852 {
4853 cross_section (zero, stringer, 0);
4854 }
4855
4856 static void
4857 dot_xdata_ua (int size)
4858 {
4859 cross_section (size, cons, 1);
4860 }
4861
4862 static void
4863 dot_xfloat_cons_ua (int kind)
4864 {
4865 cross_section (kind, float_cons, 1);
4866 }
4867
4868 /* .reg.val <regname>,value */
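/* For example (illustrative), ".reg.val r14, 0x1000" records that r14 is
   known to contain 0x1000 on the current execution path, which the DV
   checker can then use when reasoning about register contents.  */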
4869
4870 static void
4871 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4872 {
4873 expressionS reg;
4874
4875 expression_and_evaluate (&reg);
4876 if (reg.X_op != O_register)
4877 {
4878 as_bad (_("Register name expected"));
4879 ignore_rest_of_line ();
4880 }
4881 else if (*input_line_pointer++ != ',')
4882 {
4883 as_bad (_("Comma expected"));
4884 ignore_rest_of_line ();
4885 }
4886 else
4887 {
4888 valueT value = get_absolute_expression ();
4889 int regno = reg.X_add_number;
4890 if (regno <= REG_GR || regno > REG_GR + 127)
4891 as_warn (_("Register value annotation ignored"));
4892 else
4893 {
4894 gr_values[regno - REG_GR].known = 1;
4895 gr_values[regno - REG_GR].value = value;
4896 gr_values[regno - REG_GR].path = md.path;
4897 }
4898 }
4899 demand_empty_rest_of_line ();
4900 }
4901
4902 /*
4903 .serialize.data
4904 .serialize.instruction
4905 */
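/* These record an explicit data / instruction serialization point for the
   dependency-violation checker; by convention they would accompany srlz.d
   or srlz.i instructions in the source, though the directive itself only
   updates the checker's state and inserts group breaks.  */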
4906 static void
4907 dot_serialize (int type)
4908 {
4909 insn_group_break (0, 0, 0);
4910 if (type)
4911 instruction_serialization ();
4912 else
4913 data_serialization ();
4914 insn_group_break (0, 0, 0);
4915 demand_empty_rest_of_line ();
4916 }
4917
4918 /* select dv checking mode
4919 .auto
4920 .explicit
4921 .default
4922
4923 A stop is inserted when changing modes
4924 */
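/* With ".explicit" the programmer is responsible for inserting stops (";;")
   and GAS only checks for dependency violations; with ".auto" GAS inserts
   the stops itself; ".default" restores whatever mode was selected on the
   command line.  */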
4925
4926 static void
4927 dot_dv_mode (int type)
4928 {
4929 if (md.manual_bundling)
4930 as_warn (_("Directive invalid within a bundle"));
4931
4932 if (type == 'E' || type == 'A')
4933 md.mode_explicitly_set = 0;
4934 else
4935 md.mode_explicitly_set = 1;
4936
4937 md.detect_dv = 1;
4938 switch (type)
4939 {
4940 case 'A':
4941 case 'a':
4942 if (md.explicit_mode)
4943 insn_group_break (1, 0, 0);
4944 md.explicit_mode = 0;
4945 break;
4946 case 'E':
4947 case 'e':
4948 if (!md.explicit_mode)
4949 insn_group_break (1, 0, 0);
4950 md.explicit_mode = 1;
4951 break;
4952 default:
4953 case 'd':
4954 if (md.explicit_mode != md.default_explicit_mode)
4955 insn_group_break (1, 0, 0);
4956 md.explicit_mode = md.default_explicit_mode;
4957 md.mode_explicitly_set = 0;
4958 break;
4959 }
4960 }
4961
4962 static void
4963 print_prmask (valueT mask)
4964 {
4965 int regno;
4966 const char *comma = "";
4967 for (regno = 0; regno < 64; regno++)
4968 {
4969 if (mask & ((valueT) 1 << regno))
4970 {
4971 fprintf (stderr, "%s p%d", comma, regno);
4972 comma = ",";
4973 }
4974 }
4975 }
4976
4977 /*
4978 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4979 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4980 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4981 .pred.safe_across_calls p1 [, p2 [,...]]
4982 */
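/* Illustrative examples: ".pred.rel.mutex p6, p7" (equivalently
   ".pred.rel "mutex", p6, p7") asserts that p6 and p7 are never both true,
   and ".pred.rel.clear p6, p7" drops any relations previously recorded for
   those predicates.  */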
4983
4984 static void
4985 dot_pred_rel (int type)
4986 {
4987 valueT mask = 0;
4988 int count = 0;
4989 int p1 = -1, p2 = -1;
4990
4991 if (type == 0)
4992 {
4993 if (*input_line_pointer == '"')
4994 {
4995 int len;
4996 char *form = demand_copy_C_string (&len);
4997
4998 if (strcmp (form, "mutex") == 0)
4999 type = 'm';
5000 else if (strcmp (form, "clear") == 0)
5001 type = 'c';
5002 else if (strcmp (form, "imply") == 0)
5003 type = 'i';
5004 notes_free (form);
5005 }
5006 else if (*input_line_pointer == '@')
5007 {
5008 char *form;
5009 char c;
5010
5011 ++input_line_pointer;
5012 c = get_symbol_name (&form);
5013
5014 if (strcmp (form, "mutex") == 0)
5015 type = 'm';
5016 else if (strcmp (form, "clear") == 0)
5017 type = 'c';
5018 else if (strcmp (form, "imply") == 0)
5019 type = 'i';
5020 (void) restore_line_pointer (c);
5021 }
5022 else
5023 {
5024 as_bad (_("Missing predicate relation type"));
5025 ignore_rest_of_line ();
5026 return;
5027 }
5028 if (type == 0)
5029 {
5030 as_bad (_("Unrecognized predicate relation type"));
5031 ignore_rest_of_line ();
5032 return;
5033 }
5034 if (*input_line_pointer == ',')
5035 ++input_line_pointer;
5036 SKIP_WHITESPACE ();
5037 }
5038
5039 while (1)
5040 {
5041 valueT bits = 1;
5042 int sep, regno;
5043 expressionS pr, *pr1, *pr2;
5044
5045 sep = parse_operand_and_eval (&pr, ',');
5046 if (pr.X_op == O_register
5047 && pr.X_add_number >= REG_P
5048 && pr.X_add_number <= REG_P + 63)
5049 {
5050 regno = pr.X_add_number - REG_P;
5051 bits <<= regno;
5052 count++;
5053 if (p1 == -1)
5054 p1 = regno;
5055 else if (p2 == -1)
5056 p2 = regno;
5057 }
5058 else if (type != 'i'
5059 && pr.X_op == O_subtract
5060 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5061 && pr1->X_op == O_register
5062 && pr1->X_add_number >= REG_P
5063 && pr1->X_add_number <= REG_P + 63
5064 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5065 && pr2->X_op == O_register
5066 && pr2->X_add_number >= REG_P
5067 && pr2->X_add_number <= REG_P + 63)
5068 {
5069 /* It's a range. */
5070 int stop;
5071
5072 regno = pr1->X_add_number - REG_P;
5073 stop = pr2->X_add_number - REG_P;
5074 if (regno >= stop)
5075 {
5076 as_bad (_("Bad register range"));
5077 ignore_rest_of_line ();
5078 return;
5079 }
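/* Set the mask bits for registers REGNO through STOP inclusive,
   i.e. (1 << (stop + 1)) - (1 << regno).  */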
5080 bits = ((bits << stop) << 1) - (bits << regno);
5081 count += stop - regno + 1;
5082 }
5083 else
5084 {
5085 as_bad (_("Predicate register expected"));
5086 ignore_rest_of_line ();
5087 return;
5088 }
5089 if (mask & bits)
5090 as_warn (_("Duplicate predicate register ignored"));
5091 mask |= bits;
5092 if (sep != ',')
5093 break;
5094 }
5095
5096 switch (type)
5097 {
5098 case 'c':
5099 if (count == 0)
5100 mask = ~(valueT) 0;
5101 clear_qp_mutex (mask);
5102 clear_qp_implies (mask, (valueT) 0);
5103 break;
5104 case 'i':
5105 if (count != 2 || p1 == -1 || p2 == -1)
5106 as_bad (_("Predicate source and target required"));
5107 else if (p1 == 0 || p2 == 0)
5108 as_bad (_("Use of p0 is not valid in this context"));
5109 else
5110 add_qp_imply (p1, p2);
5111 break;
5112 case 'm':
5113 if (count < 2)
5114 {
5115 as_bad (_("At least two PR arguments expected"));
5116 break;
5117 }
5118 else if (mask & 1)
5119 {
5120 as_bad (_("Use of p0 is not valid in this context"));
5121 break;
5122 }
5123 add_qp_mutex (mask);
5124 break;
5125 case 's':
5126 /* Note that we don't override any existing relations. */
5127 if (count == 0)
5128 {
5129 as_bad (_("At least one PR argument expected"));
5130 break;
5131 }
5132 if (md.debug_dv)
5133 {
5134 fprintf (stderr, "Safe across calls: ");
5135 print_prmask (mask);
5136 fprintf (stderr, "\n");
5137 }
5138 qp_safe_across_calls = mask;
5139 break;
5140 }
5141 demand_empty_rest_of_line ();
5142 }
5143
5144 /* .entry label [, label [, ...]]
5145 Hint to DV code that the given labels are to be considered entry points.
5146 Otherwise, only global labels are considered entry points. */
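/* E.g. (illustrative) ".entry alt_entry" before the local label
   "alt_entry:" makes the DV checker treat that label like an externally
   reachable entry point.  */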
5147
5148 static void
5149 dot_entry (int dummy ATTRIBUTE_UNUSED)
5150 {
5151 char *name;
5152 int c;
5153 symbolS *symbolP;
5154
5155 do
5156 {
5157 c = get_symbol_name (&name);
5158 symbolP = symbol_find_or_make (name);
5159
5160 if (str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), symbolP, 0))
5161 as_bad (_("duplicate entry hint %s"), name);
5162
5163 *input_line_pointer = c;
5164 SKIP_WHITESPACE_AFTER_NAME ();
5165 c = *input_line_pointer;
5166 if (c == ',')
5167 {
5168 input_line_pointer++;
5169 SKIP_WHITESPACE ();
5170 if (*input_line_pointer == '\n')
5171 c = '\n';
5172 }
5173 }
5174 while (c == ',');
5175
5176 demand_empty_rest_of_line ();
5177 }
5178
5179 /* .mem.offset offset, base
5180 "base" is used to distinguish between offsets from a different base. */
5181
5182 static void
5183 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5184 {
5185 md.mem_offset.hint = 1;
5186 md.mem_offset.offset = get_absolute_expression ();
5187 if (*input_line_pointer != ',')
5188 {
5189 as_bad (_("Comma expected"));
5190 ignore_rest_of_line ();
5191 return;
5192 }
5193 ++input_line_pointer;
5194 md.mem_offset.base = get_absolute_expression ();
5195 demand_empty_rest_of_line ();
5196 }
5197
5198 /* ia64-specific pseudo-ops: */
5199 const pseudo_typeS md_pseudo_table[] =
5200 {
5201 { "radix", dot_radix, 0 },
5202 { "lcomm", s_lcomm_bytes, 1 },
5203 { "loc", dot_loc, 0 },
5204 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5205 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5206 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5207 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5208 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5209 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5210 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5211 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5212 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5213 { "proc", dot_proc, 0 },
5214 { "body", dot_body, 0 },
5215 { "prologue", dot_prologue, 0 },
5216 { "endp", dot_endp, 0 },
5217
5218 { "fframe", dot_fframe, 0 },
5219 { "vframe", dot_vframe, 0 },
5220 { "vframesp", dot_vframesp, 0 },
5221 { "vframepsp", dot_vframesp, 1 },
5222 { "save", dot_save, 0 },
5223 { "restore", dot_restore, 0 },
5224 { "restorereg", dot_restorereg, 0 },
5225 { "restorereg.p", dot_restorereg, 1 },
5226 { "handlerdata", dot_handlerdata, 0 },
5227 { "unwentry", dot_unwentry, 0 },
5228 { "altrp", dot_altrp, 0 },
5229 { "savesp", dot_savemem, 0 },
5230 { "savepsp", dot_savemem, 1 },
5231 { "save.g", dot_saveg, 0 },
5232 { "save.f", dot_savef, 0 },
5233 { "save.b", dot_saveb, 0 },
5234 { "save.gf", dot_savegf, 0 },
5235 { "spill", dot_spill, 0 },
5236 { "spillreg", dot_spillreg, 0 },
5237 { "spillsp", dot_spillmem, 0 },
5238 { "spillpsp", dot_spillmem, 1 },
5239 { "spillreg.p", dot_spillreg, 1 },
5240 { "spillsp.p", dot_spillmem, ~0 },
5241 { "spillpsp.p", dot_spillmem, ~1 },
5242 { "label_state", dot_label_state, 0 },
5243 { "copy_state", dot_copy_state, 0 },
5244 { "unwabi", dot_unwabi, 0 },
5245 { "personality", dot_personality, 0 },
5246 { "mii", dot_template, 0x0 },
5247 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5248 { "mlx", dot_template, 0x2 },
5249 { "mmi", dot_template, 0x4 },
5250 { "mfi", dot_template, 0x6 },
5251 { "mmf", dot_template, 0x7 },
5252 { "mib", dot_template, 0x8 },
5253 { "mbb", dot_template, 0x9 },
5254 { "bbb", dot_template, 0xb },
5255 { "mmb", dot_template, 0xc },
5256 { "mfb", dot_template, 0xe },
5257 { "align", dot_align, 0 },
5258 { "regstk", dot_regstk, 0 },
5259 { "rotr", dot_rot, DYNREG_GR },
5260 { "rotf", dot_rot, DYNREG_FR },
5261 { "rotp", dot_rot, DYNREG_PR },
5262 { "lsb", dot_byteorder, 0 },
5263 { "msb", dot_byteorder, 1 },
5264 { "psr", dot_psr, 0 },
5265 { "alias", dot_alias, 0 },
5266 { "secalias", dot_alias, 1 },
5267 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5268
5269 { "xdata1", dot_xdata, 1 },
5270 { "xdata2", dot_xdata, 2 },
5271 { "xdata4", dot_xdata, 4 },
5272 { "xdata8", dot_xdata, 8 },
5273 { "xdata16", dot_xdata, 16 },
5274 { "xreal4", dot_xfloat_cons, 'f' },
5275 { "xreal8", dot_xfloat_cons, 'd' },
5276 { "xreal10", dot_xfloat_cons, 'x' },
5277 { "xreal16", dot_xfloat_cons, 'X' },
5278 { "xstring", dot_xstringer, 8 + 0 },
5279 { "xstringz", dot_xstringer, 8 + 1 },
5280
5281 /* unaligned versions: */
5282 { "xdata2.ua", dot_xdata_ua, 2 },
5283 { "xdata4.ua", dot_xdata_ua, 4 },
5284 { "xdata8.ua", dot_xdata_ua, 8 },
5285 { "xdata16.ua", dot_xdata_ua, 16 },
5286 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5287 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5288 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5289 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5290
5291 /* annotations/DV checking support */
5292 { "entry", dot_entry, 0 },
5293 { "mem.offset", dot_mem_offset, 0 },
5294 { "pred.rel", dot_pred_rel, 0 },
5295 { "pred.rel.clear", dot_pred_rel, 'c' },
5296 { "pred.rel.imply", dot_pred_rel, 'i' },
5297 { "pred.rel.mutex", dot_pred_rel, 'm' },
5298 { "pred.safe_across_calls", dot_pred_rel, 's' },
5299 { "reg.val", dot_reg_val, 0 },
5300 { "serialize.data", dot_serialize, 0 },
5301 { "serialize.instruction", dot_serialize, 1 },
5302 { "auto", dot_dv_mode, 'a' },
5303 { "explicit", dot_dv_mode, 'e' },
5304 { "default", dot_dv_mode, 'd' },
5305
5306 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5307 IA-64 aligns data allocation pseudo-ops by default, so we have to
5308 tell it that these ones are supposed to be unaligned. Long term,
5309 should rewrite so that only IA-64 specific data allocation pseudo-ops
5310 are aligned by default. */
5311 {"2byte", stmt_cons_ua, 2},
5312 {"4byte", stmt_cons_ua, 4},
5313 {"8byte", stmt_cons_ua, 8},
5314
5315 #ifdef TE_VMS
5316 {"vms_common", obj_elf_vms_common, 0},
5317 #endif
5318
5319 { NULL, 0, 0 }
5320 };
5321
5322 static const struct pseudo_opcode
5323 {
5324 const char *name;
5325 void (*handler) (int);
5326 int arg;
5327 }
5328 pseudo_opcode[] =
5329 {
5330 /* These are more like pseudo-ops, but don't start with a dot. */
5331 { "data1", cons, 1 },
5332 { "data2", cons, 2 },
5333 { "data4", cons, 4 },
5334 { "data8", cons, 8 },
5335 { "data16", cons, 16 },
5336 { "real4", stmt_float_cons, 'f' },
5337 { "real8", stmt_float_cons, 'd' },
5338 { "real10", stmt_float_cons, 'x' },
5339 { "real16", stmt_float_cons, 'X' },
5340 { "string", stringer, 8 + 0 },
5341 { "stringz", stringer, 8 + 1 },
5342
5343 /* unaligned versions: */
5344 { "data2.ua", stmt_cons_ua, 2 },
5345 { "data4.ua", stmt_cons_ua, 4 },
5346 { "data8.ua", stmt_cons_ua, 8 },
5347 { "data16.ua", stmt_cons_ua, 16 },
5348 { "real4.ua", float_cons, 'f' },
5349 { "real8.ua", float_cons, 'd' },
5350 { "real10.ua", float_cons, 'x' },
5351 { "real16.ua", float_cons, 'X' },
5352 };
5353
5354 /* Declare a register by creating a symbol for it and entering it in
5355 the symbol table. */
5356
5357 static symbolS *
5358 declare_register (const char *name, unsigned int regnum)
5359 {
5360 symbolS *sym;
5361
5362 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5363
5364 if (str_hash_insert (md.reg_hash, S_GET_NAME (sym), sym, 0) != NULL)
5365 as_fatal (_("duplicate %s"), name);
5366
5367 return sym;
5368 }
5369
5370 static void
5371 declare_register_set (const char *prefix,
5372 unsigned int num_regs,
5373 unsigned int base_regnum)
5374 {
5375 char name[8];
5376 unsigned int i;
5377
5378 for (i = 0; i < num_regs; ++i)
5379 {
5380 snprintf (name, sizeof (name), "%s%u", prefix, i);
5381 declare_register (name, base_regnum + i);
5382 }
5383 }
5384
5385 static unsigned int
5386 operand_width (enum ia64_opnd opnd)
5387 {
5388 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5389 unsigned int bits = 0;
5390 int i;
5391
5392 bits = 0;
5393 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5394 bits += odesc->field[i].bits;
5395
5396 return bits;
5397 }
5398
5399 static enum operand_match_result
5400 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5401 {
5402 enum ia64_opnd opnd = idesc->operands[res_index];
5403 int bits, relocatable = 0;
5404 struct insn_fix *fix;
5405 bfd_signed_vma val;
5406
5407 switch (opnd)
5408 {
5409 /* constants: */
5410
5411 case IA64_OPND_AR_CCV:
5412 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5413 return OPERAND_MATCH;
5414 break;
5415
5416 case IA64_OPND_AR_CSD:
5417 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5418 return OPERAND_MATCH;
5419 break;
5420
5421 case IA64_OPND_AR_PFS:
5422 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5423 return OPERAND_MATCH;
5424 break;
5425
5426 case IA64_OPND_GR0:
5427 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5428 return OPERAND_MATCH;
5429 break;
5430
5431 case IA64_OPND_IP:
5432 if (e->X_op == O_register && e->X_add_number == REG_IP)
5433 return OPERAND_MATCH;
5434 break;
5435
5436 case IA64_OPND_PR:
5437 if (e->X_op == O_register && e->X_add_number == REG_PR)
5438 return OPERAND_MATCH;
5439 break;
5440
5441 case IA64_OPND_PR_ROT:
5442 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5443 return OPERAND_MATCH;
5444 break;
5445
5446 case IA64_OPND_PSR:
5447 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5448 return OPERAND_MATCH;
5449 break;
5450
5451 case IA64_OPND_PSR_L:
5452 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5453 return OPERAND_MATCH;
5454 break;
5455
5456 case IA64_OPND_PSR_UM:
5457 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5458 return OPERAND_MATCH;
5459 break;
5460
5461 case IA64_OPND_C1:
5462 if (e->X_op == O_constant)
5463 {
5464 if (e->X_add_number == 1)
5465 return OPERAND_MATCH;
5466 else
5467 return OPERAND_OUT_OF_RANGE;
5468 }
5469 break;
5470
5471 case IA64_OPND_C8:
5472 if (e->X_op == O_constant)
5473 {
5474 if (e->X_add_number == 8)
5475 return OPERAND_MATCH;
5476 else
5477 return OPERAND_OUT_OF_RANGE;
5478 }
5479 break;
5480
5481 case IA64_OPND_C16:
5482 if (e->X_op == O_constant)
5483 {
5484 if (e->X_add_number == 16)
5485 return OPERAND_MATCH;
5486 else
5487 return OPERAND_OUT_OF_RANGE;
5488 }
5489 break;
5490
5491 /* register operands: */
5492
5493 case IA64_OPND_AR3:
5494 if (e->X_op == O_register && e->X_add_number >= REG_AR
5495 && e->X_add_number < REG_AR + 128)
5496 return OPERAND_MATCH;
5497 break;
5498
5499 case IA64_OPND_B1:
5500 case IA64_OPND_B2:
5501 if (e->X_op == O_register && e->X_add_number >= REG_BR
5502 && e->X_add_number < REG_BR + 8)
5503 return OPERAND_MATCH;
5504 break;
5505
5506 case IA64_OPND_CR3:
5507 if (e->X_op == O_register && e->X_add_number >= REG_CR
5508 && e->X_add_number < REG_CR + 128)
5509 return OPERAND_MATCH;
5510 break;
5511
5512 case IA64_OPND_DAHR3:
5513 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5514 && e->X_add_number < REG_DAHR + 8)
5515 return OPERAND_MATCH;
5516 break;
5517
5518 case IA64_OPND_F1:
5519 case IA64_OPND_F2:
5520 case IA64_OPND_F3:
5521 case IA64_OPND_F4:
5522 if (e->X_op == O_register && e->X_add_number >= REG_FR
5523 && e->X_add_number < REG_FR + 128)
5524 return OPERAND_MATCH;
5525 break;
5526
5527 case IA64_OPND_P1:
5528 case IA64_OPND_P2:
5529 if (e->X_op == O_register && e->X_add_number >= REG_P
5530 && e->X_add_number < REG_P + 64)
5531 return OPERAND_MATCH;
5532 break;
5533
5534 case IA64_OPND_R1:
5535 case IA64_OPND_R2:
5536 case IA64_OPND_R3:
5537 if (e->X_op == O_register && e->X_add_number >= REG_GR
5538 && e->X_add_number < REG_GR + 128)
5539 return OPERAND_MATCH;
5540 break;
5541
5542 case IA64_OPND_R3_2:
5543 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5544 {
5545 if (e->X_add_number < REG_GR + 4)
5546 return OPERAND_MATCH;
5547 else if (e->X_add_number < REG_GR + 128)
5548 return OPERAND_OUT_OF_RANGE;
5549 }
5550 break;
5551
5552 /* indirect operands: */
5553 case IA64_OPND_CPUID_R3:
5554 case IA64_OPND_DBR_R3:
5555 case IA64_OPND_DTR_R3:
5556 case IA64_OPND_ITR_R3:
5557 case IA64_OPND_IBR_R3:
5558 case IA64_OPND_MSR_R3:
5559 case IA64_OPND_PKR_R3:
5560 case IA64_OPND_PMC_R3:
5561 case IA64_OPND_PMD_R3:
5562 case IA64_OPND_DAHR_R3:
5563 case IA64_OPND_RR_R3:
5564 if (e->X_op == O_index && e->X_op_symbol
5565 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5566 == opnd - IA64_OPND_CPUID_R3))
5567 return OPERAND_MATCH;
5568 break;
5569
5570 case IA64_OPND_MR3:
5571 if (e->X_op == O_index && !e->X_op_symbol)
5572 return OPERAND_MATCH;
5573 break;
5574
5575 /* immediate operands: */
5576 case IA64_OPND_CNT2a:
5577 case IA64_OPND_LEN4:
5578 case IA64_OPND_LEN6:
5579 bits = operand_width (idesc->operands[res_index]);
5580 if (e->X_op == O_constant)
5581 {
5582 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5583 return OPERAND_MATCH;
5584 else
5585 return OPERAND_OUT_OF_RANGE;
5586 }
5587 break;
5588
5589 case IA64_OPND_CNT2b:
5590 if (e->X_op == O_constant)
5591 {
5592 if ((bfd_vma) (e->X_add_number - 1) < 3)
5593 return OPERAND_MATCH;
5594 else
5595 return OPERAND_OUT_OF_RANGE;
5596 }
5597 break;
5598
5599 case IA64_OPND_CNT2c:
5600 val = e->X_add_number;
5601 if (e->X_op == O_constant)
5602 {
5603 if ((val == 0 || val == 7 || val == 15 || val == 16))
5604 return OPERAND_MATCH;
5605 else
5606 return OPERAND_OUT_OF_RANGE;
5607 }
5608 break;
5609
5610 case IA64_OPND_SOR:
5611 /* SOR must be an integer multiple of 8 */
5612 if (e->X_op == O_constant && e->X_add_number & 0x7)
5613 return OPERAND_OUT_OF_RANGE;
5614 /* Fall through. */
5615 case IA64_OPND_SOF:
5616 case IA64_OPND_SOL:
5617 if (e->X_op == O_constant)
5618 {
5619 if ((bfd_vma) e->X_add_number <= 96)
5620 return OPERAND_MATCH;
5621 else
5622 return OPERAND_OUT_OF_RANGE;
5623 }
5624 break;
5625
5626 case IA64_OPND_IMMU62:
5627 if (e->X_op == O_constant)
5628 {
5629 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5630 return OPERAND_MATCH;
5631 else
5632 return OPERAND_OUT_OF_RANGE;
5633 }
5634 else
5635 {
5636 /* FIXME -- need 62-bit relocation type */
5637 as_bad (_("62-bit relocation not yet implemented"));
5638 }
5639 break;
5640
5641 case IA64_OPND_IMMU64:
5642 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5643 || e->X_op == O_subtract)
5644 {
5645 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5646 fix->code = BFD_RELOC_IA64_IMM64;
5647 if (e->X_op != O_subtract)
5648 {
5649 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5650 if (e->X_op == O_pseudo_fixup)
5651 e->X_op = O_symbol;
5652 }
5653
5654 fix->opnd = idesc->operands[res_index];
5655 fix->expr = *e;
5656 fix->is_pcrel = 0;
5657 ++CURR_SLOT.num_fixups;
5658 return OPERAND_MATCH;
5659 }
5660 else if (e->X_op == O_constant)
5661 return OPERAND_MATCH;
5662 break;
5663
5664 case IA64_OPND_IMMU5b:
5665 if (e->X_op == O_constant)
5666 {
5667 val = e->X_add_number;
5668 if (val >= 32 && val <= 63)
5669 return OPERAND_MATCH;
5670 else
5671 return OPERAND_OUT_OF_RANGE;
5672 }
5673 break;
5674
5675 case IA64_OPND_CCNT5:
5676 case IA64_OPND_CNT5:
5677 case IA64_OPND_CNT6:
5678 case IA64_OPND_CPOS6a:
5679 case IA64_OPND_CPOS6b:
5680 case IA64_OPND_CPOS6c:
5681 case IA64_OPND_IMMU2:
5682 case IA64_OPND_IMMU7a:
5683 case IA64_OPND_IMMU7b:
5684 case IA64_OPND_IMMU16:
5685 case IA64_OPND_IMMU19:
5686 case IA64_OPND_IMMU21:
5687 case IA64_OPND_IMMU24:
5688 case IA64_OPND_MBTYPE4:
5689 case IA64_OPND_MHTYPE8:
5690 case IA64_OPND_POS6:
5691 bits = operand_width (idesc->operands[res_index]);
5692 if (e->X_op == O_constant)
5693 {
5694 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5695 return OPERAND_MATCH;
5696 else
5697 return OPERAND_OUT_OF_RANGE;
5698 }
5699 break;
5700
5701 case IA64_OPND_IMMU9:
5702 bits = operand_width (idesc->operands[res_index]);
5703 if (e->X_op == O_constant)
5704 {
5705 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5706 {
5707 int lobits = e->X_add_number & 0x3;
5708 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5709 e->X_add_number |= (bfd_vma) 0x3;
5710 return OPERAND_MATCH;
5711 }
5712 else
5713 return OPERAND_OUT_OF_RANGE;
5714 }
5715 break;
5716
5717 case IA64_OPND_IMM44:
5718 /* The least significant 16 bits must be zero. */
5719 if ((e->X_add_number & 0xffff) != 0)
5720 /* XXX technically, this is wrong: we should not be issuing warning
5721 messages until we're sure this instruction pattern is going to
5722 be used! */
5723 as_warn (_("lower 16 bits of mask ignored"));
5724
5725 if (e->X_op == O_constant)
5726 {
5727 if (((e->X_add_number >= 0
5728 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5729 || (e->X_add_number < 0
5730 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5731 {
5732 /* sign-extend */
5733 if (e->X_add_number >= 0
5734 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5735 {
5736 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5737 }
5738 return OPERAND_MATCH;
5739 }
5740 else
5741 return OPERAND_OUT_OF_RANGE;
5742 }
5743 break;
5744
5745 case IA64_OPND_IMM17:
5746 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5747 if (e->X_op == O_constant)
5748 {
5749 if (((e->X_add_number >= 0
5750 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5751 || (e->X_add_number < 0
5752 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5753 {
5754 /* sign-extend */
5755 if (e->X_add_number >= 0
5756 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5757 {
5758 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5759 }
5760 return OPERAND_MATCH;
5761 }
5762 else
5763 return OPERAND_OUT_OF_RANGE;
5764 }
5765 break;
5766
5767 case IA64_OPND_IMM14:
5768 case IA64_OPND_IMM22:
5769 relocatable = 1;
5770 /* Fall through. */
5771 case IA64_OPND_IMM1:
5772 case IA64_OPND_IMM8:
5773 case IA64_OPND_IMM8U4:
5774 case IA64_OPND_IMM8M1:
5775 case IA64_OPND_IMM8M1U4:
5776 case IA64_OPND_IMM8M1U8:
5777 case IA64_OPND_IMM9a:
5778 case IA64_OPND_IMM9b:
5779 bits = operand_width (idesc->operands[res_index]);
5780 if (relocatable && (e->X_op == O_symbol
5781 || e->X_op == O_subtract
5782 || e->X_op == O_pseudo_fixup))
5783 {
5784 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5785
5786 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5787 fix->code = BFD_RELOC_IA64_IMM14;
5788 else
5789 fix->code = BFD_RELOC_IA64_IMM22;
5790
5791 if (e->X_op != O_subtract)
5792 {
5793 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5794 if (e->X_op == O_pseudo_fixup)
5795 e->X_op = O_symbol;
5796 }
5797
5798 fix->opnd = idesc->operands[res_index];
5799 fix->expr = *e;
5800 fix->is_pcrel = 0;
5801 ++CURR_SLOT.num_fixups;
5802 return OPERAND_MATCH;
5803 }
5804 else if (e->X_op != O_constant
5805 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5806 return OPERAND_MISMATCH;
5807
5808 if (opnd == IA64_OPND_IMM8M1U4)
5809 {
5810 /* Zero is not valid for unsigned compares that take an adjusted
5811 constant immediate range. */
5812 if (e->X_add_number == 0)
5813 return OPERAND_OUT_OF_RANGE;
5814
5815 /* Sign-extend 32-bit unsigned numbers, so that the following range
5816 checks will work. */
5817 val = e->X_add_number;
5818 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5819 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5820
5821 /* Check for 0x100000000. This is valid because
5822 0x100000000-1 is the same as ((uint32_t) -1). */
5823 if (val == ((bfd_signed_vma) 1 << 32))
5824 return OPERAND_MATCH;
5825
5826 val = val - 1;
5827 }
5828 else if (opnd == IA64_OPND_IMM8M1U8)
5829 {
5830 /* Zero is not valid for unsigned compares that take an adjusted
5831 constant immediate range. */
5832 if (e->X_add_number == 0)
5833 return OPERAND_OUT_OF_RANGE;
5834
5835 /* Check for 0x10000000000000000. */
5836 if (e->X_op == O_big)
5837 {
5838 if (generic_bignum[0] == 0
5839 && generic_bignum[1] == 0
5840 && generic_bignum[2] == 0
5841 && generic_bignum[3] == 0
5842 && generic_bignum[4] == 1)
5843 return OPERAND_MATCH;
5844 else
5845 return OPERAND_OUT_OF_RANGE;
5846 }
5847 else
5848 val = e->X_add_number - 1;
5849 }
5850 else if (opnd == IA64_OPND_IMM8M1)
5851 val = e->X_add_number - 1;
5852 else if (opnd == IA64_OPND_IMM8U4)
5853 {
5854 /* Sign-extend 32-bit unsigned numbers, so that the following range
5855 checks will work. */
5856 val = e->X_add_number;
5857 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5858 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5859 }
5860 else
5861 val = e->X_add_number;
5862
5863 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5864 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5865 return OPERAND_MATCH;
5866 else
5867 return OPERAND_OUT_OF_RANGE;
5868
5869 case IA64_OPND_INC3:
5870 /* +/- 1, 4, 8, 16 */
5871 val = e->X_add_number;
5872 if (val < 0)
5873 val = -val;
5874 if (e->X_op == O_constant)
5875 {
5876 if ((val == 1 || val == 4 || val == 8 || val == 16))
5877 return OPERAND_MATCH;
5878 else
5879 return OPERAND_OUT_OF_RANGE;
5880 }
5881 break;
5882
5883 case IA64_OPND_TGT25:
5884 case IA64_OPND_TGT25b:
5885 case IA64_OPND_TGT25c:
5886 case IA64_OPND_TGT64:
5887 if (e->X_op == O_symbol)
5888 {
5889 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5890 if (opnd == IA64_OPND_TGT25)
5891 fix->code = BFD_RELOC_IA64_PCREL21F;
5892 else if (opnd == IA64_OPND_TGT25b)
5893 fix->code = BFD_RELOC_IA64_PCREL21M;
5894 else if (opnd == IA64_OPND_TGT25c)
5895 fix->code = BFD_RELOC_IA64_PCREL21B;
5896 else if (opnd == IA64_OPND_TGT64)
5897 fix->code = BFD_RELOC_IA64_PCREL60B;
5898 else
5899 abort ();
5900
5901 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5902 fix->opnd = idesc->operands[res_index];
5903 fix->expr = *e;
5904 fix->is_pcrel = 1;
5905 ++CURR_SLOT.num_fixups;
5906 return OPERAND_MATCH;
5907 }
5908 /* Fall through. */
5909 case IA64_OPND_TAG13:
5910 case IA64_OPND_TAG13b:
5911 switch (e->X_op)
5912 {
5913 case O_constant:
5914 return OPERAND_MATCH;
5915
5916 case O_symbol:
5917 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5918 /* There are no external relocs for TAG13/TAG13b fields, so we
5919 create a dummy reloc. This will not live past md_apply_fix. */
5920 fix->code = BFD_RELOC_UNUSED;
5921 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5922 fix->opnd = idesc->operands[res_index];
5923 fix->expr = *e;
5924 fix->is_pcrel = 1;
5925 ++CURR_SLOT.num_fixups;
5926 return OPERAND_MATCH;
5927
5928 default:
5929 break;
5930 }
5931 break;
5932
5933 case IA64_OPND_LDXMOV:
5934 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5935 fix->code = BFD_RELOC_IA64_LDXMOV;
5936 fix->opnd = idesc->operands[res_index];
5937 fix->expr = *e;
5938 fix->is_pcrel = 0;
5939 ++CURR_SLOT.num_fixups;
5940 return OPERAND_MATCH;
5941
5942 case IA64_OPND_STRD5b:
5943 if (e->X_op == O_constant)
5944 {
5945 /* 5-bit signed scaled by 64 */
5946 if ((e->X_add_number <= (0xf << 6))
5947 && (e->X_add_number >= -(0x10 << 6)))
5948 {
5949
5950 /* Must be a multiple of 64 */
5951 if ((e->X_add_number & 0x3f) != 0)
5952 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5953
5954 e->X_add_number &= ~ 0x3f;
5955 return OPERAND_MATCH;
5956 }
5957 else
5958 return OPERAND_OUT_OF_RANGE;
5959 }
5960 break;
5961 case IA64_OPND_CNT6a:
5962 if (e->X_op == O_constant)
5963 {
5964 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5965 if ((e->X_add_number <= 64)
5966 && (e->X_add_number > 0))
5967 {
5968 return OPERAND_MATCH;
5969 }
5970 else
5971 return OPERAND_OUT_OF_RANGE;
5972 }
5973 break;
5974
5975 default:
5976 break;
5977 }
5978 return OPERAND_MISMATCH;
5979 }
5980
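/* Parse a single operand into E; register names are resolved, but the
   expression is not otherwise simplified.  Returns the character that
   terminated the operand; if MORE is nonzero and that character is ','
   or MORE, it is also consumed.  */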
5981 static int
5982 parse_operand (expressionS *e, int more)
5983 {
5984 int sep = '\0';
5985
5986 memset (e, 0, sizeof (*e));
5987 e->X_op = O_absent;
5988 SKIP_WHITESPACE ();
5989 expression (e);
5990 resolve_register (e);
5991 sep = *input_line_pointer;
5992 if (more && (sep == ',' || sep == more))
5993 ++input_line_pointer;
5994 return sep;
5995 }
5996
5997 static int
5998 parse_operand_and_eval (expressionS *e, int more)
5999 {
6000 int sep = parse_operand (e, more);
6001 resolve_expression (e);
6002 return sep;
6003 }
6004
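/* Like parse_operand_and_eval, except that operands which may end up
   needing a relocation (the immediate and branch-target forms listed
   below) are left unresolved so that the later fixup machinery sees the
   original expression.  */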
6005 static int
6006 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6007 {
6008 int sep = parse_operand (e, more);
6009 switch (op)
6010 {
6011 case IA64_OPND_IMM14:
6012 case IA64_OPND_IMM22:
6013 case IA64_OPND_IMMU64:
6014 case IA64_OPND_TGT25:
6015 case IA64_OPND_TGT25b:
6016 case IA64_OPND_TGT25c:
6017 case IA64_OPND_TGT64:
6018 case IA64_OPND_TAG13:
6019 case IA64_OPND_TAG13b:
6020 case IA64_OPND_LDXMOV:
6021 break;
6022 default:
6023 resolve_expression (e);
6024 break;
6025 }
6026 return sep;
6027 }
6028
6029 /* Returns the next entry in the opcode table that matches the one in
6030 IDESC, and frees the entry in IDESC. If no matching entry is
6031 found, NULL is returned instead. */
6032
6033 static struct ia64_opcode *
6034 get_next_opcode (struct ia64_opcode *idesc)
6035 {
6036 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6037 ia64_free_opcode (idesc);
6038 return next;
6039 }
6040
6041 /* Parse the operands for the opcode and find the opcode variant that
6042 matches the specified operands, or NULL if no match is possible. */
6043
6044 static struct ia64_opcode *
6045 parse_operands (struct ia64_opcode *idesc)
6046 {
6047 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6048 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6049 int reg1, reg2;
6050 char reg_class;
6051 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6052 enum operand_match_result result;
6053 char mnemonic[129];
6054 char *first_arg = 0, *end, *saved_input_pointer;
6055 unsigned int sof;
6056
6057 gas_assert (strlen (idesc->name) <= 128);
6058
6059 strcpy (mnemonic, idesc->name);
6060 if (idesc->operands[2] == IA64_OPND_SOF
6061 || idesc->operands[1] == IA64_OPND_SOF)
6062 {
6063 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6064 can't parse the first operand until we have parsed the
6065 remaining operands of the "alloc" instruction. */
6066 SKIP_WHITESPACE ();
6067 first_arg = input_line_pointer;
6068 end = strchr (input_line_pointer, '=');
6069 if (!end)
6070 {
6071 as_bad (_("Expected separator `='"));
6072 return 0;
6073 }
6074 input_line_pointer = end + 1;
6075 ++i;
6076 ++num_outputs;
6077 }
6078
6079 for (; ; ++i)
6080 {
6081 if (i < NELEMS (CURR_SLOT.opnd))
6082 {
6083 enum ia64_opnd op = IA64_OPND_NIL;
6084 if (i < NELEMS (idesc->operands))
6085 op = idesc->operands[i];
6086 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=', op);
6087 if (CURR_SLOT.opnd[i].X_op == O_absent)
6088 break;
6089 }
6090 else
6091 {
6092 expressionS dummy;
6093
6094 sep = parse_operand (&dummy, '=');
6095 if (dummy.X_op == O_absent)
6096 break;
6097 }
6098
6099 ++num_operands;
6100
6101 if (sep != '=' && sep != ',')
6102 break;
6103
6104 if (sep == '=')
6105 {
6106 if (num_outputs > 0)
6107 as_bad (_("Duplicate equal sign (=) in instruction"));
6108 else
6109 num_outputs = i + 1;
6110 }
6111 }
6112 if (sep != '\0')
6113 {
6114 as_bad (_("Illegal operand separator `%c'"), sep);
6115 return 0;
6116 }
6117
6118 if (idesc->operands[2] == IA64_OPND_SOF
6119 || idesc->operands[1] == IA64_OPND_SOF)
6120 {
6121 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6122 Note, however, that due to that mapping, operand numbers in error
6123 messages for any of the constant operands will not be correct. */
6124 know (strcmp (idesc->name, "alloc") == 0);
6125 /* The first operand hasn't been parsed/initialized, yet (but
6126 num_operands intentionally doesn't account for that). */
6127 i = num_operands > 4 ? 2 : 1;
6128 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6129 ? CURR_SLOT.opnd[n].X_add_number \
6130 : 0)
6131 sof = set_regstack (FORCE_CONST(i),
6132 FORCE_CONST(i + 1),
6133 FORCE_CONST(i + 2),
6134 FORCE_CONST(i + 3));
6135 #undef FORCE_CONST
6136
6137 /* Now we can parse the first arg: */
6138 saved_input_pointer = input_line_pointer;
6139 input_line_pointer = first_arg;
6140 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6141 idesc->operands[0]);
6142 if (sep != '=')
6143 --num_outputs; /* force error */
6144 input_line_pointer = saved_input_pointer;
6145
6146 CURR_SLOT.opnd[i].X_add_number = sof;
6147 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6148 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6149 CURR_SLOT.opnd[i + 1].X_add_number
6150 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6151 else
6152 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6153 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6154 }
6155
6156 highest_unmatched_operand = -4;
6157 curr_out_of_range_pos = -1;
6158 error_pos = 0;
6159 for (; idesc; idesc = get_next_opcode (idesc))
6160 {
6161 if (num_outputs != idesc->num_outputs)
6162 continue; /* mismatch in # of outputs */
6163 if (highest_unmatched_operand < 0)
6164 highest_unmatched_operand |= 1;
6165 if (num_operands > NELEMS (idesc->operands)
6166 || (num_operands < NELEMS (idesc->operands)
6167 && idesc->operands[num_operands])
6168 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6169 continue; /* mismatch in number of arguments */
6170 if (highest_unmatched_operand < 0)
6171 highest_unmatched_operand |= 2;
6172
6173 CURR_SLOT.num_fixups = 0;
6174
6175 /* Try to match all operands. If we see an out-of-range operand,
6176 then continue trying to match the rest of the operands, since if
6177 the rest match, then this idesc will give the best error message. */
6178
6179 out_of_range_pos = -1;
6180 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6181 {
6182 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6183 if (result != OPERAND_MATCH)
6184 {
6185 if (result != OPERAND_OUT_OF_RANGE)
6186 break;
6187 if (out_of_range_pos < 0)
6188 /* remember position of the first out-of-range operand: */
6189 out_of_range_pos = i;
6190 }
6191 }
6192
6193 /* If we did not match all operands, or if at least one operand was
6194 out-of-range, then this idesc does not match. Keep track of which
6195 idesc matched the most operands before failing. If we have two
6196 idescs that failed at the same position, and one had an out-of-range
6197 operand, then prefer the out-of-range operand. Thus if we have
6198 "add r0=0x1000000,r1" we get an error saying the constant is out
6199 of range instead of an error saying that the constant should have been
6200 a register. */
6201
6202 if (i != num_operands || out_of_range_pos >= 0)
6203 {
6204 if (i > highest_unmatched_operand
6205 || (i == highest_unmatched_operand
6206 && out_of_range_pos > curr_out_of_range_pos))
6207 {
6208 highest_unmatched_operand = i;
6209 if (out_of_range_pos >= 0)
6210 {
6211 expected_operand = idesc->operands[out_of_range_pos];
6212 error_pos = out_of_range_pos;
6213 }
6214 else
6215 {
6216 expected_operand = idesc->operands[i];
6217 error_pos = i;
6218 }
6219 curr_out_of_range_pos = out_of_range_pos;
6220 }
6221 continue;
6222 }
6223
6224 break;
6225 }
6226 if (!idesc)
6227 {
6228 if (expected_operand)
6229 as_bad (_("Operand %u of `%s' should be %s"),
6230 error_pos + 1, mnemonic,
6231 elf64_ia64_operands[expected_operand].desc);
6232 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6233 as_bad (_("Wrong number of output operands"));
6234 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6235 as_bad (_("Wrong number of input operands"));
6236 else
6237 as_bad (_("Operand mismatch"));
6238 return 0;
6239 }
6240
6241 /* Check that the instruction doesn't use
6242 - r0, f0, or f1 as output operands
6243 - the same predicate twice as output operands
6244 - r0 as address of a base update load or store
6245 - the same GR as output and address of a base update load
6246 - two even- or two odd-numbered FRs as output operands of a floating
6247 point parallel load.
6248 At most two (conflicting) output (or output-like) operands can exist,
6249 (floating point parallel loads have three outputs, but the base register,
6250 if updated, cannot conflict with the actual outputs). */
6251 reg2 = reg1 = -1;
6252 for (i = 0; i < num_operands; ++i)
6253 {
6254 int regno = 0;
6255
6256 reg_class = 0;
6257 switch (idesc->operands[i])
6258 {
6259 case IA64_OPND_R1:
6260 case IA64_OPND_R2:
6261 case IA64_OPND_R3:
6262 if (i < num_outputs)
6263 {
6264 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6265 reg_class = 'r';
6266 else if (reg1 < 0)
6267 reg1 = CURR_SLOT.opnd[i].X_add_number;
6268 else if (reg2 < 0)
6269 reg2 = CURR_SLOT.opnd[i].X_add_number;
6270 }
6271 break;
6272 case IA64_OPND_P1:
6273 case IA64_OPND_P2:
6274 if (i < num_outputs)
6275 {
6276 if (reg1 < 0)
6277 reg1 = CURR_SLOT.opnd[i].X_add_number;
6278 else if (reg2 < 0)
6279 reg2 = CURR_SLOT.opnd[i].X_add_number;
6280 }
6281 break;
6282 case IA64_OPND_F1:
6283 case IA64_OPND_F2:
6284 case IA64_OPND_F3:
6285 case IA64_OPND_F4:
6286 if (i < num_outputs)
6287 {
6288 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6289 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6290 {
6291 reg_class = 'f';
6292 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6293 }
6294 else if (reg1 < 0)
6295 reg1 = CURR_SLOT.opnd[i].X_add_number;
6296 else if (reg2 < 0)
6297 reg2 = CURR_SLOT.opnd[i].X_add_number;
6298 }
6299 break;
6300 case IA64_OPND_MR3:
6301 if (idesc->flags & IA64_OPCODE_POSTINC)
6302 {
6303 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6304 reg_class = 'm';
6305 else if (reg1 < 0)
6306 reg1 = CURR_SLOT.opnd[i].X_add_number;
6307 else if (reg2 < 0)
6308 reg2 = CURR_SLOT.opnd[i].X_add_number;
6309 }
6310 break;
6311 default:
6312 break;
6313 }
6314 switch (reg_class)
6315 {
6316 case 0:
6317 break;
6318 default:
6319 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6320 break;
6321 case 'm':
6322 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6323 break;
6324 }
6325 }
6326 if (reg1 == reg2)
6327 {
6328 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6329 {
6330 reg1 -= REG_GR;
6331 reg_class = 'r';
6332 }
6333 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6334 {
6335 reg1 -= REG_P;
6336 reg_class = 'p';
6337 }
6338 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6339 {
6340 reg1 -= REG_FR;
6341 reg_class = 'f';
6342 }
6343 else
6344 reg_class = 0;
6345 if (reg_class)
6346 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6347 }
6348 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6349 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6350 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6351 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6352 && ! ((reg1 ^ reg2) & 1))
6353 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6354 reg1 - REG_FR, reg2 - REG_FR);
6355 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6356 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6357 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6358 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6359 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6360 reg1 - REG_FR, reg2 - REG_FR);
6361 return idesc;
6362 }
6363
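/* Encode the instruction described by SLOT into the 41-bit slot word(s) at
   INSNP.  For the long (X-unit) forms handled below (IMMU64, IMMU62, TGT64)
   the 41-bit immediate extension is written to INSNP[0] and the instruction
   word itself to INSNP[1]; all other forms write a single word.  */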
6364 static void
6365 build_insn (struct slot *slot, bfd_vma *insnp)
6366 {
6367 const struct ia64_operand *odesc, *o2desc;
6368 struct ia64_opcode *idesc = slot->idesc;
6369 bfd_vma insn;
6370 bfd_signed_vma val;
6371 const char *err;
6372 int i;
6373
6374 insn = idesc->opcode | slot->qp_regno;
6375
6376 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6377 {
6378 if (slot->opnd[i].X_op == O_register
6379 || slot->opnd[i].X_op == O_constant
6380 || slot->opnd[i].X_op == O_index)
6381 val = slot->opnd[i].X_add_number;
6382 else if (slot->opnd[i].X_op == O_big)
6383 {
6384 /* This must be the value 0x10000000000000000. */
6385 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6386 val = 0;
6387 }
6388 else
6389 val = 0;
6390
6391 switch (idesc->operands[i])
6392 {
6393 case IA64_OPND_IMMU64:
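/* movl's 64-bit immediate is split across the bundle: bits 22..62 go into
   the companion L slot (written through INSNP here), while bits 0..6,
   7..15, 16..20, 21 and 63 are packed into separate fields of the X-unit
   instruction word. */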
6394 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6395 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6396 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6397 | (((val >> 63) & 0x1) << 36));
6398 continue;
6399
6400 case IA64_OPND_IMMU62:
6401 val &= 0x3fffffffffffffffULL;
6402 if (val != slot->opnd[i].X_add_number)
6403 as_warn (_("Value truncated to 62 bits"));
6404 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6405 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6406 continue;
6407
6408 case IA64_OPND_TGT64:
6409 val >>= 4;
6410 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6411 insn |= ((((val >> 59) & 0x1) << 36)
6412 | (((val >> 0) & 0xfffff) << 13));
6413 continue;
6414
6415 case IA64_OPND_AR3:
6416 val -= REG_AR;
6417 break;
6418
6419 case IA64_OPND_B1:
6420 case IA64_OPND_B2:
6421 val -= REG_BR;
6422 break;
6423
6424 case IA64_OPND_CR3:
6425 val -= REG_CR;
6426 break;
6427
6428 case IA64_OPND_DAHR3:
6429 val -= REG_DAHR;
6430 break;
6431
6432 case IA64_OPND_F1:
6433 case IA64_OPND_F2:
6434 case IA64_OPND_F3:
6435 case IA64_OPND_F4:
6436 val -= REG_FR;
6437 break;
6438
6439 case IA64_OPND_P1:
6440 case IA64_OPND_P2:
6441 val -= REG_P;
6442 break;
6443
6444 case IA64_OPND_R1:
6445 case IA64_OPND_R2:
6446 case IA64_OPND_R3:
6447 case IA64_OPND_R3_2:
6448 case IA64_OPND_CPUID_R3:
6449 case IA64_OPND_DBR_R3:
6450 case IA64_OPND_DTR_R3:
6451 case IA64_OPND_ITR_R3:
6452 case IA64_OPND_IBR_R3:
6453 case IA64_OPND_MR3:
6454 case IA64_OPND_MSR_R3:
6455 case IA64_OPND_PKR_R3:
6456 case IA64_OPND_PMC_R3:
6457 case IA64_OPND_PMD_R3:
6458 case IA64_OPND_DAHR_R3:
6459 case IA64_OPND_RR_R3:
6460 val -= REG_GR;
6461 break;
6462
6463 default:
6464 break;
6465 }
6466
6467 odesc = elf64_ia64_operands + idesc->operands[i];
6468 err = (*odesc->insert) (odesc, val, &insn);
6469 if (err)
6470 as_bad_where (slot->src_file, slot->src_line,
6471 _("Bad operand value: %s"), err);
6472 if (idesc->flags & IA64_OPCODE_PSEUDO)
6473 {
6474 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6475 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6476 {
6477 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6478 (*o2desc->insert) (o2desc, val, &insn);
6479 }
6480 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6481 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6482 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6483 {
6484 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6485 (*o2desc->insert) (o2desc, 64 - val, &insn);
6486 }
6487 }
6488 }
6489 *insnp = insn;
6490 }
6491
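/* Assemble up to three queued slots into one 16-byte IA-64 bundle: choose a
   template (the user-specified one if any, otherwise the precomputed best
   match), fill unoccupied slots with NOPs of the required unit, resolve
   dynamic opcodes such as "nop" and "hint", and write the two 64-bit bundle
   halves out little-endian.  */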
6492 static void
6493 emit_one_bundle (void)
6494 {
6495 int manual_bundling_off = 0, manual_bundling = 0;
6496 enum ia64_unit required_unit, insn_unit = 0;
6497 enum ia64_insn_type type[3], insn_type;
6498 unsigned int template_val, orig_template;
6499 bfd_vma insn[3] = { -1, -1, -1 };
6500 struct ia64_opcode *idesc;
6501 int end_of_insn_group = 0, user_template = -1;
6502 int n, i, j, first, curr, last_slot;
6503 bfd_vma t0 = 0, t1 = 0;
6504 struct label_fix *lfix;
6505 bool mark_label;
6506 struct insn_fix *ifix;
6507 char mnemonic[16];
6508 fixS *fix;
6509 char *f;
6510 int addr_mod;
6511
6512 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6513 know (first >= 0 && first < NUM_SLOTS);
6514 n = MIN (3, md.num_slots_in_use);
6515
6516 /* Determine template: use user_template if specified, best match

6517 otherwise: */
6518
6519 if (md.slot[first].user_template >= 0)
6520 user_template = template_val = md.slot[first].user_template;
6521 else
6522 {
6523 /* Auto select appropriate template. */
6524 memset (type, 0, sizeof (type));
6525 curr = first;
6526 for (i = 0; i < n; ++i)
6527 {
6528 if (md.slot[curr].label_fixups && i != 0)
6529 break;
6530 type[i] = md.slot[curr].idesc->type;
6531 curr = (curr + 1) % NUM_SLOTS;
6532 }
6533 template_val = best_template[type[0]][type[1]][type[2]];
6534 }
6535
6536 /* initialize instructions with appropriate nops: */
6537 for (i = 0; i < 3; ++i)
6538 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6539
6540 f = frag_more (16);
6541
6542 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6543 from the start of the frag. */
6544 addr_mod = frag_now_fix () & 15;
6545 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6546 as_bad (_("instruction address is not a multiple of 16"));
6547 frag_now->insn_addr = addr_mod;
6548 frag_now->has_code = 1;
6549
6550 /* now fill in slots with as many insns as possible: */
6551 curr = first;
6552 idesc = md.slot[curr].idesc;
6553 end_of_insn_group = 0;
6554 last_slot = -1;
6555 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6556 {
6557 /* If we have unwind records, we may need to update some now. */
6558 unw_rec_list *ptr = md.slot[curr].unwind_record;
6559 unw_rec_list *end_ptr = NULL;
6560
6561 if (ptr)
6562 {
6563 /* Find the last prologue/body record in the list for the current
6564 insn, and set the slot number for all records up to that point.
6565 This needs to be done now, because prologue/body records refer to
6566 the current point, not the point after the instruction has been
6567 issued. This matters because there may have been nops emitted
6568 meanwhile. Any non-prologue non-body record followed by a
6569 prologue/body record must also refer to the current point. */
6570 unw_rec_list *last_ptr;
6571
6572 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6573 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6574 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6575 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6576 || ptr->r.type == body)
6577 last_ptr = ptr;
6578 if (last_ptr)
6579 {
6580 /* Make last_ptr point one after the last prologue/body
6581 record. */
6582 last_ptr = last_ptr->next;
6583 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6584 ptr = ptr->next)
6585 {
6586 ptr->slot_number = (unsigned long) f + i;
6587 ptr->slot_frag = frag_now;
6588 }
6589 /* Remove the initialized records, so that we won't accidentally
6590 update them again if we insert a nop and continue. */
6591 md.slot[curr].unwind_record = last_ptr;
6592 }
6593 }
6594
6595 manual_bundling_off = md.slot[curr].manual_bundling_off;
6596 if (md.slot[curr].manual_bundling_on)
6597 {
6598 if (curr == first)
6599 manual_bundling = 1;
6600 else
6601 break; /* Need to start a new bundle. */
6602 }
6603
6604 /* If this instruction specifies a template, then it must be the first
6605 instruction of a bundle. */
6606 if (curr != first && md.slot[curr].user_template >= 0)
6607 break;
6608
6609 if (idesc->flags & IA64_OPCODE_SLOT2)
6610 {
6611 if (manual_bundling && !manual_bundling_off)
6612 {
6613 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6614 _("`%s' must be last in bundle"), idesc->name);
6615 if (i < 2)
6616 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6617 }
6618 i = 2;
6619 }
6620 if (idesc->flags & IA64_OPCODE_LAST)
6621 {
6622 int required_slot;
6623 unsigned int required_template;
6624
6625 /* If we need a stop bit after an M slot, our only choice is
6626 template 5 (M;;MI). If we need a stop bit after a B
6627 slot, our only choice is to place it at the end of the
6628 bundle, because the only available templates are MIB,
6629 MBB, BBB, MMB, and MFB. We don't handle anything other
6630 than M and B slots because these are the only kind of
6631 instructions that can have the IA64_OPCODE_LAST bit set. */
6632 required_template = template_val;
6633 switch (idesc->type)
6634 {
6635 case IA64_TYPE_M:
6636 required_slot = 0;
6637 required_template = 5;
6638 break;
6639
6640 case IA64_TYPE_B:
6641 required_slot = 2;
6642 break;
6643
6644 default:
6645 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6646 _("Internal error: don't know how to force %s to end of instruction group"),
6647 idesc->name);
6648 required_slot = i;
6649 break;
6650 }
6651 if (manual_bundling
6652 && (i > required_slot
6653 || (required_slot == 2 && !manual_bundling_off)
6654 || (user_template >= 0
6655 /* Changing from MMI to M;MI is OK. */
6656 && (template_val ^ required_template) > 1)))
6657 {
6658 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6659 _("`%s' must be last in instruction group"),
6660 idesc->name);
6661 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6662 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6663 }
6664 if (required_slot < i)
6665 /* Can't fit this instruction. */
6666 break;
6667
6668 i = required_slot;
6669 if (required_template != template_val)
6670 {
6671 /* If we switch the template, we need to reset the NOPs
6672 after slot i. The slot-types of the instructions ahead
6673 of i never change, so we don't need to worry about
6674 changing NOPs in front of this slot. */
6675 for (j = i; j < 3; ++j)
6676 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6677
6678 /* We just picked a template that includes the stop bit in the
6679 middle, so we don't need another one emitted later. */
6680 md.slot[curr].end_of_insn_group = 0;
6681 }
6682 template_val = required_template;
6683 }
6684 if (curr != first && md.slot[curr].label_fixups)
6685 {
6686 if (manual_bundling)
6687 {
6688 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6689 _("Label must be first in a bundle"));
6690 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6691 }
6692 /* This insn must go into the first slot of a bundle. */
6693 break;
6694 }
6695
6696 if (end_of_insn_group && md.num_slots_in_use >= 1)
6697 {
6698 /* We need an instruction group boundary in the middle of a
6699 bundle. See if we can switch to another template with
6700 an appropriate boundary. */
6701
6702 orig_template = template_val;
6703 if (i == 1 && (user_template == 4
6704 || (user_template < 0
6705 && (ia64_templ_desc[template_val].exec_unit[0]
6706 == IA64_UNIT_M))))
6707 {
6708 template_val = 5;
6709 end_of_insn_group = 0;
6710 }
6711 else if (i == 2 && (user_template == 0
6712 || (user_template < 0
6713 && (ia64_templ_desc[template_val].exec_unit[1]
6714 == IA64_UNIT_I)))
6715 /* This test makes sure we don't switch the template if
6716 the next instruction is one that needs to be first in
6717 an instruction group. Since all those instructions are
6718 in the M group, there is no way such an instruction can
6719 fit in this bundle even if we switch the template. The
6720 reason we have to check for this is that otherwise we
6721 may end up generating "MI;;I M.." which has the deadly
6722 effect that the second M instruction is no longer the
6723 first in the group! --davidm 99/12/16 */
6724 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6725 {
6726 template_val = 1;
6727 end_of_insn_group = 0;
6728 }
6729 else if (i == 1
6730 && user_template == 0
6731 && !(idesc->flags & IA64_OPCODE_FIRST))
6732 /* Use the next slot. */
6733 continue;
6734 else if (curr != first)
6735 /* can't fit this insn */
6736 break;
6737
6738 if (template_val != orig_template)
6739 /* if we switch the template, we need to reset the NOPs
6740 after slot i. The slot-types of the instructions ahead
6741 of i never change, so we don't need to worry about
6742 changing NOPs in front of this slot. */
6743 for (j = i; j < 3; ++j)
6744 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6745 }
6746 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6747
6748 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6749 if (idesc->type == IA64_TYPE_DYN)
6750 {
6751 enum ia64_opnd opnd1, opnd2;
6752
6753 if ((strcmp (idesc->name, "nop") == 0)
6754 || (strcmp (idesc->name, "break") == 0))
6755 insn_unit = required_unit;
6756 else if (strcmp (idesc->name, "hint") == 0)
6757 {
6758 insn_unit = required_unit;
6759 if (required_unit == IA64_UNIT_B)
6760 {
6761 switch (md.hint_b)
6762 {
6763 case hint_b_ok:
6764 break;
6765 case hint_b_warning:
6766 as_warn (_("hint in B unit may be treated as nop"));
6767 break;
6768 case hint_b_error:
6769 /* When manual bundling is off and there is no
6770 user template, we choose a different unit so
6771 that hint won't go into the current slot. We
6772 will fill the current bundle with nops and
6773 try to put hint into the next bundle. */
6774 if (!manual_bundling && user_template < 0)
6775 insn_unit = IA64_UNIT_I;
6776 else
6777 as_bad (_("hint in B unit can't be used"));
6778 break;
6779 }
6780 }
6781 }
6782 else if (strcmp (idesc->name, "chk.s") == 0
6783 || strcmp (idesc->name, "mov") == 0)
6784 {
6785 insn_unit = IA64_UNIT_M;
6786 if (required_unit == IA64_UNIT_I
6787 || (required_unit == IA64_UNIT_F && template_val == 6))
6788 insn_unit = IA64_UNIT_I;
6789 }
6790 else
6791 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6792
6793 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6794 idesc->name, "?imbfxx"[insn_unit]);
6795 opnd1 = idesc->operands[0];
6796 opnd2 = idesc->operands[1];
6797 ia64_free_opcode (idesc);
6798 idesc = ia64_find_opcode (mnemonic);
6799 /* moves to/from ARs have collisions */
6800 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6801 {
6802 while (idesc != NULL
6803 && (idesc->operands[0] != opnd1
6804 || idesc->operands[1] != opnd2))
6805 idesc = get_next_opcode (idesc);
6806 }
6807 md.slot[curr].idesc = idesc;
6808 }
6809 else
6810 {
6811 insn_type = idesc->type;
6812 insn_unit = IA64_UNIT_NIL;
6813 switch (insn_type)
6814 {
6815 case IA64_TYPE_A:
6816 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6817 insn_unit = required_unit;
6818 break;
6819 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6820 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6821 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6822 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6823 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6824 default: break;
6825 }
6826 }
6827
6828 if (insn_unit != required_unit)
6829 continue; /* Try next slot. */
6830
6831 /* Now is a good time to fix up the labels for this insn. */
6832 mark_label = false;
6833 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6834 {
6835 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6836 symbol_set_frag (lfix->sym, frag_now);
6837 mark_label |= lfix->dw2_mark_labels;
6838 }
6839 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6840 {
6841 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6842 symbol_set_frag (lfix->sym, frag_now);
6843 }
6844
6845 if (debug_type == DEBUG_DWARF2
6846 || md.slot[curr].loc_directive_seen
6847 || mark_label)
6848 {
6849 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6850
6851 md.slot[curr].loc_directive_seen = 0;
6852 if (mark_label)
6853 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6854
6855 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6856 }
6857
6858 build_insn (md.slot + curr, insn + i);
6859
6860 ptr = md.slot[curr].unwind_record;
6861 if (ptr)
6862 {
6863 /* Set slot numbers for all remaining unwind records belonging to the
6864 current insn. There can not be any prologue/body unwind records
6865 here. */
6866 for (; ptr != end_ptr; ptr = ptr->next)
6867 {
6868 ptr->slot_number = (unsigned long) f + i;
6869 ptr->slot_frag = frag_now;
6870 }
6871 md.slot[curr].unwind_record = NULL;
6872 }
6873
6874 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6875 {
6876 unsigned long where;
6877
6878 ifix = md.slot[curr].fixup + j;
6879 where = frag_now_fix () - 16 + i;
6880 #ifdef TE_HPUX
6881 /* Relocations for instructions specify the slot in the
6882 bottom two bits of r_offset. The IA64 HP-UX linker
6883 expects PCREL60B relocations to specify slot 2 of an
6884 instruction. gas generates PCREL60B against slot 1. */
6885 if (ifix->code == BFD_RELOC_IA64_PCREL60B)
6886 {
6887 know (i == 1);
6888 ++where;
6889 }
6890 #endif
6891
6892 fix = fix_new_exp (frag_now, where, 8,
6893 &ifix->expr, ifix->is_pcrel, ifix->code);
6894 fix->tc_fix_data.opnd = ifix->opnd;
6895 fix->fx_file = md.slot[curr].src_file;
6896 fix->fx_line = md.slot[curr].src_line;
6897 }
6898
6899 end_of_insn_group = md.slot[curr].end_of_insn_group;
6900
6901 /* This adjustment to "i" must occur after the fix, otherwise the fix
6902 is assigned to the wrong slot, and the VMS linker complains. */
6903 if (required_unit == IA64_UNIT_L)
6904 {
6905 know (i == 1);
6906 /* skip one slot for long/X-unit instructions */
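/* In an MLX template the immediate extension occupies slot 1 and the
   X-unit instruction itself slot 2 (cf. build_insn), so the insn
   effectively consumes both remaining slots.  */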
6907 ++i;
6908 }
6909 --md.num_slots_in_use;
6910 last_slot = i;
6911
6912 /* clear slot: */
6913 ia64_free_opcode (md.slot[curr].idesc);
6914 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6915 md.slot[curr].user_template = -1;
6916
6917 if (manual_bundling_off)
6918 {
6919 manual_bundling = 0;
6920 break;
6921 }
6922 curr = (curr + 1) % NUM_SLOTS;
6923 idesc = md.slot[curr].idesc;
6924 }
6925
6926 /* A user template was specified, but the first following instruction did
6927 not fit. This can happen with or without manual bundling. */
6928 if (md.num_slots_in_use > 0 && last_slot < 0)
6929 {
6930 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6931 _("`%s' does not fit into %s template"),
6932 idesc->name, ia64_templ_desc[template_val].name);
6933 /* Drop first insn so we don't livelock. */
6934 --md.num_slots_in_use;
6935 know (curr == first);
6936 ia64_free_opcode (md.slot[curr].idesc);
6937 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6938 md.slot[curr].user_template = -1;
6939 }
6940 else if (manual_bundling > 0)
6941 {
6942 if (md.num_slots_in_use > 0)
6943 {
6944 if (last_slot >= 2)
6945 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6946 _("`%s' does not fit into bundle"), idesc->name);
6947 else
6948 {
6949 const char *where;
6950
6951 if (template_val == 2)
6952 where = "X slot";
6953 else if (last_slot == 0)
6954 where = "slots 2 or 3";
6955 else
6956 where = "slot 3";
6957 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6958 _("`%s' can't go in %s of %s template"),
6959 idesc->name, where, ia64_templ_desc[template_val].name);
6960 }
6961 }
6962 else
6963 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6964 _("Missing '}' at end of file"));
6965 }
6966
6967 know (md.num_slots_in_use < NUM_SLOTS);
6968
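/* An IA-64 bundle is 128 bits: a 5-bit template field in bits 0..4
   (template_val selects the template pair, the low bit is the trailing
   stop bit) followed by three 41-bit instruction slots in bits 5..45,
   46..86 and 87..127.  t0 and t1 hold the low and high halves.  */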
6969 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6970 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6971
6972 number_to_chars_littleendian (f + 0, t0, 8);
6973 number_to_chars_littleendian (f + 8, t1, 8);
6974 }
6975
6976 int
6977 md_parse_option (int c, const char *arg)
6978 {
6979
6980 switch (c)
6981 {
6982 /* Switches from the Intel assembler. */
6983 case 'm':
6984 if (strcmp (arg, "ilp64") == 0
6985 || strcmp (arg, "lp64") == 0
6986 || strcmp (arg, "p64") == 0)
6987 {
6988 md.flags |= EF_IA_64_ABI64;
6989 }
6990 else if (strcmp (arg, "ilp32") == 0)
6991 {
6992 md.flags &= ~EF_IA_64_ABI64;
6993 }
6994 else if (strcmp (arg, "le") == 0)
6995 {
6996 md.flags &= ~EF_IA_64_BE;
6997 default_big_endian = 0;
6998 }
6999 else if (strcmp (arg, "be") == 0)
7000 {
7001 md.flags |= EF_IA_64_BE;
7002 default_big_endian = 1;
7003 }
7004 else if (startswith (arg, "unwind-check="))
7005 {
7006 arg += 13;
7007 if (strcmp (arg, "warning") == 0)
7008 md.unwind_check = unwind_check_warning;
7009 else if (strcmp (arg, "error") == 0)
7010 md.unwind_check = unwind_check_error;
7011 else
7012 return 0;
7013 }
7014 else if (startswith (arg, "hint.b="))
7015 {
7016 arg += 7;
7017 if (strcmp (arg, "ok") == 0)
7018 md.hint_b = hint_b_ok;
7019 else if (strcmp (arg, "warning") == 0)
7020 md.hint_b = hint_b_warning;
7021 else if (strcmp (arg, "error") == 0)
7022 md.hint_b = hint_b_error;
7023 else
7024 return 0;
7025 }
7026 else if (startswith (arg, "tune="))
7027 {
7028 arg += 5;
7029 if (strcmp (arg, "itanium1") == 0)
7030 md.tune = itanium1;
7031 else if (strcmp (arg, "itanium2") == 0)
7032 md.tune = itanium2;
7033 else
7034 return 0;
7035 }
7036 else
7037 return 0;
7038 break;
7039
7040 case 'N':
7041 if (strcmp (arg, "so") == 0)
7042 {
7043 /* Suppress signon message. */
7044 }
7045 else if (strcmp (arg, "pi") == 0)
7046 {
7047 /* Reject privileged instructions. FIXME */
7048 }
7049 else if (strcmp (arg, "us") == 0)
7050 {
7051 /* Allow union of signed and unsigned range. FIXME */
7052 }
7053 else if (strcmp (arg, "close_fcalls") == 0)
7054 {
7055 /* Do not resolve global function calls. */
7056 }
7057 else
7058 return 0;
7059 break;
7060
7061 case 'C':
7062 /* temp[="prefix"] Insert temporary labels into the object file
7063 symbol table prefixed by "prefix".
7064 Default prefix is ":temp:".
7065 */
7066 break;
7067
7068 case 'a':
7069 /* indirect=<tgt> Assume unannotated indirect branches behavior
7070 according to <tgt> --
7071 exit: branch out from the current context (default)
7072 labels: all labels in context may be branch targets
7073 */
7074 if (!startswith (arg, "indirect="))
7075 return 0;
7076 break;
7077
7078 case 'x':
7079 /* -X conflicts with an ignored option, use -x instead */
7080 md.detect_dv = 1;
7081 if (!arg || strcmp (arg, "explicit") == 0)
7082 {
7083 /* set default mode to explicit */
7084 md.default_explicit_mode = 1;
7085 break;
7086 }
7087 else if (strcmp (arg, "auto") == 0)
7088 {
7089 md.default_explicit_mode = 0;
7090 }
7091 else if (strcmp (arg, "none") == 0)
7092 {
7093 md.detect_dv = 0;
7094 }
7095 else if (strcmp (arg, "debug") == 0)
7096 {
7097 md.debug_dv = 1;
7098 }
7099 else if (strcmp (arg, "debugx") == 0)
7100 {
7101 md.default_explicit_mode = 1;
7102 md.debug_dv = 1;
7103 }
7104 else if (strcmp (arg, "debugn") == 0)
7105 {
7106 md.debug_dv = 1;
7107 md.detect_dv = 0;
7108 }
7109 else
7110 {
7111 as_bad (_("Unrecognized option '-x%s'"), arg);
7112 }
7113 break;
7114
7115 case 'S':
7116 /* nops Print nops statistics. */
7117 break;
7118
7119 /* GNU specific switches for gcc. */
7120 case OPTION_MCONSTANT_GP:
7121 md.flags |= EF_IA_64_CONS_GP;
7122 break;
7123
7124 case OPTION_MAUTO_PIC:
7125 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7126 break;
7127
7128 default:
7129 return 0;
7130 }
7131
7132 return 1;
7133 }
7134
7135 void
7136 md_show_usage (FILE *stream)
7137 {
7138 fputs (_("\
7139 IA-64 options:\n\
7140 --mconstant-gp mark output file as using the constant-GP model\n\
7141 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7142 --mauto-pic mark output file as using the constant-GP model\n\
7143 without function descriptors (sets ELF header flag\n\
7144 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7145 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7146 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7147 -mtune=[itanium1|itanium2]\n\
7148 tune for a specific CPU (default -mtune=itanium2)\n\
7149 -munwind-check=[warning|error]\n\
7150 unwind directive check (default -munwind-check=warning)\n\
7151 -mhint.b=[ok|warning|error]\n\
7152 hint.b check (default -mhint.b=error)\n\
7153 -x | -xexplicit turn on dependency violation checking\n"), stream);
7154 /* Note for translators: "automagically" can be translated as "automatically" here. */
7155 fputs (_("\
7156 -xauto automagically remove dependency violations (default)\n\
7157 -xnone turn off dependency violation checking\n\
7158 -xdebug debug dependency violation checker\n\
7159 -xdebugn debug dependency violation checker but turn off\n\
7160 dependency violation checking\n\
7161 -xdebugx debug dependency violation checker and turn on\n\
7162 dependency violation checking\n"),
7163 stream);
7164 }
7165
7166 void
7167 ia64_after_parse_args (void)
7168 {
7169 if (debug_type == DEBUG_STABS)
7170 as_fatal (_("--gstabs is not supported for ia64"));
7171 }
7172
7173 /* Return true if TYPE fits in TEMPL at SLOT. */
7174
7175 static int
7176 match (int templ, int type, int slot)
7177 {
7178 enum ia64_unit unit;
7179 int result;
7180
7181 unit = ia64_templ_desc[templ].exec_unit[slot];
7182 switch (type)
7183 {
7184 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7185 case IA64_TYPE_A:
7186 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7187 break;
7188 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7189 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7190 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7191 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7192 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7193 default: result = 0; break;
7194 }
7195 return result;
7196 }
7197
7198 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7199 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7200 type M or I would fit in TEMPL at SLOT. */
7201
7202 static inline int
7203 extra_goodness (int templ, int slot)
7204 {
7205 switch (md.tune)
7206 {
7207 case itanium1:
7208 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7209 return 2;
7210 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7211 return 1;
7212 else
7213 return 0;
7214 break;
7215 case itanium2:
7216 if (match (templ, IA64_TYPE_M, slot)
7217 || match (templ, IA64_TYPE_I, slot))
7218 /* Favor M- and I-unit NOPs. We definitely want to avoid
7219 F-unit and B-unit NOPs, which may cause split-issue or
7220 less-than-optimal branch-prediction. */
7221 return 2;
7222 else
7223 return 0;
7224 break;
7225 default:
7226 abort ();
7227 return 0;
7228 }
7229 }
7230
7231 /* This function is called once, at assembler startup time. It sets
7232 up all the tables, etc. that the MD part of the assembler will need
7233 that can be determined before arguments are parsed. */
7234 void
7235 md_begin (void)
7236 {
7237 int i, j, k, t, goodness, best, ok;
7238
7239 md.auto_align = 1;
7240 md.explicit_mode = md.default_explicit_mode;
7241
7242 bfd_set_section_alignment (text_section, 4);
7243
7244 /* Make sure function pointers get initialized. */
7245 target_big_endian = -1;
7246 dot_byteorder (default_big_endian);
7247
7248 alias_hash = str_htab_create ();
7249 alias_name_hash = str_htab_create ();
7250 secalias_hash = str_htab_create ();
7251 secalias_name_hash = str_htab_create ();
7252
7253 pseudo_func[FUNC_DTP_MODULE].u.sym =
7254 symbol_new (".<dtpmod>", undefined_section,
7255 &zero_address_frag, FUNC_DTP_MODULE);
7256
7257 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7258 symbol_new (".<dtprel>", undefined_section,
7259 &zero_address_frag, FUNC_DTP_RELATIVE);
7260
7261 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7262 symbol_new (".<fptr>", undefined_section,
7263 &zero_address_frag, FUNC_FPTR_RELATIVE);
7264
7265 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7266 symbol_new (".<gprel>", undefined_section,
7267 &zero_address_frag, FUNC_GP_RELATIVE);
7268
7269 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7270 symbol_new (".<ltoff>", undefined_section,
7271 &zero_address_frag, FUNC_LT_RELATIVE);
7272
7273 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7274 symbol_new (".<ltoffx>", undefined_section,
7275 &zero_address_frag, FUNC_LT_RELATIVE_X);
7276
7277 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7278 symbol_new (".<pcrel>", undefined_section,
7279 &zero_address_frag, FUNC_PC_RELATIVE);
7280
7281 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7282 symbol_new (".<pltoff>", undefined_section,
7283 &zero_address_frag, FUNC_PLT_RELATIVE);
7284
7285 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7286 symbol_new (".<secrel>", undefined_section,
7287 &zero_address_frag, FUNC_SEC_RELATIVE);
7288
7289 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7290 symbol_new (".<segrel>", undefined_section,
7291 &zero_address_frag, FUNC_SEG_RELATIVE);
7292
7293 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7294 symbol_new (".<tprel>", undefined_section,
7295 &zero_address_frag, FUNC_TP_RELATIVE);
7296
7297 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7298 symbol_new (".<ltv>", undefined_section,
7299 &zero_address_frag, FUNC_LTV_RELATIVE);
7300
7301 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7302 symbol_new (".<ltoff.fptr>", undefined_section,
7303 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7304
7305 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7306 symbol_new (".<ltoff.dtpmod>", undefined_section,
7307 &zero_address_frag, FUNC_LT_DTP_MODULE);
7308
7309 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7310 symbol_new (".<ltoff.dptrel>", undefined_section,
7311 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7312
7313 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7314 symbol_new (".<ltoff.tprel>", undefined_section,
7315 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7316
7317 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7318 symbol_new (".<iplt>", undefined_section,
7319 &zero_address_frag, FUNC_IPLT_RELOC);
7320
7321 #ifdef TE_VMS
7322 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7323 symbol_new (".<slotcount>", undefined_section,
7324 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7325 #endif
7326
7327 if (md.tune != itanium1)
7328 {
7329 /* Convert MFI NOP bundles into MMI NOP bundles. */
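/* Byte 0 of a little-endian bundle holds the 5-bit template in its low
   bits; 0x8/0x9 are the MMI templates without/with a trailing stop,
   replacing the default MFI (0xc/0xd) NOP bundles.  */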
7330 le_nop[0] = 0x8;
7331 le_nop_stop[0] = 0x9;
7332 }
7333
7334 /* Compute the table of best templates. We compute goodness as a
7335 base 4 value, in which each match counts for 3. Match-failures
7336 result in NOPs and we use extra_goodness() to pick the execution
7337 units that are best suited for issuing the NOP. */
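/* For example, the slot-type triple (M, I, I) matches template MII in all
   three slots and scores 3 + 3 + 3 = 9, whereas a triple that needs a NOP
   in one slot scores at most 3 + 3 plus whatever extra_goodness() awards
   for the unit that would issue the NOP.  */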
7338 for (i = 0; i < IA64_NUM_TYPES; ++i)
7339 for (j = 0; j < IA64_NUM_TYPES; ++j)
7340 for (k = 0; k < IA64_NUM_TYPES; ++k)
7341 {
7342 best = 0;
7343 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7344 {
7345 goodness = 0;
7346 if (match (t, i, 0))
7347 {
7348 if (match (t, j, 1))
7349 {
7350 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7351 goodness = 3 + 3 + 3;
7352 else
7353 goodness = 3 + 3 + extra_goodness (t, 2);
7354 }
7355 else if (match (t, j, 2))
7356 goodness = 3 + 3 + extra_goodness (t, 1);
7357 else
7358 {
7359 goodness = 3;
7360 goodness += extra_goodness (t, 1);
7361 goodness += extra_goodness (t, 2);
7362 }
7363 }
7364 else if (match (t, i, 1))
7365 {
7366 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7367 goodness = 3 + 3;
7368 else
7369 goodness = 3 + extra_goodness (t, 2);
7370 }
7371 else if (match (t, i, 2))
7372 goodness = 3 + extra_goodness (t, 1);
7373
7374 if (goodness > best)
7375 {
7376 best = goodness;
7377 best_template[i][j][k] = t;
7378 }
7379 }
7380 }
7381
7382 #ifdef DEBUG_TEMPLATES
7383 /* For debugging changes to the best_template calculations. We don't care
7384 about combinations with invalid instructions, so start the loops at 1. */
7385 for (i = 0; i < IA64_NUM_TYPES; ++i)
7386 for (j = 0; j < IA64_NUM_TYPES; ++j)
7387 for (k = 0; k < IA64_NUM_TYPES; ++k)
7388 {
7389 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7390 'x', 'd' };
7391 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7392 type_letter[k],
7393 ia64_templ_desc[best_template[i][j][k]].name);
7394 }
7395 #endif
7396
7397 for (i = 0; i < NUM_SLOTS; ++i)
7398 md.slot[i].user_template = -1;
7399
7400 md.pseudo_hash = str_htab_create ();
7401 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7402 if (str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7403 pseudo_opcode + i, 0) != NULL)
7404 as_fatal (_("duplicate %s"), pseudo_opcode[i].name);
7405
7406 md.reg_hash = str_htab_create ();
7407 md.dynreg_hash = str_htab_create ();
7408 md.const_hash = str_htab_create ();
7409 md.entry_hash = str_htab_create ();
7410
7411 /* general registers: */
7412 declare_register_set ("r", 128, REG_GR);
7413 declare_register ("gp", REG_GR + 1);
7414 declare_register ("sp", REG_GR + 12);
7415 declare_register ("tp", REG_GR + 13);
7416 declare_register_set ("ret", 4, REG_GR + 8);
7417
7418 /* floating point registers: */
7419 declare_register_set ("f", 128, REG_FR);
7420 declare_register_set ("farg", 8, REG_FR + 8);
7421 declare_register_set ("fret", 8, REG_FR + 8);
7422
7423 /* branch registers: */
7424 declare_register_set ("b", 8, REG_BR);
7425 declare_register ("rp", REG_BR + 0);
7426
7427 /* predicate registers: */
7428 declare_register_set ("p", 64, REG_P);
7429 declare_register ("pr", REG_PR);
7430 declare_register ("pr.rot", REG_PR_ROT);
7431
7432 /* application registers: */
7433 declare_register_set ("ar", 128, REG_AR);
7434 for (i = 0; i < NELEMS (ar); ++i)
7435 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7436
7437 /* control registers: */
7438 declare_register_set ("cr", 128, REG_CR);
7439 for (i = 0; i < NELEMS (cr); ++i)
7440 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7441
7442 /* dahr registers: */
7443 declare_register_set ("dahr", 8, REG_DAHR);
7444
7445 declare_register ("ip", REG_IP);
7446 declare_register ("cfm", REG_CFM);
7447 declare_register ("psr", REG_PSR);
7448 declare_register ("psr.l", REG_PSR_L);
7449 declare_register ("psr.um", REG_PSR_UM);
7450
7451 for (i = 0; i < NELEMS (indirect_reg); ++i)
7452 {
7453 unsigned int regnum = indirect_reg[i].regnum;
7454
7455 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7456 }
7457
7458 /* pseudo-registers used to specify unwind info: */
7459 declare_register ("psp", REG_PSP);
7460
7461 for (i = 0; i < NELEMS (const_bits); ++i)
7462 if (str_hash_insert (md.const_hash, const_bits[i].name, const_bits + i, 0))
7463 as_fatal (_("duplicate %s"), const_bits[i].name);
7464
7465 /* Set the architecture and machine depending on defaults and command line
7466 options. */
7467 if (md.flags & EF_IA_64_ABI64)
7468 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7469 else
7470 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7471
7472 if (! ok)
7473 as_warn (_("Could not set architecture and machine"));
7474
7475 /* Set the pointer size and pointer shift size depending on md.flags */
7476
7477 if (md.flags & EF_IA_64_ABI64)
7478 {
7479 md.pointer_size = 8; /* pointers are 8 bytes */
7480 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7481 }
7482 else
7483 {
7484 md.pointer_size = 4; /* pointers are 4 bytes */
7485 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7486 }
7487
7488 md.mem_offset.hint = 0;
7489 md.path = 0;
7490 md.maxpaths = 0;
7491 md.entry_labels = NULL;
7492 }
7493
7494 /* Set the default options in md. Cannot do this in md_begin because
7495 that is called after md_parse_option which is where we set the
7496 options in md based on command line options. */
7497
7498 void
7499 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7500 {
7501 md.flags = MD_FLAGS_DEFAULT;
7502 #ifndef TE_VMS
7503 /* Don't turn on dependency checking for VMS; it doesn't work. */
7504 md.detect_dv = 1;
7505 #endif
7506 /* FIXME: We should change it to unwind_check_error someday. */
7507 md.unwind_check = unwind_check_warning;
7508 md.hint_b = hint_b_error;
7509 md.tune = itanium2;
7510 }
7511
7512 /* Return a string for the target object file format. */
7513
7514 const char *
7515 ia64_target_format (void)
7516 {
7517 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7518 {
7519 if (md.flags & EF_IA_64_BE)
7520 {
7521 if (md.flags & EF_IA_64_ABI64)
7522 #if defined(TE_AIX50)
7523 return "elf64-ia64-aix-big";
7524 #elif defined(TE_HPUX)
7525 return "elf64-ia64-hpux-big";
7526 #else
7527 return "elf64-ia64-big";
7528 #endif
7529 else
7530 #if defined(TE_AIX50)
7531 return "elf32-ia64-aix-big";
7532 #elif defined(TE_HPUX)
7533 return "elf32-ia64-hpux-big";
7534 #else
7535 return "elf32-ia64-big";
7536 #endif
7537 }
7538 else
7539 {
7540 if (md.flags & EF_IA_64_ABI64)
7541 #if defined (TE_AIX50)
7542 return "elf64-ia64-aix-little";
7543 #elif defined (TE_VMS)
7544 {
7545 md.flags |= EF_IA_64_ARCHVER_1;
7546 return "elf64-ia64-vms";
7547 }
7548 #else
7549 return "elf64-ia64-little";
7550 #endif
7551 else
7552 #ifdef TE_AIX50
7553 return "elf32-ia64-aix-little";
7554 #else
7555 return "elf32-ia64-little";
7556 #endif
7557 }
7558 }
7559 else
7560 return "unknown-format";
7561 }
7562
7563 void
7564 ia64_md_finish (void)
7565 {
7566 /* terminate insn group upon reaching end of file: */
7567 insn_group_break (1, 0, 0);
7568
7569 /* emit slots we haven't written yet: */
7570 ia64_flush_insns ();
7571
7572 bfd_set_private_flags (stdoutput, md.flags);
7573
7574 md.mem_offset.hint = 0;
7575 }
7576
7577 void
7578 ia64_start_line (void)
7579 {
7580 static int first;
7581
7582 if (!first) {
7583 /* Make sure we don't reference input_line_pointer[-1] when that's
7584 not valid. */
7585 first = 1;
7586 return;
7587 }
7588
7589 if (md.qp.X_op == O_register)
7590 as_bad (_("qualifying predicate not followed by instruction"));
7591 md.qp.X_op = O_absent;
7592
7593 if (ignore_input ())
7594 return;
7595
7596 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7597 {
7598 if (md.detect_dv && !md.explicit_mode)
7599 {
7600 static int warned;
7601
7602 if (!warned)
7603 {
7604 warned = 1;
7605 as_warn (_("Explicit stops are ignored in auto mode"));
7606 }
7607 }
7608 else
7609 insn_group_break (1, 0, 0);
7610 }
7611 else if (input_line_pointer[-1] == '{')
7612 {
7613 if (md.manual_bundling)
7614 as_warn (_("Found '{' when manual bundling is already turned on"));
7615 else
7616 CURR_SLOT.manual_bundling_on = 1;
7617 md.manual_bundling = 1;
7618
7619 /* Bundling is only acceptable in explicit mode
7620 or when in default automatic mode. */
7621 if (md.detect_dv && !md.explicit_mode)
7622 {
7623 if (!md.mode_explicitly_set
7624 && !md.default_explicit_mode)
7625 dot_dv_mode ('E');
7626 else
7627 as_warn (_("Found '{' after explicit switch to automatic mode"));
7628 }
7629 }
7630 else if (input_line_pointer[-1] == '}')
7631 {
7632 if (!md.manual_bundling)
7633 as_warn (_("Found '}' when manual bundling is off"));
7634 else
7635 PREV_SLOT.manual_bundling_off = 1;
7636 md.manual_bundling = 0;
7637
7638 /* switch back to automatic mode, if applicable */
7639 if (md.detect_dv
7640 && md.explicit_mode
7641 && !md.mode_explicitly_set
7642 && !md.default_explicit_mode)
7643 dot_dv_mode ('A');
7644 }
7645 }
7646
7647 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7648 labels. */
7649 static int defining_tag = 0;
7650
7651 int
7652 ia64_unrecognized_line (int ch)
7653 {
7654 switch (ch)
7655 {
7656 case '(':
7657 expression_and_evaluate (&md.qp);
7658 if (*input_line_pointer++ != ')')
7659 {
7660 as_bad (_("Expected ')'"));
7661 return 0;
7662 }
7663 if (md.qp.X_op != O_register)
7664 {
7665 as_bad (_("Qualifying predicate expected"));
7666 return 0;
7667 }
7668 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7669 {
7670 as_bad (_("Predicate register expected"));
7671 return 0;
7672 }
7673 return 1;
7674
7675 case '[':
7676 {
7677 char *s;
7678 char c;
7679 symbolS *tag;
7680 int temp;
7681
7682 if (md.qp.X_op == O_register)
7683 {
7684 as_bad (_("Tag must come before qualifying predicate."));
7685 return 0;
7686 }
7687
7688 /* This implements just enough of read_a_source_file in read.c to
7689 recognize labels. */
7690 if (is_name_beginner (*input_line_pointer))
7691 {
7692 c = get_symbol_name (&s);
7693 }
7694 else if (LOCAL_LABELS_FB
7695 && ISDIGIT (*input_line_pointer))
7696 {
7697 temp = 0;
7698 while (ISDIGIT (*input_line_pointer))
7699 temp = (temp * 10) + *input_line_pointer++ - '0';
7700 fb_label_instance_inc (temp);
7701 s = fb_label_name (temp, 0);
7702 c = *input_line_pointer;
7703 }
7704 else
7705 {
7706 s = NULL;
7707 c = '\0';
7708 }
7709 if (c != ':')
7710 {
7711 /* Put ':' back for error messages' sake. */
7712 *input_line_pointer++ = ':';
7713 as_bad (_("Expected ':'"));
7714 return 0;
7715 }
7716
7717 defining_tag = 1;
7718 tag = colon (s);
7719 defining_tag = 0;
7720 /* Put ':' back for error messages' sake. */
7721 *input_line_pointer++ = ':';
7722 if (*input_line_pointer++ != ']')
7723 {
7724 as_bad (_("Expected ']'"));
7725 return 0;
7726 }
7727 if (! tag)
7728 {
7729 as_bad (_("Tag name expected"));
7730 return 0;
7731 }
7732 return 1;
7733 }
7734
7735 default:
7736 break;
7737 }
7738
7739 /* Not a valid line. */
7740 return 0;
7741 }
7742
7743 void
7744 ia64_frob_label (struct symbol *sym)
7745 {
7746 struct label_fix *fix;
7747
7748 /* Tags need special handling since they are not bundle breaks like
7749 labels. */
7750 if (defining_tag)
7751 {
7752 fix = XOBNEW (&notes, struct label_fix);
7753 fix->sym = sym;
7754 fix->next = CURR_SLOT.tag_fixups;
7755 fix->dw2_mark_labels = false;
7756 CURR_SLOT.tag_fixups = fix;
7757
7758 return;
7759 }
7760
7761 if (bfd_section_flags (now_seg) & SEC_CODE)
7762 {
7763 md.last_text_seg = now_seg;
7764 md.last_text_subseg = now_subseg;
7765 fix = XOBNEW (&notes, struct label_fix);
7766 fix->sym = sym;
7767 fix->next = CURR_SLOT.label_fixups;
7768 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7769 CURR_SLOT.label_fixups = fix;
7770
7771 /* Keep track of how many code entry points we've seen. */
7772 if (md.path == md.maxpaths)
7773 {
7774 md.maxpaths += 20;
7775 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7776 md.maxpaths);
7777 }
7778 md.entry_labels[md.path++] = S_GET_NAME (sym);
7779 }
7780 }
7781
7782 #ifdef TE_HPUX
7783 /* The HP-UX linker will give unresolved symbol errors for symbols
7784 that are declared but unused. This routine removes declared,
7785 unused symbols from an object. */
7786 int
7787 ia64_frob_symbol (struct symbol *sym)
7788 {
7789 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7790 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7791 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7792 && ! S_IS_EXTERNAL (sym)))
7793 return 1;
7794 return 0;
7795 }
7796 #endif
7797
7798 void
7799 ia64_flush_pending_output (void)
7800 {
7801 if (!md.keep_pending_output
7802 && bfd_section_flags (now_seg) & SEC_CODE)
7803 {
7804 /* ??? This causes many unnecessary stop bits to be emitted.
7805 Unfortunately, it isn't clear if it is safe to remove this. */
7806 insn_group_break (1, 0, 0);
7807 ia64_flush_insns ();
7808 }
7809 }
7810
7811 /* Do ia64-specific expression optimization. All that's done here is
7812 to transform index expressions that are either due to the indexing
7813 of rotating registers or due to the indexing of indirect register
7814 sets. */
7815 int
7816 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7817 {
7818 if (op != O_index)
7819 return 0;
7820 resolve_expression (l);
7821 if (l->X_op == O_register)
7822 {
7823 unsigned num_regs = l->X_add_number >> 16;
7824
7825 resolve_expression (r);
7826 if (num_regs)
7827 {
7828 /* Left side is a .rotX-allocated register. */
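/* For example, after `.rotr t[4]' the name `t' resolves (via
   ia64_parse_name) to base | (4 << 16), so `t[2]' becomes base + 2.  */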
7829 if (r->X_op != O_constant)
7830 {
7831 as_bad (_("Rotating register index must be a non-negative constant"));
7832 r->X_add_number = 0;
7833 }
7834 else if ((valueT) r->X_add_number >= num_regs)
7835 {
7836 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7837 r->X_add_number = 0;
7838 }
7839 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7840 return 1;
7841 }
7842 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7843 {
7844 if (r->X_op != O_register
7845 || r->X_add_number < REG_GR
7846 || r->X_add_number > REG_GR + 127)
7847 {
7848 as_bad (_("Indirect register index must be a general register"));
7849 r->X_add_number = REG_GR;
7850 }
7851 l->X_op = O_index;
7852 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7853 l->X_add_number = r->X_add_number;
7854 return 1;
7855 }
7856 }
7857 as_bad (_("Index can only be applied to rotating or indirect registers"));
7858 /* Fall back to a register whose use has as few side effects as
7859 possible, to minimize subsequent error messages. */
7860 l->X_op = O_register;
7861 l->X_add_number = REG_GR + 3;
7862 return 1;
7863 }
7864
7865 int
7866 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7867 {
7868 struct const_desc *cdesc;
7869 struct dynreg *dr = 0;
7870 unsigned int idx;
7871 struct symbol *sym;
7872 char *end;
7873
7874 if (*name == '@')
7875 {
7876 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7877
7878 /* Find what relocation pseudo-function we're dealing with. */
7879 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7880 if (pseudo_func[idx].name
7881 && pseudo_func[idx].name[0] == name[1]
7882 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7883 {
7884 pseudo_type = pseudo_func[idx].type;
7885 break;
7886 }
7887 switch (pseudo_type)
7888 {
7889 case PSEUDO_FUNC_RELOC:
7890 end = input_line_pointer;
7891 if (*nextcharP != '(')
7892 {
7893 as_bad (_("Expected '('"));
7894 break;
7895 }
7896 /* Skip '('. */
7897 ++input_line_pointer;
7898 expression (e);
7899 if (*input_line_pointer != ')')
7900 {
7901 as_bad (_("Missing ')'"));
7902 goto done;
7903 }
7904 /* Skip ')'. */
7905 ++input_line_pointer;
7906 #ifdef TE_VMS
7907 if (idx == FUNC_SLOTCOUNT_RELOC)
7908 {
7909 /* @slotcount can accept any expression. Canonicalize. */
7910 e->X_add_symbol = make_expr_symbol (e);
7911 e->X_op = O_symbol;
7912 e->X_add_number = 0;
7913 }
7914 #endif
7915 if (e->X_op != O_symbol)
7916 {
7917 if (e->X_op != O_pseudo_fixup)
7918 {
7919 as_bad (_("Not a symbolic expression"));
7920 goto done;
7921 }
7922 if (idx != FUNC_LT_RELATIVE)
7923 {
7924 as_bad (_("Illegal combination of relocation functions"));
7925 goto done;
7926 }
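/* Only @ltoff() may wrap another relocation function; fold pairs such as
   @ltoff(@fptr(sym)) or @ltoff(@tprel(sym)) into the combined
   pseudo-function below.  */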
7927 switch (S_GET_VALUE (e->X_op_symbol))
7928 {
7929 case FUNC_FPTR_RELATIVE:
7930 idx = FUNC_LT_FPTR_RELATIVE; break;
7931 case FUNC_DTP_MODULE:
7932 idx = FUNC_LT_DTP_MODULE; break;
7933 case FUNC_DTP_RELATIVE:
7934 idx = FUNC_LT_DTP_RELATIVE; break;
7935 case FUNC_TP_RELATIVE:
7936 idx = FUNC_LT_TP_RELATIVE; break;
7937 default:
7938 as_bad (_("Illegal combination of relocation functions"));
7939 goto done;
7940 }
7941 }
7942 /* Make sure gas doesn't get rid of local symbols that are used
7943 in relocs. */
7944 e->X_op = O_pseudo_fixup;
7945 e->X_op_symbol = pseudo_func[idx].u.sym;
7946 done:
7947 *nextcharP = *input_line_pointer;
7948 break;
7949
7950 case PSEUDO_FUNC_CONST:
7951 e->X_op = O_constant;
7952 e->X_add_number = pseudo_func[idx].u.ival;
7953 break;
7954
7955 case PSEUDO_FUNC_REG:
7956 e->X_op = O_register;
7957 e->X_add_number = pseudo_func[idx].u.ival;
7958 break;
7959
7960 default:
7961 return 0;
7962 }
7963 return 1;
7964 }
7965
7966 /* first see if NAME is a known register name: */
7967 sym = str_hash_find (md.reg_hash, name);
7968 if (sym)
7969 {
7970 e->X_op = O_register;
7971 e->X_add_number = S_GET_VALUE (sym);
7972 return 1;
7973 }
7974
7975 cdesc = str_hash_find (md.const_hash, name);
7976 if (cdesc)
7977 {
7978 e->X_op = O_constant;
7979 e->X_add_number = cdesc->value;
7980 return 1;
7981 }
7982
7983 /* check for inN, locN, or outN: */
7984 idx = 0;
7985 switch (name[0])
7986 {
7987 case 'i':
7988 if (name[1] == 'n' && ISDIGIT (name[2]))
7989 {
7990 dr = &md.in;
7991 idx = 2;
7992 }
7993 break;
7994
7995 case 'l':
7996 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7997 {
7998 dr = &md.loc;
7999 idx = 3;
8000 }
8001 break;
8002
8003 case 'o':
8004 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8005 {
8006 dr = &md.out;
8007 idx = 3;
8008 }
8009 break;
8010
8011 default:
8012 break;
8013 }
8014
8015 /* Ignore register numbers with leading zeroes, except zero itself. */
8016 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8017 {
8018 unsigned long regnum;
8019
8020 /* The name is inN, locN, or outN; parse the register number. */
8021 regnum = strtoul (name + idx, &end, 10);
8022 if (end > name + idx && *end == '\0' && regnum < 96)
8023 {
8024 if (regnum >= dr->num_regs)
8025 {
8026 if (!dr->num_regs)
8027 as_bad (_("No current frame"));
8028 else
8029 as_bad (_("Register number out of range 0..%u"),
8030 dr->num_regs - 1);
8031 regnum = 0;
8032 }
8033 e->X_op = O_register;
8034 e->X_add_number = dr->base + regnum;
8035 return 1;
8036 }
8037 }
8038
8039 end = xstrdup (name);
8040 name = ia64_canonicalize_symbol_name (end);
8041 if ((dr = str_hash_find (md.dynreg_hash, name)))
8042 {
8043 /* We've got ourselves the name of a rotating register set.
8044 Store the base register number in the low 16 bits of
8045 X_add_number and the size of the register set in the top 16
8046 bits. */
8047 e->X_op = O_register;
8048 e->X_add_number = dr->base | (dr->num_regs << 16);
8049 free (end);
8050 return 1;
8051 }
8052 free (end);
8053 return 0;
8054 }
8055
8056 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
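/* For example, `foo#' becomes `foo'; `foo##' is also accepted but draws a
   "Redundant `#' suffix operators" warning, and a bare `#' is rejected.  */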
8057
8058 char *
8059 ia64_canonicalize_symbol_name (char *name)
8060 {
8061 size_t len = strlen (name), full = len;
8062
8063 while (len > 0 && name[len - 1] == '#')
8064 --len;
8065 if (len <= 0)
8066 {
8067 if (full > 0)
8068 as_bad (_("Standalone `#' is illegal"));
8069 }
8070 else if (len < full - 1)
8071 as_warn (_("Redundant `#' suffix operators"));
8072 name[len] = '\0';
8073 return name;
8074 }
8075
8076 /* Return true if idesc is a conditional branch instruction. This excludes
8077 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8078 because they always read/write resources regardless of the value of the
8079 qualifying predicate. br.ia must always use p0, and hence is always
8080 taken. Thus this function returns true for branches which can fall
8081 through, and which use no resources if they do fall through. */
8082
8083 static int
8084 is_conditional_branch (struct ia64_opcode *idesc)
8085 {
8086 /* br is a conditional branch. Everything that starts with br. except
8087 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8088 Everything that starts with brl is a conditional branch. */
8089 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8090 && (idesc->name[2] == '\0'
8091 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8092 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8093 || idesc->name[2] == 'l'
8094 /* br.cond, br.call, br.clr */
8095 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8096 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8097 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8098 }
8099
8100 /* Return whether the given opcode is a taken branch. If there's any doubt,
8101 returns zero. */
8102
8103 static int
8104 is_taken_branch (struct ia64_opcode *idesc)
8105 {
8106 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8107 || startswith (idesc->name, "br.ia"));
8108 }
8109
8110 /* Return whether the given opcode is an interruption or rfi. If there's any
8111 doubt, returns zero. */
8112
8113 static int
8114 is_interruption_or_rfi (struct ia64_opcode *idesc)
8115 {
8116 if (strcmp (idesc->name, "rfi") == 0)
8117 return 1;
8118 return 0;
8119 }
8120
8121 /* Returns the index of the given dependency in the opcode's list of chks, or
8122 -1 if there is no dependency. */
8123
8124 static int
8125 depends_on (int depind, struct ia64_opcode *idesc)
8126 {
8127 int i;
8128 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8129 for (i = 0; i < dep->nchks; i++)
8130 {
8131 if (depind == DEP (dep->chks[i]))
8132 return i;
8133 }
8134 return -1;
8135 }
8136
8137 /* Determine a set of specific resources used for a particular resource
8138 class. Returns the number of specific resources identified. For those
8139 cases which are not determinable statically, the resource returned is
8140 marked nonspecific.
8141
8142 Meanings of value in 'NOTE':
8143 1) only read/write when the register number is explicitly encoded in the
8144 insn.
8145 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8146 accesses CFM when qualifying predicate is in the rotating region.
8147 3) general register value is used to specify an indirect register; not
8148 determinable statically.
8149 4) only read the given resource when bits 7:0 of the indirect index
8150 register value does not match the register number of the resource; not
8151 determinable statically.
8152 5) all rules are implementation specific.
8153 6) only when both the index specified by the reader and the index specified
8154 by the writer have the same value in bits 63:61; not determinable
8155 statically.
8156 7) only access the specified resource when the corresponding mask bit is
8157 set
8158 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8159 only read when these insns reference FR2-31
8160 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8161 written when these insns write FR32-127
8162 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8163 instruction
8164 11) The target predicates are written independently of PR[qp], but source
8165 registers are only read if PR[qp] is true. Since the state of PR[qp]
8166 cannot statically be determined, all source registers are marked used.
8167 12) This insn only reads the specified predicate register when that
8168 register is the PR[qp].
8169 13) This reference to ld-c only applies to the GR whose value is loaded
8170 with data returned from memory, not the post-incremented address register.
8171 14) The RSE resource includes the implementation-specific RSE internal
8172 state resources. At least one (and possibly more) of these resources are
8173 read by each instruction listed in IC:rse-readers. At least one (and
8174 possibly more) of these resources are written by each insn listed in
8175 IC:rse-writers.
8176 15+16) Represents reserved instructions, which the assembler does not
8177 generate.
8178 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8179 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8180
8181 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8182 this code; there are no dependency violations based on memory access.
8183 */
8184
8185 #define MAX_SPECS 256
8186 #define DV_CHK 1
8187 #define DV_REG 0
8188
8189 static int
8190 specify_resource (const struct ia64_dependency *dep,
8191 struct ia64_opcode *idesc,
8192 /* is this a DV chk or a DV reg? */
8193 int type,
8194 /* returned specific resources */
8195 struct rsrc specs[MAX_SPECS],
8196 /* resource note for this insn's usage */
8197 int note,
8198 /* which execution path to examine */
8199 int path)
8200 {
8201 int count = 0;
8202 int i;
8203 int rsrc_write = 0;
8204 struct rsrc tmpl;
8205
8206 if (dep->mode == IA64_DV_WAW
8207 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8208 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8209 rsrc_write = 1;
8210
8211 /* template for any resources we identify */
8212 tmpl.dependency = dep;
8213 tmpl.note = note;
8214 tmpl.insn_srlz = tmpl.data_srlz = 0;
8215 tmpl.qp_regno = CURR_SLOT.qp_regno;
8216 tmpl.link_to_qp_branch = 1;
8217 tmpl.mem_offset.hint = 0;
8218 tmpl.mem_offset.offset = 0;
8219 tmpl.mem_offset.base = 0;
8220 tmpl.specific = 1;
8221 tmpl.index = -1;
8222 tmpl.cmp_type = CMP_NONE;
8223 tmpl.depind = 0;
8224 tmpl.file = NULL;
8225 tmpl.line = 0;
8226 tmpl.path = 0;
8227
8228 #define UNHANDLED \
8229 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8230 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8231 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8232
8233 /* we don't need to track these */
8234 if (dep->semantics == IA64_DVS_NONE)
8235 return 0;
8236
8237 switch (dep->specifier)
8238 {
8239 case IA64_RS_AR_K:
8240 if (note == 1)
8241 {
8242 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8243 {
8244 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8245 if (regno >= 0 && regno <= 7)
8246 {
8247 specs[count] = tmpl;
8248 specs[count++].index = regno;
8249 }
8250 }
8251 }
8252 else if (note == 0)
8253 {
8254 for (i = 0; i < 8; i++)
8255 {
8256 specs[count] = tmpl;
8257 specs[count++].index = i;
8258 }
8259 }
8260 else
8261 {
8262 UNHANDLED;
8263 }
8264 break;
8265
8266 case IA64_RS_AR_UNAT:
8267 /* This is a mov =AR or mov AR= instruction. */
8268 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8269 {
8270 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8271 if (regno == AR_UNAT)
8272 {
8273 specs[count++] = tmpl;
8274 }
8275 }
8276 else
8277 {
8278 /* This is a spill/fill, or other instruction that modifies the
8279 unat register. */
8280
8281 /* Unless we can determine the specific bits used, mark the whole
8282 thing; bits 8:3 of the memory address indicate the bit used in
8283 UNAT. The .mem.offset hint may be used to eliminate a small
8284 subset of conflicts. */
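	  /* For example, a spill to an address whose bits 8:3 equal 0x21
	     (e.g. 0x1108, since (0x1108 >> 3) & 0x3F == 0x21) would use UNAT
	     bit 0x21, which is what the index computation below
	     approximates.  */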
8285 specs[count] = tmpl;
8286 if (md.mem_offset.hint)
8287 {
8288 if (md.debug_dv)
8289 fprintf (stderr, " Using hint for spill/fill\n");
8290 /* The index isn't actually used, just set it to something
8291 approximating the bit index. */
8292 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8293 specs[count].mem_offset.hint = 1;
8294 specs[count].mem_offset.offset = md.mem_offset.offset;
8295 specs[count++].mem_offset.base = md.mem_offset.base;
8296 }
8297 else
8298 {
8299 specs[count++].specific = 0;
8300 }
8301 }
8302 break;
8303
8304 case IA64_RS_AR:
8305 if (note == 1)
8306 {
8307 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8308 {
8309 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8310 if ((regno >= 8 && regno <= 15)
8311 || (regno >= 20 && regno <= 23)
8312 || (regno >= 31 && regno <= 39)
8313 || (regno >= 41 && regno <= 47)
8314 || (regno >= 67 && regno <= 111))
8315 {
8316 specs[count] = tmpl;
8317 specs[count++].index = regno;
8318 }
8319 }
8320 }
8321 else
8322 {
8323 UNHANDLED;
8324 }
8325 break;
8326
8327 case IA64_RS_ARb:
8328 if (note == 1)
8329 {
8330 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8331 {
8332 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8333 if ((regno >= 48 && regno <= 63)
8334 || (regno >= 112 && regno <= 127))
8335 {
8336 specs[count] = tmpl;
8337 specs[count++].index = regno;
8338 }
8339 }
8340 }
8341 else if (note == 0)
8342 {
8343 for (i = 48; i < 64; i++)
8344 {
8345 specs[count] = tmpl;
8346 specs[count++].index = i;
8347 }
8348 for (i = 112; i < 128; i++)
8349 {
8350 specs[count] = tmpl;
8351 specs[count++].index = i;
8352 }
8353 }
8354 else
8355 {
8356 UNHANDLED;
8357 }
8358 break;
8359
8360 case IA64_RS_BR:
8361 if (note != 1)
8362 {
8363 UNHANDLED;
8364 }
8365 else
8366 {
8367 if (rsrc_write)
8368 {
8369 for (i = 0; i < idesc->num_outputs; i++)
8370 if (idesc->operands[i] == IA64_OPND_B1
8371 || idesc->operands[i] == IA64_OPND_B2)
8372 {
8373 specs[count] = tmpl;
8374 specs[count++].index =
8375 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8376 }
8377 }
8378 else
8379 {
8380 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8381 if (idesc->operands[i] == IA64_OPND_B1
8382 || idesc->operands[i] == IA64_OPND_B2)
8383 {
8384 specs[count] = tmpl;
8385 specs[count++].index =
8386 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8387 }
8388 }
8389 }
8390 break;
8391
8392 case IA64_RS_CPUID: /* four or more registers */
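      /* The specific register is identified by the low 8 bits of the value in
	 the GR used as the index: if that value was recorded by
	 note_register_values (), mark the specific CPUID register, otherwise
	 fall back to a non-specific reference.  The DBR, IBR, PKR, PMC and
	 PMD cases below follow the same pattern; IA64_RS_RR does too, but
	 uses bits 63:61 of the value.  */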
8393 if (note == 3)
8394 {
8395 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8396 {
8397 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8398 if (regno >= 0 && regno < NELEMS (gr_values)
8399 && KNOWN (regno))
8400 {
8401 specs[count] = tmpl;
8402 specs[count++].index = gr_values[regno].value & 0xFF;
8403 }
8404 else
8405 {
8406 specs[count] = tmpl;
8407 specs[count++].specific = 0;
8408 }
8409 }
8410 }
8411 else
8412 {
8413 UNHANDLED;
8414 }
8415 break;
8416
8417 case IA64_RS_DBR: /* four or more registers */
8418 if (note == 3)
8419 {
8420 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8421 {
8422 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8423 if (regno >= 0 && regno < NELEMS (gr_values)
8424 && KNOWN (regno))
8425 {
8426 specs[count] = tmpl;
8427 specs[count++].index = gr_values[regno].value & 0xFF;
8428 }
8429 else
8430 {
8431 specs[count] = tmpl;
8432 specs[count++].specific = 0;
8433 }
8434 }
8435 }
8436 else if (note == 0 && !rsrc_write)
8437 {
8438 specs[count] = tmpl;
8439 specs[count++].specific = 0;
8440 }
8441 else
8442 {
8443 UNHANDLED;
8444 }
8445 break;
8446
8447 case IA64_RS_IBR: /* four or more registers */
8448 if (note == 3)
8449 {
8450 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8451 {
8452 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8453 if (regno >= 0 && regno < NELEMS (gr_values)
8454 && KNOWN (regno))
8455 {
8456 specs[count] = tmpl;
8457 specs[count++].index = gr_values[regno].value & 0xFF;
8458 }
8459 else
8460 {
8461 specs[count] = tmpl;
8462 specs[count++].specific = 0;
8463 }
8464 }
8465 }
8466 else
8467 {
8468 UNHANDLED;
8469 }
8470 break;
8471
8472 case IA64_RS_MSR:
8473 if (note == 5)
8474 {
8475 /* These are implementation specific. Force all references to
8476 conflict with all other references. */
8477 specs[count] = tmpl;
8478 specs[count++].specific = 0;
8479 }
8480 else
8481 {
8482 UNHANDLED;
8483 }
8484 break;
8485
8486 case IA64_RS_PKR: /* 16 or more registers */
8487 if (note == 3 || note == 4)
8488 {
8489 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8490 {
8491 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8492 if (regno >= 0 && regno < NELEMS (gr_values)
8493 && KNOWN (regno))
8494 {
8495 if (note == 3)
8496 {
8497 specs[count] = tmpl;
8498 specs[count++].index = gr_values[regno].value & 0xFF;
8499 }
8500 else
8501 for (i = 0; i < NELEMS (gr_values); i++)
8502 {
8503 /* Uses all registers *except* the one in R3. */
8504 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8505 {
8506 specs[count] = tmpl;
8507 specs[count++].index = i;
8508 }
8509 }
8510 }
8511 else
8512 {
8513 specs[count] = tmpl;
8514 specs[count++].specific = 0;
8515 }
8516 }
8517 }
8518 else if (note == 0)
8519 {
8520 /* probe et al. */
8521 specs[count] = tmpl;
8522 specs[count++].specific = 0;
8523 }
8524 break;
8525
8526 case IA64_RS_PMC: /* four or more registers */
8527 if (note == 3)
8528 {
8529 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8530 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8531
8532 {
8533 int reg_index = ((idesc->operands[1] == IA64_OPND_PMD_R3 && !rsrc_write)
8534 ? 1 : !rsrc_write);
8535 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8536 if (regno >= 0 && regno < NELEMS (gr_values)
8537 && KNOWN (regno))
8538 {
8539 specs[count] = tmpl;
8540 specs[count++].index = gr_values[regno].value & 0xFF;
8541 }
8542 else
8543 {
8544 specs[count] = tmpl;
8545 specs[count++].specific = 0;
8546 }
8547 }
8548 }
8549 else
8550 {
8551 UNHANDLED;
8552 }
8553 break;
8554
8555 case IA64_RS_PMD: /* four or more registers */
8556 if (note == 3)
8557 {
8558 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8559 {
8560 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8561 if (regno >= 0 && regno < NELEMS (gr_values)
8562 && KNOWN (regno))
8563 {
8564 specs[count] = tmpl;
8565 specs[count++].index = gr_values[regno].value & 0xFF;
8566 }
8567 else
8568 {
8569 specs[count] = tmpl;
8570 specs[count++].specific = 0;
8571 }
8572 }
8573 }
8574 else
8575 {
8576 UNHANDLED;
8577 }
8578 break;
8579
8580 case IA64_RS_RR: /* eight registers */
8581 if (note == 6)
8582 {
8583 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8584 {
8585 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8586 if (regno >= 0 && regno < NELEMS (gr_values)
8587 && KNOWN (regno))
8588 {
8589 specs[count] = tmpl;
8590 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8591 }
8592 else
8593 {
8594 specs[count] = tmpl;
8595 specs[count++].specific = 0;
8596 }
8597 }
8598 }
8599 else if (note == 0 && !rsrc_write)
8600 {
8601 specs[count] = tmpl;
8602 specs[count++].specific = 0;
8603 }
8604 else
8605 {
8606 UNHANDLED;
8607 }
8608 break;
8609
8610 case IA64_RS_CR_IRR:
8611 if (note == 0)
8612 {
8613 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8614 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8615 if (rsrc_write
8616 && idesc->operands[1] == IA64_OPND_CR3
8617 && regno == CR_IVR)
8618 {
8619 for (i = 0; i < 4; i++)
8620 {
8621 specs[count] = tmpl;
8622 specs[count++].index = CR_IRR0 + i;
8623 }
8624 }
8625 }
8626 else if (note == 1)
8627 {
8628 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8629 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8630 && regno >= CR_IRR0
8631 && regno <= CR_IRR3)
8632 {
8633 specs[count] = tmpl;
8634 specs[count++].index = regno;
8635 }
8636 }
8637 else
8638 {
8639 UNHANDLED;
8640 }
8641 break;
8642
8643 case IA64_RS_CR_IIB:
8644 if (note != 0)
8645 {
8646 UNHANDLED;
8647 }
8648 else
8649 {
8650 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8651 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8652 && (regno == CR_IIB0 || regno == CR_IIB1))
8653 {
8654 specs[count] = tmpl;
8655 specs[count++].index = regno;
8656 }
8657 }
8658 break;
8659
8660 case IA64_RS_CR_LRR:
8661 if (note != 1)
8662 {
8663 UNHANDLED;
8664 }
8665 else
8666 {
8667 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8668 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8669 && (regno == CR_LRR0 || regno == CR_LRR1))
8670 {
8671 specs[count] = tmpl;
8672 specs[count++].index = regno;
8673 }
8674 }
8675 break;
8676
8677 case IA64_RS_CR:
8678 if (note == 1)
8679 {
8680 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8681 {
8682 specs[count] = tmpl;
8683 specs[count++].index =
8684 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8685 }
8686 }
8687 else
8688 {
8689 UNHANDLED;
8690 }
8691 break;
8692
8693 case IA64_RS_DAHR:
8694 if (note == 0)
8695 {
8696 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8697 {
8698 specs[count] = tmpl;
8699 specs[count++].index =
8700 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8701 }
8702 }
8703 else
8704 {
8705 UNHANDLED;
8706 }
8707 break;
8708
8709 case IA64_RS_FR:
8710 case IA64_RS_FRb:
8711 if (note != 1)
8712 {
8713 UNHANDLED;
8714 }
8715 else if (rsrc_write)
8716 {
8717 if (dep->specifier == IA64_RS_FRb
8718 && idesc->operands[0] == IA64_OPND_F1)
8719 {
8720 specs[count] = tmpl;
8721 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8722 }
8723 }
8724 else
8725 {
8726 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8727 {
8728 if (idesc->operands[i] == IA64_OPND_F2
8729 || idesc->operands[i] == IA64_OPND_F3
8730 || idesc->operands[i] == IA64_OPND_F4)
8731 {
8732 specs[count] = tmpl;
8733 specs[count++].index =
8734 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8735 }
8736 }
8737 }
8738 break;
8739
8740 case IA64_RS_GR:
8741 if (note == 13)
8742 {
8743 /* This reference applies only to the GR whose value is loaded with
8744 data returned from memory. */
8745 specs[count] = tmpl;
8746 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8747 }
8748 else if (note == 1)
8749 {
8750 if (rsrc_write)
8751 {
8752 for (i = 0; i < idesc->num_outputs; i++)
8753 if (idesc->operands[i] == IA64_OPND_R1
8754 || idesc->operands[i] == IA64_OPND_R2
8755 || idesc->operands[i] == IA64_OPND_R3)
8756 {
8757 specs[count] = tmpl;
8758 specs[count++].index =
8759 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8760 }
8761 if (idesc->flags & IA64_OPCODE_POSTINC)
8762 for (i = 0; i < NELEMS (idesc->operands); i++)
8763 if (idesc->operands[i] == IA64_OPND_MR3)
8764 {
8765 specs[count] = tmpl;
8766 specs[count++].index =
8767 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8768 }
8769 }
8770 else
8771 {
8772 /* Look for anything that reads a GR. */
8773 for (i = 0; i < NELEMS (idesc->operands); i++)
8774 {
8775 if (idesc->operands[i] == IA64_OPND_MR3
8776 || idesc->operands[i] == IA64_OPND_CPUID_R3
8777 || idesc->operands[i] == IA64_OPND_DBR_R3
8778 || idesc->operands[i] == IA64_OPND_IBR_R3
8779 || idesc->operands[i] == IA64_OPND_MSR_R3
8780 || idesc->operands[i] == IA64_OPND_PKR_R3
8781 || idesc->operands[i] == IA64_OPND_PMC_R3
8782 || idesc->operands[i] == IA64_OPND_PMD_R3
8783 || idesc->operands[i] == IA64_OPND_DAHR_R3
8784 || idesc->operands[i] == IA64_OPND_RR_R3
8785 || ((i >= idesc->num_outputs)
8786 && (idesc->operands[i] == IA64_OPND_R1
8787 || idesc->operands[i] == IA64_OPND_R2
8788 || idesc->operands[i] == IA64_OPND_R3
8789 /* addl source register. */
8790 || idesc->operands[i] == IA64_OPND_R3_2)))
8791 {
8792 specs[count] = tmpl;
8793 specs[count++].index =
8794 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8795 }
8796 }
8797 }
8798 }
8799 else
8800 {
8801 UNHANDLED;
8802 }
8803 break;
8804
8805 /* This is the same as IA64_RS_PRr, except that the register range is
8806 from 1 - 15, and there are no rotating register reads/writes here. */
8807 case IA64_RS_PR:
8808 if (note == 0)
8809 {
8810 for (i = 1; i < 16; i++)
8811 {
8812 specs[count] = tmpl;
8813 specs[count++].index = i;
8814 }
8815 }
8816 else if (note == 7)
8817 {
8818 valueT mask = 0;
8819 /* Mark only those registers indicated by the mask. */
8820 if (rsrc_write)
8821 {
8822 mask = CURR_SLOT.opnd[2].X_add_number;
8823 for (i = 1; i < 16; i++)
8824 if (mask & ((valueT) 1 << i))
8825 {
8826 specs[count] = tmpl;
8827 specs[count++].index = i;
8828 }
8829 }
8830 else
8831 {
8832 UNHANDLED;
8833 }
8834 }
8835 else if (note == 11) /* note 11 implies note 1 as well */
8836 {
8837 if (rsrc_write)
8838 {
8839 for (i = 0; i < idesc->num_outputs; i++)
8840 {
8841 if (idesc->operands[i] == IA64_OPND_P1
8842 || idesc->operands[i] == IA64_OPND_P2)
8843 {
8844 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8845 if (regno >= 1 && regno < 16)
8846 {
8847 specs[count] = tmpl;
8848 specs[count++].index = regno;
8849 }
8850 }
8851 }
8852 }
8853 else
8854 {
8855 UNHANDLED;
8856 }
8857 }
8858 else if (note == 12)
8859 {
8860 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8861 {
8862 specs[count] = tmpl;
8863 specs[count++].index = CURR_SLOT.qp_regno;
8864 }
8865 }
8866 else if (note == 1)
8867 {
8868 if (rsrc_write)
8869 {
8870 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8871 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8872 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8873 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8874
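	      /* For a parallel compare such as "cmp.eq.or.andcm p6, p7 = ...",
		 the first target is an OR-type write and the second an
		 AND-type write (and the reverse for ".and.orcm"); recording
		 the cmp_type lets resources_match () skip apparent WAW
		 conflicts between writes of the same parallel-compare type.  */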
8875 if ((idesc->operands[0] == IA64_OPND_P1
8876 || idesc->operands[0] == IA64_OPND_P2)
8877 && p1 >= 1 && p1 < 16)
8878 {
8879 specs[count] = tmpl;
8880 specs[count].cmp_type =
8881 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8882 specs[count++].index = p1;
8883 }
8884 if ((idesc->operands[1] == IA64_OPND_P1
8885 || idesc->operands[1] == IA64_OPND_P2)
8886 && p2 >= 1 && p2 < 16)
8887 {
8888 specs[count] = tmpl;
8889 specs[count].cmp_type =
8890 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8891 specs[count++].index = p2;
8892 }
8893 }
8894 else
8895 {
8896 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8897 {
8898 specs[count] = tmpl;
8899 specs[count++].index = CURR_SLOT.qp_regno;
8900 }
8901 if (idesc->operands[1] == IA64_OPND_PR)
8902 {
8903 for (i = 1; i < 16; i++)
8904 {
8905 specs[count] = tmpl;
8906 specs[count++].index = i;
8907 }
8908 }
8909 }
8910 }
8911 else
8912 {
8913 UNHANDLED;
8914 }
8915 break;
8916
8917 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8918 simplified cases of this. */
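      /* Between them, the three specifiers split the writable predicates:
	 IA64_RS_PR covers PR1-PR15, IA64_RS_PRr covers PR16-PR62, and
	 IA64_RS_PR63 covers PR63 on its own.  */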
8919 case IA64_RS_PRr:
8920 if (note == 0)
8921 {
8922 for (i = 16; i < 63; i++)
8923 {
8924 specs[count] = tmpl;
8925 specs[count++].index = i;
8926 }
8927 }
8928 else if (note == 7)
8929 {
8930 valueT mask = 0;
8931 /* Mark only those registers indicated by the mask. */
8932 if (rsrc_write
8933 && idesc->operands[0] == IA64_OPND_PR)
8934 {
8935 mask = CURR_SLOT.opnd[2].X_add_number;
8936 if (mask & ((valueT) 1 << 16))
8937 for (i = 16; i < 63; i++)
8938 {
8939 specs[count] = tmpl;
8940 specs[count++].index = i;
8941 }
8942 }
8943 else if (rsrc_write
8944 && idesc->operands[0] == IA64_OPND_PR_ROT)
8945 {
8946 for (i = 16; i < 63; i++)
8947 {
8948 specs[count] = tmpl;
8949 specs[count++].index = i;
8950 }
8951 }
8952 else
8953 {
8954 UNHANDLED;
8955 }
8956 }
8957 else if (note == 11) /* note 11 implies note 1 as well */
8958 {
8959 if (rsrc_write)
8960 {
8961 for (i = 0; i < idesc->num_outputs; i++)
8962 {
8963 if (idesc->operands[i] == IA64_OPND_P1
8964 || idesc->operands[i] == IA64_OPND_P2)
8965 {
8966 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8967 if (regno >= 16 && regno < 63)
8968 {
8969 specs[count] = tmpl;
8970 specs[count++].index = regno;
8971 }
8972 }
8973 }
8974 }
8975 else
8976 {
8977 UNHANDLED;
8978 }
8979 }
8980 else if (note == 12)
8981 {
8982 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8983 {
8984 specs[count] = tmpl;
8985 specs[count++].index = CURR_SLOT.qp_regno;
8986 }
8987 }
8988 else if (note == 1)
8989 {
8990 if (rsrc_write)
8991 {
8992 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8993 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8994 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8995 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8996
8997 if ((idesc->operands[0] == IA64_OPND_P1
8998 || idesc->operands[0] == IA64_OPND_P2)
8999 && p1 >= 16 && p1 < 63)
9000 {
9001 specs[count] = tmpl;
9002 specs[count].cmp_type =
9003 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9004 specs[count++].index = p1;
9005 }
9006 if ((idesc->operands[1] == IA64_OPND_P1
9007 || idesc->operands[1] == IA64_OPND_P2)
9008 && p2 >= 16 && p2 < 63)
9009 {
9010 specs[count] = tmpl;
9011 specs[count].cmp_type =
9012 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9013 specs[count++].index = p2;
9014 }
9015 }
9016 else
9017 {
9018 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9019 {
9020 specs[count] = tmpl;
9021 specs[count++].index = CURR_SLOT.qp_regno;
9022 }
9023 if (idesc->operands[1] == IA64_OPND_PR)
9024 {
9025 for (i = 16; i < 63; i++)
9026 {
9027 specs[count] = tmpl;
9028 specs[count++].index = i;
9029 }
9030 }
9031 }
9032 }
9033 else
9034 {
9035 UNHANDLED;
9036 }
9037 break;
9038
9039 case IA64_RS_PSR:
9040 /* Verify that the instruction is using the PSR bit indicated in
9041 dep->regindex. */
9042 if (note == 0)
9043 {
9044 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9045 {
9046 if (dep->regindex < 6)
9047 {
9048 specs[count++] = tmpl;
9049 }
9050 }
9051 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9052 {
9053 if (dep->regindex < 32
9054 || dep->regindex == 35
9055 || dep->regindex == 36
9056 || (!rsrc_write && dep->regindex == PSR_CPL))
9057 {
9058 specs[count++] = tmpl;
9059 }
9060 }
9061 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9062 {
9063 if (dep->regindex < 32
9064 || dep->regindex == 35
9065 || dep->regindex == 36
9066 || (rsrc_write && dep->regindex == PSR_CPL))
9067 {
9068 specs[count++] = tmpl;
9069 }
9070 }
9071 else
9072 {
9073 /* Several PSR bits have very specific dependencies. */
9074 switch (dep->regindex)
9075 {
9076 default:
9077 specs[count++] = tmpl;
9078 break;
9079 case PSR_IC:
9080 if (rsrc_write)
9081 {
9082 specs[count++] = tmpl;
9083 }
9084 else
9085 {
9086 /* Only certain CR accesses use PSR.ic */
9087 if (idesc->operands[0] == IA64_OPND_CR3
9088 || idesc->operands[1] == IA64_OPND_CR3)
9089 {
9090 int reg_index =
9091 ((idesc->operands[0] == IA64_OPND_CR3)
9092 ? 0 : 1);
9093 int regno =
9094 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9095
9096 switch (regno)
9097 {
9098 default:
9099 break;
9100 case CR_ITIR:
9101 case CR_IFS:
9102 case CR_IIM:
9103 case CR_IIP:
9104 case CR_IPSR:
9105 case CR_ISR:
9106 case CR_IFA:
9107 case CR_IHA:
9108 case CR_IIB0:
9109 case CR_IIB1:
9110 case CR_IIPA:
9111 specs[count++] = tmpl;
9112 break;
9113 }
9114 }
9115 }
9116 break;
9117 case PSR_CPL:
9118 if (rsrc_write)
9119 {
9120 specs[count++] = tmpl;
9121 }
9122 else
9123 {
9124 /* Only some AR accesses use cpl */
9125 if (idesc->operands[0] == IA64_OPND_AR3
9126 || idesc->operands[1] == IA64_OPND_AR3)
9127 {
9128 int reg_index =
9129 ((idesc->operands[0] == IA64_OPND_AR3)
9130 ? 0 : 1);
9131 int regno =
9132 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9133
9134 if (regno == AR_ITC
9135 || regno == AR_RUC
9136 || (reg_index == 0
9137 && (regno == AR_RSC
9138 || (regno >= AR_K0
9139 && regno <= AR_K7))))
9140 {
9141 specs[count++] = tmpl;
9142 }
9143 }
9144 else
9145 {
9146 specs[count++] = tmpl;
9147 }
9148 break;
9149 }
9150 }
9151 }
9152 }
9153 else if (note == 7)
9154 {
9155 valueT mask = 0;
9156 if (idesc->operands[0] == IA64_OPND_IMMU24)
9157 {
9158 mask = CURR_SLOT.opnd[0].X_add_number;
9159 }
9160 else
9161 {
9162 UNHANDLED;
9163 }
9164 if (mask & ((valueT) 1 << dep->regindex))
9165 {
9166 specs[count++] = tmpl;
9167 }
9168 }
9169 else if (note == 8)
9170 {
9171 int min = dep->regindex == PSR_DFL ? 2 : 32;
9172 int max = dep->regindex == PSR_DFL ? 31 : 127;
9173 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9174 for (i = 0; i < NELEMS (idesc->operands); i++)
9175 {
9176 if (idesc->operands[i] == IA64_OPND_F1
9177 || idesc->operands[i] == IA64_OPND_F2
9178 || idesc->operands[i] == IA64_OPND_F3
9179 || idesc->operands[i] == IA64_OPND_F4)
9180 {
9181 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9182 if (reg >= min && reg <= max)
9183 {
9184 specs[count++] = tmpl;
9185 }
9186 }
9187 }
9188 }
9189 else if (note == 9)
9190 {
9191 int min = dep->regindex == PSR_MFL ? 2 : 32;
9192 int max = dep->regindex == PSR_MFL ? 31 : 127;
9193 /* mfh is read on writes to FR32-127; mfl is read on writes to
9194 FR2-31 */
9195 for (i = 0; i < idesc->num_outputs; i++)
9196 {
9197 if (idesc->operands[i] == IA64_OPND_F1)
9198 {
9199 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9200 if (reg >= min && reg <= max)
9201 {
9202 specs[count++] = tmpl;
9203 }
9204 }
9205 }
9206 }
9207 else if (note == 10)
9208 {
9209 for (i = 0; i < NELEMS (idesc->operands); i++)
9210 {
9211 if (idesc->operands[i] == IA64_OPND_R1
9212 || idesc->operands[i] == IA64_OPND_R2
9213 || idesc->operands[i] == IA64_OPND_R3)
9214 {
9215 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9216 if (regno >= 16 && regno <= 31)
9217 {
9218 specs[count++] = tmpl;
9219 }
9220 }
9221 }
9222 }
9223 else
9224 {
9225 UNHANDLED;
9226 }
9227 break;
9228
9229 case IA64_RS_AR_FPSR:
9230 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9231 {
9232 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9233 if (regno == AR_FPSR)
9234 {
9235 specs[count++] = tmpl;
9236 }
9237 }
9238 else
9239 {
9240 specs[count++] = tmpl;
9241 }
9242 break;
9243
9244 case IA64_RS_ARX:
9245 /* Handle all AR[REG] resources */
9246 if (note == 0 || note == 1)
9247 {
9248 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9249 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9250 && regno == dep->regindex)
9251 {
9252 specs[count++] = tmpl;
9253 }
9254 /* other AR[REG] resources may be affected by AR accesses */
9255 else if (idesc->operands[0] == IA64_OPND_AR3)
9256 {
9257 /* AR[] writes */
9258 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9259 switch (dep->regindex)
9260 {
9261 default:
9262 break;
9263 case AR_BSP:
9264 case AR_RNAT:
9265 if (regno == AR_BSPSTORE)
9266 {
9267 specs[count++] = tmpl;
9268 }
9269 /* Fall through. */
9270 case AR_RSC:
9271 if (!rsrc_write &&
9272 (regno == AR_BSPSTORE
9273 || regno == AR_RNAT))
9274 {
9275 specs[count++] = tmpl;
9276 }
9277 break;
9278 }
9279 }
9280 else if (idesc->operands[1] == IA64_OPND_AR3)
9281 {
9282 /* AR[] reads */
9283 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9284 switch (dep->regindex)
9285 {
9286 default:
9287 break;
9288 case AR_RSC:
9289 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9290 {
9291 specs[count++] = tmpl;
9292 }
9293 break;
9294 }
9295 }
9296 else
9297 {
9298 specs[count++] = tmpl;
9299 }
9300 }
9301 else
9302 {
9303 UNHANDLED;
9304 }
9305 break;
9306
9307 case IA64_RS_CRX:
9308 /* Handle all CR[REG] resources.
9309 ??? FIXME: Rule 17 isn't really handled correctly. */
9310 if (note == 0 || note == 1 || note == 17)
9311 {
9312 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9313 {
9314 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9315 if (regno == dep->regindex)
9316 {
9317 specs[count++] = tmpl;
9318 }
9319 else if (!rsrc_write)
9320 {
9321 /* Reads from CR[IVR] affect other resources. */
9322 if (regno == CR_IVR)
9323 {
9324 if ((dep->regindex >= CR_IRR0
9325 && dep->regindex <= CR_IRR3)
9326 || dep->regindex == CR_TPR)
9327 {
9328 specs[count++] = tmpl;
9329 }
9330 }
9331 }
9332 }
9333 else
9334 {
9335 specs[count++] = tmpl;
9336 }
9337 }
9338 else
9339 {
9340 UNHANDLED;
9341 }
9342 break;
9343
9344 case IA64_RS_INSERVICE:
9345 /* look for write of EOI (67) or read of IVR (65) */
9346 if ((idesc->operands[0] == IA64_OPND_CR3
9347 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9348 || (idesc->operands[1] == IA64_OPND_CR3
9349 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9350 {
9351 specs[count++] = tmpl;
9352 }
9353 break;
9354
9355 case IA64_RS_GR0:
9356 if (note == 1)
9357 {
9358 specs[count++] = tmpl;
9359 }
9360 else
9361 {
9362 UNHANDLED;
9363 }
9364 break;
9365
9366 case IA64_RS_CFM:
9367 if (note != 2)
9368 {
9369 specs[count++] = tmpl;
9370 }
9371 else
9372 {
9373 /* Check if any of the registers accessed are in the rotating region.
9374 mov to/from pr accesses CFM only when qp_regno is in the rotating
9375 region */
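	  /* As tested below, the potentially rotating accesses are GRs above
	     r31 (within the declared md.rot.num_regs rotating registers),
	     FRs above f31, PRs above p15, and a qualifying predicate above
	     p15.  */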
9376 for (i = 0; i < NELEMS (idesc->operands); i++)
9377 {
9378 if (idesc->operands[i] == IA64_OPND_R1
9379 || idesc->operands[i] == IA64_OPND_R2
9380 || idesc->operands[i] == IA64_OPND_R3)
9381 {
9382 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9383 /* Assumes that md.rot.num_regs is always valid */
9384 if (md.rot.num_regs > 0
9385 && num > 31
9386 && num < 31 + md.rot.num_regs)
9387 {
9388 specs[count] = tmpl;
9389 specs[count++].specific = 0;
9390 }
9391 }
9392 else if (idesc->operands[i] == IA64_OPND_F1
9393 || idesc->operands[i] == IA64_OPND_F2
9394 || idesc->operands[i] == IA64_OPND_F3
9395 || idesc->operands[i] == IA64_OPND_F4)
9396 {
9397 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9398 if (num > 31)
9399 {
9400 specs[count] = tmpl;
9401 specs[count++].specific = 0;
9402 }
9403 }
9404 else if (idesc->operands[i] == IA64_OPND_P1
9405 || idesc->operands[i] == IA64_OPND_P2)
9406 {
9407 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9408 if (num > 15)
9409 {
9410 specs[count] = tmpl;
9411 specs[count++].specific = 0;
9412 }
9413 }
9414 }
9415 if (CURR_SLOT.qp_regno > 15)
9416 {
9417 specs[count] = tmpl;
9418 specs[count++].specific = 0;
9419 }
9420 }
9421 break;
9422
9423 /* This is the same as IA64_RS_PRr, except simplified to account for
9424 the fact that there is only one register. */
9425 case IA64_RS_PR63:
9426 if (note == 0)
9427 {
9428 specs[count++] = tmpl;
9429 }
9430 else if (note == 7)
9431 {
9432 valueT mask = 0;
9433 if (idesc->operands[2] == IA64_OPND_IMM17)
9434 mask = CURR_SLOT.opnd[2].X_add_number;
9435 if (mask & ((valueT) 1 << 63))
9436 specs[count++] = tmpl;
9437 }
9438 else if (note == 11)
9439 {
9440 if ((idesc->operands[0] == IA64_OPND_P1
9441 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9442 || (idesc->operands[1] == IA64_OPND_P2
9443 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9444 {
9445 specs[count++] = tmpl;
9446 }
9447 }
9448 else if (note == 12)
9449 {
9450 if (CURR_SLOT.qp_regno == 63)
9451 {
9452 specs[count++] = tmpl;
9453 }
9454 }
9455 else if (note == 1)
9456 {
9457 if (rsrc_write)
9458 {
9459 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9460 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9461 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9462 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9463
9464 if (p1 == 63
9465 && (idesc->operands[0] == IA64_OPND_P1
9466 || idesc->operands[0] == IA64_OPND_P2))
9467 {
9468 specs[count] = tmpl;
9469 specs[count++].cmp_type =
9470 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9471 }
9472 if (p2 == 63
9473 && (idesc->operands[1] == IA64_OPND_P1
9474 || idesc->operands[1] == IA64_OPND_P2))
9475 {
9476 specs[count] = tmpl;
9477 specs[count++].cmp_type =
9478 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9479 }
9480 }
9481 else
9482 {
9483 if (CURR_SLOT.qp_regno == 63)
9484 {
9485 specs[count++] = tmpl;
9486 }
9487 }
9488 }
9489 else
9490 {
9491 UNHANDLED;
9492 }
9493 break;
9494
9495 case IA64_RS_RSE:
9496 /* FIXME we can identify some individual RSE written resources, but RSE
9497 read resources have not yet been completely identified, so for now
9498 treat RSE as a single resource */
9499 if (startswith (idesc->name, "mov"))
9500 {
9501 if (rsrc_write)
9502 {
9503 if (idesc->operands[0] == IA64_OPND_AR3
9504 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9505 {
9506 specs[count++] = tmpl;
9507 }
9508 }
9509 else
9510 {
9511 if (idesc->operands[0] == IA64_OPND_AR3)
9512 {
9513 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9514 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9515 {
9516 specs[count++] = tmpl;
9517 }
9518 }
9519 else if (idesc->operands[1] == IA64_OPND_AR3)
9520 {
9521 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9522 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9523 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9524 {
9525 specs[count++] = tmpl;
9526 }
9527 }
9528 }
9529 }
9530 else
9531 {
9532 specs[count++] = tmpl;
9533 }
9534 break;
9535
9536 case IA64_RS_ANY:
9537 /* FIXME -- do any of these need to be non-specific? */
9538 specs[count++] = tmpl;
9539 break;
9540
9541 default:
9542 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9543 break;
9544 }
9545
9546 return count;
9547 }
9548
9549 /* Clear branch flags on marked resources. This breaks the link between the
9550 QP of the marking instruction and a subsequent branch on the same QP. */
9551
9552 static void
9553 clear_qp_branch_flag (valueT mask)
9554 {
9555 int i;
9556 for (i = 0; i < regdepslen; i++)
9557 {
9558 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9559 if ((bit & mask) != 0)
9560 {
9561 regdeps[i].link_to_qp_branch = 0;
9562 }
9563 }
9564 }
9565
9566 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9567 any mutexes which contain one of the PRs and create new ones when
9568 needed. */
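/* For example, if a mutex currently covers {p6, p7, p8} on this path and MASK
   is {p6, p7}, the old mutex is reduced to p8 alone (and therefore dropped)
   and a new {p6, p7} mutex is created.  */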
9569
9570 static int
9571 update_qp_mutex (valueT mask)
9572 {
9573 int i;
9574 int add = 0;
9575
9576 i = 0;
9577 while (i < qp_mutexeslen)
9578 {
9579 if ((qp_mutexes[i].prmask & mask) != 0)
9580 {
9581 /* If it destroys and creates the same mutex, do nothing. */
9582 if (qp_mutexes[i].prmask == mask
9583 && qp_mutexes[i].path == md.path)
9584 {
9585 i++;
9586 add = -1;
9587 }
9588 else
9589 {
9590 int keep = 0;
9591
9592 if (md.debug_dv)
9593 {
9594 fprintf (stderr, " Clearing mutex relation");
9595 print_prmask (qp_mutexes[i].prmask);
9596 fprintf (stderr, "\n");
9597 }
9598
9599 /* Deal with an old mutex containing 3 or more PRs only if
9600 the new mutex is on the same execution path as it.
9601
9602 FIXME: The 3+ mutex support is incomplete.
9603 dot_pred_rel () may be a better place to fix it. */
9604 if (qp_mutexes[i].path == md.path)
9605 {
9606 /* If it is a proper subset of the mutex, create a
9607 new mutex. */
9608 if (add == 0
9609 && (qp_mutexes[i].prmask & mask) == mask)
9610 add = 1;
9611
9612 qp_mutexes[i].prmask &= ~mask;
9613 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9614 {
9615 /* Keep the modified mutex if more than one
9616 PR is left. */
9617 keep = 1;
9618 i++;
9619 }
9620 }
9621
9622 if (keep == 0)
9623 /* Remove the mutex. */
9624 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9625 }
9626 }
9627 else
9628 ++i;
9629 }
9630
9631 if (add == 1)
9632 add_qp_mutex (mask);
9633
9634 return add;
9635 }
9636
9637 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9638
9639 Any changes to a PR clears the mutex relations which include that PR. */
9640
9641 static void
9642 clear_qp_mutex (valueT mask)
9643 {
9644 int i;
9645
9646 i = 0;
9647 while (i < qp_mutexeslen)
9648 {
9649 if ((qp_mutexes[i].prmask & mask) != 0)
9650 {
9651 if (md.debug_dv)
9652 {
9653 fprintf (stderr, " Clearing mutex relation");
9654 print_prmask (qp_mutexes[i].prmask);
9655 fprintf (stderr, "\n");
9656 }
9657 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9658 }
9659 else
9660 ++i;
9661 }
9662 }
9663
9664 /* Clear implies relations which contain PRs in the given masks.
9665 P1_MASK indicates the source of the implies relation, while P2_MASK
9666 indicates the implied PR. */
9667
9668 static void
9669 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9670 {
9671 int i;
9672
9673 i = 0;
9674 while (i < qp_implieslen)
9675 {
9676 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9677 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9678 {
9679 if (md.debug_dv)
9680 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9681 qp_implies[i].p1, qp_implies[i].p2);
9682 qp_implies[i] = qp_implies[--qp_implieslen];
9683 }
9684 else
9685 ++i;
9686 }
9687 }
9688
9689 /* Add the PRs specified to the list of implied relations. */
9690
9691 static void
9692 add_qp_imply (int p1, int p2)
9693 {
9694 valueT mask;
9695 valueT bit;
9696 int i;
9697
9698 /* p0 is not meaningful here. */
9699 if (p1 == 0 || p2 == 0)
9700 abort ();
9701
9702 if (p1 == p2)
9703 return;
9704
9705 /* If it exists already, ignore it. */
9706 for (i = 0; i < qp_implieslen; i++)
9707 {
9708 if (qp_implies[i].p1 == p1
9709 && qp_implies[i].p2 == p2
9710 && qp_implies[i].path == md.path
9711 && !qp_implies[i].p2_branched)
9712 return;
9713 }
9714
9715 if (qp_implieslen == qp_impliestotlen)
9716 {
9717 qp_impliestotlen += 20;
9718 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9719 }
9720 if (md.debug_dv)
9721 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9722 qp_implies[qp_implieslen].p1 = p1;
9723 qp_implies[qp_implieslen].p2 = p2;
9724 qp_implies[qp_implieslen].path = md.path;
9725 qp_implies[qp_implieslen++].p2_branched = 0;
9726
9727 /* Add in the implied transitive relations; for everything that p2 implies,
9728 make p1 imply that, too; for everything that implies p1, make it imply p2
9729 as well. */
9730 for (i = 0; i < qp_implieslen; i++)
9731 {
9732 if (qp_implies[i].p1 == p2)
9733 add_qp_imply (p1, qp_implies[i].p2);
9734 if (qp_implies[i].p2 == p1)
9735 add_qp_imply (qp_implies[i].p1, p2);
9736 }
9737 /* Add in mutex relations implied by this implies relation; for each mutex
9738 relation containing p2, duplicate it and replace p2 with p1. */
9739 bit = (valueT) 1 << p1;
9740 mask = (valueT) 1 << p2;
9741 for (i = 0; i < qp_mutexeslen; i++)
9742 {
9743 if (qp_mutexes[i].prmask & mask)
9744 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9745 }
9746 }
9747
9748 /* Add the PRs specified in the mask to the mutex list; this means that only
9749 one of the PRs can be true at any time. PR0 should never be included in
9750 the mask. */
9751
9752 static void
9753 add_qp_mutex (valueT mask)
9754 {
9755 if (mask & 0x1)
9756 abort ();
9757
9758 if (qp_mutexeslen == qp_mutexestotlen)
9759 {
9760 qp_mutexestotlen += 20;
9761 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9762 }
9763 if (md.debug_dv)
9764 {
9765 fprintf (stderr, " Registering mutex on");
9766 print_prmask (mask);
9767 fprintf (stderr, "\n");
9768 }
9769 qp_mutexes[qp_mutexeslen].path = md.path;
9770 qp_mutexes[qp_mutexeslen++].prmask = mask;
9771 }
9772
9773 static int
9774 has_suffix_p (const char *name, const char *suffix)
9775 {
9776 size_t namelen = strlen (name);
9777 size_t sufflen = strlen (suffix);
9778
9779 if (namelen <= sufflen)
9780 return 0;
9781 return strcmp (name + namelen - sufflen, suffix) == 0;
9782 }
9783
9784 static void
9785 clear_register_values (void)
9786 {
9787 int i;
9788 if (md.debug_dv)
9789 fprintf (stderr, " Clearing register values\n");
9790 for (i = 1; i < NELEMS (gr_values); i++)
9791 gr_values[i].known = 0;
9792 }
9793
9794 /* Keep track of register values/changes which affect DV tracking.
9795
9796 Optimization note: we should add a flag to classes of insns that otherwise
9797 require examining a group of strings to identify them. */
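
/* For example, "mov r4 = 2" records gr_values[4] as known with value 2, which
   later lets specify_resource () resolve an indirect access such as
   "mov r5 = pmc[r4]" to the specific PMC register 2 instead of a non-specific
   reference.  */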
9798
9799 static void
9800 note_register_values (struct ia64_opcode *idesc)
9801 {
9802 valueT qp_changemask = 0;
9803 int i;
9804
9805 /* Invalidate values for registers being written to. */
9806 for (i = 0; i < idesc->num_outputs; i++)
9807 {
9808 if (idesc->operands[i] == IA64_OPND_R1
9809 || idesc->operands[i] == IA64_OPND_R2
9810 || idesc->operands[i] == IA64_OPND_R3)
9811 {
9812 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9813 if (regno > 0 && regno < NELEMS (gr_values))
9814 gr_values[regno].known = 0;
9815 }
9816 else if (idesc->operands[i] == IA64_OPND_R3_2)
9817 {
9818 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9819 if (regno > 0 && regno < 4)
9820 gr_values[regno].known = 0;
9821 }
9822 else if (idesc->operands[i] == IA64_OPND_P1
9823 || idesc->operands[i] == IA64_OPND_P2)
9824 {
9825 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9826 qp_changemask |= (valueT) 1 << regno;
9827 }
9828 else if (idesc->operands[i] == IA64_OPND_PR)
9829 {
9830 if (idesc->operands[2] & (valueT) 0x10000)
9831 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9832 else
9833 qp_changemask = idesc->operands[2];
9834 break;
9835 }
9836 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9837 {
9838 if (idesc->operands[1] & ((valueT) 1 << 43))
9839 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9840 else
9841 qp_changemask = idesc->operands[1];
9842 qp_changemask &= ~(valueT) 0xFFFF;
9843 break;
9844 }
9845 }
9846
9847 /* Always clear qp branch flags on any PR change. */
9848 /* FIXME there may be exceptions for certain compares. */
9849 clear_qp_branch_flag (qp_changemask);
9850
9851 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9852 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9853 {
9854 qp_changemask |= ~(valueT) 0xFFFF;
9855 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9856 {
9857 for (i = 32; i < 32 + md.rot.num_regs; i++)
9858 gr_values[i].known = 0;
9859 }
9860 clear_qp_mutex (qp_changemask);
9861 clear_qp_implies (qp_changemask, qp_changemask);
9862 }
9863 /* After a call, all register values are undefined, except those marked
9864 as "safe". */
9865 else if (startswith (idesc->name, "br.call")
9866 || startswith (idesc->name, "brl.call"))
9867 {
9868 /* FIXME keep GR values which are marked as "safe_across_calls" */
9869 clear_register_values ();
9870 clear_qp_mutex (~qp_safe_across_calls);
9871 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9872 clear_qp_branch_flag (~qp_safe_across_calls);
9873 }
9874 else if (is_interruption_or_rfi (idesc)
9875 || is_taken_branch (idesc))
9876 {
9877 clear_register_values ();
9878 clear_qp_mutex (~(valueT) 0);
9879 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9880 }
9881 /* Look for mutex and implies relations. */
9882 else if ((idesc->operands[0] == IA64_OPND_P1
9883 || idesc->operands[0] == IA64_OPND_P2)
9884 && (idesc->operands[1] == IA64_OPND_P1
9885 || idesc->operands[1] == IA64_OPND_P2))
9886 {
9887 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9888 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9889 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9890 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9891
9892 /* If both PRs are PR0, we can't really do anything. */
9893 if (p1 == 0 && p2 == 0)
9894 {
9895 if (md.debug_dv)
9896 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9897 }
9898 /* In general, clear mutexes and implies which include P1 or P2,
9899 with the following exceptions. */
9900 else if (has_suffix_p (idesc->name, ".or.andcm")
9901 || has_suffix_p (idesc->name, ".and.orcm"))
9902 {
9903 clear_qp_implies (p2mask, p1mask);
9904 }
9905 else if (has_suffix_p (idesc->name, ".andcm")
9906 || has_suffix_p (idesc->name, ".and"))
9907 {
9908 clear_qp_implies (0, p1mask | p2mask);
9909 }
9910 else if (has_suffix_p (idesc->name, ".orcm")
9911 || has_suffix_p (idesc->name, ".or"))
9912 {
9913 clear_qp_mutex (p1mask | p2mask);
9914 clear_qp_implies (p1mask | p2mask, 0);
9915 }
9916 else
9917 {
9918 int added = 0;
9919
9920 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9921
9922 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9923 if (p1 == 0 || p2 == 0)
9924 clear_qp_mutex (p1mask | p2mask);
9925 else
9926 added = update_qp_mutex (p1mask | p2mask);
9927
9928 if (CURR_SLOT.qp_regno == 0
9929 || has_suffix_p (idesc->name, ".unc"))
9930 {
9931 if (added == 0 && p1 && p2)
9932 add_qp_mutex (p1mask | p2mask);
9933 if (CURR_SLOT.qp_regno != 0)
9934 {
9935 if (p1)
9936 add_qp_imply (p1, CURR_SLOT.qp_regno);
9937 if (p2)
9938 add_qp_imply (p2, CURR_SLOT.qp_regno);
9939 }
9940 }
9941 }
9942 }
9943 /* Look for mov imm insns into GRs. */
9944 else if (idesc->operands[0] == IA64_OPND_R1
9945 && (idesc->operands[1] == IA64_OPND_IMM22
9946 || idesc->operands[1] == IA64_OPND_IMMU64)
9947 && CURR_SLOT.opnd[1].X_op == O_constant
9948 && (strcmp (idesc->name, "mov") == 0
9949 || strcmp (idesc->name, "movl") == 0))
9950 {
9951 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9952 if (regno > 0 && regno < NELEMS (gr_values))
9953 {
9954 gr_values[regno].known = 1;
9955 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9956 gr_values[regno].path = md.path;
9957 if (md.debug_dv)
9958 fprintf (stderr, " Know gr%d = %" PRIx64 "\n",
9959 regno, gr_values[regno].value);
9960 }
9961 }
9962 /* Look for dep.z imm insns. */
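  /* For example, "dep.z r5 = 3, 4, 2" yields (3 & ((1 << 2) - 1)) << 4 = 0x30,
     so gr_values[5] becomes known with value 0x30.  */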
9963 else if (idesc->operands[0] == IA64_OPND_R1
9964 && idesc->operands[1] == IA64_OPND_IMM8
9965 && strcmp (idesc->name, "dep.z") == 0)
9966 {
9967 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9968 if (regno > 0 && regno < NELEMS (gr_values))
9969 {
9970 valueT value = CURR_SLOT.opnd[1].X_add_number;
9971
9972 if (CURR_SLOT.opnd[3].X_add_number < 64)
9973 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9974 value <<= CURR_SLOT.opnd[2].X_add_number;
9975 gr_values[regno].known = 1;
9976 gr_values[regno].value = value;
9977 gr_values[regno].path = md.path;
9978 if (md.debug_dv)
9979 fprintf (stderr, " Know gr%d = %" PRIx64 "\n",
9980 regno, gr_values[regno].value);
9981 }
9982 }
9983 else
9984 {
9985 clear_qp_mutex (qp_changemask);
9986 clear_qp_implies (qp_changemask, qp_changemask);
9987 }
9988 }
9989
9990 /* Return whether the given predicate registers are currently mutex. */
9991
9992 static int
9993 qp_mutex (int p1, int p2, int path)
9994 {
9995 int i;
9996 valueT mask;
9997
9998 if (p1 != p2)
9999 {
10000 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10001 for (i = 0; i < qp_mutexeslen; i++)
10002 {
10003 if (qp_mutexes[i].path >= path
10004 && (qp_mutexes[i].prmask & mask) == mask)
10005 return 1;
10006 }
10007 }
10008 return 0;
10009 }
10010
10011 /* Return whether the given resource is in the given insn's list of chks.
10012 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10013 conflict. */
10014
10015 static int
10016 resources_match (struct rsrc *rs,
10017 struct ia64_opcode *idesc,
10018 int note,
10019 int qp_regno,
10020 int path)
10021 {
10022 struct rsrc specs[MAX_SPECS];
10023 int count;
10024
10025 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10026 we don't need to check. One exception is note 11, which indicates that
10027 target predicates are written regardless of PR[qp]. */
10028 if (qp_mutex (rs->qp_regno, qp_regno, path)
10029 && note != 11)
10030 return 0;
10031
10032 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10033 while (count-- > 0)
10034 {
10035 /* UNAT checking is a bit more specific than checking for other resources. */
10036 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10037 && specs[count].mem_offset.hint
10038 && rs->mem_offset.hint)
10039 {
10040 if (rs->mem_offset.base == specs[count].mem_offset.base)
10041 {
10042 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10043 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10044 return 1;
10045 else
10046 continue;
10047 }
10048 }
10049
10050 /* Skip apparent PR write conflicts where both writes are an AND or both
10051 writes are an OR. */
10052 if (rs->dependency->specifier == IA64_RS_PR
10053 || rs->dependency->specifier == IA64_RS_PRr
10054 || rs->dependency->specifier == IA64_RS_PR63)
10055 {
10056 if (specs[count].cmp_type != CMP_NONE
10057 && specs[count].cmp_type == rs->cmp_type)
10058 {
10059 if (md.debug_dv)
10060 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10061 dv_mode[rs->dependency->mode],
10062 rs->dependency->specifier != IA64_RS_PR63 ?
10063 specs[count].index : 63);
10064 continue;
10065 }
10066 if (md.debug_dv)
10067 fprintf (stderr,
10068 " %s on parallel compare conflict %s vs %s on PR%d\n",
10069 dv_mode[rs->dependency->mode],
10070 dv_cmp_type[rs->cmp_type],
10071 dv_cmp_type[specs[count].cmp_type],
10072 rs->dependency->specifier != IA64_RS_PR63 ?
10073 specs[count].index : 63);
10074
10075 }
10076
10077 /* If either resource is not specific, conservatively assume a conflict
10078 */
10079 if (!specs[count].specific || !rs->specific)
10080 return 2;
10081 else if (specs[count].index == rs->index)
10082 return 1;
10083 }
10084
10085 return 0;
10086 }
10087
10088 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10089 insert a stop to create the break. Update all resource dependencies
10090 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10091 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10092 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10093 instruction. */
10094
10095 static void
10096 insn_group_break (int insert_stop, int qp_regno, int save_current)
10097 {
10098 int i;
10099
10100 if (insert_stop && md.num_slots_in_use > 0)
10101 PREV_SLOT.end_of_insn_group = 1;
10102
10103 if (md.debug_dv)
10104 {
10105 fprintf (stderr, " Insn group break%s",
10106 (insert_stop ? " (w/stop)" : ""));
10107 if (qp_regno != 0)
10108 fprintf (stderr, " effective for QP=%d", qp_regno);
10109 fprintf (stderr, "\n");
10110 }
10111
10112 i = 0;
10113 while (i < regdepslen)
10114 {
10115 const struct ia64_dependency *dep = regdeps[i].dependency;
10116
10117 if (qp_regno != 0
10118 && regdeps[i].qp_regno != qp_regno)
10119 {
10120 ++i;
10121 continue;
10122 }
10123
10124 if (save_current
10125 && CURR_SLOT.src_file == regdeps[i].file
10126 && CURR_SLOT.src_line == regdeps[i].line)
10127 {
10128 ++i;
10129 continue;
10130 }
10131
10132 /* clear dependencies which are automatically cleared by a stop, or
10133 those that have reached the appropriate state of insn serialization */
10134 if (dep->semantics == IA64_DVS_IMPLIED
10135 || dep->semantics == IA64_DVS_IMPLIEDF
10136 || regdeps[i].insn_srlz == STATE_SRLZ)
10137 {
10138 print_dependency ("Removing", i);
10139 regdeps[i] = regdeps[--regdepslen];
10140 }
10141 else
10142 {
10143 if (dep->semantics == IA64_DVS_DATA
10144 || dep->semantics == IA64_DVS_INSTR
10145 || dep->semantics == IA64_DVS_SPECIFIC)
10146 {
10147 if (regdeps[i].insn_srlz == STATE_NONE)
10148 regdeps[i].insn_srlz = STATE_STOP;
10149 if (regdeps[i].data_srlz == STATE_NONE)
10150 regdeps[i].data_srlz = STATE_STOP;
10151 }
10152 ++i;
10153 }
10154 }
10155 }
10156
10157 /* Add the given resource usage spec to the list of active dependencies. */
10158
10159 static void
10160 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10161 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10162 struct rsrc *spec,
10163 int depind,
10164 int path)
10165 {
10166 if (regdepslen == regdepstotlen)
10167 {
10168 regdepstotlen += 20;
10169 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10170 }
10171
10172 regdeps[regdepslen] = *spec;
10173 regdeps[regdepslen].depind = depind;
10174 regdeps[regdepslen].path = path;
10175 regdeps[regdepslen].file = CURR_SLOT.src_file;
10176 regdeps[regdepslen].line = CURR_SLOT.src_line;
10177
10178 print_dependency ("Adding", regdepslen);
10179
10180 ++regdepslen;
10181 }
10182
10183 static void
10184 print_dependency (const char *action, int depind)
10185 {
10186 if (md.debug_dv)
10187 {
10188 fprintf (stderr, " %s %s '%s'",
10189 action, dv_mode[(regdeps[depind].dependency)->mode],
10190 (regdeps[depind].dependency)->name);
10191 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10192 fprintf (stderr, " (%d)", regdeps[depind].index);
10193 if (regdeps[depind].mem_offset.hint)
10194 fprintf (stderr, " %" PRIx64 "+%" PRIx64,
10195 regdeps[depind].mem_offset.base,
10196 regdeps[depind].mem_offset.offset);
10197 fprintf (stderr, "\n");
10198 }
10199 }
10200
10201 static void
10202 instruction_serialization (void)
10203 {
10204 int i;
10205 if (md.debug_dv)
10206 fprintf (stderr, " Instruction serialization\n");
10207 for (i = 0; i < regdepslen; i++)
10208 if (regdeps[i].insn_srlz == STATE_STOP)
10209 regdeps[i].insn_srlz = STATE_SRLZ;
10210 }
10211
10212 static void
10213 data_serialization (void)
10214 {
10215 int i = 0;
10216 if (md.debug_dv)
10217 fprintf (stderr, " Data serialization\n");
10218 while (i < regdepslen)
10219 {
10220 if (regdeps[i].data_srlz == STATE_STOP
10221 /* Note: as of 991210, all "other" dependencies are cleared by a
10222 data serialization. This might change with new tables */
10223 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10224 {
10225 print_dependency ("Removing", i);
10226 regdeps[i] = regdeps[--regdepslen];
10227 }
10228 else
10229 ++i;
10230 }
10231 }
10232
10233 /* Insert stops and serializations as needed to avoid DVs. */
10234
10235 static void
10236 remove_marked_resource (struct rsrc *rs)
10237 {
10238 switch (rs->dependency->semantics)
10239 {
10240 case IA64_DVS_SPECIFIC:
10241 if (md.debug_dv)
10242 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10243 /* Fall through. */
10244 case IA64_DVS_INSTR:
10245 if (md.debug_dv)
10246 fprintf (stderr, "Inserting instr serialization\n");
10247 if (rs->insn_srlz < STATE_STOP)
10248 insn_group_break (1, 0, 0);
10249 if (rs->insn_srlz < STATE_SRLZ)
10250 {
10251 struct slot oldslot = CURR_SLOT;
10252 /* Manually jam a srlz.i insn into the stream */
10253 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10254 CURR_SLOT.user_template = -1;
10255 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10256 instruction_serialization ();
10257 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10258 if (++md.num_slots_in_use >= NUM_SLOTS)
10259 emit_one_bundle ();
10260 CURR_SLOT = oldslot;
10261 }
10262 insn_group_break (1, 0, 0);
10263 break;
10264 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10265 "other" types of DV are eliminated
10266 by a data serialization */
10267 case IA64_DVS_DATA:
10268 if (md.debug_dv)
10269 fprintf (stderr, "Inserting data serialization\n");
10270 if (rs->data_srlz < STATE_STOP)
10271 insn_group_break (1, 0, 0);
10272 {
10273 struct slot oldslot = CURR_SLOT;
10274 /* Manually jam a srlz.d insn into the stream */
10275 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10276 CURR_SLOT.user_template = -1;
10277 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10278 data_serialization ();
10279 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10280 if (++md.num_slots_in_use >= NUM_SLOTS)
10281 emit_one_bundle ();
10282 CURR_SLOT = oldslot;
10283 }
10284 break;
10285 case IA64_DVS_IMPLIED:
10286 case IA64_DVS_IMPLIEDF:
10287 if (md.debug_dv)
10288 fprintf (stderr, "Inserting stop\n");
10289 insn_group_break (1, 0, 0);
10290 break;
10291 default:
10292 break;
10293 }
10294 }
10295
10296 /* Check the resources used by the given opcode against the current dependency
10297 list.
10298
10299 The check is run once for each execution path encountered. In this case,
10300 a unique execution path is the sequence of instructions following a code
10301 entry point, e.g. the following has three execution paths, one starting
10302 at L0, one at L1, and one at L2.
10303
10304 L0: nop
10305 L1: add
10306 L2: add
10307 br.ret
10308 */
10309
10310 static void
10311 check_dependencies (struct ia64_opcode *idesc)
10312 {
10313 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10314 int path;
10315 int i;
10316
10317 /* Note that the number of marked resources may change within the
10318 loop if in auto mode. */
10319 i = 0;
10320 while (i < regdepslen)
10321 {
10322 struct rsrc *rs = &regdeps[i];
10323 const struct ia64_dependency *dep = rs->dependency;
10324 int chkind;
10325 int note;
10326 int start_over = 0;
10327
10328 if (dep->semantics == IA64_DVS_NONE
10329 || (chkind = depends_on (rs->depind, idesc)) == -1)
10330 {
10331 ++i;
10332 continue;
10333 }
10334
10335 note = NOTE (opdeps->chks[chkind]);
10336
10337 /* Check this resource against each execution path seen thus far. */
10338 for (path = 0; path <= md.path; path++)
10339 {
10340 int matchtype;
10341
10342 /* If the dependency wasn't on the path being checked, ignore it. */
10343 if (rs->path < path)
10344 continue;
10345
10346 /* If the QP for this insn implies a QP which has branched, don't
10347 bother checking. Ed. NOTE: I don't think this check is terribly
10348 useful; what's the point of generating code which will only be
10349 reached if its QP is zero?
10350 This code was specifically inserted to handle the following code,
10351 based on notes from Intel's DV checking code, where p1 implies p2.
10352
10353 mov r4 = 2
10354 (p2) br.cond L
10355 (p1) mov r4 = 7
10356 */
10357 if (CURR_SLOT.qp_regno != 0)
10358 {
10359 int skip = 0;
10360 int implies;
10361 for (implies = 0; implies < qp_implieslen; implies++)
10362 {
10363 if (qp_implies[implies].path >= path
10364 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10365 && qp_implies[implies].p2_branched)
10366 {
10367 skip = 1;
10368 break;
10369 }
10370 }
10371 if (skip)
10372 continue;
10373 }
10374
10375 if ((matchtype = resources_match (rs, idesc, note,
10376 CURR_SLOT.qp_regno, path)) != 0)
10377 {
10378 char msg[1024];
10379 char pathmsg[256] = "";
10380 char indexmsg[256] = "";
10381 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10382
10383 if (path != 0)
10384 snprintf (pathmsg, sizeof (pathmsg),
10385 " when entry is at label '%s'",
10386 md.entry_labels[path - 1]);
10387 if (matchtype == 1 && rs->index >= 0)
10388 snprintf (indexmsg, sizeof (indexmsg),
10389 ", specific resource number is %d",
10390 rs->index);
10391 snprintf (msg, sizeof (msg),
10392 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10393 idesc->name,
10394 (certain ? "violates" : "may violate"),
10395 dv_mode[dep->mode], dep->name,
10396 dv_sem[dep->semantics],
10397 pathmsg, indexmsg);
10398
10399 if (md.explicit_mode)
10400 {
10401 as_warn ("%s", msg);
10402 if (path < md.path)
10403 as_warn (_("Only the first path encountering the conflict is reported"));
10404 as_warn_where (rs->file, rs->line,
10405 _("This is the location of the conflicting usage"));
10406 /* Don't bother checking other paths, to avoid duplicating
10407 the same warning */
10408 break;
10409 }
10410 else
10411 {
10412 if (md.debug_dv)
10413 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10414
10415 remove_marked_resource (rs);
10416
10417 /* since the set of dependencies has changed, start over */
10418 /* FIXME -- since we're removing dvs as we go, we
10419 probably don't really need to start over... */
10420 start_over = 1;
10421 break;
10422 }
10423 }
10424 }
10425 if (start_over)
10426 i = 0;
10427 else
10428 ++i;
10429 }
10430 }
10431
10432 /* Register new dependencies based on the given opcode. */
10433
10434 static void
10435 mark_resources (struct ia64_opcode *idesc)
10436 {
10437 int i;
10438 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10439 int add_only_qp_reads = 0;
10440
10441 /* A conditional branch only uses its resources if it is taken; if it is
10442 taken, we stop following that path, so in the not-taken case register
10443 only QP reads. The other branch types effectively *always* write their
10444 resources. */
10445 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10446 {
10447 add_only_qp_reads = 1;
10448 }
10449
10450 if (md.debug_dv)
10451 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10452
10453 for (i = 0; i < opdeps->nregs; i++)
10454 {
10455 const struct ia64_dependency *dep;
10456 struct rsrc specs[MAX_SPECS];
10457 int note;
10458 int path;
10459 int count;
10460
10461 dep = ia64_find_dependency (opdeps->regs[i]);
10462 note = NOTE (opdeps->regs[i]);
10463
10464 if (add_only_qp_reads
10465 && !(dep->mode == IA64_DV_WAR
10466 && (dep->specifier == IA64_RS_PR
10467 || dep->specifier == IA64_RS_PRr
10468 || dep->specifier == IA64_RS_PR63)))
10469 continue;
10470
10471 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10472
10473 while (count-- > 0)
10474 {
10475 mark_resource (idesc, dep, &specs[count],
10476 DEP (opdeps->regs[i]), md.path);
10477 }
10478
10479 /* The execution path may affect register values, which may in turn
10480 affect which indirect-access resources are accessed. */
10481 switch (dep->specifier)
10482 {
10483 default:
10484 break;
10485 case IA64_RS_CPUID:
10486 case IA64_RS_DBR:
10487 case IA64_RS_IBR:
10488 case IA64_RS_MSR:
10489 case IA64_RS_PKR:
10490 case IA64_RS_PMC:
10491 case IA64_RS_PMD:
10492 case IA64_RS_RR:
10493 for (path = 0; path < md.path; path++)
10494 {
10495 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10496 while (count-- > 0)
10497 mark_resource (idesc, dep, &specs[count],
10498 DEP (opdeps->regs[i]), path);
10499 }
10500 break;
10501 }
10502 }
10503 }
10504
10505 /* Remove dependencies when they no longer apply. */
10506
10507 static void
10508 update_dependencies (struct ia64_opcode *idesc)
10509 {
10510 int i;
10511
10512 if (strcmp (idesc->name, "srlz.i") == 0)
10513 {
10514 instruction_serialization ();
10515 }
10516 else if (strcmp (idesc->name, "srlz.d") == 0)
10517 {
10518 data_serialization ();
10519 }
10520 else if (is_interruption_or_rfi (idesc)
10521 || is_taken_branch (idesc))
10522 {
10523 /* Although technically the taken branch doesn't clear dependencies
10524 which require a srlz.[id], we don't follow the branch; the next
10525 instruction is assumed to start with a clean slate. */
10526 regdepslen = 0;
10527 md.path = 0;
10528 }
10529 else if (is_conditional_branch (idesc)
10530 && CURR_SLOT.qp_regno != 0)
10531 {
10532 int is_call = strstr (idesc->name, ".call") != NULL;
10533
10534 for (i = 0; i < qp_implieslen; i++)
10535 {
10536 /* If the conditional branch's predicate is implied by the predicate
10537 in an existing dependency, remove that dependency. */
10538 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10539 {
10540 int depind = 0;
10541 /* Note that this implied predicate takes a branch so that if
10542 a later insn generates a DV but its predicate implies this
10543 one, we can avoid the false DV warning. */
10544 qp_implies[i].p2_branched = 1;
10545 while (depind < regdepslen)
10546 {
10547 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10548 {
10549 print_dependency ("Removing", depind);
10550 regdeps[depind] = regdeps[--regdepslen];
10551 }
10552 else
10553 ++depind;
10554 }
10555 }
10556 }
10557 /* Any marked resources which have this same predicate should be
10558 cleared, provided that the QP hasn't been modified between the
10559 marking instruction and the branch. */
10560 if (is_call)
10561 {
10562 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10563 }
10564 else
10565 {
10566 i = 0;
10567 while (i < regdepslen)
10568 {
10569 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10570 && regdeps[i].link_to_qp_branch
10571 && (regdeps[i].file != CURR_SLOT.src_file
10572 || regdeps[i].line != CURR_SLOT.src_line))
10573 {
10574 /* Treat like a taken branch */
10575 print_dependency ("Removing", i);
10576 regdeps[i] = regdeps[--regdepslen];
10577 }
10578 else
10579 ++i;
10580 }
10581 }
10582 }
10583 }
10584
10585 /* Examine the current instruction for dependency violations. */
10586
10587 static int
10588 check_dv (struct ia64_opcode *idesc)
10589 {
10590 if (md.debug_dv)
10591 {
10592 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10593 idesc->name, CURR_SLOT.src_line,
10594 idesc->dependencies->nchks,
10595 idesc->dependencies->nregs);
10596 }
10597
10598 /* Look through the list of currently marked resources; if the current
10599 instruction has the dependency in its chks list which uses that resource,
10600 check against the specific resources used. */
10601 check_dependencies (idesc);
10602
10603 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10604 then add them to the list of marked resources. */
10605 mark_resources (idesc);
10606
10607 /* There are several types of dependency semantics, and each has its own
10608 requirements for being cleared:
10609
10610 Instruction serialization (insns separated by interruption, rfi, or
10611 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10612
10613 Data serialization (instruction serialization, or writer + srlz.d +
10614 reader, where writer and srlz.d are in separate groups) clears
10615 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10616 always be the case).
10617
10618 Instruction group break (groups separated by stop, taken branch,
10619 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10620 */
10621 update_dependencies (idesc);
10622
10623 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10624 warning. Keep track of as many as possible that are useful. */
10625 note_register_values (idesc);
10626
10627 /* We don't need or want this anymore. */
10628 md.mem_offset.hint = 0;
10629
10630 return 0;
10631 }
10632
10633 /* Translate one line of assembly.  Pseudo ops and labels do not appear
10634 here.  */
10635 void
10636 md_assemble (char *str)
10637 {
10638 char *saved_input_line_pointer, *temp;
10639 const char *mnemonic;
10640 const struct pseudo_opcode *pdesc;
10641 struct ia64_opcode *idesc;
10642 unsigned char qp_regno;
10643 unsigned int flags;
10644 int ch;
10645
10646 saved_input_line_pointer = input_line_pointer;
10647 input_line_pointer = str;
10648
10649 /* extract the opcode (mnemonic): */
10650
10651 ch = get_symbol_name (&temp);
10652 mnemonic = temp;
10653 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10654 if (pdesc)
10655 {
10656 (void) restore_line_pointer (ch);
10657 (*pdesc->handler) (pdesc->arg);
10658 goto done;
10659 }
10660
10661 /* Find the instruction descriptor matching the arguments. */
10662
10663 idesc = ia64_find_opcode (mnemonic);
10664 (void) restore_line_pointer (ch);
10665 if (!idesc)
10666 {
10667 as_bad (_("Unknown opcode `%s'"), mnemonic);
10668 goto done;
10669 }
10670
10671 idesc = parse_operands (idesc);
10672 if (!idesc)
10673 goto done;
10674
10675 /* Handle the dynamic ops we can handle now: */
10676 if (idesc->type == IA64_TYPE_DYN)
10677 {
10678 if (strcmp (idesc->name, "add") == 0)
10679 {
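/* The generic 'add' pseudo must become a real opcode: addl accepts a
   22-bit immediate but encodes its GR source in a 2-bit field (r0-r3
   only), while adds takes any GR but only a 14-bit immediate, so prefer
   addl whenever the register operand allows it.  */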
10680 if (CURR_SLOT.opnd[2].X_op == O_register
10681 && CURR_SLOT.opnd[2].X_add_number < 4)
10682 mnemonic = "addl";
10683 else
10684 mnemonic = "adds";
10685 ia64_free_opcode (idesc);
10686 idesc = ia64_find_opcode (mnemonic);
10687 }
10688 else if (strcmp (idesc->name, "mov") == 0)
10689 {
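/* Some application registers can be accessed only from the I-unit and
   others only from the M-unit, so a generic 'mov' to or from an AR has
   to be narrowed to mov.i or mov.m accordingly.  */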
10690 enum ia64_opnd opnd1, opnd2;
10691 int rop;
10692
10693 opnd1 = idesc->operands[0];
10694 opnd2 = idesc->operands[1];
10695 if (opnd1 == IA64_OPND_AR3)
10696 rop = 0;
10697 else if (opnd2 == IA64_OPND_AR3)
10698 rop = 1;
10699 else
10700 abort ();
10701 if (CURR_SLOT.opnd[rop].X_op == O_register)
10702 {
10703 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10704 mnemonic = "mov.i";
10705 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10706 mnemonic = "mov.m";
10707 else
10708 rop = -1;
10709 }
10710 else
10711 abort ();
10712 if (rop >= 0)
10713 {
10714 ia64_free_opcode (idesc);
10715 idesc = ia64_find_opcode (mnemonic);
10716 while (idesc != NULL
10717 && (idesc->operands[0] != opnd1
10718 || idesc->operands[1] != opnd2))
10719 idesc = get_next_opcode (idesc);
10720 }
10721 }
10722 }
10723 else if (strcmp (idesc->name, "mov.i") == 0
10724 || strcmp (idesc->name, "mov.m") == 0)
10725 {
10726 enum ia64_opnd opnd1, opnd2;
10727 int rop;
10728
10729 opnd1 = idesc->operands[0];
10730 opnd2 = idesc->operands[1];
10731 if (opnd1 == IA64_OPND_AR3)
10732 rop = 0;
10733 else if (opnd2 == IA64_OPND_AR3)
10734 rop = 1;
10735 else
10736 abort ();
10737 if (CURR_SLOT.opnd[rop].X_op == O_register)
10738 {
10739 char unit = 'a';
10740 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10741 unit = 'i';
10742 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10743 unit = 'm';
10744 if (unit != 'a' && unit != idesc->name [4])
10745 as_bad (_("AR %d can only be accessed by %c-unit"),
10746 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10747 TOUPPER (unit));
10748 }
10749 }
10750 else if (strcmp (idesc->name, "hint.b") == 0)
10751 {
10752 switch (md.hint_b)
10753 {
10754 case hint_b_ok:
10755 break;
10756 case hint_b_warning:
10757 as_warn (_("hint.b may be treated as nop"));
10758 break;
10759 case hint_b_error:
10760 as_bad (_("hint.b shouldn't be used"));
10761 break;
10762 }
10763 }
10764
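/* The qualifying predicate parsed into md.qp (the (pN) prefix) is reduced
   to a plain predicate register number here; p0, the default, is hardwired
   to 1, so the instruction always executes.  */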
10765 qp_regno = 0;
10766 if (md.qp.X_op == O_register)
10767 {
10768 qp_regno = md.qp.X_add_number - REG_P;
10769 md.qp.X_op = O_absent;
10770 }
10771
10772 flags = idesc->flags;
10773
10774 if ((flags & IA64_OPCODE_FIRST) != 0)
10775 {
10776 /* The alignment frag has to end with a stop bit only if the
10777 next instruction after the alignment directive has to be
10778 the first instruction in an instruction group. */
10779 if (align_frag)
10780 {
10781 while (align_frag->fr_type != rs_align_code)
10782 {
10783 align_frag = align_frag->fr_next;
10784 if (!align_frag)
10785 break;
10786 }
10787 /* align_frag can be NULL if there are directives in
10788 between. */
10789 if (align_frag && align_frag->fr_next == frag_now)
10790 align_frag->tc_frag_data = 1;
10791 }
10792
10793 insn_group_break (1, 0, 0);
10794 }
10795 align_frag = NULL;
10796
10797 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10798 {
10799 as_bad (_("`%s' cannot be predicated"), idesc->name);
10800 goto done;
10801 }
10802
10803 /* Build the instruction. */
10804 CURR_SLOT.qp_regno = qp_regno;
10805 CURR_SLOT.idesc = idesc;
10806 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10807 dwarf2_where (&CURR_SLOT.debug_line);
10808 dwarf2_consume_line_info ();
10809
10810 /* Add unwind entries, if there are any. */
10811 if (unwind.current_entry)
10812 {
10813 CURR_SLOT.unwind_record = unwind.current_entry;
10814 unwind.current_entry = NULL;
10815 }
10816 if (unwind.pending_saves)
10817 {
10818 if (unwind.pending_saves->next)
10819 {
10820 /* Attach the next pending save to the next slot so that its
10821 slot number will get set correctly. */
10822 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10823 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10824 }
10825 else
10826 unwind.pending_saves = NULL;
10827 }
10828 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10829 unwind.insn = 1;
10830
10831 /* Check for dependency violations. */
10832 if (md.detect_dv)
10833 check_dv (idesc);
10834
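/* Queue the finished instruction; once enough slots are pending,
   emit_one_bundle writes them out as a 16-byte bundle.  */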
10835 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10836 if (++md.num_slots_in_use >= NUM_SLOTS)
10837 emit_one_bundle ();
10838
10839 if ((flags & IA64_OPCODE_LAST) != 0)
10840 insn_group_break (1, 0, 0);
10841
10842 md.last_text_seg = now_seg;
10843 md.last_text_subseg = now_subseg;
10844
10845 done:
10846 input_line_pointer = saved_input_line_pointer;
10847 }
10848
10849 /* Called when symbol NAME cannot be found in the symbol table.
10850 Should be used for dynamic valued symbols only. */
10851
10852 symbolS *
10853 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10854 {
10855 return 0;
10856 }
10857
10858 /* Called for any expression that cannot be recognized.  When the
10859 function is called, `input_line_pointer' will point to the start of
10860 the expression. */
10861
10862 void
10863 md_operand (expressionS *e)
10864 {
10865 switch (*input_line_pointer)
10866 {
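/* A '[' introduces an index into one of the indirect register files
   (for example dbr[r2] or pmc[r3]; the names here are only illustrative).
   Only a general register is accepted as the index.  */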
10867 case '[':
10868 ++input_line_pointer;
10869 expression_and_evaluate (e);
10870 if (*input_line_pointer != ']')
10871 {
10872 as_bad (_("Closing bracket missing"));
10873 goto err;
10874 }
10875 else
10876 {
10877 if (e->X_op != O_register
10878 || e->X_add_number < REG_GR
10879 || e->X_add_number > REG_GR + 127)
10880 {
10881 as_bad (_("Index must be a general register"));
10882 e->X_add_number = REG_GR;
10883 }
10884
10885 ++input_line_pointer;
10886 e->X_op = O_index;
10887 }
10888 break;
10889
10890 default:
10891 break;
10892 }
10893 return;
10894
10895 err:
10896 ignore_rest_of_line ();
10897 }
10898
10899 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10900 a section symbol plus some offset.  For relocs involving @fptr()
10901 directives, we don't want such adjustments since we need to have the
10902 original symbol's name in the reloc. */
10903 int
10904 ia64_fix_adjustable (fixS *fix)
10905 {
10906 /* Prevent all adjustments to global symbols */
10907 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10908 return 0;
10909
10910 switch (fix->fx_r_type)
10911 {
10912 case BFD_RELOC_IA64_FPTR64I:
10913 case BFD_RELOC_IA64_FPTR32MSB:
10914 case BFD_RELOC_IA64_FPTR32LSB:
10915 case BFD_RELOC_IA64_FPTR64MSB:
10916 case BFD_RELOC_IA64_FPTR64LSB:
10917 case BFD_RELOC_IA64_LTOFF_FPTR22:
10918 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10919 return 0;
10920 default:
10921 break;
10922 }
10923
10924 return 1;
10925 }
10926
10927 int
10928 ia64_force_relocation (fixS *fix)
10929 {
10930 switch (fix->fx_r_type)
10931 {
10932 case BFD_RELOC_IA64_FPTR64I:
10933 case BFD_RELOC_IA64_FPTR32MSB:
10934 case BFD_RELOC_IA64_FPTR32LSB:
10935 case BFD_RELOC_IA64_FPTR64MSB:
10936 case BFD_RELOC_IA64_FPTR64LSB:
10937
10938 case BFD_RELOC_IA64_LTOFF22:
10939 case BFD_RELOC_IA64_LTOFF64I:
10940 case BFD_RELOC_IA64_LTOFF_FPTR22:
10941 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10942 case BFD_RELOC_IA64_PLTOFF22:
10943 case BFD_RELOC_IA64_PLTOFF64I:
10944 case BFD_RELOC_IA64_PLTOFF64MSB:
10945 case BFD_RELOC_IA64_PLTOFF64LSB:
10946
10947 case BFD_RELOC_IA64_LTOFF22X:
10948 case BFD_RELOC_IA64_LDXMOV:
10949 return 1;
10950
10951 default:
10952 break;
10953 }
10954
10955 return generic_force_reloc (fix);
10956 }
10957
10958 /* Decide what point a pc-relative relocation is relative to,
10959 relative to the pc-relative fixup.  Er, relatively speaking.  */
10960 long
10961 ia64_pcrel_from_section (fixS *fix, segT sec)
10962 {
10963 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10964
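/* In code sections, pc-relative values are measured from the start of the
   16-byte bundle containing the fixup, so drop the slot bits.  */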
10965 if (bfd_section_flags (sec) & SEC_CODE)
10966 off &= ~0xfUL;
10967
10968 return off;
10969 }
10970
10971
10972 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10973 void
10974 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10975 {
10976 expressionS exp;
10977
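/* Wrap SYMBOL in a section-relative pseudo-fixup so that ia64_cons_fix_new
   resolves it to the appropriate SECREL relocation for the given size.  */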
10978 exp.X_op = O_pseudo_fixup;
10979 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10980 exp.X_add_number = 0;
10981 exp.X_add_symbol = symbol;
10982 emit_expr (&exp, size);
10983 }
10984
10985 /* This is called whenever some data item (not an instruction) needs a
10986 fixup. We pick the right reloc code depending on the byteorder
10987 currently in effect. */
10988 void
10989 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
10990 bfd_reloc_code_real_type code)
10991 {
10992 fixS *fix;
10993
10994 switch (nbytes)
10995 {
10996 /* There are no relocs for 8- and 16-bit quantities, but we allow
10997 them here since they will work fine as long as the expression
10998 is fully defined at the end of the pass over the source file. */
10999 case 1: code = BFD_RELOC_8; break;
11000 case 2: code = BFD_RELOC_16; break;
11001 case 4:
11002 if (target_big_endian)
11003 code = BFD_RELOC_IA64_DIR32MSB;
11004 else
11005 code = BFD_RELOC_IA64_DIR32LSB;
11006 break;
11007
11008 case 8:
11009 /* In 32-bit mode, data8 could mean function descriptors too. */
11010 if (exp->X_op == O_pseudo_fixup
11011 && exp->X_op_symbol
11012 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11013 && !(md.flags & EF_IA_64_ABI64))
11014 {
11015 if (target_big_endian)
11016 code = BFD_RELOC_IA64_IPLTMSB;
11017 else
11018 code = BFD_RELOC_IA64_IPLTLSB;
11019 exp->X_op = O_symbol;
11020 break;
11021 }
11022 else
11023 {
11024 if (target_big_endian)
11025 code = BFD_RELOC_IA64_DIR64MSB;
11026 else
11027 code = BFD_RELOC_IA64_DIR64LSB;
11028 break;
11029 }
11030
11031 case 16:
11032 if (exp->X_op == O_pseudo_fixup
11033 && exp->X_op_symbol
11034 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11035 {
11036 if (target_big_endian)
11037 code = BFD_RELOC_IA64_IPLTMSB;
11038 else
11039 code = BFD_RELOC_IA64_IPLTLSB;
11040 exp->X_op = O_symbol;
11041 break;
11042 }
11043 /* FALLTHRU */
11044
11045 default:
11046 as_bad (_("Unsupported fixup size %d"), nbytes);
11047 ignore_rest_of_line ();
11048 return;
11049 }
11050
11051 if (exp->X_op == O_pseudo_fixup)
11052 {
11053 exp->X_op = O_symbol;
11054 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11055 /* ??? If code unchanged, unsupported. */
11056 }
11057
11058 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11059 /* We need to store the byte order in effect in case we're going
11060 to fix an 8- or 16-bit relocation (for which there are no real
11061 relocs available). See md_apply_fix(). */
11062 fix->tc_fix_data.bigendian = target_big_endian;
11063 }
11064
11065 /* Return the actual relocation we wish to associate with the pseudo
11066 reloc described by SYM and R_TYPE. SYM should be one of the
11067 symbols in the pseudo_func array, or NULL. */
11068
11069 static bfd_reloc_code_real_type
11070 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11071 {
11072 bfd_reloc_code_real_type newr = 0;
11073 const char *type = NULL, *suffix = "";
11074
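/* The pseudo-function (@gprel, @ltoff, @fptr, ...) selects the relocation
   family, while the incoming R_TYPE describes the container it applies to
   (imm14/imm22/imm64, or 32/64-bit data in MSB or LSB order); the
   combination determines the real relocation.  */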
11075 if (sym == NULL)
11076 {
11077 return r_type;
11078 }
11079
11080 switch (S_GET_VALUE (sym))
11081 {
11082 case FUNC_FPTR_RELATIVE:
11083 switch (r_type)
11084 {
11085 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11086 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11087 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11088 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11089 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11090 default: type = "FPTR"; break;
11091 }
11092 break;
11093
11094 case FUNC_GP_RELATIVE:
11095 switch (r_type)
11096 {
11097 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11098 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11099 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11100 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11101 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11102 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11103 default: type = "GPREL"; break;
11104 }
11105 break;
11106
11107 case FUNC_LT_RELATIVE:
11108 switch (r_type)
11109 {
11110 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11111 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11112 default: type = "LTOFF"; break;
11113 }
11114 break;
11115
11116 case FUNC_LT_RELATIVE_X:
11117 switch (r_type)
11118 {
11119 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11120 default: type = "LTOFF"; suffix = "X"; break;
11121 }
11122 break;
11123
11124 case FUNC_PC_RELATIVE:
11125 switch (r_type)
11126 {
11127 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11128 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11129 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11130 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11131 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11132 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11133 default: type = "PCREL"; break;
11134 }
11135 break;
11136
11137 case FUNC_PLT_RELATIVE:
11138 switch (r_type)
11139 {
11140 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11141 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11142 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11143 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11144 default: type = "PLTOFF"; break;
11145 }
11146 break;
11147
11148 case FUNC_SEC_RELATIVE:
11149 switch (r_type)
11150 {
11151 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11152 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11153 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11154 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11155 default: type = "SECREL"; break;
11156 }
11157 break;
11158
11159 case FUNC_SEG_RELATIVE:
11160 switch (r_type)
11161 {
11162 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11163 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11164 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11165 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11166 default: type = "SEGREL"; break;
11167 }
11168 break;
11169
11170 case FUNC_LTV_RELATIVE:
11171 switch (r_type)
11172 {
11173 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11174 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11175 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11176 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11177 default: type = "LTV"; break;
11178 }
11179 break;
11180
11181 case FUNC_LT_FPTR_RELATIVE:
11182 switch (r_type)
11183 {
11184 case BFD_RELOC_IA64_IMM22:
11185 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11186 case BFD_RELOC_IA64_IMM64:
11187 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11188 case BFD_RELOC_IA64_DIR32MSB:
11189 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11190 case BFD_RELOC_IA64_DIR32LSB:
11191 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11192 case BFD_RELOC_IA64_DIR64MSB:
11193 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11194 case BFD_RELOC_IA64_DIR64LSB:
11195 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11196 default:
11197 type = "LTOFF_FPTR"; break;
11198 }
11199 break;
11200
11201 case FUNC_TP_RELATIVE:
11202 switch (r_type)
11203 {
11204 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11205 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11206 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11207 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11208 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11209 default: type = "TPREL"; break;
11210 }
11211 break;
11212
11213 case FUNC_LT_TP_RELATIVE:
11214 switch (r_type)
11215 {
11216 case BFD_RELOC_IA64_IMM22:
11217 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11218 default:
11219 type = "LTOFF_TPREL"; break;
11220 }
11221 break;
11222
11223 case FUNC_DTP_MODULE:
11224 switch (r_type)
11225 {
11226 case BFD_RELOC_IA64_DIR64MSB:
11227 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11228 case BFD_RELOC_IA64_DIR64LSB:
11229 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11230 default:
11231 type = "DTPMOD"; break;
11232 }
11233 break;
11234
11235 case FUNC_LT_DTP_MODULE:
11236 switch (r_type)
11237 {
11238 case BFD_RELOC_IA64_IMM22:
11239 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11240 default:
11241 type = "LTOFF_DTPMOD"; break;
11242 }
11243 break;
11244
11245 case FUNC_DTP_RELATIVE:
11246 switch (r_type)
11247 {
11248 case BFD_RELOC_IA64_DIR32MSB:
11249 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11250 case BFD_RELOC_IA64_DIR32LSB:
11251 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11252 case BFD_RELOC_IA64_DIR64MSB:
11253 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11254 case BFD_RELOC_IA64_DIR64LSB:
11255 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11256 case BFD_RELOC_IA64_IMM14:
11257 newr = BFD_RELOC_IA64_DTPREL14; break;
11258 case BFD_RELOC_IA64_IMM22:
11259 newr = BFD_RELOC_IA64_DTPREL22; break;
11260 case BFD_RELOC_IA64_IMM64:
11261 newr = BFD_RELOC_IA64_DTPREL64I; break;
11262 default:
11263 type = "DTPREL"; break;
11264 }
11265 break;
11266
11267 case FUNC_LT_DTP_RELATIVE:
11268 switch (r_type)
11269 {
11270 case BFD_RELOC_IA64_IMM22:
11271 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11272 default:
11273 type = "LTOFF_DTPREL"; break;
11274 }
11275 break;
11276
11277 case FUNC_IPLT_RELOC:
11278 switch (r_type)
11279 {
11280 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11281 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11282 default: type = "IPLT"; break;
11283 }
11284 break;
11285
11286 #ifdef TE_VMS
11287 case FUNC_SLOTCOUNT_RELOC:
11288 return DUMMY_RELOC_IA64_SLOTCOUNT;
11289 #endif
11290
11291 default:
11292 abort ();
11293 }
11294
11295 if (newr)
11296 return newr;
11297 else
11298 {
11299 int width;
11300
11301 if (!type)
11302 abort ();
11303 switch (r_type)
11304 {
11305 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11306 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11307 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11308 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11309 case BFD_RELOC_UNUSED: width = 13; break;
11310 case BFD_RELOC_IA64_IMM14: width = 14; break;
11311 case BFD_RELOC_IA64_IMM22: width = 22; break;
11312 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11313 default: abort ();
11314 }
11315
11316 /* This should be an error, but since previously there wasn't any
11317 diagnostic here, don't make it fail because of this for now. */
11318 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11319 return r_type;
11320 }
11321 }
11322
11323 /* Here is where we generate the appropriate reloc for pseudo relocation
11324 functions. */
11325 void
11326 ia64_validate_fix (fixS *fix)
11327 {
11328 switch (fix->fx_r_type)
11329 {
11330 case BFD_RELOC_IA64_FPTR64I:
11331 case BFD_RELOC_IA64_FPTR32MSB:
11332 case BFD_RELOC_IA64_FPTR64LSB:
11333 case BFD_RELOC_IA64_LTOFF_FPTR22:
11334 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11335 if (fix->fx_offset != 0)
11336 as_bad_where (fix->fx_file, fix->fx_line,
11337 _("No addend allowed in @fptr() relocation"));
11338 break;
11339 default:
11340 break;
11341 }
11342 }
11343
11344 static void
11345 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11346 {
11347 bfd_vma insn[3], t0, t1, control_bits;
11348 const char *err;
11349 char *fixpos;
11350 long slot;
11351
11352 slot = fix->fx_where & 0x3;
11353 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11354
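/* An IA-64 bundle is 128 bits wide: a 5-bit template followed by three
   41-bit instruction slots.  Unpack the two 64-bit halves into those
   fields so the slot being fixed up can be patched on its own.  */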
11355 /* Bundles are always in little-endian byte order.  */
11356 t0 = bfd_getl64 (fixpos);
11357 t1 = bfd_getl64 (fixpos + 8);
11358 control_bits = t0 & 0x1f;
11359 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11360 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11361 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11362
11363 err = NULL;
11364 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11365 {
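/* movl's 64-bit immediate spans two slots: bits 62:22 of the value form
   slot 1, while bits 21:0 and bit 63 are scattered over several fields
   of slot 2.  */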
11366 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11367 insn[2] |= (((value & 0x7f) << 13)
11368 | (((value >> 7) & 0x1ff) << 27)
11369 | (((value >> 16) & 0x1f) << 22)
11370 | (((value >> 21) & 0x1) << 21)
11371 | (((value >> 63) & 0x1) << 36));
11372 }
11373 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11374 {
11375 if (value & ~0x3fffffffffffffffULL)
11376 err = _("integer operand out of range");
11377 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11378 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11379 }
11380 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11381 {
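/* A 64-bit branch target is bundle-aligned: discard the slot number in
   the low 4 bits, then split the remaining value between slot 1
   (bits 58:20) and fields of slot 2 (bits 19:0 and bit 59).  */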
11382 value >>= 4;
11383 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11384 insn[2] |= ((((value >> 59) & 0x1) << 36)
11385 | (((value >> 0) & 0xfffff) << 13));
11386 }
11387 else
11388 err = (*odesc->insert) (odesc, value, insn + slot);
11389
11390 if (err)
11391 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11392
11393 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11394 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11395 number_to_chars_littleendian (fixpos + 0, t0, 8);
11396 number_to_chars_littleendian (fixpos + 8, t1, 8);
11397 }
11398
11399 /* Attempt to simplify or even eliminate a fixup. The return value is
11400 ignored; perhaps it was once meaningful, but now it is historical.
11401 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11402
11403 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11404 (if possible). */
11405
11406 void
11407 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11408 {
11409 char *fixpos;
11410 valueT value = *valP;
11411
11412 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11413
11414 if (fix->fx_pcrel)
11415 {
11416 switch (fix->fx_r_type)
11417 {
11418 case BFD_RELOC_IA64_PCREL21B: break;
11419 case BFD_RELOC_IA64_PCREL21BI: break;
11420 case BFD_RELOC_IA64_PCREL21F: break;
11421 case BFD_RELOC_IA64_PCREL21M: break;
11422 case BFD_RELOC_IA64_PCREL60B: break;
11423 case BFD_RELOC_IA64_PCREL22: break;
11424 case BFD_RELOC_IA64_PCREL64I: break;
11425 case BFD_RELOC_IA64_PCREL32MSB: break;
11426 case BFD_RELOC_IA64_PCREL32LSB: break;
11427 case BFD_RELOC_IA64_PCREL64MSB: break;
11428 case BFD_RELOC_IA64_PCREL64LSB: break;
11429 default:
11430 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11431 fix->fx_r_type);
11432 break;
11433 }
11434 }
11435 if (fix->fx_addsy)
11436 {
11437 switch ((unsigned) fix->fx_r_type)
11438 {
11439 case BFD_RELOC_UNUSED:
11440 /* This must be a TAG13 or TAG13b operand. There are no external
11441 relocs defined for them, so we must give an error. */
11442 as_bad_where (fix->fx_file, fix->fx_line,
11443 _("%s must have a constant value"),
11444 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11445 fix->fx_done = 1;
11446 return;
11447
11448 case BFD_RELOC_IA64_TPREL14:
11449 case BFD_RELOC_IA64_TPREL22:
11450 case BFD_RELOC_IA64_TPREL64I:
11451 case BFD_RELOC_IA64_LTOFF_TPREL22:
11452 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11453 case BFD_RELOC_IA64_DTPREL14:
11454 case BFD_RELOC_IA64_DTPREL22:
11455 case BFD_RELOC_IA64_DTPREL64I:
11456 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11457 S_SET_THREAD_LOCAL (fix->fx_addsy);
11458 break;
11459
11460 #ifdef TE_VMS
11461 case DUMMY_RELOC_IA64_SLOTCOUNT:
11462 as_bad_where (fix->fx_file, fix->fx_line,
11463 _("cannot resolve @slotcount parameter"));
11464 fix->fx_done = 1;
11465 return;
11466 #endif
11467
11468 default:
11469 break;
11470 }
11471 }
11472 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11473 {
11474 #ifdef TE_VMS
11475 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11476 {
11477 /* For @slotcount, convert an address difference into a difference
11478 in instruction slots.  */
11479 valueT v;
11480
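/* Each 16-byte bundle holds three instruction slots, so every whole
   bundle in the difference contributes three slots.  The low nibble is
   the slot part: 0, 1 or 2 directly; 0x0e and 0x0f occur when the ending
   slot number is smaller than the starting one (the byte difference
   borrows a bundle) and stand for 1 and 2 extra slots.  */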
11481 v = (value >> 4) * 3;
11482 switch (value & 0x0f)
11483 {
11484 case 0:
11485 case 1:
11486 case 2:
11487 v += value & 0x0f;
11488 break;
11489 case 0x0f:
11490 v += 2;
11491 break;
11492 case 0x0e:
11493 v += 1;
11494 break;
11495 default:
11496 as_bad (_("invalid @slotcount value"));
11497 }
11498 value = v;
11499 }
11500 #endif
11501
11502 if (fix->tc_fix_data.bigendian)
11503 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11504 else
11505 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11506 fix->fx_done = 1;
11507 }
11508 else
11509 {
11510 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11511 fix->fx_done = 1;
11512 }
11513 }
11514
11515 /* Generate the BFD reloc to be stuck in the object file from the
11516 fixup used internally in the assembler. */
11517
11518 arelent *
11519 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11520 {
11521 arelent *reloc;
11522
11523 reloc = XNEW (arelent);
11524 reloc->sym_ptr_ptr = XNEW (asymbol *);
11525 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11526 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11527 reloc->addend = fixp->fx_offset;
11528 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11529
11530 if (!reloc->howto)
11531 {
11532 as_bad_where (fixp->fx_file, fixp->fx_line,
11533 _("Cannot represent %s relocation in object file"),
11534 bfd_get_reloc_code_name (fixp->fx_r_type));
11535 free (reloc);
11536 return NULL;
11537 }
11538 return reloc;
11539 }
11540
11541 /* Turn a string in input_line_pointer into a floating point constant
11542 of type TYPE, and store the appropriate bytes in *LIT. The number
11543 of LITTLENUMS emitted is stored in *SIZE. An error message is
11544 returned, or NULL on OK. */
11545
11546 const char *
11547 md_atof (int type, char *lit, int *size)
11548 {
11549 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11550 char *t;
11551 int prec;
11552
11553 switch (type)
11554 {
11555 /* IEEE floats */
11556 case 'f':
11557 case 'F':
11558 case 's':
11559 case 'S':
11560 prec = 2;
11561 break;
11562
11563 case 'd':
11564 case 'D':
11565 case 'r':
11566 case 'R':
11567 prec = 4;
11568 break;
11569
11570 case 'x':
11571 case 'X':
11572 case 'p':
11573 case 'P':
11574 prec = 5;
11575 break;
11576
11577 default:
11578 *size = 0;
11579 return _("Unrecognized or unsupported floating point constant");
11580 }
11581 t = atof_ieee (input_line_pointer, type, words);
11582 if (t)
11583 input_line_pointer = t;
11584
11585 (*ia64_float_to_chars) (lit, words, prec);
11586
11587 if (type == 'X')
11588 {
11589 /* It is a 10-byte floating point value with 6 bytes of padding.  */
11590 memset (&lit [10], 0, 6);
11591 *size = 8 * sizeof (LITTLENUM_TYPE);
11592 }
11593 else
11594 *size = prec * sizeof (LITTLENUM_TYPE);
11595
11596 return NULL;
11597 }
11598
11599 /* Handle ia64 specific semantics of the align directive. */
11600
11601 void
11602 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11603 const char *fill ATTRIBUTE_UNUSED,
11604 int len ATTRIBUTE_UNUSED,
11605 int max ATTRIBUTE_UNUSED)
11606 {
11607 if (subseg_text_p (now_seg))
11608 ia64_flush_insns ();
11609 }
11610
11611 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11612 of an rs_align_code fragment. */
11613
11614 void
11615 ia64_handle_align (fragS *fragp)
11616 {
11617 int bytes;
11618 char *p;
11619 const unsigned char *nop_type;
11620
11621 if (fragp->fr_type != rs_align_code)
11622 return;
11623
11624 /* Check if this frag has to end with a stop bit. */
11625 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11626
11627 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11628 p = fragp->fr_literal + fragp->fr_fix;
11629
11630 /* If no padding is needed, check whether we need a stop bit.  */
11631 if (!bytes && fragp->tc_frag_data)
11632 {
11633 if (fragp->fr_fix < 16)
11634 #if 1
11635 /* FIXME: It won't work with
11636 .align 16
11637 alloc r32=ar.pfs,1,2,4,0
11638 */
11639 ;
11640 #else
11641 as_bad_where (fragp->fr_file, fragp->fr_line,
11642 _("Can't add stop bit to mark end of instruction group"));
11643 #endif
11644 else
11645 /* Bundles are always in little-endian byte order. Make sure
11646 the previous bundle has the stop bit. */
11647 *(p - 16) |= 1;
11648 }
11649
11650 /* Make sure we are on a 16-byte boundary, in case someone has been
11651 putting data into a text section. */
11652 if (bytes & 15)
11653 {
11654 int fix = bytes & 15;
11655 memset (p, 0, fix);
11656 p += fix;
11657 bytes -= fix;
11658 fragp->fr_fix += fix;
11659 }
11660
11661 /* Instruction bundles are always little-endian. */
11662 memcpy (p, nop_type, 16);
11663 fragp->fr_var = 16;
11664 }
11665
11666 static void
11667 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11668 int prec)
11669 {
11670 while (prec--)
11671 {
11672 number_to_chars_bigendian (lit, (long) (*words++),
11673 sizeof (LITTLENUM_TYPE));
11674 lit += sizeof (LITTLENUM_TYPE);
11675 }
11676 }
11677
11678 static void
11679 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11680 int prec)
11681 {
11682 while (prec--)
11683 {
11684 number_to_chars_littleendian (lit, (long) (words[prec]),
11685 sizeof (LITTLENUM_TYPE));
11686 lit += sizeof (LITTLENUM_TYPE);
11687 }
11688 }
11689
11690 void
11691 ia64_elf_section_change_hook (void)
11692 {
11693 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11694 && elf_linked_to_section (now_seg) == NULL)
11695 elf_linked_to_section (now_seg) = text_section;
11696 dot_byteorder (-1);
11697 }
11698
11699 /* Check if a label should be made global. */
11700 void
11701 ia64_check_label (symbolS *label)
11702 {
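/* A second ':' at this point (i.e. a label written as "name::") requests
   that the label be made external.  */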
11703 if (*input_line_pointer == ':')
11704 {
11705 S_SET_EXTERNAL (label);
11706 input_line_pointer++;
11707 }
11708 }
11709
11710 /* Used to remember where .alias and .secalias directives are seen. We
11711 will rename symbol and section names when we are about to output
11712 the relocatable file. */
11713 struct alias
11714 {
11715 const char *file; /* The file where the directive is seen. */
11716 unsigned int line; /* The line number the directive is at. */
11717 const char *name; /* The original name of the symbol. */
11718 };
11719
11720 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11721 .secalias. Otherwise, it is .alias. */
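/* Illustrative usage (the names are only examples):
     .alias foo, "FOO$BAR"
   makes the object-file name of symbol foo be "FOO$BAR"; .secalias does
   the same for a section name.  */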
11722 static void
11723 dot_alias (int section)
11724 {
11725 char *name, *alias;
11726 char delim;
11727 char *end_name;
11728 int len;
11729 struct alias *h;
11730 const char *a;
11731 htab_t ahash, nhash;
11732 const char *kind;
11733
11734 delim = get_symbol_name (&name);
11735 end_name = input_line_pointer;
11736 *end_name = delim;
11737
11738 if (name == end_name)
11739 {
11740 as_bad (_("expected symbol name"));
11741 ignore_rest_of_line ();
11742 return;
11743 }
11744
11745 SKIP_WHITESPACE_AFTER_NAME ();
11746
11747 if (*input_line_pointer != ',')
11748 {
11749 *end_name = 0;
11750 as_bad (_("expected comma after \"%s\""), name);
11751 *end_name = delim;
11752 ignore_rest_of_line ();
11753 return;
11754 }
11755
11756 input_line_pointer++;
11757 *end_name = 0;
11758 ia64_canonicalize_symbol_name (name);
11759
11760 /* We call demand_copy_C_string to check if alias string is valid.
11761 There should be a closing `"' and no `\0' in the string. */
11762 alias = demand_copy_C_string (&len);
11763 if (alias == NULL)
11764 {
11765 ignore_rest_of_line ();
11766 return;
11767 }
11768
11769 /* Make a copy of name string. */
11770 name = notes_strdup (name);
11771
11772 if (section)
11773 {
11774 kind = "section";
11775 ahash = secalias_hash;
11776 nhash = secalias_name_hash;
11777 }
11778 else
11779 {
11780 kind = "symbol";
11781 ahash = alias_hash;
11782 nhash = alias_name_hash;
11783 }
11784
11785 /* Check if alias has been used before. */
11786
11787 h = (struct alias *) str_hash_find (ahash, alias);
11788 if (h)
11789 {
11790 if (strcmp (h->name, name))
11791 as_bad (_("`%s' is already the alias of %s `%s'"),
11792 alias, kind, h->name);
11793 notes_free (alias);
11794 goto out;
11795 }
11796
11797 /* Check if name already has an alias. */
11798 a = (const char *) str_hash_find (nhash, name);
11799 if (a)
11800 {
11801 if (strcmp (a, alias))
11802 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11803 notes_free (alias);
11804 goto out;
11805 }
11806
11807 h = notes_alloc (sizeof (*h));
11808 h->file = as_where (&h->line);
11809 h->name = name;
11810
11811 str_hash_insert (ahash, alias, h, 0);
11812 str_hash_insert (nhash, name, alias, 0);
11813
11814 out:
11815 demand_empty_rest_of_line ();
11816 }
11817
11818 /* It renames the original symbol name to its alias. */
11819 static int
11820 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11821 {
11822 string_tuple_t *tuple = *((string_tuple_t **) slot);
11823 struct alias *h = (struct alias *) tuple->value;
11824 symbolS *sym = symbol_find (h->name);
11825
11826 if (sym == NULL)
11827 {
11828 #ifdef TE_VMS
11829 /* VMS uses .alias extensively to alias CRTL functions to the same
11830 names with a decc$ prefix.  Sometimes a function gets optimized
11831 away and a warning results, which should be suppressed.  */
11832 if (!startswith (tuple->key, "decc$"))
11833 #endif
11834 as_warn_where (h->file, h->line,
11835 _("symbol `%s' aliased to `%s' is not used"),
11836 h->name, tuple->key);
11837 }
11838 else
11839 S_SET_NAME (sym, (char *) tuple->key);
11840
11841 return 1;
11842 }
11843
11844 /* Called from write_object_file. */
11845 void
11846 ia64_adjust_symtab (void)
11847 {
11848 htab_traverse_noresize (alias_hash, do_alias, NULL);
11849 }
11850
11851 /* It renames the original section name to its alias. */
11852 static int
11853 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11854 {
11855 string_tuple_t *tuple = *((string_tuple_t **) slot);
11856 struct alias *h = (struct alias *) tuple->value;
11857 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11858
11859 if (sec == NULL)
11860 as_warn_where (h->file, h->line,
11861 _("section `%s' aliased to `%s' is not used"),
11862 h->name, tuple->key);
11863 else
11864 sec->name = tuple->key;
11865
11866 return 1;
11867 }
11868
11869 /* Called from write_object_file. */
11870 void
11871 ia64_frob_file (void)
11872 {
11873 htab_traverse_noresize (secalias_hash, do_secalias, NULL);
11874 }
11875
11876 #ifdef TE_VMS
11877 #define NT_VMS_MHD 1
11878 #define NT_VMS_LNM 2
11879
11880 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11881 .note section. */
11882
11883 /* Manufacture a VMS-like time string. */
11884 static void
11885 get_vms_time (char *Now)
11886 {
11887 char *pnt;
11888 time_t timeb;
11889
11890 time (&timeb);
11891 pnt = ctime (&timeb);
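/* ctime returns "Www Mmm dd hh:mm:ss yyyy\n"; NUL-terminate the pieces
   and reassemble them as "dd-Mmm-yyyy hh:mm".  */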
11892 pnt[3] = 0;
11893 pnt[7] = 0;
11894 pnt[10] = 0;
11895 pnt[16] = 0;
11896 pnt[24] = 0;
11897 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11898 }
11899
11900 void
11901 ia64_vms_note (void)
11902 {
11903 char *p;
11904 asection *seg = now_seg;
11905 subsegT subseg = now_subseg;
11906 asection *secp = NULL;
11907 char *bname;
11908 char buf [256];
11909 symbolS *sym;
11910
11911 /* Create the .note section. */
11912
11913 secp = subseg_new (".note", 0);
11914 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11915
11916 /* Module header note (MHD). */
11917 bname = xstrdup (lbasename (out_file_name));
11918 if ((p = strrchr (bname, '.')))
11919 *p = '\0';
11920
11921 /* VMS note header is 24 bytes long. */
11922 p = frag_more (8 + 8 + 8);
11923 number_to_chars_littleendian (p + 0, 8, 8);
11924 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11925 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11926
11927 p = frag_more (8);
11928 strcpy (p, "IPF/VMS");
11929
11930 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11931 get_vms_time (p);
11932 strcpy (p + 17, "24-FEB-2005 15:00");
11933 p += 17 + 17;
11934 strcpy (p, bname);
11935 p += strlen (bname) + 1;
11936 free (bname);
11937 strcpy (p, "V1.0");
11938
11939 frag_align (3, 0, 0);
11940
11941 /* Language processor name note. */
11942 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11943 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11944
11945 p = frag_more (8 + 8 + 8);
11946 number_to_chars_littleendian (p + 0, 8, 8);
11947 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11948 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11949
11950 p = frag_more (8);
11951 strcpy (p, "IPF/VMS");
11952
11953 p = frag_more (strlen (buf) + 1);
11954 strcpy (p, buf);
11955
11956 frag_align (3, 0, 0);
11957
11958 secp = subseg_new (".vms_display_name_info", 0);
11959 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11960
11961 /* This symbol should be passed on the command line and should vary
11962 according to the source language.  */
11963 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11964 absolute_section, &zero_address_frag, 0);
11965 symbol_table_insert (sym);
11966 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11967
11968 p = frag_more (4);
11969 /* Format 3 of VMS demangler Spec. */
11970 number_to_chars_littleendian (p, 3, 4);
11971
11972 p = frag_more (4);
11973 /* Placeholder for the symbol table index of the above symbol.  */
11974 number_to_chars_littleendian (p, -1, 4);
11975
11976 frag_align (3, 0, 0);
11977
11978 /* We probably can't restore the current segment, for there likely
11979 isn't one yet... */
11980 if (seg && subseg)
11981 subseg_set (seg, subseg);
11982 }
11983
11984 #endif /* TE_VMS */