1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2024 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54 #include <limits.h>
55
56 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
57
58 /* Some systems define MIN in, e.g., param.h. */
59 #undef MIN
60 #define MIN(a,b) ((a) < (b) ? (a) : (b))
61
62 #define NUM_SLOTS 4
63 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
64 #define CURR_SLOT md.slot[md.curr_slot]
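/* The slots form a small circular buffer of NUM_SLOTS (4) entries;
   e.g. with md.curr_slot == 0, PREV_SLOT refers to md.slot[3]. */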
65
66 #define O_pseudo_fixup (O_max + 1)
67
68 enum special_section
69 {
70 /* IA-64 ABI section pseudo-ops. */
71 SPECIAL_SECTION_SBSS = 0,
72 SPECIAL_SECTION_SDATA,
73 SPECIAL_SECTION_RODATA,
74 SPECIAL_SECTION_COMMENT,
75 SPECIAL_SECTION_UNWIND,
76 SPECIAL_SECTION_UNWIND_INFO,
77 /* HPUX specific section pseudo-ops. */
78 SPECIAL_SECTION_INIT_ARRAY,
79 SPECIAL_SECTION_FINI_ARRAY,
80 };
81
82 enum reloc_func
83 {
84 FUNC_DTP_MODULE,
85 FUNC_DTP_RELATIVE,
86 FUNC_FPTR_RELATIVE,
87 FUNC_GP_RELATIVE,
88 FUNC_LT_RELATIVE,
89 FUNC_LT_RELATIVE_X,
90 FUNC_PC_RELATIVE,
91 FUNC_PLT_RELATIVE,
92 FUNC_SEC_RELATIVE,
93 FUNC_SEG_RELATIVE,
94 FUNC_TP_RELATIVE,
95 FUNC_LTV_RELATIVE,
96 FUNC_LT_FPTR_RELATIVE,
97 FUNC_LT_DTP_MODULE,
98 FUNC_LT_DTP_RELATIVE,
99 FUNC_LT_TP_RELATIVE,
100 FUNC_IPLT_RELOC,
101 #ifdef TE_VMS
102 FUNC_SLOTCOUNT_RELOC,
103 #endif
104 };
105
106 enum reg_symbol
107 {
108 REG_GR = 0,
109 REG_FR = (REG_GR + 128),
110 REG_AR = (REG_FR + 128),
111 REG_CR = (REG_AR + 128),
112 REG_DAHR = (REG_CR + 128),
113 REG_P = (REG_DAHR + 8),
114 REG_BR = (REG_P + 64),
115 REG_IP = (REG_BR + 8),
116 REG_CFM,
117 REG_PR,
118 REG_PR_ROT,
119 REG_PSR,
120 REG_PSR_L,
121 REG_PSR_UM,
122 /* The following are pseudo-registers for use by gas only. */
123 IND_CPUID,
124 IND_DBR,
125 IND_DTR,
126 IND_ITR,
127 IND_IBR,
128 IND_MSR,
129 IND_PKR,
130 IND_PMC,
131 IND_PMD,
132 IND_DAHR,
133 IND_RR,
134 /* The following pseudo-registers are used for unwind directives only: */
135 REG_PSP,
136 REG_PRIUNAT,
137 REG_NUM
138 };
139
140 enum dynreg_type
141 {
142 DYNREG_GR = 0, /* dynamic general purpose register */
143 DYNREG_FR, /* dynamic floating point register */
144 DYNREG_PR, /* dynamic predicate register */
145 DYNREG_NUM_TYPES
146 };
147
148 enum operand_match_result
149 {
150 OPERAND_MATCH,
151 OPERAND_OUT_OF_RANGE,
152 OPERAND_MISMATCH
153 };
154
155 /* On the ia64, we can't know the address of a text label until the
156 instructions are packed into a bundle. To handle this, we keep
157 track of the list of labels that appear in front of each
158 instruction. */
159 struct label_fix
160 {
161 struct label_fix *next;
162 struct symbol *sym;
163 bool dw2_mark_labels;
164 };
165
166 #ifdef TE_VMS
167 /* An internally used relocation. */
168 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
169 #endif
170
171 /* This is the endianness of the current section. */
172 extern int target_big_endian;
173
174 /* This is the default endianness. */
175 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
176
177 void (*ia64_number_to_chars) (char *, valueT, int);
178
179 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
180 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
181
182 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
183
184 static htab_t alias_hash;
185 static htab_t alias_name_hash;
186 static htab_t secalias_hash;
187 static htab_t secalias_name_hash;
188
189 /* List of chars besides those in app.c:symbol_chars that can start an
190 operand. Used to prevent the scrubber from eating vital white-space. */
191 const char ia64_symbol_chars[] = "@?";
192
193 /* Characters which always start a comment. */
194 const char comment_chars[] = "";
195
196 /* Characters which start a comment at the beginning of a line. */
197 const char line_comment_chars[] = "#";
198
199 /* Characters which may be used to separate multiple commands on a
200 single line. */
201 const char line_separator_chars[] = ";{}";
202
203 /* Characters which are used to indicate an exponent in a floating
204 point number. */
205 const char EXP_CHARS[] = "eE";
206
207 /* Characters which mean that a number is a floating point constant,
208 as in 0d1.0. */
209 const char FLT_CHARS[] = "rRsSfFdDxXpP";
210
211 /* ia64-specific option processing: */
212
213 const char *md_shortopts = "m:N:x::";
214
215 struct option md_longopts[] =
216 {
217 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
218 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
219 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
220 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
221 };
222
223 size_t md_longopts_size = sizeof (md_longopts);
224
225 static struct
226 {
227 htab_t pseudo_hash; /* pseudo opcode hash table */
228 htab_t reg_hash; /* register name hash table */
229 htab_t dynreg_hash; /* dynamic register hash table */
230 htab_t const_hash; /* constant hash table */
231 htab_t entry_hash; /* code entry hint hash table */
232
233 /* If X_op != O_absent, the register name for the instruction's
234 qualifying predicate. If absent (O_absent), p0 is assumed for
235 instructions that can take a qualifying predicate. */
236 expressionS qp;
237
238 /* Optimize for which CPU. */
239 enum
240 {
241 itanium1,
242 itanium2
243 } tune;
244
245 /* What to do when hint.b is used. */
246 enum
247 {
248 hint_b_error,
249 hint_b_warning,
250 hint_b_ok
251 } hint_b;
252
253 unsigned int
254 manual_bundling : 1,
255 debug_dv: 1,
256 detect_dv: 1,
257 explicit_mode : 1, /* which mode we're in */
258 default_explicit_mode : 1, /* which mode is the default */
259 mode_explicitly_set : 1, /* was the current mode explicitly set? */
260 auto_align : 1,
261 keep_pending_output : 1;
262
263 /* What to do when something is wrong with unwind directives. */
264 enum
265 {
266 unwind_check_warning,
267 unwind_check_error
268 } unwind_check;
269
270 /* Each bundle consists of up to three instructions. We keep
271 track of the four most recent instructions so we can correctly set
272 the end_of_insn_group for the last instruction in a bundle. */
273 int curr_slot;
274 int num_slots_in_use;
275 struct slot
276 {
277 unsigned int
278 end_of_insn_group : 1,
279 manual_bundling_on : 1,
280 manual_bundling_off : 1,
281 loc_directive_seen : 1;
282 signed char user_template; /* user-selected template, if any */
283 unsigned char qp_regno; /* qualifying predicate */
284 /* This duplicates a good fraction of "struct fix" but we
285 can't use a "struct fix" instead since we can't call
286 fix_new_exp() until we know the address of the instruction. */
287 int num_fixups;
288 struct insn_fix
289 {
290 bfd_reloc_code_real_type code;
291 enum ia64_opnd opnd; /* type of operand in need of fix */
292 unsigned int is_pcrel : 1; /* is operand pc-relative? */
293 expressionS expr; /* the value to be inserted */
294 }
295 fixup[2]; /* at most two fixups per insn */
296 struct ia64_opcode *idesc;
297 struct label_fix *label_fixups;
298 struct label_fix *tag_fixups;
299 struct unw_rec_list *unwind_record; /* Unwind directive. */
300 expressionS opnd[6];
301 const char *src_file;
302 unsigned int src_line;
303 struct dwarf2_line_info debug_line;
304 }
305 slot[NUM_SLOTS];
306
307 segT last_text_seg;
308 subsegT last_text_subseg;
309
310 struct dynreg
311 {
312 struct dynreg *next; /* next dynamic register */
313 const char *name;
314 unsigned short base; /* the base register number */
315 unsigned short num_regs; /* # of registers in this set */
316 }
317 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
318
319 flagword flags; /* ELF-header flags */
320
321 struct mem_offset {
322 unsigned hint:1; /* is this hint currently valid? */
323 bfd_vma offset; /* mem.offset offset */
324 bfd_vma base; /* mem.offset base */
325 } mem_offset;
326
327 int path; /* number of alt. entry points seen */
328 const char **entry_labels; /* labels of all alternate paths in
329 the current DV-checking block. */
330 int maxpaths; /* size currently allocated for
331 entry_labels */
332
333 int pointer_size; /* size in bytes of a pointer */
334 int pointer_size_shift; /* shift size of a pointer for alignment */
335
336 symbolS *indregsym[IND_RR - IND_CPUID + 1];
337 }
338 md;
339
340 /* These are not const, because they are modified to MMI for non-itanium1
341 targets below. */
342 /* MFI bundle of nops. */
343 static unsigned char le_nop[16] =
344 {
345 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
346 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
347 };
348 /* MFI bundle of nops with stop-bit. */
349 static unsigned char le_nop_stop[16] =
350 {
351 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
352 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
353 };
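/* In the little-endian encoding the low five bits of byte 0 hold the bundle
   template: 0x0c selects MFI, 0x0d MFI with a stop bit after the last slot,
   which is the only difference between the two arrays above. */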
354
355 /* application registers: */
356
357 #define AR_K0 0
358 #define AR_K7 7
359 #define AR_RSC 16
360 #define AR_BSP 17
361 #define AR_BSPSTORE 18
362 #define AR_RNAT 19
363 #define AR_FCR 21
364 #define AR_EFLAG 24
365 #define AR_CSD 25
366 #define AR_SSD 26
367 #define AR_CFLG 27
368 #define AR_FSR 28
369 #define AR_FIR 29
370 #define AR_FDR 30
371 #define AR_CCV 32
372 #define AR_UNAT 36
373 #define AR_FPSR 40
374 #define AR_ITC 44
375 #define AR_RUC 45
376 #define AR_PFS 64
377 #define AR_LC 65
378 #define AR_EC 66
379
380 static const struct
381 {
382 const char *name;
383 unsigned int regnum;
384 }
385 ar[] =
386 {
387 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
388 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
389 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
390 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
391 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
392 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
393 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
394 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
395 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
396 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
397 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
398 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
399 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
400 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
401 };
402
403 /* control registers: */
404
405 #define CR_DCR 0
406 #define CR_ITM 1
407 #define CR_IVA 2
408 #define CR_PTA 8
409 #define CR_GPTA 9
410 #define CR_IPSR 16
411 #define CR_ISR 17
412 #define CR_IIP 19
413 #define CR_IFA 20
414 #define CR_ITIR 21
415 #define CR_IIPA 22
416 #define CR_IFS 23
417 #define CR_IIM 24
418 #define CR_IHA 25
419 #define CR_IIB0 26
420 #define CR_IIB1 27
421 #define CR_LID 64
422 #define CR_IVR 65
423 #define CR_TPR 66
424 #define CR_EOI 67
425 #define CR_IRR0 68
426 #define CR_IRR3 71
427 #define CR_ITV 72
428 #define CR_PMV 73
429 #define CR_CMCV 74
430 #define CR_LRR0 80
431 #define CR_LRR1 81
432
433 static const struct
434 {
435 const char *name;
436 unsigned int regnum;
437 }
438 cr[] =
439 {
440 {"cr.dcr", CR_DCR},
441 {"cr.itm", CR_ITM},
442 {"cr.iva", CR_IVA},
443 {"cr.pta", CR_PTA},
444 {"cr.gpta", CR_GPTA},
445 {"cr.ipsr", CR_IPSR},
446 {"cr.isr", CR_ISR},
447 {"cr.iip", CR_IIP},
448 {"cr.ifa", CR_IFA},
449 {"cr.itir", CR_ITIR},
450 {"cr.iipa", CR_IIPA},
451 {"cr.ifs", CR_IFS},
452 {"cr.iim", CR_IIM},
453 {"cr.iha", CR_IHA},
454 {"cr.iib0", CR_IIB0},
455 {"cr.iib1", CR_IIB1},
456 {"cr.lid", CR_LID},
457 {"cr.ivr", CR_IVR},
458 {"cr.tpr", CR_TPR},
459 {"cr.eoi", CR_EOI},
460 {"cr.irr0", CR_IRR0},
461 {"cr.irr1", CR_IRR0 + 1},
462 {"cr.irr2", CR_IRR0 + 2},
463 {"cr.irr3", CR_IRR3},
464 {"cr.itv", CR_ITV},
465 {"cr.pmv", CR_PMV},
466 {"cr.cmcv", CR_CMCV},
467 {"cr.lrr0", CR_LRR0},
468 {"cr.lrr1", CR_LRR1}
469 };
470
471 #define PSR_MFL 4
472 #define PSR_IC 13
473 #define PSR_DFL 18
474 #define PSR_CPL 32
475
476 static const struct const_desc
477 {
478 const char *name;
479 valueT value;
480 }
481 const_bits[] =
482 {
483 /* PSR constant masks: */
484
485 /* 0: reserved */
486 {"psr.be", ((valueT) 1) << 1},
487 {"psr.up", ((valueT) 1) << 2},
488 {"psr.ac", ((valueT) 1) << 3},
489 {"psr.mfl", ((valueT) 1) << 4},
490 {"psr.mfh", ((valueT) 1) << 5},
491 /* 6-12: reserved */
492 {"psr.ic", ((valueT) 1) << 13},
493 {"psr.i", ((valueT) 1) << 14},
494 {"psr.pk", ((valueT) 1) << 15},
495 /* 16: reserved */
496 {"psr.dt", ((valueT) 1) << 17},
497 {"psr.dfl", ((valueT) 1) << 18},
498 {"psr.dfh", ((valueT) 1) << 19},
499 {"psr.sp", ((valueT) 1) << 20},
500 {"psr.pp", ((valueT) 1) << 21},
501 {"psr.di", ((valueT) 1) << 22},
502 {"psr.si", ((valueT) 1) << 23},
503 {"psr.db", ((valueT) 1) << 24},
504 {"psr.lp", ((valueT) 1) << 25},
505 {"psr.tb", ((valueT) 1) << 26},
506 {"psr.rt", ((valueT) 1) << 27},
507 /* 28-31: reserved */
508 /* 32-33: cpl (current privilege level) */
509 {"psr.is", ((valueT) 1) << 34},
510 {"psr.mc", ((valueT) 1) << 35},
511 {"psr.it", ((valueT) 1) << 36},
512 {"psr.id", ((valueT) 1) << 37},
513 {"psr.da", ((valueT) 1) << 38},
514 {"psr.dd", ((valueT) 1) << 39},
515 {"psr.ss", ((valueT) 1) << 40},
516 /* 41-42: ri (restart instruction) */
517 {"psr.ed", ((valueT) 1) << 43},
518 {"psr.bn", ((valueT) 1) << 44},
519 };
520
521 /* indirect register-sets/memory: */
522
523 static const struct
524 {
525 const char *name;
526 unsigned int regnum;
527 }
528 indirect_reg[] =
529 {
530 { "CPUID", IND_CPUID },
531 { "cpuid", IND_CPUID },
532 { "dbr", IND_DBR },
533 { "dtr", IND_DTR },
534 { "itr", IND_ITR },
535 { "ibr", IND_IBR },
536 { "msr", IND_MSR },
537 { "pkr", IND_PKR },
538 { "pmc", IND_PMC },
539 { "pmd", IND_PMD },
540 { "dahr", IND_DAHR },
541 { "rr", IND_RR },
542 };
543
544 /* Pseudo functions used to indicate relocation types (these functions
545 start with an at sign (@)). */
546 static struct
547 {
548 const char *name;
549 enum pseudo_type
550 {
551 PSEUDO_FUNC_NONE,
552 PSEUDO_FUNC_RELOC,
553 PSEUDO_FUNC_CONST,
554 PSEUDO_FUNC_REG,
555 PSEUDO_FUNC_FLOAT
556 }
557 type;
558 union
559 {
560 unsigned long ival;
561 symbolS *sym;
562 }
563 u;
564 }
565 pseudo_func[] =
566 {
567 /* reloc pseudo functions (these must come first!): */
568 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
569 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
570 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
571 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
572 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
573 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
574 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
577 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
579 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
580 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
581 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
582 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
584 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
585 #ifdef TE_VMS
586 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
587 #endif
588
589 /* mbtype4 constants: */
590 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
591 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
592 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
593 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
594 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
595
596 /* fclass constants: */
597 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
598 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
599 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
600 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
601 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
602 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
603 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
604 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
605 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
606
607 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
608
609 /* hint constants: */
610 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
611 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
612
613 /* tf constants: */
614 { "clz", PSEUDO_FUNC_CONST, { 32 } },
615 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
616 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
617
618 /* unwind-related constants: */
619 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
620 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
621 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
622 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
623 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
624 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
625 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
626
627 /* unwind-related registers: */
628 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
629 };
630
631 /* 41-bit nop opcodes (one per unit): */
632 static const bfd_vma nop[IA64_NUM_UNITS] =
633 {
634 0x0000000000LL, /* NIL => break 0 */
635 0x0008000000LL, /* I-unit nop */
636 0x0008000000LL, /* M-unit nop */
637 0x4000000000LL, /* B-unit nop */
638 0x0008000000LL, /* F-unit nop */
639 0x0000000000LL, /* L-"unit" nop immediate */
640 0x0008000000LL, /* X-unit nop */
641 };
642
643 /* Can't be `const' as it's passed to input routines (which have the
644 habit of setting temporary sentinels). */
645 static char special_section_name[][20] =
646 {
647 {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
648 {".IA_64.unwind"}, {".IA_64.unwind_info"},
649 {".init_array"}, {".fini_array"}
650 };
651
652 /* The best template for a particular sequence of up to three
653 instructions: */
654 #define N IA64_NUM_TYPES
655 static unsigned char best_template[N][N][N];
656 #undef N
657
658 /* Resource dependencies currently in effect */
659 static struct rsrc {
660 int depind; /* dependency index */
661 const struct ia64_dependency *dependency; /* actual dependency */
662 unsigned specific:1, /* is this a specific bit/regno? */
663 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
664 int index; /* specific regno/bit within dependency */
665 int note; /* optional qualifying note (0 if none) */
666 #define STATE_NONE 0
667 #define STATE_STOP 1
668 #define STATE_SRLZ 2
669 int insn_srlz; /* current insn serialization state */
670 int data_srlz; /* current data serialization state */
671 int qp_regno; /* qualifying predicate for this usage */
672 const char *file; /* what file marked this dependency */
673 unsigned int line; /* what line marked this dependency */
674 struct mem_offset mem_offset; /* optional memory offset hint */
675 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
676 int path; /* corresponding code entry index */
677 } *regdeps = NULL;
678 static int regdepslen = 0;
679 static int regdepstotlen = 0;
680 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
681 static const char *dv_sem[] = { "none", "implied", "impliedf",
682 "data", "instr", "specific", "stop", "other" };
683 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
684
685 /* Current state of PR mutexation */
686 static struct qpmutex {
687 valueT prmask;
688 int path;
689 } *qp_mutexes = NULL; /* QP mutex bitmasks */
690 static int qp_mutexeslen = 0;
691 static int qp_mutexestotlen = 0;
692 static valueT qp_safe_across_calls = 0;
693
694 /* Current state of PR implications */
695 static struct qp_imply {
696 unsigned p1:6;
697 unsigned p2:6;
698 unsigned p2_branched:1;
699 int path;
700 } *qp_implies = NULL;
701 static int qp_implieslen = 0;
702 static int qp_impliestotlen = 0;
703
704 /* Keep track of static GR values so that indirect register usage can
705 sometimes be tracked. */
706 static struct gr {
707 unsigned known:1;
708 int path;
709 valueT value;
710 } gr_values[128] = {
711 {
712 1,
713 #ifdef INT_MAX
714 INT_MAX,
715 #else
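/* Portable fallback for INT_MAX: for an N-bit int the expression below
   evaluates to ((2^(N-2) - 1) * 2) + 1 == 2^(N-1) - 1 without overflowing. */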
716 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
717 #endif
718 0
719 }
720 };
721
722 /* Remember the alignment frag. */
723 static fragS *align_frag;
724
725 /* These are the routines required to output the various types of
726 unwind records. */
727
728 /* A slot_number is a frag address plus the slot index (0-2). We use the
729 frag address here so that if there is a section switch in the middle of
730 a function, then instructions emitted to a different section are not
731 counted. Since there may be more than one frag for a function, this
732 means we also need to keep track of which frag this address belongs to
733 so we can compute inter-frag distances. This also nicely solves the
734 problem with nops emitted for align directives, which can't easily be
735 counted, but can easily be derived from frag sizes. */
736
737 typedef struct unw_rec_list {
738 unwind_record r;
739 unsigned long slot_number;
740 fragS *slot_frag;
741 struct unw_rec_list *next;
742 } unw_rec_list;
743
744 #define SLOT_NUM_NOT_SET (unsigned)-1
745
746 /* Linked list of saved prologue counts. A very poor
747 implementation of a map from label numbers to prologue counts. */
748 typedef struct label_prologue_count
749 {
750 struct label_prologue_count *next;
751 unsigned long label_number;
752 unsigned int prologue_count;
753 } label_prologue_count;
754
755 typedef struct proc_pending
756 {
757 symbolS *sym;
758 struct proc_pending *next;
759 } proc_pending;
760
761 static struct
762 {
763 /* Maintain a list of unwind entries for the current function. */
764 unw_rec_list *list;
765 unw_rec_list *tail;
766
767 /* Any unwind entries that should be attached to the current slot
768 that an insn is being constructed for. */
769 unw_rec_list *current_entry;
770
771 /* These are used to create the unwind table entry for this function. */
772 proc_pending proc_pending;
773 symbolS *info; /* pointer to unwind info */
774 symbolS *personality_routine;
775 segT saved_text_seg;
776 subsegT saved_text_subseg;
777 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
778
779 /* TRUE if processing unwind directives in a prologue region. */
780 unsigned int prologue : 1;
781 unsigned int prologue_mask : 4;
782 unsigned int prologue_gr : 7;
783 unsigned int body : 1;
784 unsigned int insn : 1;
785 unsigned int prologue_count; /* number of .prologues seen so far */
786 /* Prologue counts at previous .label_state directives. */
787 struct label_prologue_count * saved_prologue_counts;
788
789 /* List of split up .save-s. */
790 unw_p_record *pending_saves;
791 } unwind;
792
793 /* The input value is a negated offset from psp, i.e. it specifies the
794 address psp - offset. An encoded value E stands for the address
795 psp + 16 - 4 * E, so we must add 16 and divide by 4 to encode. */
796
797 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
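/* Example: a save location of psp - 32 has OFFSET 32 and encodes as
   (32 + 16) / 4 = 12, which decodes back to psp + 16 - 4 * 12 = psp - 32. */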
798
799 typedef void (*vbyte_func) (int, char *, char *);
800
801 /* Forward declarations: */
802 static void dot_alias (int);
803 static int parse_operand_and_eval (expressionS *, int);
804 static void emit_one_bundle (void);
805 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
806 bfd_reloc_code_real_type);
807 static void insn_group_break (int, int, int);
808 static void add_qp_mutex (valueT);
809 static void add_qp_imply (int, int);
810 static void clear_qp_mutex (valueT);
811 static void clear_qp_implies (valueT, valueT);
812 static void print_dependency (const char *, int);
813 static void instruction_serialization (void);
814 static void data_serialization (void);
815 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
816 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
817 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
818 static void free_saved_prologue_counts (void);
819
820 /* Determine if application register REGNUM resides only in the integer
821 unit (as opposed to the memory unit). */
822 static int
823 ar_is_only_in_integer_unit (int reg)
824 {
825 reg -= REG_AR;
826 return reg >= 64 && reg <= 111;
827 }
828
829 /* Determine if application register REGNUM resides only in the memory
830 unit (as opposed to the integer unit). */
831 static int
832 ar_is_only_in_memory_unit (int reg)
833 {
834 reg -= REG_AR;
835 return reg >= 0 && reg <= 47;
836 }
837
838 /* Switch to section NAME and create section if necessary. It's
839 rather ugly that we have to manipulate input_line_pointer but I
840 don't see any other way to accomplish the same thing without
841 changing obj-elf.c (which may be the Right Thing, in the end). */
842 static void
843 set_section (char *name)
844 {
845 char *saved_input_line_pointer;
846
847 saved_input_line_pointer = input_line_pointer;
848 input_line_pointer = name;
849 obj_elf_section (0);
850 input_line_pointer = saved_input_line_pointer;
851 }
852
853 /* Map 's' to SHF_IA_64_SHORT. */
854
855 bfd_vma
856 ia64_elf_section_letter (int letter, const char **ptr_msg)
857 {
858 if (letter == 's')
859 return SHF_IA_64_SHORT;
860 else if (letter == 'o')
861 return SHF_LINK_ORDER;
862 #ifdef TE_VMS
863 else if (letter == 'O')
864 return SHF_IA_64_VMS_OVERLAID;
865 else if (letter == 'g')
866 return SHF_IA_64_VMS_GLOBAL;
867 #endif
868
869 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
870 return -1;
871 }
872
873 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
874
875 flagword
876 ia64_elf_section_flags (flagword flags,
877 bfd_vma attr,
878 int type ATTRIBUTE_UNUSED)
879 {
880 if (attr & SHF_IA_64_SHORT)
881 flags |= SEC_SMALL_DATA;
882 return flags;
883 }
884
885 int
886 ia64_elf_section_type (const char *str, size_t len)
887 {
888 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
889
890 if (STREQ (ELF_STRING_ia64_unwind_info))
891 return SHT_PROGBITS;
892
893 if (STREQ (ELF_STRING_ia64_unwind_info_once))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind))
897 return SHT_IA_64_UNWIND;
898
899 if (STREQ (ELF_STRING_ia64_unwind_once))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ ("unwind"))
903 return SHT_IA_64_UNWIND;
904
905 return -1;
906 #undef STREQ
907 }
908
909 static unsigned int
910 set_regstack (unsigned int ins,
911 unsigned int locs,
912 unsigned int outs,
913 unsigned int rots)
914 {
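/* Worked example (values as would come from a ".regstk 2, 3, 1, 0"
   directive): ins = 2, locs = 3, outs = 1, rots = 0 gives sof = 6,
   with inputs at r32-r33, locals at r34-r36 and the output at r37. */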
915 /* Size of frame. */
916 unsigned int sof;
917
918 sof = ins + locs + outs;
919 if (sof > 96)
920 {
921 as_bad (_("Size of frame exceeds maximum of 96 registers"));
922 return 0;
923 }
924 if (rots > sof)
925 {
926 as_warn (_("Size of rotating registers exceeds frame size"));
927 return 0;
928 }
929 md.in.base = REG_GR + 32;
930 md.loc.base = md.in.base + ins;
931 md.out.base = md.loc.base + locs;
932
933 md.in.num_regs = ins;
934 md.loc.num_regs = locs;
935 md.out.num_regs = outs;
936 md.rot.num_regs = rots;
937 return sof;
938 }
939
940 void
941 ia64_flush_insns (void)
942 {
943 struct label_fix *lfix;
944 segT saved_seg;
945 subsegT saved_subseg;
946 unw_rec_list *ptr;
947 bool mark;
948
949 if (!md.last_text_seg)
950 return;
951
952 saved_seg = now_seg;
953 saved_subseg = now_subseg;
954
955 subseg_set (md.last_text_seg, md.last_text_subseg);
956
957 while (md.num_slots_in_use > 0)
958 emit_one_bundle (); /* force out queued instructions */
959
960 /* In case there are labels following the last instruction, resolve
961 those now. */
962 mark = false;
963 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
964 {
965 symbol_set_value_now (lfix->sym);
966 mark |= lfix->dw2_mark_labels;
967 }
968 if (mark)
969 {
970 dwarf2_where (&CURR_SLOT.debug_line);
971 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
972 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
973 dwarf2_consume_line_info ();
974 }
975 CURR_SLOT.label_fixups = 0;
976
977 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
978 symbol_set_value_now (lfix->sym);
979 CURR_SLOT.tag_fixups = 0;
980
981 /* In case there are unwind directives following the last instruction,
982 resolve those now. We only handle prologue, body, and endp directives
983 here. Give an error for others. */
984 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
985 {
986 switch (ptr->r.type)
987 {
988 case prologue:
989 case prologue_gr:
990 case body:
991 case endp:
992 ptr->slot_number = (unsigned long) frag_more (0);
993 ptr->slot_frag = frag_now;
994 break;
995
996 /* Allow any record which doesn't have a "t" field (i.e.,
997 doesn't relate to a particular instruction). */
998 case unwabi:
999 case br_gr:
1000 case copy_state:
1001 case fr_mem:
1002 case frgr_mem:
1003 case gr_gr:
1004 case gr_mem:
1005 case label_state:
1006 case rp_br:
1007 case spill_base:
1008 case spill_mask:
1009 /* nothing */
1010 break;
1011
1012 default:
1013 as_bad (_("Unwind directive not followed by an instruction."));
1014 break;
1015 }
1016 }
1017 unwind.current_entry = NULL;
1018
1019 subseg_set (saved_seg, saved_subseg);
1020
1021 if (md.qp.X_op == O_register)
1022 as_bad (_("qualifying predicate not followed by instruction"));
1023 }
1024
1025 void
1026 ia64_cons_align (int nbytes)
1027 {
1028 if (md.auto_align)
1029 {
1030 int log;
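/* Count the trailing zero bits of nbytes (the log2 of its largest
   power-of-two divisor) and align to that boundary. */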
1031 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1032 log++;
1033
1034 do_align (log, NULL, 0, 0);
1035 }
1036 }
1037
1038 #ifdef TE_VMS
1039
1040 /* .vms_common section, symbol, size, alignment */
1041
1042 static void
1043 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1044 {
1045 const char *sec_name;
1046 char *sym_name;
1047 char c;
1048 offsetT size;
1049 offsetT cur_size;
1050 offsetT temp;
1051 symbolS *symbolP;
1052 segT current_seg = now_seg;
1053 subsegT current_subseg = now_subseg;
1054 offsetT log_align;
1055
1056 /* Section name. */
1057 sec_name = obj_elf_section_name ();
1058 if (sec_name == NULL)
1059 return;
1060
1061 /* Symbol name. */
1062 SKIP_WHITESPACE ();
1063 if (*input_line_pointer == ',')
1064 {
1065 input_line_pointer++;
1066 SKIP_WHITESPACE ();
1067 }
1068 else
1069 {
1070 as_bad (_("expected ',' after section name"));
1071 ignore_rest_of_line ();
1072 return;
1073 }
1074
1075 c = get_symbol_name (&sym_name);
1076
1077 if (input_line_pointer == sym_name)
1078 {
1079 (void) restore_line_pointer (c);
1080 as_bad (_("expected symbol name"));
1081 ignore_rest_of_line ();
1082 return;
1083 }
1084
1085 symbolP = symbol_find_or_make (sym_name);
1086 (void) restore_line_pointer (c);
1087
1088 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1089 && !S_IS_COMMON (symbolP))
1090 {
1091 as_bad (_("Ignoring attempt to re-define symbol"));
1092 ignore_rest_of_line ();
1093 return;
1094 }
1095
1096 /* Symbol size. */
1097 SKIP_WHITESPACE ();
1098 if (*input_line_pointer == ',')
1099 {
1100 input_line_pointer++;
1101 SKIP_WHITESPACE ();
1102 }
1103 else
1104 {
1105 as_bad (_("expected ',' after symbol name"));
1106 ignore_rest_of_line ();
1107 return;
1108 }
1109
1110 temp = get_absolute_expression ();
1111 size = temp;
1112 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1113 if (temp != size)
1114 {
1115 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1116 ignore_rest_of_line ();
1117 return;
1118 }
1119
1120 /* Alignment. */
1121 SKIP_WHITESPACE ();
1122 if (*input_line_pointer == ',')
1123 {
1124 input_line_pointer++;
1125 SKIP_WHITESPACE ();
1126 }
1127 else
1128 {
1129 as_bad (_("expected ',' after symbol size"));
1130 ignore_rest_of_line ();
1131 return;
1132 }
1133
1134 log_align = get_absolute_expression ();
1135
1136 demand_empty_rest_of_line ();
1137
1138 obj_elf_change_section
1139 (sec_name, SHT_NOBITS,
1140 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1141 0, NULL, true);
1142
1143 S_SET_VALUE (symbolP, 0);
1144 S_SET_SIZE (symbolP, size);
1145 S_SET_EXTERNAL (symbolP);
1146 S_SET_SEGMENT (symbolP, now_seg);
1147
1148 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1149
1150 record_alignment (now_seg, log_align);
1151
1152 cur_size = bfd_section_size (now_seg);
1153 if ((int) size > cur_size)
1154 {
1155 char *pfrag
1156 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1157 (valueT)size - (valueT)cur_size, NULL);
1158 *pfrag = 0;
1159 bfd_set_section_size (now_seg, size);
1160 }
1161
1162 /* Switch back to current segment. */
1163 subseg_set (current_seg, current_subseg);
1164
1165 #ifdef md_elf_section_change_hook
1166 md_elf_section_change_hook ();
1167 #endif
1168 }
1169
1170 #endif /* TE_VMS */
1171
1172 /* Output COUNT bytes to a memory location. */
1173 static char *vbyte_mem_ptr = NULL;
1174
1175 static void
1176 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1177 {
1178 int x;
1179 if (vbyte_mem_ptr == NULL)
1180 abort ();
1181
1182 if (count == 0)
1183 return;
1184 for (x = 0; x < count; x++)
1185 *(vbyte_mem_ptr++) = ptr[x];
1186 }
1187
1188 /* Count the number of bytes required for records. */
1189 static int vbyte_count = 0;
1190 static void
1191 count_output (int count,
1192 char *ptr ATTRIBUTE_UNUSED,
1193 char *comment ATTRIBUTE_UNUSED)
1194 {
1195 vbyte_count += count;
1196 }
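/* These two vbyte_func callbacks are presumably used as a pair by the
   record-processing code later in this file: count_output in a first pass
   to size the unwind area, then output_vbyte_mem in a second pass to emit
   the bytes at vbyte_mem_ptr. */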
1197
1198 static void
1199 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1200 {
1201 int r = 0;
1202 char byte;
1203 if (rlen > 0x1f)
1204 {
1205 output_R3_format (f, rtype, rlen);
1206 return;
1207 }
1208
1209 if (rtype == body)
1210 r = 1;
1211 else if (rtype != prologue)
1212 as_bad (_("record type is not valid"));
1213
1214 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1215 (*f) (1, &byte, NULL);
1216 }
1217
1218 static void
1219 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1220 {
1221 char bytes[20];
1222 int count = 2;
1223 mask = (mask & 0x0f);
1224 grsave = (grsave & 0x7f);
1225
1226 bytes[0] = (UNW_R2 | (mask >> 1));
1227 bytes[1] = (((mask & 0x01) << 7) | grsave);
1228 count += output_leb128 (bytes + 2, rlen, 0);
1229 (*f) (count, bytes, NULL);
1230 }
1231
1232 static void
1233 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1234 {
1235 int r = 0, count;
1236 char bytes[20];
1237 if (rlen <= 0x1f)
1238 {
1239 output_R1_format (f, rtype, rlen);
1240 return;
1241 }
1242
1243 if (rtype == body)
1244 r = 1;
1245 else if (rtype != prologue)
1246 as_bad (_("record type is not valid"));
1247 bytes[0] = (UNW_R3 | r);
1248 count = output_leb128 (bytes + 1, rlen, 0);
1249 (*f) (count + 1, bytes, NULL);
1250 }
1251
1252 static void
1253 output_P1_format (vbyte_func f, int brmask)
1254 {
1255 char byte;
1256 byte = UNW_P1 | (brmask & 0x1f);
1257 (*f) (1, &byte, NULL);
1258 }
1259
1260 static void
1261 output_P2_format (vbyte_func f, int brmask, int gr)
1262 {
1263 char bytes[2];
1264 brmask = (brmask & 0x1f);
1265 bytes[0] = UNW_P2 | (brmask >> 1);
1266 bytes[1] = (((brmask & 1) << 7) | gr);
1267 (*f) (2, bytes, NULL);
1268 }
1269
1270 static void
1271 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1272 {
1273 char bytes[2];
1274 int r = 0;
1275 reg = (reg & 0x7f);
1276 switch (rtype)
1277 {
1278 case psp_gr:
1279 r = 0;
1280 break;
1281 case rp_gr:
1282 r = 1;
1283 break;
1284 case pfs_gr:
1285 r = 2;
1286 break;
1287 case preds_gr:
1288 r = 3;
1289 break;
1290 case unat_gr:
1291 r = 4;
1292 break;
1293 case lc_gr:
1294 r = 5;
1295 break;
1296 case rp_br:
1297 r = 6;
1298 break;
1299 case rnat_gr:
1300 r = 7;
1301 break;
1302 case bsp_gr:
1303 r = 8;
1304 break;
1305 case bspstore_gr:
1306 r = 9;
1307 break;
1308 case fpsr_gr:
1309 r = 10;
1310 break;
1311 case priunat_gr:
1312 r = 11;
1313 break;
1314 default:
1315 as_bad (_("Invalid record type for P3 format."));
1316 }
1317 bytes[0] = (UNW_P3 | (r >> 1));
1318 bytes[1] = (((r & 1) << 7) | reg);
1319 (*f) (2, bytes, NULL);
1320 }
1321
1322 static void
1323 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1324 {
1325 imask[0] = UNW_P4;
1326 (*f) (imask_size, (char *) imask, NULL);
1327 }
1328
1329 static void
1330 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1331 {
1332 char bytes[4];
1333 grmask = (grmask & 0x0f);
1334
1335 bytes[0] = UNW_P5;
1336 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1337 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1338 bytes[3] = (frmask & 0x000000ff);
1339 (*f) (4, bytes, NULL);
1340 }
1341
1342 static void
1343 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1344 {
1345 char byte;
1346 int r = 0;
1347
1348 if (rtype == gr_mem)
1349 r = 1;
1350 else if (rtype != fr_mem)
1351 as_bad (_("Invalid record type for format P6"));
1352 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1353 (*f) (1, &byte, NULL);
1354 }
1355
1356 static void
1357 output_P7_format (vbyte_func f,
1358 unw_record_type rtype,
1359 unsigned long w1,
1360 unsigned long w2)
1361 {
1362 char bytes[20];
1363 int count = 1;
1364 int r = 0;
1365 count += output_leb128 (bytes + 1, w1, 0);
1366 switch (rtype)
1367 {
1368 case mem_stack_f:
1369 r = 0;
1370 count += output_leb128 (bytes + count, w2 >> 4, 0);
1371 break;
1372 case mem_stack_v:
1373 r = 1;
1374 break;
1375 case spill_base:
1376 r = 2;
1377 break;
1378 case psp_sprel:
1379 r = 3;
1380 break;
1381 case rp_when:
1382 r = 4;
1383 break;
1384 case rp_psprel:
1385 r = 5;
1386 break;
1387 case pfs_when:
1388 r = 6;
1389 break;
1390 case pfs_psprel:
1391 r = 7;
1392 break;
1393 case preds_when:
1394 r = 8;
1395 break;
1396 case preds_psprel:
1397 r = 9;
1398 break;
1399 case lc_when:
1400 r = 10;
1401 break;
1402 case lc_psprel:
1403 r = 11;
1404 break;
1405 case unat_when:
1406 r = 12;
1407 break;
1408 case unat_psprel:
1409 r = 13;
1410 break;
1411 case fpsr_when:
1412 r = 14;
1413 break;
1414 case fpsr_psprel:
1415 r = 15;
1416 break;
1417 default:
1418 break;
1419 }
1420 bytes[0] = (UNW_P7 | r);
1421 (*f) (count, bytes, NULL);
1422 }
1423
1424 static void
1425 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1426 {
1427 char bytes[20];
1428 int r = 0;
1429 int count = 2;
1430 bytes[0] = UNW_P8;
1431 switch (rtype)
1432 {
1433 case rp_sprel:
1434 r = 1;
1435 break;
1436 case pfs_sprel:
1437 r = 2;
1438 break;
1439 case preds_sprel:
1440 r = 3;
1441 break;
1442 case lc_sprel:
1443 r = 4;
1444 break;
1445 case unat_sprel:
1446 r = 5;
1447 break;
1448 case fpsr_sprel:
1449 r = 6;
1450 break;
1451 case bsp_when:
1452 r = 7;
1453 break;
1454 case bsp_psprel:
1455 r = 8;
1456 break;
1457 case bsp_sprel:
1458 r = 9;
1459 break;
1460 case bspstore_when:
1461 r = 10;
1462 break;
1463 case bspstore_psprel:
1464 r = 11;
1465 break;
1466 case bspstore_sprel:
1467 r = 12;
1468 break;
1469 case rnat_when:
1470 r = 13;
1471 break;
1472 case rnat_psprel:
1473 r = 14;
1474 break;
1475 case rnat_sprel:
1476 r = 15;
1477 break;
1478 case priunat_when_gr:
1479 r = 16;
1480 break;
1481 case priunat_psprel:
1482 r = 17;
1483 break;
1484 case priunat_sprel:
1485 r = 18;
1486 break;
1487 case priunat_when_mem:
1488 r = 19;
1489 break;
1490 default:
1491 break;
1492 }
1493 bytes[1] = r;
1494 count += output_leb128 (bytes + 2, t, 0);
1495 (*f) (count, bytes, NULL);
1496 }
1497
1498 static void
1499 output_P9_format (vbyte_func f, int grmask, int gr)
1500 {
1501 char bytes[3];
1502 bytes[0] = UNW_P9;
1503 bytes[1] = (grmask & 0x0f);
1504 bytes[2] = (gr & 0x7f);
1505 (*f) (3, bytes, NULL);
1506 }
1507
1508 static void
1509 output_P10_format (vbyte_func f, int abi, int context)
1510 {
1511 char bytes[3];
1512 bytes[0] = UNW_P10;
1513 bytes[1] = (abi & 0xff);
1514 bytes[2] = (context & 0xff);
1515 (*f) (3, bytes, NULL);
1516 }
1517
1518 static void
1519 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1520 {
1521 char byte;
1522 int r = 0;
1523 if (label > 0x1f)
1524 {
1525 output_B4_format (f, rtype, label);
1526 return;
1527 }
1528 if (rtype == copy_state)
1529 r = 1;
1530 else if (rtype != label_state)
1531 as_bad (_("Invalid record type for format B1"));
1532
1533 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1534 (*f) (1, &byte, NULL);
1535 }
1536
1537 static void
1538 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1539 {
1540 char bytes[20];
1541 int count = 1;
1542 if (ecount > 0x1f)
1543 {
1544 output_B3_format (f, ecount, t);
1545 return;
1546 }
1547 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1548 count += output_leb128 (bytes + 1, t, 0);
1549 (*f) (count, bytes, NULL);
1550 }
1551
1552 static void
1553 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1554 {
1555 char bytes[20];
1556 int count = 1;
1557 if (ecount <= 0x1f)
1558 {
1559 output_B2_format (f, ecount, t);
1560 return;
1561 }
1562 bytes[0] = UNW_B3;
1563 count += output_leb128 (bytes + 1, t, 0);
1564 count += output_leb128 (bytes + count, ecount, 0);
1565 (*f) (count, bytes, NULL);
1566 }
1567
1568 static void
1569 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1570 {
1571 char bytes[20];
1572 int r = 0;
1573 int count = 1;
1574 if (label <= 0x1f)
1575 {
1576 output_B1_format (f, rtype, label);
1577 return;
1578 }
1579
1580 if (rtype == copy_state)
1581 r = 1;
1582 else if (rtype != label_state)
1583 as_bad (_("Invalid record type for format B4"));
1584
1585 bytes[0] = (UNW_B4 | (r << 3));
1586 count += output_leb128 (bytes + 1, label, 0);
1587 (*f) (count, bytes, NULL);
1588 }
1589
1590 static char
1591 format_ab_reg (int ab, int reg)
1592 {
1593 int ret;
1594 ab = (ab & 3);
1595 reg = (reg & 0x1f);
1596 ret = (ab << 5) | reg;
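/* e.g. ab = 2, reg = 5 gives (2 << 5) | 5 = 0x45. */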
1597 return ret;
1598 }
1599
1600 static void
1601 output_X1_format (vbyte_func f,
1602 unw_record_type rtype,
1603 int ab,
1604 int reg,
1605 unsigned long t,
1606 unsigned long w1)
1607 {
1608 char bytes[20];
1609 int r = 0;
1610 int count = 2;
1611 bytes[0] = UNW_X1;
1612
1613 if (rtype == spill_sprel)
1614 r = 1;
1615 else if (rtype != spill_psprel)
1616 as_bad (_("Invalid record type for format X1"));
1617 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1618 count += output_leb128 (bytes + 2, t, 0);
1619 count += output_leb128 (bytes + count, w1, 0);
1620 (*f) (count, bytes, NULL);
1621 }
1622
1623 static void
1624 output_X2_format (vbyte_func f,
1625 int ab,
1626 int reg,
1627 int x,
1628 int y,
1629 int treg,
1630 unsigned long t)
1631 {
1632 char bytes[20];
1633 int count = 3;
1634 bytes[0] = UNW_X2;
1635 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1636 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1637 count += output_leb128 (bytes + 3, t, 0);
1638 (*f) (count, bytes, NULL);
1639 }
1640
1641 static void
1642 output_X3_format (vbyte_func f,
1643 unw_record_type rtype,
1644 int qp,
1645 int ab,
1646 int reg,
1647 unsigned long t,
1648 unsigned long w1)
1649 {
1650 char bytes[20];
1651 int r = 0;
1652 int count = 3;
1653 bytes[0] = UNW_X3;
1654
1655 if (rtype == spill_sprel_p)
1656 r = 1;
1657 else if (rtype != spill_psprel_p)
1658 as_bad (_("Invalid record type for format X3"));
1659 bytes[1] = ((r << 7) | (qp & 0x3f));
1660 bytes[2] = format_ab_reg (ab, reg);
1661 count += output_leb128 (bytes + 3, t, 0);
1662 count += output_leb128 (bytes + count, w1, 0);
1663 (*f) (count, bytes, NULL);
1664 }
1665
1666 static void
1667 output_X4_format (vbyte_func f,
1668 int qp,
1669 int ab,
1670 int reg,
1671 int x,
1672 int y,
1673 int treg,
1674 unsigned long t)
1675 {
1676 char bytes[20];
1677 int count = 4;
1678 bytes[0] = UNW_X4;
1679 bytes[1] = (qp & 0x3f);
1680 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1681 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1682 count += output_leb128 (bytes + 4, t, 0);
1683 (*f) (count, bytes, NULL);
1684 }
1685
1686 /* This function checks whether there are any outstanding .save-s and
1687 discards them if so. */
1688
1689 static void
1690 check_pending_save (void)
1691 {
1692 if (unwind.pending_saves)
1693 {
1694 unw_rec_list *cur, *prev;
1695
1696 as_warn (_("Previous .save incomplete"));
1697 for (cur = unwind.list, prev = NULL; cur; )
1698 if (&cur->r.record.p == unwind.pending_saves)
1699 {
1700 if (prev)
1701 prev->next = cur->next;
1702 else
1703 unwind.list = cur->next;
1704 if (cur == unwind.tail)
1705 unwind.tail = prev;
1706 if (cur == unwind.current_entry)
1707 unwind.current_entry = cur->next;
1708 /* Don't free the first discarded record: it's being used as a
1709 terminator for (currently) br_gr and gr_gr processing, and
1710 keeping it also prevents leaving a dangling pointer to it in
1711 its predecessor. */
1712 cur->r.record.p.grmask = 0;
1713 cur->r.record.p.brmask = 0;
1714 cur->r.record.p.frmask = 0;
1715 prev = cur->r.record.p.next;
1716 cur->r.record.p.next = NULL;
1717 cur = prev;
1718 break;
1719 }
1720 else
1721 {
1722 prev = cur;
1723 cur = cur->next;
1724 }
1725 while (cur)
1726 {
1727 prev = cur;
1728 cur = cur->r.record.p.next;
1729 free (prev);
1730 }
1731 unwind.pending_saves = NULL;
1732 }
1733 }
1734
1735 /* This function allocates a record list structure, and initializes fields. */
1736
1737 static unw_rec_list *
1738 alloc_record (unw_record_type t)
1739 {
1740 unw_rec_list *ptr;
1741 ptr = XNEW (unw_rec_list);
1742 memset (ptr, 0, sizeof (*ptr));
1743 ptr->slot_number = SLOT_NUM_NOT_SET;
1744 ptr->r.type = t;
1745 return ptr;
1746 }
1747
1748 /* Dummy unwind record used for calculating the length of the last prologue or
1749 body region. */
1750
1751 static unw_rec_list *
1752 output_endp (void)
1753 {
1754 unw_rec_list *ptr = alloc_record (endp);
1755 return ptr;
1756 }
1757
1758 static unw_rec_list *
1759 output_prologue (void)
1760 {
1761 unw_rec_list *ptr = alloc_record (prologue);
1762 return ptr;
1763 }
1764
1765 static unw_rec_list *
1766 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1767 {
1768 unw_rec_list *ptr = alloc_record (prologue_gr);
1769 ptr->r.record.r.grmask = saved_mask;
1770 ptr->r.record.r.grsave = reg;
1771 return ptr;
1772 }
1773
1774 static unw_rec_list *
1775 output_body (void)
1776 {
1777 unw_rec_list *ptr = alloc_record (body);
1778 return ptr;
1779 }
1780
1781 static unw_rec_list *
1782 output_mem_stack_f (unsigned int size)
1783 {
1784 unw_rec_list *ptr = alloc_record (mem_stack_f);
1785 ptr->r.record.p.size = size;
1786 return ptr;
1787 }
1788
1789 static unw_rec_list *
1790 output_mem_stack_v (void)
1791 {
1792 unw_rec_list *ptr = alloc_record (mem_stack_v);
1793 return ptr;
1794 }
1795
1796 static unw_rec_list *
1797 output_psp_gr (unsigned int gr)
1798 {
1799 unw_rec_list *ptr = alloc_record (psp_gr);
1800 ptr->r.record.p.r.gr = gr;
1801 return ptr;
1802 }
1803
1804 static unw_rec_list *
1805 output_psp_sprel (unsigned int offset)
1806 {
1807 unw_rec_list *ptr = alloc_record (psp_sprel);
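/* sp-relative offsets are byte offsets; the unwind format encodes them
   as a count of 4-byte words, hence the division by 4 here and in the
   other *_sprel routines below. */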
1808 ptr->r.record.p.off.sp = offset / 4;
1809 return ptr;
1810 }
1811
1812 static unw_rec_list *
1813 output_rp_when (void)
1814 {
1815 unw_rec_list *ptr = alloc_record (rp_when);
1816 return ptr;
1817 }
1818
1819 static unw_rec_list *
1820 output_rp_gr (unsigned int gr)
1821 {
1822 unw_rec_list *ptr = alloc_record (rp_gr);
1823 ptr->r.record.p.r.gr = gr;
1824 return ptr;
1825 }
1826
1827 static unw_rec_list *
1828 output_rp_br (unsigned int br)
1829 {
1830 unw_rec_list *ptr = alloc_record (rp_br);
1831 ptr->r.record.p.r.br = br;
1832 return ptr;
1833 }
1834
1835 static unw_rec_list *
1836 output_rp_psprel (unsigned int offset)
1837 {
1838 unw_rec_list *ptr = alloc_record (rp_psprel);
1839 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1840 return ptr;
1841 }
1842
1843 static unw_rec_list *
1844 output_rp_sprel (unsigned int offset)
1845 {
1846 unw_rec_list *ptr = alloc_record (rp_sprel);
1847 ptr->r.record.p.off.sp = offset / 4;
1848 return ptr;
1849 }
1850
1851 static unw_rec_list *
1852 output_pfs_when (void)
1853 {
1854 unw_rec_list *ptr = alloc_record (pfs_when);
1855 return ptr;
1856 }
1857
1858 static unw_rec_list *
1859 output_pfs_gr (unsigned int gr)
1860 {
1861 unw_rec_list *ptr = alloc_record (pfs_gr);
1862 ptr->r.record.p.r.gr = gr;
1863 return ptr;
1864 }
1865
1866 static unw_rec_list *
1867 output_pfs_psprel (unsigned int offset)
1868 {
1869 unw_rec_list *ptr = alloc_record (pfs_psprel);
1870 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1871 return ptr;
1872 }
1873
1874 static unw_rec_list *
1875 output_pfs_sprel (unsigned int offset)
1876 {
1877 unw_rec_list *ptr = alloc_record (pfs_sprel);
1878 ptr->r.record.p.off.sp = offset / 4;
1879 return ptr;
1880 }
1881
1882 static unw_rec_list *
1883 output_preds_when (void)
1884 {
1885 unw_rec_list *ptr = alloc_record (preds_when);
1886 return ptr;
1887 }
1888
1889 static unw_rec_list *
1890 output_preds_gr (unsigned int gr)
1891 {
1892 unw_rec_list *ptr = alloc_record (preds_gr);
1893 ptr->r.record.p.r.gr = gr;
1894 return ptr;
1895 }
1896
1897 static unw_rec_list *
1898 output_preds_psprel (unsigned int offset)
1899 {
1900 unw_rec_list *ptr = alloc_record (preds_psprel);
1901 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1902 return ptr;
1903 }
1904
1905 static unw_rec_list *
1906 output_preds_sprel (unsigned int offset)
1907 {
1908 unw_rec_list *ptr = alloc_record (preds_sprel);
1909 ptr->r.record.p.off.sp = offset / 4;
1910 return ptr;
1911 }
1912
1913 static unw_rec_list *
1914 output_fr_mem (unsigned int mask)
1915 {
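/* A mask with several bits set is split into a chain of single-bit
   records, least significant bit first: e.g. MASK 0x5 yields a record
   with frmask 0x1 whose p.next record has frmask 0x4.  The same peeling
   scheme is used by the other output_*_mem/_gr routines below. */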
1916 unw_rec_list *ptr = alloc_record (fr_mem);
1917 unw_rec_list *cur = ptr;
1918
1919 ptr->r.record.p.frmask = mask;
1920 unwind.pending_saves = &ptr->r.record.p;
1921 for (;;)
1922 {
1923 unw_rec_list *prev = cur;
1924
1925 /* Clear least significant set bit. */
1926 mask &= ~(mask & (~mask + 1));
1927 if (!mask)
1928 return ptr;
1929 cur = alloc_record (fr_mem);
1930 cur->r.record.p.frmask = mask;
1931 /* Retain only least significant bit. */
1932 prev->r.record.p.frmask ^= mask;
1933 prev->r.record.p.next = cur;
1934 }
1935 }
1936
1937 static unw_rec_list *
1938 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1939 {
1940 unw_rec_list *ptr = alloc_record (frgr_mem);
1941 unw_rec_list *cur = ptr;
1942
1943 unwind.pending_saves = &cur->r.record.p;
1944 cur->r.record.p.frmask = fr_mask;
1945 while (fr_mask)
1946 {
1947 unw_rec_list *prev = cur;
1948
1949 /* Clear least significant set bit. */
1950 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1951 if (!gr_mask && !fr_mask)
1952 return ptr;
1953 cur = alloc_record (frgr_mem);
1954 cur->r.record.p.frmask = fr_mask;
1955 /* Retain only least significant bit. */
1956 prev->r.record.p.frmask ^= fr_mask;
1957 prev->r.record.p.next = cur;
1958 }
1959 cur->r.record.p.grmask = gr_mask;
1960 for (;;)
1961 {
1962 unw_rec_list *prev = cur;
1963
1964 /* Clear least significant set bit. */
1965 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1966 if (!gr_mask)
1967 return ptr;
1968 cur = alloc_record (frgr_mem);
1969 cur->r.record.p.grmask = gr_mask;
1970 /* Retain only least significant bit. */
1971 prev->r.record.p.grmask ^= gr_mask;
1972 prev->r.record.p.next = cur;
1973 }
1974 }
1975
1976 static unw_rec_list *
1977 output_gr_gr (unsigned int mask, unsigned int reg)
1978 {
1979 unw_rec_list *ptr = alloc_record (gr_gr);
1980 unw_rec_list *cur = ptr;
1981
1982 ptr->r.record.p.grmask = mask;
1983 ptr->r.record.p.r.gr = reg;
1984 unwind.pending_saves = &ptr->r.record.p;
1985 for (;;)
1986 {
1987 unw_rec_list *prev = cur;
1988
1989 /* Clear least significant set bit. */
1990 mask &= ~(mask & (~mask + 1));
1991 if (!mask)
1992 return ptr;
1993 cur = alloc_record (gr_gr);
1994 cur->r.record.p.grmask = mask;
1995 /* Indicate this record shouldn't be output. */
1996 cur->r.record.p.r.gr = REG_NUM;
1997 /* Retain only least significant bit. */
1998 prev->r.record.p.grmask ^= mask;
1999 prev->r.record.p.next = cur;
2000 }
2001 }
2002
2003 static unw_rec_list *
2004 output_gr_mem (unsigned int mask)
2005 {
2006 unw_rec_list *ptr = alloc_record (gr_mem);
2007 unw_rec_list *cur = ptr;
2008
2009 ptr->r.record.p.grmask = mask;
2010 unwind.pending_saves = &ptr->r.record.p;
2011 for (;;)
2012 {
2013 unw_rec_list *prev = cur;
2014
2015 /* Clear least significant set bit. */
2016 mask &= ~(mask & (~mask + 1));
2017 if (!mask)
2018 return ptr;
2019 cur = alloc_record (gr_mem);
2020 cur->r.record.p.grmask = mask;
2021 /* Retain only least significant bit. */
2022 prev->r.record.p.grmask ^= mask;
2023 prev->r.record.p.next = cur;
2024 }
2025 }
2026
2027 static unw_rec_list *
2028 output_br_mem (unsigned int mask)
2029 {
2030 unw_rec_list *ptr = alloc_record (br_mem);
2031 unw_rec_list *cur = ptr;
2032
2033 ptr->r.record.p.brmask = mask;
2034 unwind.pending_saves = &ptr->r.record.p;
2035 for (;;)
2036 {
2037 unw_rec_list *prev = cur;
2038
2039 /* Clear least significant set bit. */
2040 mask &= ~(mask & (~mask + 1));
2041 if (!mask)
2042 return ptr;
2043 cur = alloc_record (br_mem);
2044 cur->r.record.p.brmask = mask;
2045 /* Retain only least significant bit. */
2046 prev->r.record.p.brmask ^= mask;
2047 prev->r.record.p.next = cur;
2048 }
2049 }
2050
2051 static unw_rec_list *
2052 output_br_gr (unsigned int mask, unsigned int reg)
2053 {
2054 unw_rec_list *ptr = alloc_record (br_gr);
2055 unw_rec_list *cur = ptr;
2056
2057 ptr->r.record.p.brmask = mask;
2058 ptr->r.record.p.r.gr = reg;
2059 unwind.pending_saves = &ptr->r.record.p;
2060 for (;;)
2061 {
2062 unw_rec_list *prev = cur;
2063
2064 /* Clear least significant set bit. */
2065 mask &= ~(mask & (~mask + 1));
2066 if (!mask)
2067 return ptr;
2068 cur = alloc_record (br_gr);
2069 cur->r.record.p.brmask = mask;
2070 /* Indicate this record shouldn't be output. */
2071 cur->r.record.p.r.gr = REG_NUM;
2072 /* Retain only least significant bit. */
2073 prev->r.record.p.brmask ^= mask;
2074 prev->r.record.p.next = cur;
2075 }
2076 }
2077
2078 static unw_rec_list *
2079 output_spill_base (unsigned int offset)
2080 {
2081 unw_rec_list *ptr = alloc_record (spill_base);
2082 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2083 return ptr;
2084 }
2085
2086 static unw_rec_list *
2087 output_unat_when (void)
2088 {
2089 unw_rec_list *ptr = alloc_record (unat_when);
2090 return ptr;
2091 }
2092
2093 static unw_rec_list *
2094 output_unat_gr (unsigned int gr)
2095 {
2096 unw_rec_list *ptr = alloc_record (unat_gr);
2097 ptr->r.record.p.r.gr = gr;
2098 return ptr;
2099 }
2100
2101 static unw_rec_list *
2102 output_unat_psprel (unsigned int offset)
2103 {
2104 unw_rec_list *ptr = alloc_record (unat_psprel);
2105 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2106 return ptr;
2107 }
2108
2109 static unw_rec_list *
2110 output_unat_sprel (unsigned int offset)
2111 {
2112 unw_rec_list *ptr = alloc_record (unat_sprel);
2113 ptr->r.record.p.off.sp = offset / 4;
2114 return ptr;
2115 }
2116
2117 static unw_rec_list *
2118 output_lc_when (void)
2119 {
2120 unw_rec_list *ptr = alloc_record (lc_when);
2121 return ptr;
2122 }
2123
2124 static unw_rec_list *
2125 output_lc_gr (unsigned int gr)
2126 {
2127 unw_rec_list *ptr = alloc_record (lc_gr);
2128 ptr->r.record.p.r.gr = gr;
2129 return ptr;
2130 }
2131
2132 static unw_rec_list *
2133 output_lc_psprel (unsigned int offset)
2134 {
2135 unw_rec_list *ptr = alloc_record (lc_psprel);
2136 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2137 return ptr;
2138 }
2139
2140 static unw_rec_list *
2141 output_lc_sprel (unsigned int offset)
2142 {
2143 unw_rec_list *ptr = alloc_record (lc_sprel);
2144 ptr->r.record.p.off.sp = offset / 4;
2145 return ptr;
2146 }
2147
2148 static unw_rec_list *
2149 output_fpsr_when (void)
2150 {
2151 unw_rec_list *ptr = alloc_record (fpsr_when);
2152 return ptr;
2153 }
2154
2155 static unw_rec_list *
2156 output_fpsr_gr (unsigned int gr)
2157 {
2158 unw_rec_list *ptr = alloc_record (fpsr_gr);
2159 ptr->r.record.p.r.gr = gr;
2160 return ptr;
2161 }
2162
2163 static unw_rec_list *
2164 output_fpsr_psprel (unsigned int offset)
2165 {
2166 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2167 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2168 return ptr;
2169 }
2170
2171 static unw_rec_list *
2172 output_fpsr_sprel (unsigned int offset)
2173 {
2174 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2175 ptr->r.record.p.off.sp = offset / 4;
2176 return ptr;
2177 }
2178
2179 static unw_rec_list *
2180 output_priunat_when_gr (void)
2181 {
2182 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2183 return ptr;
2184 }
2185
2186 static unw_rec_list *
2187 output_priunat_when_mem (void)
2188 {
2189 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2190 return ptr;
2191 }
2192
2193 static unw_rec_list *
2194 output_priunat_gr (unsigned int gr)
2195 {
2196 unw_rec_list *ptr = alloc_record (priunat_gr);
2197 ptr->r.record.p.r.gr = gr;
2198 return ptr;
2199 }
2200
2201 static unw_rec_list *
2202 output_priunat_psprel (unsigned int offset)
2203 {
2204 unw_rec_list *ptr = alloc_record (priunat_psprel);
2205 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2206 return ptr;
2207 }
2208
2209 static unw_rec_list *
2210 output_priunat_sprel (unsigned int offset)
2211 {
2212 unw_rec_list *ptr = alloc_record (priunat_sprel);
2213 ptr->r.record.p.off.sp = offset / 4;
2214 return ptr;
2215 }
2216
2217 static unw_rec_list *
2218 output_bsp_when (void)
2219 {
2220 unw_rec_list *ptr = alloc_record (bsp_when);
2221 return ptr;
2222 }
2223
2224 static unw_rec_list *
2225 output_bsp_gr (unsigned int gr)
2226 {
2227 unw_rec_list *ptr = alloc_record (bsp_gr);
2228 ptr->r.record.p.r.gr = gr;
2229 return ptr;
2230 }
2231
2232 static unw_rec_list *
2233 output_bsp_psprel (unsigned int offset)
2234 {
2235 unw_rec_list *ptr = alloc_record (bsp_psprel);
2236 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2237 return ptr;
2238 }
2239
2240 static unw_rec_list *
2241 output_bsp_sprel (unsigned int offset)
2242 {
2243 unw_rec_list *ptr = alloc_record (bsp_sprel);
2244 ptr->r.record.p.off.sp = offset / 4;
2245 return ptr;
2246 }
2247
2248 static unw_rec_list *
2249 output_bspstore_when (void)
2250 {
2251 unw_rec_list *ptr = alloc_record (bspstore_when);
2252 return ptr;
2253 }
2254
2255 static unw_rec_list *
2256 output_bspstore_gr (unsigned int gr)
2257 {
2258 unw_rec_list *ptr = alloc_record (bspstore_gr);
2259 ptr->r.record.p.r.gr = gr;
2260 return ptr;
2261 }
2262
2263 static unw_rec_list *
2264 output_bspstore_psprel (unsigned int offset)
2265 {
2266 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2267 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2268 return ptr;
2269 }
2270
2271 static unw_rec_list *
2272 output_bspstore_sprel (unsigned int offset)
2273 {
2274 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2275 ptr->r.record.p.off.sp = offset / 4;
2276 return ptr;
2277 }
2278
2279 static unw_rec_list *
2280 output_rnat_when (void)
2281 {
2282 unw_rec_list *ptr = alloc_record (rnat_when);
2283 return ptr;
2284 }
2285
2286 static unw_rec_list *
2287 output_rnat_gr (unsigned int gr)
2288 {
2289 unw_rec_list *ptr = alloc_record (rnat_gr);
2290 ptr->r.record.p.r.gr = gr;
2291 return ptr;
2292 }
2293
2294 static unw_rec_list *
2295 output_rnat_psprel (unsigned int offset)
2296 {
2297 unw_rec_list *ptr = alloc_record (rnat_psprel);
2298 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2299 return ptr;
2300 }
2301
2302 static unw_rec_list *
2303 output_rnat_sprel (unsigned int offset)
2304 {
2305 unw_rec_list *ptr = alloc_record (rnat_sprel);
2306 ptr->r.record.p.off.sp = offset / 4;
2307 return ptr;
2308 }
2309
2310 static unw_rec_list *
2311 output_unwabi (unsigned long abi, unsigned long context)
2312 {
2313 unw_rec_list *ptr = alloc_record (unwabi);
2314 ptr->r.record.p.abi = abi;
2315 ptr->r.record.p.context = context;
2316 return ptr;
2317 }
2318
2319 static unw_rec_list *
2320 output_epilogue (unsigned long ecount)
2321 {
2322 unw_rec_list *ptr = alloc_record (epilogue);
2323 ptr->r.record.b.ecount = ecount;
2324 return ptr;
2325 }
2326
2327 static unw_rec_list *
2328 output_label_state (unsigned long label)
2329 {
2330 unw_rec_list *ptr = alloc_record (label_state);
2331 ptr->r.record.b.label = label;
2332 return ptr;
2333 }
2334
2335 static unw_rec_list *
2336 output_copy_state (unsigned long label)
2337 {
2338 unw_rec_list *ptr = alloc_record (copy_state);
2339 ptr->r.record.b.label = label;
2340 return ptr;
2341 }
2342
2343 static unw_rec_list *
2344 output_spill_psprel (unsigned int ab,
2345 unsigned int reg,
2346 unsigned int offset,
2347 unsigned int predicate)
2348 {
2349 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2350 ptr->r.record.x.ab = ab;
2351 ptr->r.record.x.reg = reg;
2352 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2353 ptr->r.record.x.qp = predicate;
2354 return ptr;
2355 }
2356
2357 static unw_rec_list *
2358 output_spill_sprel (unsigned int ab,
2359 unsigned int reg,
2360 unsigned int offset,
2361 unsigned int predicate)
2362 {
2363 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2364 ptr->r.record.x.ab = ab;
2365 ptr->r.record.x.reg = reg;
2366 ptr->r.record.x.where.spoff = offset / 4;
2367 ptr->r.record.x.qp = predicate;
2368 return ptr;
2369 }
2370
2371 static unw_rec_list *
2372 output_spill_reg (unsigned int ab,
2373 unsigned int reg,
2374 unsigned int targ_reg,
2375 unsigned int xy,
2376 unsigned int predicate)
2377 {
2378 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2379 ptr->r.record.x.ab = ab;
2380 ptr->r.record.x.reg = reg;
2381 ptr->r.record.x.where.reg = targ_reg;
2382 ptr->r.record.x.xy = xy;
2383 ptr->r.record.x.qp = predicate;
2384 return ptr;
2385 }
2386
2387 /* Given a unw_rec_list process the correct format with the
2388 specified function. */
2389
2390 static void
2391 process_one_record (unw_rec_list *ptr, vbyte_func f)
2392 {
2393 unsigned int fr_mask, gr_mask;
2394
2395 switch (ptr->r.type)
2396 {
2397 /* This is a dummy record that takes up no space in the output. */
2398 case endp:
2399 break;
2400
2401 case gr_mem:
2402 case fr_mem:
2403 case br_mem:
2404 case frgr_mem:
2405 /* These are taken care of by prologue/prologue_gr. */
2406 break;
2407
2408 case prologue_gr:
2409 case prologue:
2410 if (ptr->r.type == prologue_gr)
2411 output_R2_format (f, ptr->r.record.r.grmask,
2412 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2413 else
2414 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2415
2416 /* Output descriptor(s) for union of register spills (if any). */
2417 gr_mask = ptr->r.record.r.mask.gr_mem;
2418 fr_mask = ptr->r.record.r.mask.fr_mem;
2419 if (fr_mask)
2420 {
2421 if ((fr_mask & ~0xfUL) == 0)
2422 output_P6_format (f, fr_mem, fr_mask);
2423 else
2424 {
2425 output_P5_format (f, gr_mask, fr_mask);
2426 gr_mask = 0;
2427 }
2428 }
2429 if (gr_mask)
2430 output_P6_format (f, gr_mem, gr_mask);
2431 if (ptr->r.record.r.mask.br_mem)
2432 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2433
2434 /* output imask descriptor if necessary: */
2435 if (ptr->r.record.r.mask.i)
2436 output_P4_format (f, ptr->r.record.r.mask.i,
2437 ptr->r.record.r.imask_size);
2438 break;
2439
2440 case body:
2441 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2442 break;
2443 case mem_stack_f:
2444 case mem_stack_v:
2445 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2446 ptr->r.record.p.size);
2447 break;
2448 case psp_gr:
2449 case rp_gr:
2450 case pfs_gr:
2451 case preds_gr:
2452 case unat_gr:
2453 case lc_gr:
2454 case fpsr_gr:
2455 case priunat_gr:
2456 case bsp_gr:
2457 case bspstore_gr:
2458 case rnat_gr:
2459 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2460 break;
2461 case rp_br:
2462 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2463 break;
2464 case psp_sprel:
2465 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2466 break;
2467 case rp_when:
2468 case pfs_when:
2469 case preds_when:
2470 case unat_when:
2471 case lc_when:
2472 case fpsr_when:
2473 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2474 break;
2475 case rp_psprel:
2476 case pfs_psprel:
2477 case preds_psprel:
2478 case unat_psprel:
2479 case lc_psprel:
2480 case fpsr_psprel:
2481 case spill_base:
2482 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2483 break;
2484 case rp_sprel:
2485 case pfs_sprel:
2486 case preds_sprel:
2487 case unat_sprel:
2488 case lc_sprel:
2489 case fpsr_sprel:
2490 case priunat_sprel:
2491 case bsp_sprel:
2492 case bspstore_sprel:
2493 case rnat_sprel:
2494 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2495 break;
2496 case gr_gr:
2497 if (ptr->r.record.p.r.gr < REG_NUM)
2498 {
2499 const unw_rec_list *cur = ptr;
2500
2501 gr_mask = cur->r.record.p.grmask;
2502 while ((cur = cur->r.record.p.next) != NULL)
2503 gr_mask |= cur->r.record.p.grmask;
2504 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2505 }
2506 break;
2507 case br_gr:
2508 if (ptr->r.record.p.r.gr < REG_NUM)
2509 {
2510 const unw_rec_list *cur = ptr;
2511
2512 gr_mask = cur->r.record.p.brmask;
2513 while ((cur = cur->r.record.p.next) != NULL)
2514 gr_mask |= cur->r.record.p.brmask;
2515 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2516 }
2517 break;
2518 case spill_mask:
2519 as_bad (_("spill_mask record unimplemented."));
2520 break;
2521 case priunat_when_gr:
2522 case priunat_when_mem:
2523 case bsp_when:
2524 case bspstore_when:
2525 case rnat_when:
2526 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2527 break;
2528 case priunat_psprel:
2529 case bsp_psprel:
2530 case bspstore_psprel:
2531 case rnat_psprel:
2532 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2533 break;
2534 case unwabi:
2535 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2536 break;
2537 case epilogue:
2538 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2539 break;
2540 case label_state:
2541 case copy_state:
2542 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2543 break;
2544 case spill_psprel:
2545 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2546 ptr->r.record.x.reg, ptr->r.record.x.t,
2547 ptr->r.record.x.where.pspoff);
2548 break;
2549 case spill_sprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.spoff);
2553 break;
2554 case spill_reg:
2555 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2556 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2557 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2558 break;
2559 case spill_psprel_p:
2560 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2561 ptr->r.record.x.ab, ptr->r.record.x.reg,
2562 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2563 break;
2564 case spill_sprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2568 break;
2569 case spill_reg_p:
2570 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2571 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2572 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2573 ptr->r.record.x.t);
2574 break;
2575 default:
2576 as_bad (_("record_type_not_valid"));
2577 break;
2578 }
2579 }
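/* The output_*_format calls above correspond to the descriptor families
   of the IA-64 unwind format: R formats describe region headers
   (prologue/body), P formats describe prologue contents, B formats carry
   epilogue counts and label/copy state, and X formats describe spills of
   individual registers, the *_p variants adding a qualifying predicate.  */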
2580
2581 /* Given a unw_rec_list list, process all the records with
2582 the specified function. */
2583 static void
2584 process_unw_records (unw_rec_list *list, vbyte_func f)
2585 {
2586 unw_rec_list *ptr;
2587 for (ptr = list; ptr; ptr = ptr->next)
2588 process_one_record (ptr, f);
2589 }
2590
2591 /* Determine the size of a record list in bytes. */
2592 static int
2593 calc_record_size (unw_rec_list *list)
2594 {
2595 vbyte_count = 0;
2596 process_unw_records (list, count_output);
2597 return vbyte_count;
2598 }
2599
2600 /* Return the number of bits set in the input value.
2601 Perhaps this has a better place... */
2602 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2603 # define popcount __builtin_popcount
2604 #else
2605 static int
2606 popcount (unsigned x)
2607 {
2608 static const unsigned char popcnt[16] =
2609 {
2610 0, 1, 1, 2,
2611 1, 2, 2, 3,
2612 1, 2, 2, 3,
2613 2, 3, 3, 4
2614 };
2615
2616 if (x < NELEMS (popcnt))
2617 return popcnt[x];
2618 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2619 }
2620 #endif
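/* Example of the table-driven fallback above:
       popcount (0xc5) = popcnt[0xc5 % 16] + popcount (0xc5 / 16)
                       = popcnt[5] + popcnt[12]
                       = 2 + 2 = 4
   i.e. one nibble is handled per recursion step.  */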
2621
2622 /* Update IMASK bitmask to reflect the fact that one or more registers
2623 of type TYPE are saved starting at instruction with index T. If N
2624 bits are set in REGMASK, it is assumed that instructions T through
2625 T+N-1 save these registers.
2626
2627 TYPE values:
2628 0: no save
2629 1: instruction saves next fp reg
2630 2: instruction saves next general reg
2631 3: instruction saves next branch reg */
2632 static void
2633 set_imask (unw_rec_list *region,
2634 unsigned long regmask,
2635 unsigned long t,
2636 unsigned int type)
2637 {
2638 unsigned char *imask;
2639 unsigned long imask_size;
2640 unsigned int i;
2641 int pos;
2642
2643 imask = region->r.record.r.mask.i;
2644 imask_size = region->r.record.r.imask_size;
2645 if (!imask)
2646 {
2647 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2648 imask = XCNEWVEC (unsigned char, imask_size);
2649
2650 region->r.record.r.imask_size = imask_size;
2651 region->r.record.r.mask.i = imask;
2652 }
2653
2654 i = (t / 4) + 1;
2655 pos = 2 * (3 - t % 4);
2656 while (regmask)
2657 {
2658 if (i >= imask_size)
2659 {
2660 as_bad (_("Ignoring attempt to spill beyond end of region"));
2661 return;
2662 }
2663
2664 imask[i] |= (type & 0x3) << pos;
2665
2666 regmask &= (regmask - 1);
2667 pos -= 2;
2668 if (pos < 0)
2669 {
2670 pos = 6;
2671 ++i;
2672 }
2673 }
2674 }
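/* Example of the packing performed above: the region's imask holds one
   2-bit type field per instruction slot, four slots per byte with the
   earliest slot in the most significant pair, starting at byte
   (t / 4) + 1.  For t = 5 and two bits set in REGMASK the fields land
   in imask[2] at bit positions 5:4 and 3:2.  */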
2675
2676 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2677 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2678 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2679 for frag sizes. */
2680
2681 static unsigned long
2682 slot_index (unsigned long slot_addr,
2683 fragS *slot_frag,
2684 unsigned long first_addr,
2685 fragS *first_frag,
2686 int before_relax)
2687 {
2688 unsigned long s_index = 0;
2689
2690 /* First time we are called, the initial address and frag are invalid. */
2691 if (first_addr == 0)
2692 return 0;
2693
2694 /* If the two addresses are in different frags, then we need to add in
2695 the remaining size of this frag, and then the entire size of intermediate
2696 frags. */
2697 while (slot_frag != first_frag)
2698 {
2699 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2700
2701 if (! before_relax)
2702 {
2703 /* We can get the final addresses only during and after
2704 relaxation. */
2705 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2706 s_index += 3 * ((first_frag->fr_next->fr_address
2707 - first_frag->fr_address
2708 - first_frag->fr_fix) >> 4);
2709 }
2710 else
2711 /* We don't know what the final addresses will be. We try our
2712 best to estimate. */
2713 switch (first_frag->fr_type)
2714 {
2715 default:
2716 break;
2717
2718 case rs_space:
2719 as_fatal (_("Only constant space allocation is supported"));
2720 break;
2721
2722 case rs_align:
2723 case rs_align_code:
2724 case rs_align_test:
2725 /* Take alignment into account. Assume the worst case
2726 before relaxation. */
2727 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2728 break;
2729
2730 case rs_org:
2731 if (first_frag->fr_symbol)
2732 {
2733 as_fatal (_("Only constant offsets are supported"));
2734 break;
2735 }
2736 /* Fall through. */
2737 case rs_fill:
2738 s_index += 3 * (first_frag->fr_offset >> 4);
2739 break;
2740 }
2741
2742 /* Add in the full size of the frag converted to instruction slots. */
2743 s_index += 3 * (first_frag->fr_fix >> 4);
2744 /* Subtract away the initial part before first_addr. */
2745 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2746 + ((first_addr & 0x3) - (start_addr & 0x3)));
2747
2748 /* Move to the beginning of the next frag. */
2749 first_frag = first_frag->fr_next;
2750 first_addr = (unsigned long) &first_frag->fr_literal;
2751
2752 /* This can happen if there is section switching in the middle of a
2753 function, causing the frag chain for the function to be broken.
2754 It is too difficult to recover safely from this problem, so we just
2755 exit with an error. */
2756 if (first_frag == NULL)
2757 as_fatal (_("Section switching in code is not supported."));
2758 }
2759
2760 /* Add in the used part of the last frag. */
2761 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2762 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2763 return s_index;
2764 }
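/* Worked example of the arithmetic above: IA-64 bundles are 16 bytes
   and hold three instruction slots, and a "slot address" here is the
   bundle address plus a slot number in the low two bits.  The distance
   in slots between two such addresses is therefore
       3 * ((a >> 4) - (b >> 4)) + ((a & 0x3) - (b & 0x3))
   which is the expression used for both the leading partial frag and
   the final frag.  */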
2765
2766 /* Optimize unwind record directives. */
2767
2768 static unw_rec_list *
2769 optimize_unw_records (unw_rec_list *list)
2770 {
2771 if (!list)
2772 return NULL;
2773
2774 /* If the only unwind record is ".prologue" or ".prologue" followed
2775 by ".body", then we can optimize the unwind directives away. */
2776 if (list->r.type == prologue
2777 && (list->next->r.type == endp
2778 || (list->next->r.type == body && list->next->next->r.type == endp)))
2779 return NULL;
2780
2781 return list;
2782 }
2783
2784 /* Given a complete record list, process any records which have
2785 unresolved fields (i.e., length counts for a prologue). After
2786 this has been run, all necessary information should be available
2787 within each record to generate an image. */
2788
2789 static void
2790 fixup_unw_records (unw_rec_list *list, int before_relax)
2791 {
2792 unw_rec_list *ptr, *region = 0;
2793 unsigned long first_addr = 0, rlen = 0, t;
2794 fragS *first_frag = 0;
2795
2796 for (ptr = list; ptr; ptr = ptr->next)
2797 {
2798 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2799 as_bad (_("Insn slot not set in unwind record."));
2800 t = slot_index (ptr->slot_number, ptr->slot_frag,
2801 first_addr, first_frag, before_relax);
2802 switch (ptr->r.type)
2803 {
2804 case prologue:
2805 case prologue_gr:
2806 case body:
2807 {
2808 unw_rec_list *last;
2809 int size;
2810 unsigned long last_addr = 0;
2811 fragS *last_frag = NULL;
2812
2813 first_addr = ptr->slot_number;
2814 first_frag = ptr->slot_frag;
2815 /* Find either the next body/prologue start, or the end of
2816 the function, and determine the size of the region. */
2817 for (last = ptr->next; last != NULL; last = last->next)
2818 if (last->r.type == prologue || last->r.type == prologue_gr
2819 || last->r.type == body || last->r.type == endp)
2820 {
2821 last_addr = last->slot_number;
2822 last_frag = last->slot_frag;
2823 break;
2824 }
2825 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2826 before_relax);
2827 rlen = ptr->r.record.r.rlen = size;
2828 if (ptr->r.type == body)
2829 /* End of region. */
2830 region = 0;
2831 else
2832 region = ptr;
2833 break;
2834 }
2835 case epilogue:
2836 if (t < rlen)
2837 ptr->r.record.b.t = rlen - 1 - t;
2838 else
2839 /* This happens when a memory-stack-less procedure uses a
2840 ".restore sp" directive at the end of a region to pop
2841 the frame state. */
2842 ptr->r.record.b.t = 0;
2843 break;
2844
2845 case mem_stack_f:
2846 case mem_stack_v:
2847 case rp_when:
2848 case pfs_when:
2849 case preds_when:
2850 case unat_when:
2851 case lc_when:
2852 case fpsr_when:
2853 case priunat_when_gr:
2854 case priunat_when_mem:
2855 case bsp_when:
2856 case bspstore_when:
2857 case rnat_when:
2858 ptr->r.record.p.t = t;
2859 break;
2860
2861 case spill_reg:
2862 case spill_sprel:
2863 case spill_psprel:
2864 case spill_reg_p:
2865 case spill_sprel_p:
2866 case spill_psprel_p:
2867 ptr->r.record.x.t = t;
2868 break;
2869
2870 case frgr_mem:
2871 if (!region)
2872 {
2873 as_bad (_("frgr_mem record before region record!"));
2874 return;
2875 }
2876 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2877 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2878 set_imask (region, ptr->r.record.p.frmask, t, 1);
2879 set_imask (region, ptr->r.record.p.grmask, t, 2);
2880 break;
2881 case fr_mem:
2882 if (!region)
2883 {
2884 as_bad (_("fr_mem record before region record!"));
2885 return;
2886 }
2887 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2888 set_imask (region, ptr->r.record.p.frmask, t, 1);
2889 break;
2890 case gr_mem:
2891 if (!region)
2892 {
2893 as_bad (_("gr_mem record before region record!"));
2894 return;
2895 }
2896 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2897 set_imask (region, ptr->r.record.p.grmask, t, 2);
2898 break;
2899 case br_mem:
2900 if (!region)
2901 {
2902 as_bad (_("br_mem record before region record!"));
2903 return;
2904 }
2905 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2906 set_imask (region, ptr->r.record.p.brmask, t, 3);
2907 break;
2908
2909 case gr_gr:
2910 if (!region)
2911 {
2912 as_bad (_("gr_gr record before region record!"));
2913 return;
2914 }
2915 set_imask (region, ptr->r.record.p.grmask, t, 2);
2916 break;
2917 case br_gr:
2918 if (!region)
2919 {
2920 as_bad (_("br_gr record before region record!"));
2921 return;
2922 }
2923 set_imask (region, ptr->r.record.p.brmask, t, 3);
2924 break;
2925
2926 default:
2927 break;
2928 }
2929 }
2930 }
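/* Example: for the directive sequence

       .prologue
       .save   rp, r33
       .body

   the prologue record's rlen becomes the number of instruction slots
   between the .prologue point and the .body point, and the rp_when
   record's t becomes the slot index of the corresponding save, counted
   from the start of that region.  */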
2931
2932 /* Estimate the size of a frag before relaxing. We only have one type of frag
2933 to handle here, which is the unwind info frag. */
2934
2935 int
2936 ia64_estimate_size_before_relax (fragS *frag,
2937 asection *segtype ATTRIBUTE_UNUSED)
2938 {
2939 unw_rec_list *list;
2940 int len, size, pad;
2941
2942 /* ??? This code is identical to the first part of ia64_convert_frag. */
2943 list = (unw_rec_list *) frag->fr_opcode;
2944 fixup_unw_records (list, 0);
2945
2946 len = calc_record_size (list);
2947 /* pad to pointer-size boundary. */
2948 pad = len % md.pointer_size;
2949 if (pad != 0)
2950 len += md.pointer_size - pad;
2951 /* Add 8 for the header. */
2952 size = len + 8;
2953 /* Add a pointer for the personality offset. */
2954 if (frag->fr_offset)
2955 size += md.pointer_size;
2956
2957 /* fr_var carries the max_chars that we created the fragment with.
2958 We must, of course, have allocated enough memory earlier. */
2959 gas_assert (frag->fr_var >= size);
2960
2961 return frag->fr_fix + size;
2962 }
2963
2964 /* This function converts a rs_machine_dependent variant frag into a
2965 normal fill frag with the unwind image from the record list. */
2966 void
2967 ia64_convert_frag (fragS *frag)
2968 {
2969 unw_rec_list *list;
2970 int len, size, pad;
2971 valueT flag_value;
2972
2973 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2974 list = (unw_rec_list *) frag->fr_opcode;
2975 fixup_unw_records (list, 0);
2976
2977 len = calc_record_size (list);
2978 /* pad to pointer-size boundary. */
2979 pad = len % md.pointer_size;
2980 if (pad != 0)
2981 len += md.pointer_size - pad;
2982 /* Add 8 for the header. */
2983 size = len + 8;
2984 /* Add a pointer for the personality offset. */
2985 if (frag->fr_offset)
2986 size += md.pointer_size;
2987
2988 /* fr_var carries the max_chars that we created the fragment with.
2989 We must, of course, have allocated enough memory earlier. */
2990 gas_assert (frag->fr_var >= size);
2991
2992 /* Initialize the header area. fr_offset is initialized with
2993 unwind.personality_routine. */
2994 if (frag->fr_offset)
2995 {
2996 if (md.flags & EF_IA_64_ABI64)
2997 flag_value = (bfd_vma) 3 << 32;
2998 else
2999 /* 32-bit unwind info block. */
3000 flag_value = (bfd_vma) 0x1003 << 32;
3001 }
3002 else
3003 flag_value = 0;
3004
3005 md_number_to_chars (frag->fr_literal,
3006 (((bfd_vma) 1 << 48) /* Version. */
3007 | flag_value /* U & E handler flags. */
3008 | (len / md.pointer_size)), /* Length. */
3009 8);
3010
3011 /* Skip the header. */
3012 vbyte_mem_ptr = frag->fr_literal + 8;
3013 process_unw_records (list, output_vbyte_mem);
3014
3015 /* Fill the padding bytes with zeros. */
3016 if (pad != 0)
3017 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3018 md.pointer_size - pad);
3019 /* Fill the unwind personality with zeros. */
3020 if (frag->fr_offset)
3021 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3022 md.pointer_size);
3023
3024 frag->fr_fix += size;
3025 frag->fr_type = rs_fill;
3026 frag->fr_var = 0;
3027 frag->fr_offset = 0;
3028 }
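/* Resulting layout of the unwind info block built above:

       8-byte header      version 1 (from bit 48 up), handler flag bits
                          (non-zero only when a personality routine is
                          present), and the length of the padded
                          descriptor area in pointer-size units
       descriptor area    the vbyte-encoded unwind records
       zero padding       up to the next pointer-size boundary
       personality slot   one pointer, present only when .personality
                          was used; its value comes from the LTOFF_FPTR
                          relocation added in generate_unwind_image.  */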
3029
3030 static int
3031 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3032 {
3033 int sep = parse_operand_and_eval (e, ',');
3034
3035 *qp = e->X_add_number - REG_P;
3036 if (e->X_op != O_register || *qp > 63)
3037 {
3038 as_bad (_("First operand to .%s must be a predicate"), po);
3039 *qp = 0;
3040 }
3041 else if (*qp == 0)
3042 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3043 if (sep == ',')
3044 sep = parse_operand_and_eval (e, ',');
3045 else
3046 e->X_op = O_absent;
3047 return sep;
3048 }
3049
3050 static void
3051 convert_expr_to_ab_reg (const expressionS *e,
3052 unsigned int *ab,
3053 unsigned int *regp,
3054 const char *po,
3055 int n)
3056 {
3057 unsigned int reg = e->X_add_number;
3058
3059 *ab = *regp = 0; /* Anything valid is good here. */
3060
3061 if (e->X_op != O_register)
3062 reg = REG_GR; /* Anything invalid is good here. */
3063
3064 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3065 {
3066 *ab = 0;
3067 *regp = reg - REG_GR;
3068 }
3069 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3070 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3071 {
3072 *ab = 1;
3073 *regp = reg - REG_FR;
3074 }
3075 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3076 {
3077 *ab = 2;
3078 *regp = reg - REG_BR;
3079 }
3080 else
3081 {
3082 *ab = 3;
3083 switch (reg)
3084 {
3085 case REG_PR: *regp = 0; break;
3086 case REG_PSP: *regp = 1; break;
3087 case REG_PRIUNAT: *regp = 2; break;
3088 case REG_BR + 0: *regp = 3; break;
3089 case REG_AR + AR_BSP: *regp = 4; break;
3090 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3091 case REG_AR + AR_RNAT: *regp = 6; break;
3092 case REG_AR + AR_UNAT: *regp = 7; break;
3093 case REG_AR + AR_FPSR: *regp = 8; break;
3094 case REG_AR + AR_PFS: *regp = 9; break;
3095 case REG_AR + AR_LC: *regp = 10; break;
3096
3097 default:
3098 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3099 break;
3100 }
3101 }
3102 }
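/* Examples of the ab/reg encoding produced above: r4 -> ab 0, reg 4;
   f16 -> ab 1, reg 16; b1 -> ab 2, reg 1; ar.unat -> ab 3, reg 7;
   anything else is rejected as not being a preserved register.  */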
3103
3104 static void
3105 convert_expr_to_xy_reg (const expressionS *e,
3106 unsigned int *xy,
3107 unsigned int *regp,
3108 const char *po,
3109 int n)
3110 {
3111 unsigned int reg = e->X_add_number;
3112
3113 *xy = *regp = 0; /* Anything valid is good here. */
3114
3115 if (e->X_op != O_register)
3116 reg = REG_GR; /* Anything invalid is good here. */
3117
3118 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3119 {
3120 *xy = 0;
3121 *regp = reg - REG_GR;
3122 }
3123 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3124 {
3125 *xy = 1;
3126 *regp = reg - REG_FR;
3127 }
3128 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3129 {
3130 *xy = 2;
3131 *regp = reg - REG_BR;
3132 }
3133 else
3134 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3135 }
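/* The xy value selects the class of the target register: 0 for a
   general register, 1 for a floating-point register, 2 for a branch
   register.  E.g. a target operand of r30 yields xy 0, reg 30.  */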
3136
3137 static void
3138 dot_align (int arg)
3139 {
3140 /* The current frag is an alignment frag. */
3141 align_frag = frag_now;
3142 s_align_bytes (arg);
3143 }
3144
3145 static void
3146 dot_radix (int dummy ATTRIBUTE_UNUSED)
3147 {
3148 char *radix;
3149 int ch;
3150
3151 SKIP_WHITESPACE ();
3152
3153 if (is_it_end_of_statement ())
3154 return;
3155 ch = get_symbol_name (&radix);
3156 ia64_canonicalize_symbol_name (radix);
3157 if (strcasecmp (radix, "C"))
3158 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3159 (void) restore_line_pointer (ch);
3160 demand_empty_rest_of_line ();
3161 }
3162
3163 /* Helper function for .loc directives. If the assembler is not generating
3164 line number info, then we need to remember which instructions have a .loc
3165 directive, and only call dwarf2_gen_line_info for those instructions. */
3166
3167 static void
3168 dot_loc (int x)
3169 {
3170 CURR_SLOT.loc_directive_seen = 1;
3171 dwarf2_directive_loc (x);
3172 }
3173
3174 /* .sbss, .srodata etc. are macros that expand into ".section SECNAME". */
3175 static void
3176 dot_special_section (int which)
3177 {
3178 set_section ((char *) special_section_name[which]);
3179 }
3180
3181 /* Return -1 for warning and 0 for error. */
3182
3183 static int
3184 unwind_diagnostic (const char * region, const char *directive)
3185 {
3186 if (md.unwind_check == unwind_check_warning)
3187 {
3188 as_warn (_(".%s outside of %s"), directive, region);
3189 return -1;
3190 }
3191 else
3192 {
3193 as_bad (_(".%s outside of %s"), directive, region);
3194 ignore_rest_of_line ();
3195 return 0;
3196 }
3197 }
3198
3199 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3200 a procedure but the unwind directive check is set to warning, 0 if
3201 a directive isn't in a procedure and the unwind directive check is set
3202 to error. */
3203
3204 static int
3205 in_procedure (const char *directive)
3206 {
3207 if (unwind.proc_pending.sym
3208 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3209 return 1;
3210 return unwind_diagnostic ("procedure", directive);
3211 }
3212
3213 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3214 a prologue but the unwind directive check is set to warning, 0 if
3215 a directive isn't in a prologue and the unwind directive check is set
3216 to error. */
3217
3218 static int
3219 in_prologue (const char *directive)
3220 {
3221 int in = in_procedure (directive);
3222
3223 if (in > 0 && !unwind.prologue)
3224 in = unwind_diagnostic ("prologue", directive);
3225 check_pending_save ();
3226 return in;
3227 }
3228
3229 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3230 a body but the unwind directive check is set to warning, 0 if
3231 a directive isn't in a body and the unwind directive check is set
3232 to error. */
3233
3234 static int
3235 in_body (const char *directive)
3236 {
3237 int in = in_procedure (directive);
3238
3239 if (in > 0 && !unwind.body)
3240 in = unwind_diagnostic ("body region", directive);
3241 return in;
3242 }
3243
3244 static void
3245 add_unwind_entry (unw_rec_list *ptr, int sep)
3246 {
3247 if (ptr)
3248 {
3249 if (unwind.tail)
3250 unwind.tail->next = ptr;
3251 else
3252 unwind.list = ptr;
3253 unwind.tail = ptr;
3254
3255 /* The current entry can in fact be a chain of unwind entries. */
3256 if (unwind.current_entry == NULL)
3257 unwind.current_entry = ptr;
3258 }
3259
3264 if (sep == ',')
3265 {
3266 char *name;
3267 /* Parse a tag permitted for the current directive. */
3268 int ch;
3269
3270 SKIP_WHITESPACE ();
3271 ch = get_symbol_name (&name);
3272 /* FIXME: For now, just issue a warning that this isn't implemented. */
3273 {
3274 static int warned;
3275
3276 if (!warned)
3277 {
3278 warned = 1;
3279 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3280 }
3281 }
3282 (void) restore_line_pointer (ch);
3283 }
3284 if (sep != NOT_A_CHAR)
3285 demand_empty_rest_of_line ();
3286 }
3287
3288 static void
3289 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3290 {
3291 expressionS e;
3292 int sep;
3293
3294 if (!in_prologue ("fframe"))
3295 return;
3296
3297 sep = parse_operand_and_eval (&e, ',');
3298
3299 if (e.X_op != O_constant)
3300 {
3301 as_bad (_("First operand to .fframe must be a constant"));
3302 e.X_add_number = 0;
3303 }
3304 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3305 }
3306
3307 static void
3308 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3309 {
3310 expressionS e;
3311 unsigned reg;
3312 int sep;
3313
3314 if (!in_prologue ("vframe"))
3315 return;
3316
3317 sep = parse_operand_and_eval (&e, ',');
3318 reg = e.X_add_number - REG_GR;
3319 if (e.X_op != O_register || reg > 127)
3320 {
3321 as_bad (_("First operand to .vframe must be a general register"));
3322 reg = 0;
3323 }
3324 add_unwind_entry (output_mem_stack_v (), sep);
3325 if (! (unwind.prologue_mask & 2))
3326 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3327 else if (reg != unwind.prologue_gr
3328 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3329 as_warn (_("Operand of .vframe contradicts .prologue"));
3330 }
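/* Note on the consistency check above: the prologue_gr region header
   assigns consecutive GRs in the fixed order rp (mask bit 8), ar.pfs
   (4), psp (2), preds (1).  The GR expected for psp is therefore
   unwind.prologue_gr plus the number of higher-priority mask bits,
   which is what popcount (prologue_mask & -(2 << 1)) computes:
   -(2 << 1) == -4 clears the preds and psp bits, leaving only the rp
   and ar.pfs bits.  */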
3331
3332 static void
3333 dot_vframesp (int psp)
3334 {
3335 expressionS e;
3336 int sep;
3337
3338 if (psp)
3339 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3340
3341 if (!in_prologue ("vframesp"))
3342 return;
3343
3344 sep = parse_operand_and_eval (&e, ',');
3345 if (e.X_op != O_constant)
3346 {
3347 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3348 e.X_add_number = 0;
3349 }
3350 add_unwind_entry (output_mem_stack_v (), sep);
3351 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3352 }
3353
3354 static void
3355 dot_save (int dummy ATTRIBUTE_UNUSED)
3356 {
3357 expressionS e1, e2;
3358 unsigned reg1, reg2;
3359 int sep;
3360
3361 if (!in_prologue ("save"))
3362 return;
3363
3364 sep = parse_operand_and_eval (&e1, ',');
3365 if (sep == ',')
3366 sep = parse_operand_and_eval (&e2, ',');
3367 else
3368 e2.X_op = O_absent;
3369
3370 reg1 = e1.X_add_number;
3371 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3372 if (e1.X_op != O_register)
3373 {
3374 as_bad (_("First operand to .save not a register"));
3375 reg1 = REG_PR; /* Anything valid is good here. */
3376 }
3377 reg2 = e2.X_add_number - REG_GR;
3378 if (e2.X_op != O_register || reg2 > 127)
3379 {
3380 as_bad (_("Second operand to .save not a valid register"));
3381 reg2 = 0;
3382 }
3383 switch (reg1)
3384 {
3385 case REG_AR + AR_BSP:
3386 add_unwind_entry (output_bsp_when (), sep);
3387 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3388 break;
3389 case REG_AR + AR_BSPSTORE:
3390 add_unwind_entry (output_bspstore_when (), sep);
3391 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3392 break;
3393 case REG_AR + AR_RNAT:
3394 add_unwind_entry (output_rnat_when (), sep);
3395 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3396 break;
3397 case REG_AR + AR_UNAT:
3398 add_unwind_entry (output_unat_when (), sep);
3399 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3400 break;
3401 case REG_AR + AR_FPSR:
3402 add_unwind_entry (output_fpsr_when (), sep);
3403 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3404 break;
3405 case REG_AR + AR_PFS:
3406 add_unwind_entry (output_pfs_when (), sep);
3407 if (! (unwind.prologue_mask & 4))
3408 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3409 else if (reg2 != unwind.prologue_gr
3410 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3411 as_warn (_("Second operand of .save contradicts .prologue"));
3412 break;
3413 case REG_AR + AR_LC:
3414 add_unwind_entry (output_lc_when (), sep);
3415 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3416 break;
3417 case REG_BR:
3418 add_unwind_entry (output_rp_when (), sep);
3419 if (! (unwind.prologue_mask & 8))
3420 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3421 else if (reg2 != unwind.prologue_gr)
3422 as_warn (_("Second operand of .save contradicts .prologue"));
3423 break;
3424 case REG_PR:
3425 add_unwind_entry (output_preds_when (), sep);
3426 if (! (unwind.prologue_mask & 1))
3427 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3428 else if (reg2 != unwind.prologue_gr
3429 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3430 as_warn (_("Second operand of .save contradicts .prologue"));
3431 break;
3432 case REG_PRIUNAT:
3433 add_unwind_entry (output_priunat_when_gr (), sep);
3434 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3435 break;
3436 default:
3437 as_bad (_("First operand to .save not a valid register"));
3438 add_unwind_entry (NULL, sep);
3439 break;
3440 }
3441 }
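/* For example, ".save ar.pfs, r34" adds a pfs_when record (whose time
   stamp is filled in later) plus a pfs_gr record naming r34 -- unless
   ar.pfs is already covered by the .prologue mask (bit 4), in which
   case only the when record is added and r34 is merely checked against
   the register implied by .prologue.  */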
3442
3443 static void
3444 dot_restore (int dummy ATTRIBUTE_UNUSED)
3445 {
3446 expressionS e1;
3447 unsigned long ecount; /* # of _additional_ regions to pop */
3448 int sep;
3449
3450 if (!in_body ("restore"))
3451 return;
3452
3453 sep = parse_operand_and_eval (&e1, ',');
3454 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3455 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3456
3457 if (sep == ',')
3458 {
3459 expressionS e2;
3460
3461 sep = parse_operand_and_eval (&e2, ',');
3462 if (e2.X_op != O_constant || e2.X_add_number < 0)
3463 {
3464 as_bad (_("Second operand to .restore must be a constant >= 0"));
3465 e2.X_add_number = 0;
3466 }
3467 ecount = e2.X_add_number;
3468 }
3469 else
3470 ecount = unwind.prologue_count - 1;
3471
3472 if (ecount >= unwind.prologue_count)
3473 {
3474 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3475 ecount + 1, unwind.prologue_count);
3476 ecount = 0;
3477 }
3478
3479 add_unwind_entry (output_epilogue (ecount), sep);
3480
3481 if (ecount < unwind.prologue_count)
3482 unwind.prologue_count -= ecount + 1;
3483 else
3484 unwind.prologue_count = 0;
3485 }
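/* Example: with two nested prologue regions open, a bare ".restore sp"
   defaults to ecount 1 and closes both of them, while ".restore sp, 0"
   closes only the innermost region.  */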
3486
3487 static void
3488 dot_restorereg (int pred)
3489 {
3490 unsigned int qp, ab, reg;
3491 expressionS e;
3492 int sep;
3493 const char * const po = pred ? "restorereg.p" : "restorereg";
3494
3495 if (!in_procedure (po))
3496 return;
3497
3498 if (pred)
3499 sep = parse_predicate_and_operand (&e, &qp, po);
3500 else
3501 {
3502 sep = parse_operand_and_eval (&e, ',');
3503 qp = 0;
3504 }
3505 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3506
3507 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3508 }
3509
3510 static const char *special_linkonce_name[] =
3511 {
3512 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3513 };
3514
3515 static void
3516 start_unwind_section (const segT text_seg, int sec_index)
3517 {
3518 /*
3519 Use a slightly ugly scheme to derive the unwind section names from
3520 the text section name:
3521
3522       text sect.            unwind table sect.
3523       name:                 name:                        comments:
3524       ----------            -----------------            --------------------------------
3525       .text                 .IA_64.unwind
3526       .text.foo             .IA_64.unwind.text.foo
3527       .foo                  .IA_64.unwind.foo
3528       .gnu.linkonce.t.foo
3529                             .gnu.linkonce.ia64unw.foo
3530       _info                 .IA_64.unwind_info           gas issues error message (ditto)
3531       _infoFOO              .IA_64.unwind_infoFOO        gas issues error message (ditto)
3532
3533 This mapping is done so that:
3534
3535 (a) An object file with unwind info only in .text will use
3536 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3537 This follows the letter of the ABI and also ensures backwards
3538 compatibility with older toolchains.
3539
3540 (b) An object file with unwind info in multiple text sections
3541 will use separate unwind sections for each text section.
3542 This allows us to properly set the "sh_info" and "sh_link"
3543 fields in SHT_IA_64_UNWIND as required by the ABI and also
3544 lets GNU ld support programs with multiple segments
3545 containing unwind info (as might be the case for certain
3546 embedded applications).
3547
3548 (c) An error is issued if there would be a name clash.
3549 */
3550
3551 const char *text_name, *sec_text_name;
3552 char *sec_name;
3553 const char *prefix = special_section_name [sec_index];
3554 const char *suffix;
3555
3556 sec_text_name = segment_name (text_seg);
3557 text_name = sec_text_name;
3558 if (startswith (text_name, "_info"))
3559 {
3560 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3561 text_name);
3562 ignore_rest_of_line ();
3563 return;
3564 }
3565 if (strcmp (text_name, ".text") == 0)
3566 text_name = "";
3567
3568 /* Build the unwind section name by appending the (possibly stripped)
3569 text section name to the unwind prefix. */
3570 suffix = text_name;
3571 if (startswith (text_name, ".gnu.linkonce.t."))
3572 {
3573 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3574 suffix += sizeof (".gnu.linkonce.t.") - 1;
3575 }
3576
3577 sec_name = concat (prefix, suffix, NULL);
3578
3579 /* Handle COMDAT group. */
3580 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3581 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3582 {
3583 char *section;
3584 const char *group_name = elf_group_name (text_seg);
3585
3586 if (group_name == NULL)
3587 {
3588 as_bad (_("Group section `%s' has no group signature"),
3589 sec_text_name);
3590 ignore_rest_of_line ();
3591 free (sec_name);
3592 return;
3593 }
3594
3595 /* We have to construct a fake section directive. */
3596 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3597 set_section (section);
3598 free (section);
3599 }
3600 else
3601 {
3602 set_section (sec_name);
3603 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3604 }
3605
3606 elf_linked_to_section (now_seg) = text_seg;
3607 free (sec_name);
3608 }
3609
3610 static void
3611 generate_unwind_image (const segT text_seg)
3612 {
3613 int size, pad;
3614 unw_rec_list *list;
3615
3616 /* Mark the end of the unwind info, so that we can compute the size of the
3617 last unwind region. */
3618 add_unwind_entry (output_endp (), NOT_A_CHAR);
3619
3620 /* Force out pending instructions, to make sure all unwind records have
3621 a valid slot_number field. */
3622 ia64_flush_insns ();
3623
3624 /* Generate the unwind record. */
3625 list = optimize_unw_records (unwind.list);
3626 fixup_unw_records (list, 1);
3627 size = calc_record_size (list);
3628
3629 if (size > 0 || unwind.force_unwind_entry)
3630 {
3631 unwind.force_unwind_entry = 0;
3632 /* pad to pointer-size boundary. */
3633 pad = size % md.pointer_size;
3634 if (pad != 0)
3635 size += md.pointer_size - pad;
3636 /* Add 8 for the header. */
3637 size += 8;
3638 /* Add a pointer for the personality offset. */
3639 if (unwind.personality_routine)
3640 size += md.pointer_size;
3641 }
3642
3643 /* If there are unwind records, switch sections, and output the info. */
3644 if (size != 0)
3645 {
3646 expressionS exp;
3647 bfd_reloc_code_real_type reloc;
3648
3649 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3650
3651 /* Make sure the section has 4 byte alignment for ILP32 and
3652 8 byte alignment for LP64. */
3653 frag_align (md.pointer_size_shift, 0, 0);
3654 record_alignment (now_seg, md.pointer_size_shift);
3655
3656 /* Set expression which points to start of unwind descriptor area. */
3657 unwind.info = expr_build_dot ();
3658
3659 frag_var (rs_machine_dependent, size, size, 0, 0,
3660 (offsetT) (long) unwind.personality_routine,
3661 (char *) list);
3662
3663 /* Add the personality address to the image. */
3664 if (unwind.personality_routine != 0)
3665 {
3666 exp.X_op = O_symbol;
3667 exp.X_add_symbol = unwind.personality_routine;
3668 exp.X_add_number = 0;
3669
3670 if (md.flags & EF_IA_64_BE)
3671 {
3672 if (md.flags & EF_IA_64_ABI64)
3673 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3674 else
3675 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3676 }
3677 else
3678 {
3679 if (md.flags & EF_IA_64_ABI64)
3680 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3681 else
3682 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3683 }
3684
3685 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3686 md.pointer_size, &exp, 0, reloc);
3687 unwind.personality_routine = 0;
3688 }
3689 }
3690
3691 free_saved_prologue_counts ();
3692 unwind.list = unwind.tail = unwind.current_entry = NULL;
3693 }
3694
3695 static void
3696 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3697 {
3698 if (!in_procedure ("handlerdata"))
3699 return;
3700 unwind.force_unwind_entry = 1;
3701
3702 /* Remember which segment we're in so we can switch back after .endp */
3703 unwind.saved_text_seg = now_seg;
3704 unwind.saved_text_subseg = now_subseg;
3705
3706 /* Generate unwind info into unwind-info section and then leave that
3707 section as the currently active one so dataXX directives go into
3708 the language specific data area of the unwind info block. */
3709 generate_unwind_image (now_seg);
3710 demand_empty_rest_of_line ();
3711 }
3712
3713 static void
3714 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3715 {
3716 if (!in_procedure ("unwentry"))
3717 return;
3718 unwind.force_unwind_entry = 1;
3719 demand_empty_rest_of_line ();
3720 }
3721
3722 static void
3723 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3724 {
3725 expressionS e;
3726 unsigned reg;
3727
3728 if (!in_prologue ("altrp"))
3729 return;
3730
3731 parse_operand_and_eval (&e, 0);
3732 reg = e.X_add_number - REG_BR;
3733 if (e.X_op != O_register || reg > 7)
3734 {
3735 as_bad (_("First operand to .altrp not a valid branch register"));
3736 reg = 0;
3737 }
3738 add_unwind_entry (output_rp_br (reg), 0);
3739 }
3740
3741 static void
3742 dot_savemem (int psprel)
3743 {
3744 expressionS e1, e2;
3745 int sep;
3746 int reg1, val;
3747 const char * const po = psprel ? "savepsp" : "savesp";
3748
3749 if (!in_prologue (po))
3750 return;
3751
3752 sep = parse_operand_and_eval (&e1, ',');
3753 if (sep == ',')
3754 sep = parse_operand_and_eval (&e2, ',');
3755 else
3756 e2.X_op = O_absent;
3757
3758 reg1 = e1.X_add_number;
3759 val = e2.X_add_number;
3760
3761 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3762 if (e1.X_op != O_register)
3763 {
3764 as_bad (_("First operand to .%s not a register"), po);
3765 reg1 = REG_PR; /* Anything valid is good here. */
3766 }
3767 if (e2.X_op != O_constant)
3768 {
3769 as_bad (_("Second operand to .%s not a constant"), po);
3770 val = 0;
3771 }
3772
3773 switch (reg1)
3774 {
3775 case REG_AR + AR_BSP:
3776 add_unwind_entry (output_bsp_when (), sep);
3777 add_unwind_entry ((psprel
3778 ? output_bsp_psprel
3779 : output_bsp_sprel) (val), NOT_A_CHAR);
3780 break;
3781 case REG_AR + AR_BSPSTORE:
3782 add_unwind_entry (output_bspstore_when (), sep);
3783 add_unwind_entry ((psprel
3784 ? output_bspstore_psprel
3785 : output_bspstore_sprel) (val), NOT_A_CHAR);
3786 break;
3787 case REG_AR + AR_RNAT:
3788 add_unwind_entry (output_rnat_when (), sep);
3789 add_unwind_entry ((psprel
3790 ? output_rnat_psprel
3791 : output_rnat_sprel) (val), NOT_A_CHAR);
3792 break;
3793 case REG_AR + AR_UNAT:
3794 add_unwind_entry (output_unat_when (), sep);
3795 add_unwind_entry ((psprel
3796 ? output_unat_psprel
3797 : output_unat_sprel) (val), NOT_A_CHAR);
3798 break;
3799 case REG_AR + AR_FPSR:
3800 add_unwind_entry (output_fpsr_when (), sep);
3801 add_unwind_entry ((psprel
3802 ? output_fpsr_psprel
3803 : output_fpsr_sprel) (val), NOT_A_CHAR);
3804 break;
3805 case REG_AR + AR_PFS:
3806 add_unwind_entry (output_pfs_when (), sep);
3807 add_unwind_entry ((psprel
3808 ? output_pfs_psprel
3809 : output_pfs_sprel) (val), NOT_A_CHAR);
3810 break;
3811 case REG_AR + AR_LC:
3812 add_unwind_entry (output_lc_when (), sep);
3813 add_unwind_entry ((psprel
3814 ? output_lc_psprel
3815 : output_lc_sprel) (val), NOT_A_CHAR);
3816 break;
3817 case REG_BR:
3818 add_unwind_entry (output_rp_when (), sep);
3819 add_unwind_entry ((psprel
3820 ? output_rp_psprel
3821 : output_rp_sprel) (val), NOT_A_CHAR);
3822 break;
3823 case REG_PR:
3824 add_unwind_entry (output_preds_when (), sep);
3825 add_unwind_entry ((psprel
3826 ? output_preds_psprel
3827 : output_preds_sprel) (val), NOT_A_CHAR);
3828 break;
3829 case REG_PRIUNAT:
3830 add_unwind_entry (output_priunat_when_mem (), sep);
3831 add_unwind_entry ((psprel
3832 ? output_priunat_psprel
3833 : output_priunat_sprel) (val), NOT_A_CHAR);
3834 break;
3835 default:
3836 as_bad (_("First operand to .%s not a valid register"), po);
3837 add_unwind_entry (NULL, sep);
3838 break;
3839 }
3840 }
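/* For example, ".savesp ar.unat, 16" adds an unat_when record plus an
   unat_sprel record whose offset is stored pre-divided by 4 (here 4),
   while the ".savepsp" form would instead encode the offset with
   ENCODED_PSP_OFFSET.  */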
3841
3842 static void
3843 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3844 {
3845 expressionS e;
3846 unsigned grmask;
3847 int sep;
3848
3849 if (!in_prologue ("save.g"))
3850 return;
3851
3852 sep = parse_operand_and_eval (&e, ',');
3853
3854 grmask = e.X_add_number;
3855 if (e.X_op != O_constant
3856 || e.X_add_number <= 0
3857 || e.X_add_number > 0xf)
3858 {
3859 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3860 grmask = 0;
3861 }
3862
3863 if (sep == ',')
3864 {
3865 unsigned reg;
3866 int n = popcount (grmask);
3867
3868 parse_operand_and_eval (&e, 0);
3869 reg = e.X_add_number - REG_GR;
3870 if (e.X_op != O_register || reg > 127)
3871 {
3872 as_bad (_("Second operand to .save.g must be a general register"));
3873 reg = 0;
3874 }
3875 else if (reg > 128U - n)
3876 {
3877 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3878 reg = 0;
3879 }
3880 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3881 }
3882 else
3883 add_unwind_entry (output_gr_mem (grmask), 0);
3884 }
3885
3886 static void
3887 dot_savef (int dummy ATTRIBUTE_UNUSED)
3888 {
3889 expressionS e;
3890
3891 if (!in_prologue ("save.f"))
3892 return;
3893
3894 parse_operand_and_eval (&e, 0);
3895
3896 if (e.X_op != O_constant
3897 || e.X_add_number <= 0
3898 || e.X_add_number > 0xfffff)
3899 {
3900 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3901 e.X_add_number = 0;
3902 }
3903 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3904 }
3905
3906 static void
3907 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3908 {
3909 expressionS e;
3910 unsigned brmask;
3911 int sep;
3912
3913 if (!in_prologue ("save.b"))
3914 return;
3915
3916 sep = parse_operand_and_eval (&e, ',');
3917
3918 brmask = e.X_add_number;
3919 if (e.X_op != O_constant
3920 || e.X_add_number <= 0
3921 || e.X_add_number > 0x1f)
3922 {
3923 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3924 brmask = 0;
3925 }
3926
3927 if (sep == ',')
3928 {
3929 unsigned reg;
3930 int n = popcount (brmask);
3931
3932 parse_operand_and_eval (&e, 0);
3933 reg = e.X_add_number - REG_GR;
3934 if (e.X_op != O_register || reg > 127)
3935 {
3936 as_bad (_("Second operand to .save.b must be a general register"));
3937 reg = 0;
3938 }
3939 else if (reg > 128U - n)
3940 {
3941 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3942 reg = 0;
3943 }
3944 add_unwind_entry (output_br_gr (brmask, reg), 0);
3945 }
3946 else
3947 add_unwind_entry (output_br_mem (brmask), 0);
3948 }
3949
3950 static void
3951 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3952 {
3953 expressionS e1, e2;
3954
3955 if (!in_prologue ("save.gf"))
3956 return;
3957
3958 if (parse_operand_and_eval (&e1, ',') == ',')
3959 parse_operand_and_eval (&e2, 0);
3960 else
3961 e2.X_op = O_absent;
3962
3963 if (e1.X_op != O_constant
3964 || e1.X_add_number < 0
3965 || e1.X_add_number > 0xf)
3966 {
3967 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3968 e1.X_op = O_absent;
3969 e1.X_add_number = 0;
3970 }
3971 if (e2.X_op != O_constant
3972 || e2.X_add_number < 0
3973 || e2.X_add_number > 0xfffff)
3974 {
3975 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3976 e2.X_op = O_absent;
3977 e2.X_add_number = 0;
3978 }
3979 if (e1.X_op == O_constant
3980 && e2.X_op == O_constant
3981 && e1.X_add_number == 0
3982 && e2.X_add_number == 0)
3983 as_bad (_("Operands to .save.gf may not be both zero"));
3984
3985 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3986 }
3987
3988 static void
3989 dot_spill (int dummy ATTRIBUTE_UNUSED)
3990 {
3991 expressionS e;
3992
3993 if (!in_prologue ("spill"))
3994 return;
3995
3996 parse_operand_and_eval (&e, 0);
3997
3998 if (e.X_op != O_constant)
3999 {
4000 as_bad (_("Operand to .spill must be a constant"));
4001 e.X_add_number = 0;
4002 }
4003 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4004 }
4005
4006 static void
4007 dot_spillreg (int pred)
4008 {
4009 int sep;
4010 unsigned int qp, ab, xy, reg, treg;
4011 expressionS e;
4012 const char * const po = pred ? "spillreg.p" : "spillreg";
4013
4014 if (!in_procedure (po))
4015 return;
4016
4017 if (pred)
4018 sep = parse_predicate_and_operand (&e, &qp, po);
4019 else
4020 {
4021 sep = parse_operand_and_eval (&e, ',');
4022 qp = 0;
4023 }
4024 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4025
4026 if (sep == ',')
4027 sep = parse_operand_and_eval (&e, ',');
4028 else
4029 e.X_op = O_absent;
4030 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4031
4032 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4033 }
4034
4035 static void
4036 dot_spillmem (int psprel)
4037 {
4038 expressionS e;
4039 int pred = (psprel < 0), sep;
4040 unsigned int qp, ab, reg;
4041 const char * po;
4042
4043 if (pred)
4044 {
4045 psprel = ~psprel;
4046 po = psprel ? "spillpsp.p" : "spillsp.p";
4047 }
4048 else
4049 po = psprel ? "spillpsp" : "spillsp";
4050
4051 if (!in_procedure (po))
4052 return;
4053
4054 if (pred)
4055 sep = parse_predicate_and_operand (&e, &qp, po);
4056 else
4057 {
4058 sep = parse_operand_and_eval (&e, ',');
4059 qp = 0;
4060 }
4061 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4062
4063 if (sep == ',')
4064 sep = parse_operand_and_eval (&e, ',');
4065 else
4066 e.X_op = O_absent;
4067 if (e.X_op != O_constant)
4068 {
4069 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4070 e.X_add_number = 0;
4071 }
4072
4073 if (psprel)
4074 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4075 else
4076 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4077 }
4078
4079 static unsigned int
4080 get_saved_prologue_count (unsigned long lbl)
4081 {
4082 label_prologue_count *lpc = unwind.saved_prologue_counts;
4083
4084 while (lpc != NULL && lpc->label_number != lbl)
4085 lpc = lpc->next;
4086
4087 if (lpc != NULL)
4088 return lpc->prologue_count;
4089
4090 as_bad (_("Missing .label_state %ld"), lbl);
4091 return 1;
4092 }
4093
4094 static void
4095 save_prologue_count (unsigned long lbl, unsigned int count)
4096 {
4097 label_prologue_count *lpc = unwind.saved_prologue_counts;
4098
4099 while (lpc != NULL && lpc->label_number != lbl)
4100 lpc = lpc->next;
4101
4102 if (lpc != NULL)
4103 lpc->prologue_count = count;
4104 else
4105 {
4106 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4107
4108 new_lpc->next = unwind.saved_prologue_counts;
4109 new_lpc->label_number = lbl;
4110 new_lpc->prologue_count = count;
4111 unwind.saved_prologue_counts = new_lpc;
4112 }
4113 }
4114
4115 static void
4116 free_saved_prologue_counts (void)
4117 {
4118 label_prologue_count *lpc = unwind.saved_prologue_counts;
4119 label_prologue_count *next;
4120
4121 while (lpc != NULL)
4122 {
4123 next = lpc->next;
4124 free (lpc);
4125 lpc = next;
4126 }
4127
4128 unwind.saved_prologue_counts = NULL;
4129 }
4130
4131 static void
4132 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4133 {
4134 expressionS e;
4135
4136 if (!in_body ("label_state"))
4137 return;
4138
4139 parse_operand_and_eval (&e, 0);
4140 if (e.X_op == O_constant)
4141 save_prologue_count (e.X_add_number, unwind.prologue_count);
4142 else
4143 {
4144 as_bad (_("Operand to .label_state must be a constant"));
4145 e.X_add_number = 0;
4146 }
4147 add_unwind_entry (output_label_state (e.X_add_number), 0);
4148 }
4149
4150 static void
4151 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4152 {
4153 expressionS e;
4154
4155 if (!in_body ("copy_state"))
4156 return;
4157
4158 parse_operand_and_eval (&e, 0);
4159 if (e.X_op == O_constant)
4160 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4161 else
4162 {
4163 as_bad (_("Operand to .copy_state must be a constant"));
4164 e.X_add_number = 0;
4165 }
4166 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4167 }
4168
4169 static void
4170 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4171 {
4172 expressionS e1, e2;
4173 unsigned char sep;
4174
4175 if (!in_prologue ("unwabi"))
4176 return;
4177
4178 sep = parse_operand_and_eval (&e1, ',');
4179 if (sep == ',')
4180 parse_operand_and_eval (&e2, 0);
4181 else
4182 e2.X_op = O_absent;
4183
4184 if (e1.X_op != O_constant)
4185 {
4186 as_bad (_("First operand to .unwabi must be a constant"));
4187 e1.X_add_number = 0;
4188 }
4189
4190 if (e2.X_op != O_constant)
4191 {
4192 as_bad (_("Second operand to .unwabi must be a constant"));
4193 e2.X_add_number = 0;
4194 }
4195
4196 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4197 }
4198
4199 static void
4200 dot_personality (int dummy ATTRIBUTE_UNUSED)
4201 {
4202 char *name, *p, c;
4203
4204 if (!in_procedure ("personality"))
4205 return;
4206 SKIP_WHITESPACE ();
4207 c = get_symbol_name (&name);
4208 p = input_line_pointer;
4209 unwind.personality_routine = symbol_find_or_make (name);
4210 unwind.force_unwind_entry = 1;
4211 *p = c;
4212 SKIP_WHITESPACE_AFTER_NAME ();
4213 demand_empty_rest_of_line ();
4214 }
4215
4216 static void
4217 dot_proc (int dummy ATTRIBUTE_UNUSED)
4218 {
4219 char *name, *p, c;
4220 symbolS *sym;
4221 proc_pending *pending, *last_pending;
4222
4223 if (unwind.proc_pending.sym)
4224 {
4225 (md.unwind_check == unwind_check_warning
4226 ? as_warn
4227 : as_bad) (_("Missing .endp after previous .proc"));
4228 while (unwind.proc_pending.next)
4229 {
4230 pending = unwind.proc_pending.next;
4231 unwind.proc_pending.next = pending->next;
4232 free (pending);
4233 }
4234 }
4235 last_pending = NULL;
4236
4237 /* Parse names of main and alternate entry points and mark them as
4238 function symbols: */
4239 while (1)
4240 {
4241 SKIP_WHITESPACE ();
4242 c = get_symbol_name (&name);
4243 p = input_line_pointer;
4244 if (!*name)
4245 as_bad (_("Empty argument of .proc"));
4246 else
4247 {
4248 sym = symbol_find_or_make (name);
4249 if (S_IS_DEFINED (sym))
4250 as_bad (_("`%s' was already defined"), name);
4251 else if (!last_pending)
4252 {
4253 unwind.proc_pending.sym = sym;
4254 last_pending = &unwind.proc_pending;
4255 }
4256 else
4257 {
4258 pending = XNEW (proc_pending);
4259 pending->sym = sym;
4260 last_pending = last_pending->next = pending;
4261 }
4262 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4263 }
4264 *p = c;
4265 SKIP_WHITESPACE_AFTER_NAME ();
4266 if (*input_line_pointer != ',')
4267 break;
4268 ++input_line_pointer;
4269 }
4270 if (!last_pending)
4271 {
4272 unwind.proc_pending.sym = expr_build_dot ();
4273 last_pending = &unwind.proc_pending;
4274 }
4275 last_pending->next = NULL;
4276 demand_empty_rest_of_line ();
4277 do_align (4, NULL, 0, 0);
4278
4279 unwind.prologue = 0;
4280 unwind.prologue_count = 0;
4281 unwind.body = 0;
4282 unwind.insn = 0;
4283 unwind.list = unwind.tail = unwind.current_entry = NULL;
4284 unwind.personality_routine = 0;
4285 }
4286
4287 static void
4288 dot_body (int dummy ATTRIBUTE_UNUSED)
4289 {
4290 if (!in_procedure ("body"))
4291 return;
4292 if (!unwind.prologue && !unwind.body && unwind.insn)
4293 as_warn (_("Initial .body should precede any instructions"));
4294 check_pending_save ();
4295
4296 unwind.prologue = 0;
4297 unwind.prologue_mask = 0;
4298 unwind.body = 1;
4299
4300 add_unwind_entry (output_body (), 0);
4301 }
4302
4303 static void
4304 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4305 {
4306 unsigned mask = 0, grsave = 0;
4307
4308 if (!in_procedure ("prologue"))
4309 return;
4310 if (unwind.prologue)
4311 {
4312 as_bad (_(".prologue within prologue"));
4313 ignore_rest_of_line ();
4314 return;
4315 }
4316 if (!unwind.body && unwind.insn)
4317 as_warn (_("Initial .prologue should precede any instructions"));
4318
4319 if (!is_it_end_of_statement ())
4320 {
4321 expressionS e;
4322 int n, sep = parse_operand_and_eval (&e, ',');
4323
4324 if (e.X_op != O_constant
4325 || e.X_add_number < 0
4326 || e.X_add_number > 0xf)
4327 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4328 else if (e.X_add_number == 0)
4329 as_warn (_("Pointless use of zero first operand to .prologue"));
4330 else
4331 mask = e.X_add_number;
4332
4333 n = popcount (mask);
4334
4335 if (sep == ',')
4336 parse_operand_and_eval (&e, 0);
4337 else
4338 e.X_op = O_absent;
4339
4340 if (e.X_op == O_constant
4341 && e.X_add_number >= 0
4342 && e.X_add_number < 128)
4343 {
4344 if (md.unwind_check == unwind_check_error)
4345 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4346 grsave = e.X_add_number;
4347 }
4348 else if (e.X_op != O_register
4349 || (grsave = e.X_add_number - REG_GR) > 127)
4350 {
4351 as_bad (_("Second operand to .prologue must be a general register"));
4352 grsave = 0;
4353 }
4354 else if (grsave > 128U - n)
4355 {
4356 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4357 grsave = 0;
4358 }
4359 }
4360
4361 if (mask)
4362 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4363 else
4364 add_unwind_entry (output_prologue (), 0);
4365
4366 unwind.prologue = 1;
4367 unwind.prologue_mask = mask;
4368 unwind.prologue_gr = grsave;
4369 unwind.body = 0;
4370 ++unwind.prologue_count;
4371 }
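
/* Illustrative sketch (operand choices are made up) of how the directives
   handled above typically frame a function:

	.proc	foo
   foo:
	.prologue
	// ... register saves, described by .save/.fframe/etc. ...
	.body
	// ... main body ...
	.endp	foo

   The two-operand form ".prologue mask, reg" is also accepted, subject to
   the mask/register checks in dot_prologue above.  */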
4372
4373 static void
4374 dot_endp (int dummy ATTRIBUTE_UNUSED)
4375 {
4376 expressionS e;
4377 int bytes_per_address;
4378 long where;
4379 segT saved_seg;
4380 subsegT saved_subseg;
4381 proc_pending *pending;
4382 int unwind_check = md.unwind_check;
4383
4384 md.unwind_check = unwind_check_error;
4385 if (!in_procedure ("endp"))
4386 return;
4387 md.unwind_check = unwind_check;
4388
4389 if (unwind.saved_text_seg)
4390 {
4391 saved_seg = unwind.saved_text_seg;
4392 saved_subseg = unwind.saved_text_subseg;
4393 unwind.saved_text_seg = NULL;
4394 }
4395 else
4396 {
4397 saved_seg = now_seg;
4398 saved_subseg = now_subseg;
4399 }
4400
4401 insn_group_break (1, 0, 0);
4402
4403 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4404 if (!unwind.info)
4405 generate_unwind_image (saved_seg);
4406
4407 if (unwind.info || unwind.force_unwind_entry)
4408 {
4409 symbolS *proc_end;
4410
4411 subseg_set (md.last_text_seg, md.last_text_subseg);
4412 proc_end = expr_build_dot ();
4413
4414 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4415
4416 /* Make sure the section has 4-byte alignment for ILP32 and
4417 8-byte alignment for LP64. */

4418 record_alignment (now_seg, md.pointer_size_shift);
4419
4420 /* Need space for 3 pointers for procedure start, procedure end,
4421 and unwind info. */
4422 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4423 where = frag_now_fix () - (3 * md.pointer_size);
4424 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4425
4426 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4427 e.X_op = O_pseudo_fixup;
4428 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4429 e.X_add_number = 0;
4430 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4431 && S_IS_DEFINED (unwind.proc_pending.sym))
4432 e.X_add_symbol
4433 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4434 symbol_get_frag (unwind.proc_pending.sym),
4435 S_GET_VALUE (unwind.proc_pending.sym));
4436 else
4437 e.X_add_symbol = unwind.proc_pending.sym;
4438 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4439 BFD_RELOC_NONE);
4440
4441 e.X_op = O_pseudo_fixup;
4442 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4443 e.X_add_number = 0;
4444 e.X_add_symbol = proc_end;
4445 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4446 bytes_per_address, &e, BFD_RELOC_NONE);
4447
4448 if (unwind.info)
4449 {
4450 e.X_op = O_pseudo_fixup;
4451 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4452 e.X_add_number = 0;
4453 e.X_add_symbol = unwind.info;
4454 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4455 bytes_per_address, &e, BFD_RELOC_NONE);
4456 }
4457 }
4458 subseg_set (saved_seg, saved_subseg);
4459
4460 /* Set symbol sizes. */
4461 pending = &unwind.proc_pending;
4462 if (S_GET_NAME (pending->sym))
4463 {
4464 do
4465 {
4466 symbolS *sym = pending->sym;
4467
4468 if (!S_IS_DEFINED (sym))
4469 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4470 else if (S_GET_SIZE (sym) == 0
4471 && symbol_get_obj (sym)->size == NULL)
4472 {
4473 fragS *frag = symbol_get_frag (sym);
4474
4475 if (frag)
4476 {
4477 if (frag == frag_now && SEG_NORMAL (now_seg))
4478 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4479 else
4480 {
4481 symbol_get_obj (sym)->size = XNEW (expressionS);
4482 symbol_get_obj (sym)->size->X_op = O_subtract;
4483 symbol_get_obj (sym)->size->X_add_symbol
4484 = symbol_new (FAKE_LABEL_NAME, now_seg,
4485 frag_now, frag_now_fix ());
4486 symbol_get_obj (sym)->size->X_op_symbol = sym;
4487 symbol_get_obj (sym)->size->X_add_number = 0;
4488 }
4489 }
4490 }
4491 } while ((pending = pending->next) != NULL);
4492 }
4493
4494 /* Parse names of main and alternate entry points. */
4495 while (1)
4496 {
4497 char *name, *p, c;
4498
4499 SKIP_WHITESPACE ();
4500 c = get_symbol_name (&name);
4501 p = input_line_pointer;
4502 if (!*name)
4503 (md.unwind_check == unwind_check_warning
4504 ? as_warn
4505 : as_bad) (_("Empty argument of .endp"));
4506 else
4507 {
4508 symbolS *sym = symbol_find (name);
4509
4510 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4511 {
4512 if (sym == pending->sym)
4513 {
4514 pending->sym = NULL;
4515 break;
4516 }
4517 }
4518 if (!sym || !pending)
4519 as_warn (_("`%s' was not specified with previous .proc"), name);
4520 }
4521 *p = c;
4522 SKIP_WHITESPACE_AFTER_NAME ();
4523 if (*input_line_pointer != ',')
4524 break;
4525 ++input_line_pointer;
4526 }
4527 demand_empty_rest_of_line ();
4528
4529 /* Deliberately only checking for the main entry point here; the
4530 language spec even says all arguments to .endp are ignored. */
4531 if (unwind.proc_pending.sym
4532 && S_GET_NAME (unwind.proc_pending.sym)
4533 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4534 as_warn (_("`%s' should be an operand to this .endp"),
4535 S_GET_NAME (unwind.proc_pending.sym));
4536 while (unwind.proc_pending.next)
4537 {
4538 pending = unwind.proc_pending.next;
4539 unwind.proc_pending.next = pending->next;
4540 free (pending);
4541 }
4542 unwind.proc_pending.sym = unwind.info = NULL;
4543 }
4544
4545 static void
4546 dot_template (int template_val)
4547 {
4548 CURR_SLOT.user_template = template_val;
4549 }
4550
4551 static void
4552 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4553 {
4554 int ins, locs, outs, rots;
4555
4556 if (is_it_end_of_statement ())
4557 ins = locs = outs = rots = 0;
4558 else
4559 {
4560 ins = get_absolute_expression ();
4561 if (*input_line_pointer++ != ',')
4562 goto err;
4563 locs = get_absolute_expression ();
4564 if (*input_line_pointer++ != ',')
4565 goto err;
4566 outs = get_absolute_expression ();
4567 if (*input_line_pointer++ != ',')
4568 goto err;
4569 rots = get_absolute_expression ();
4570 }
4571 set_regstack (ins, locs, outs, rots);
4572 return;
4573
4574 err:
4575 as_bad (_("Comma expected"));
4576 ignore_rest_of_line ();
4577 }
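
/* Illustrative .regstk usage (the numbers are hypothetical): the directive
   describes the register stack frame the code expects, in the same order as
   the operands of "alloc" -- inputs, locals, outputs, rotating -- e.g.

	.regstk 2, 3, 2, 0

   for a frame with 2 inputs, 3 locals, 2 outputs and no rotating region.  */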
4578
4579 static void
4580 dot_rot (int type)
4581 {
4582 offsetT num_regs;
4583 valueT num_alloced = 0;
4584 struct dynreg **drpp, *dr;
4585 int ch, base_reg = 0;
4586 char *name, *start;
4587 size_t len;
4588
4589 switch (type)
4590 {
4591 case DYNREG_GR: base_reg = REG_GR + 32; break;
4592 case DYNREG_FR: base_reg = REG_FR + 32; break;
4593 case DYNREG_PR: base_reg = REG_P + 16; break;
4594 default: break;
4595 }
4596
4597 /* First, remove existing names from the hash table. */
4598 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4599 {
4600 str_hash_delete (md.dynreg_hash, dr->name);
4601 /* FIXME: Free dr->name. */
4602 dr->num_regs = 0;
4603 }
4604
4605 drpp = &md.dynreg[type];
4606 while (1)
4607 {
4608 ch = get_symbol_name (&start);
4609 len = strlen (ia64_canonicalize_symbol_name (start));
4610 *input_line_pointer = ch;
4611
4612 SKIP_WHITESPACE_AFTER_NAME ();
4613 if (*input_line_pointer != '[')
4614 {
4615 as_bad (_("Expected '['"));
4616 goto err;
4617 }
4618 ++input_line_pointer; /* skip '[' */
4619
4620 num_regs = get_absolute_expression ();
4621
4622 if (*input_line_pointer++ != ']')
4623 {
4624 as_bad (_("Expected ']'"));
4625 goto err;
4626 }
4627 if (num_regs <= 0)
4628 {
4629 as_bad (_("Number of elements must be positive"));
4630 goto err;
4631 }
4632 SKIP_WHITESPACE ();
4633
4634 num_alloced += num_regs;
4635 switch (type)
4636 {
4637 case DYNREG_GR:
4638 if (num_alloced > md.rot.num_regs)
4639 {
4640 as_bad (_("Used more than the declared %d rotating registers"),
4641 md.rot.num_regs);
4642 goto err;
4643 }
4644 break;
4645 case DYNREG_FR:
4646 if (num_alloced > 96)
4647 {
4648 as_bad (_("Used more than the available 96 rotating registers"));
4649 goto err;
4650 }
4651 break;
4652 case DYNREG_PR:
4653 if (num_alloced > 48)
4654 {
4655 as_bad (_("Used more than the available 48 rotating registers"));
4656 goto err;
4657 }
4658 break;
4659
4660 default:
4661 break;
4662 }
4663
4664 if (!*drpp)
4665 *drpp = notes_calloc (1, sizeof (**drpp));
4666
4667 name = notes_memdup (start, len, len + 1);
4668
4669 dr = *drpp;
4670 dr->name = name;
4671 dr->num_regs = num_regs;
4672 dr->base = base_reg;
4673 drpp = &dr->next;
4674 base_reg += num_regs;
4675
4676 if (str_hash_insert (md.dynreg_hash, name, dr, 0) != NULL)
4677 {
4678 as_bad (_("Attempt to redefine register set `%s'"), name);
4679 goto err;
4680 }
4681
4682 if (*input_line_pointer != ',')
4683 break;
4684 ++input_line_pointer; /* skip comma */
4685 SKIP_WHITESPACE ();
4686 }
4687 demand_empty_rest_of_line ();
4688 return;
4689
4690 err:
4691 ignore_rest_of_line ();
4692 }
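
/* Illustrative declarations for the rotating-register directives parsed
   above (names and sizes are made up):

	.rotr  acc[4], ptr[2]   // rotating GRs, starting at r32
	.rotf  facc[6]          // rotating FRs, starting at f32
	.rotp  stage[3]         // rotating predicates, starting at p16

   Each name[N] reserves N consecutive rotating registers from the
   respective base and must fit within the limits checked above.  */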
4693
4694 static void
4695 dot_byteorder (int byteorder)
4696 {
4697 segment_info_type *seginfo = seg_info (now_seg);
4698
4699 if (byteorder == -1)
4700 {
4701 if (seginfo->tc_segment_info_data.endian == 0)
4702 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4703 byteorder = seginfo->tc_segment_info_data.endian == 1;
4704 }
4705 else
4706 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4707
4708 if (target_big_endian != byteorder)
4709 {
4710 target_big_endian = byteorder;
4711 if (target_big_endian)
4712 {
4713 ia64_number_to_chars = number_to_chars_bigendian;
4714 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4715 }
4716 else
4717 {
4718 ia64_number_to_chars = number_to_chars_littleendian;
4719 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4720 }
4721 }
4722 }
4723
4724 static void
4725 dot_psr (int dummy ATTRIBUTE_UNUSED)
4726 {
4727 char *option;
4728 int ch;
4729
4730 while (1)
4731 {
4732 ch = get_symbol_name (&option);
4733 if (strcmp (option, "lsb") == 0)
4734 md.flags &= ~EF_IA_64_BE;
4735 else if (strcmp (option, "msb") == 0)
4736 md.flags |= EF_IA_64_BE;
4737 else if (strcmp (option, "abi32") == 0)
4738 md.flags &= ~EF_IA_64_ABI64;
4739 else if (strcmp (option, "abi64") == 0)
4740 md.flags |= EF_IA_64_ABI64;
4741 else
4742 as_bad (_("Unknown psr option `%s'"), option);
4743 *input_line_pointer = ch;
4744
4745 SKIP_WHITESPACE_AFTER_NAME ();
4746 if (*input_line_pointer != ',')
4747 break;
4748
4749 ++input_line_pointer;
4750 SKIP_WHITESPACE ();
4751 }
4752 demand_empty_rest_of_line ();
4753 }
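
/* Illustrative .psr usage, combining two of the option names recognized
   above:

	.psr abi64, msb

   which requests the 64-bit ABI and big-endian (EF_IA_64_BE) output.  */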
4754
4755 static void
4756 dot_ln (int dummy ATTRIBUTE_UNUSED)
4757 {
4758 new_logical_line (0, get_absolute_expression ());
4759 demand_empty_rest_of_line ();
4760 }
4761
4762 static void
4763 cross_section (int ref, void (*builder) (int), int ua)
4764 {
4765 char *start, *end;
4766 int saved_auto_align;
4767 unsigned int section_count;
4768 const char *name;
4769
4770 start = input_line_pointer;
4771 name = obj_elf_section_name ();
4772 if (name == NULL)
4773 return;
4774 end = input_line_pointer;
4775 if (*input_line_pointer != ',')
4776 {
4777 as_bad (_("Comma expected after section name"));
4778 ignore_rest_of_line ();
4779 return;
4780 }
4781 *end = '\0';
4782 end = input_line_pointer + 1; /* skip comma */
4783 input_line_pointer = start;
4784 md.keep_pending_output = 1;
4785 section_count = bfd_count_sections (stdoutput);
4786 obj_elf_section (0);
4787 if (section_count != bfd_count_sections (stdoutput))
4788 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4789 input_line_pointer = end;
4790 saved_auto_align = md.auto_align;
4791 if (ua)
4792 md.auto_align = 0;
4793 (*builder) (ref);
4794 if (ua)
4795 md.auto_align = saved_auto_align;
4796 obj_elf_previous (0);
4797 md.keep_pending_output = 0;
4798 }
4799
4800 static void
4801 dot_xdata (int size)
4802 {
4803 cross_section (size, cons, 0);
4804 }
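
/* Illustrative cross-section data emission (the section name and value are
   hypothetical):

	.xdata4 .my_notes, 0x1234

   emits a 4-byte value into section .my_notes without switching away from
   the current section, via cross_section() above.  */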
4805
4806 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4807
4808 static void
4809 stmt_float_cons (int kind)
4810 {
4811 size_t alignment;
4812
4813 switch (kind)
4814 {
4815 case 'd':
4816 alignment = 3;
4817 break;
4818
4819 case 'x':
4820 case 'X':
4821 alignment = 4;
4822 break;
4823
4824 case 'f':
4825 default:
4826 alignment = 2;
4827 break;
4828 }
4829 do_align (alignment, NULL, 0, 0);
4830 float_cons (kind);
4831 }
4832
4833 static void
4834 stmt_cons_ua (int size)
4835 {
4836 int saved_auto_align = md.auto_align;
4837
4838 md.auto_align = 0;
4839 cons (size);
4840 md.auto_align = saved_auto_align;
4841 }
4842
4843 static void
4844 dot_xfloat_cons (int kind)
4845 {
4846 cross_section (kind, stmt_float_cons, 0);
4847 }
4848
4849 static void
4850 dot_xstringer (int zero)
4851 {
4852 cross_section (zero, stringer, 0);
4853 }
4854
4855 static void
4856 dot_xdata_ua (int size)
4857 {
4858 cross_section (size, cons, 1);
4859 }
4860
4861 static void
4862 dot_xfloat_cons_ua (int kind)
4863 {
4864 cross_section (kind, float_cons, 1);
4865 }
4866
4867 /* .reg.val <regname>,value */
4868
4869 static void
4870 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4871 {
4872 expressionS reg;
4873
4874 expression_and_evaluate (&reg);
4875 if (reg.X_op != O_register)
4876 {
4877 as_bad (_("Register name expected"));
4878 ignore_rest_of_line ();
4879 }
4880 else if (*input_line_pointer++ != ',')
4881 {
4882 as_bad (_("Comma expected"));
4883 ignore_rest_of_line ();
4884 }
4885 else
4886 {
4887 valueT value = get_absolute_expression ();
4888 int regno = reg.X_add_number;
4889 if (regno <= REG_GR || regno > REG_GR + 127)
4890 as_warn (_("Register value annotation ignored"));
4891 else
4892 {
4893 gr_values[regno - REG_GR].known = 1;
4894 gr_values[regno - REG_GR].value = value;
4895 gr_values[regno - REG_GR].path = md.path;
4896 }
4897 }
4898 demand_empty_rest_of_line ();
4899 }
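
/* Illustrative .reg.val annotation (values are made up):

	mov	r14 = 0x10
	.reg.val r14, 0x10

   Only general registers r1-r127 are tracked; any other register is
   ignored with a warning, as checked above.  */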
4900
4901 /*
4902 .serialize.data
4903 .serialize.instruction
4904 */
4905 static void
4906 dot_serialize (int type)
4907 {
4908 insn_group_break (0, 0, 0);
4909 if (type)
4910 instruction_serialization ();
4911 else
4912 data_serialization ();
4913 insn_group_break (0, 0, 0);
4914 demand_empty_rest_of_line ();
4915 }
4916
4917 /* Select the DV checking mode:
4918 .auto
4919 .explicit
4920 .default
4921
4922 A stop is inserted when changing modes.
4923 */
4924
4925 static void
4926 dot_dv_mode (int type)
4927 {
4928 if (md.manual_bundling)
4929 as_warn (_("Directive invalid within a bundle"));
4930
4931 if (type == 'E' || type == 'A')
4932 md.mode_explicitly_set = 0;
4933 else
4934 md.mode_explicitly_set = 1;
4935
4936 md.detect_dv = 1;
4937 switch (type)
4938 {
4939 case 'A':
4940 case 'a':
4941 if (md.explicit_mode)
4942 insn_group_break (1, 0, 0);
4943 md.explicit_mode = 0;
4944 break;
4945 case 'E':
4946 case 'e':
4947 if (!md.explicit_mode)
4948 insn_group_break (1, 0, 0);
4949 md.explicit_mode = 1;
4950 break;
4951 default:
4952 case 'd':
4953 if (md.explicit_mode != md.default_explicit_mode)
4954 insn_group_break (1, 0, 0);
4955 md.explicit_mode = md.default_explicit_mode;
4956 md.mode_explicitly_set = 0;
4957 break;
4958 }
4959 }
4960
4961 static void
4962 print_prmask (valueT mask)
4963 {
4964 int regno;
4965 const char *comma = "";
4966 for (regno = 0; regno < 64; regno++)
4967 {
4968 if (mask & ((valueT) 1 << regno))
4969 {
4970 fprintf (stderr, "%s p%d", comma, regno);
4971 comma = ",";
4972 }
4973 }
4974 }
4975
4976 /*
4977 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4978 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4979 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4980 .pred.safe_across_calls p1 [, p2 [,...]]
4981 */
4982
4983 static void
4984 dot_pred_rel (int type)
4985 {
4986 valueT mask = 0;
4987 int count = 0;
4988 int p1 = -1, p2 = -1;
4989
4990 if (type == 0)
4991 {
4992 if (*input_line_pointer == '"')
4993 {
4994 int len;
4995 char *form = demand_copy_C_string (&len);
4996
4997 if (strcmp (form, "mutex") == 0)
4998 type = 'm';
4999 else if (strcmp (form, "clear") == 0)
5000 type = 'c';
5001 else if (strcmp (form, "imply") == 0)
5002 type = 'i';
5003 notes_free (form);
5004 }
5005 else if (*input_line_pointer == '@')
5006 {
5007 char *form;
5008 char c;
5009
5010 ++input_line_pointer;
5011 c = get_symbol_name (&form);
5012
5013 if (strcmp (form, "mutex") == 0)
5014 type = 'm';
5015 else if (strcmp (form, "clear") == 0)
5016 type = 'c';
5017 else if (strcmp (form, "imply") == 0)
5018 type = 'i';
5019 (void) restore_line_pointer (c);
5020 }
5021 else
5022 {
5023 as_bad (_("Missing predicate relation type"));
5024 ignore_rest_of_line ();
5025 return;
5026 }
5027 if (type == 0)
5028 {
5029 as_bad (_("Unrecognized predicate relation type"));
5030 ignore_rest_of_line ();
5031 return;
5032 }
5033 if (*input_line_pointer == ',')
5034 ++input_line_pointer;
5035 SKIP_WHITESPACE ();
5036 }
5037
5038 while (1)
5039 {
5040 valueT bits = 1;
5041 int sep, regno;
5042 expressionS pr, *pr1, *pr2;
5043
5044 sep = parse_operand_and_eval (&pr, ',');
5045 if (pr.X_op == O_register
5046 && pr.X_add_number >= REG_P
5047 && pr.X_add_number <= REG_P + 63)
5048 {
5049 regno = pr.X_add_number - REG_P;
5050 bits <<= regno;
5051 count++;
5052 if (p1 == -1)
5053 p1 = regno;
5054 else if (p2 == -1)
5055 p2 = regno;
5056 }
5057 else if (type != 'i'
5058 && pr.X_op == O_subtract
5059 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5060 && pr1->X_op == O_register
5061 && pr1->X_add_number >= REG_P
5062 && pr1->X_add_number <= REG_P + 63
5063 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5064 && pr2->X_op == O_register
5065 && pr2->X_add_number >= REG_P
5066 && pr2->X_add_number <= REG_P + 63)
5067 {
5068 /* It's a range. */
5069 int stop;
5070
5071 regno = pr1->X_add_number - REG_P;
5072 stop = pr2->X_add_number - REG_P;
5073 if (regno >= stop)
5074 {
5075 as_bad (_("Bad register range"));
5076 ignore_rest_of_line ();
5077 return;
5078 }
5079 bits = ((bits << stop) << 1) - (bits << regno);
5080 count += stop - regno + 1;
5081 }
5082 else
5083 {
5084 as_bad (_("Predicate register expected"));
5085 ignore_rest_of_line ();
5086 return;
5087 }
5088 if (mask & bits)
5089 as_warn (_("Duplicate predicate register ignored"));
5090 mask |= bits;
5091 if (sep != ',')
5092 break;
5093 }
5094
5095 switch (type)
5096 {
5097 case 'c':
5098 if (count == 0)
5099 mask = ~(valueT) 0;
5100 clear_qp_mutex (mask);
5101 clear_qp_implies (mask, (valueT) 0);
5102 break;
5103 case 'i':
5104 if (count != 2 || p1 == -1 || p2 == -1)
5105 as_bad (_("Predicate source and target required"));
5106 else if (p1 == 0 || p2 == 0)
5107 as_bad (_("Use of p0 is not valid in this context"));
5108 else
5109 add_qp_imply (p1, p2);
5110 break;
5111 case 'm':
5112 if (count < 2)
5113 {
5114 as_bad (_("At least two PR arguments expected"));
5115 break;
5116 }
5117 else if (mask & 1)
5118 {
5119 as_bad (_("Use of p0 is not valid in this context"));
5120 break;
5121 }
5122 add_qp_mutex (mask);
5123 break;
5124 case 's':
5125 /* Note that we don't override any existing relations. */
5126 if (count == 0)
5127 {
5128 as_bad (_("At least one PR argument expected"));
5129 break;
5130 }
5131 if (md.debug_dv)
5132 {
5133 fprintf (stderr, "Safe across calls: ");
5134 print_prmask (mask);
5135 fprintf (stderr, "\n");
5136 }
5137 qp_safe_across_calls = mask;
5138 break;
5139 }
5140 demand_empty_rest_of_line ();
5141 }
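
/* Illustrative predicate-relation annotations (the register numbers are
   just examples):

	.pred.rel.mutex p6, p7          // at most one of p6/p7 is set
	.pred.rel "mutex", p6, p7       // equivalent quoted form
	.pred.safe_across_calls p1-p5, p16-p63

   Ranges such as p1-p5 are handled by the O_subtract case above.  */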
5142
5143 /* .entry label [, label [, ...]]
5144 Hint to DV code that the given labels are to be considered entry points.
5145 Otherwise, only global labels are considered entry points. */
5146
5147 static void
5148 dot_entry (int dummy ATTRIBUTE_UNUSED)
5149 {
5150 char *name;
5151 int c;
5152 symbolS *symbolP;
5153
5154 do
5155 {
5156 c = get_symbol_name (&name);
5157 symbolP = symbol_find_or_make (name);
5158
5159 if (str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), symbolP, 0))
5160 as_bad (_("duplicate entry hint %s"), name);
5161
5162 *input_line_pointer = c;
5163 SKIP_WHITESPACE_AFTER_NAME ();
5164 c = *input_line_pointer;
5165 if (c == ',')
5166 {
5167 input_line_pointer++;
5168 SKIP_WHITESPACE ();
5169 if (*input_line_pointer == '\n')
5170 c = '\n';
5171 }
5172 }
5173 while (c == ',');
5174
5175 demand_empty_rest_of_line ();
5176 }
5177
5178 /* .mem.offset offset, base
5179 "base" is used to distinguish between offsets from a different base. */
5180
5181 static void
5182 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5183 {
5184 md.mem_offset.hint = 1;
5185 md.mem_offset.offset = get_absolute_expression ();
5186 if (*input_line_pointer != ',')
5187 {
5188 as_bad (_("Comma expected"));
5189 ignore_rest_of_line ();
5190 return;
5191 }
5192 ++input_line_pointer;
5193 md.mem_offset.base = get_absolute_expression ();
5194 demand_empty_rest_of_line ();
5195 }
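
/* Illustrative .mem.offset use ahead of register spills (offsets are
   examples only):

	.mem.offset 0, 0
	st8.spill [r2] = r14
	.mem.offset 8, 0
	st8.spill [r3] = r15

   The hint tells the DV checker that the two stores target distinct
   offsets from the same base (0), so they do not conflict.  */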
5196
5197 /* ia64-specific pseudo-ops: */
5198 const pseudo_typeS md_pseudo_table[] =
5199 {
5200 { "radix", dot_radix, 0 },
5201 { "lcomm", s_lcomm_bytes, 1 },
5202 { "loc", dot_loc, 0 },
5203 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5204 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5205 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5206 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5207 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5208 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5209 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5210 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5211 { "proc", dot_proc, 0 },
5212 { "body", dot_body, 0 },
5213 { "prologue", dot_prologue, 0 },
5214 { "endp", dot_endp, 0 },
5215
5216 { "fframe", dot_fframe, 0 },
5217 { "vframe", dot_vframe, 0 },
5218 { "vframesp", dot_vframesp, 0 },
5219 { "vframepsp", dot_vframesp, 1 },
5220 { "save", dot_save, 0 },
5221 { "restore", dot_restore, 0 },
5222 { "restorereg", dot_restorereg, 0 },
5223 { "restorereg.p", dot_restorereg, 1 },
5224 { "handlerdata", dot_handlerdata, 0 },
5225 { "unwentry", dot_unwentry, 0 },
5226 { "altrp", dot_altrp, 0 },
5227 { "savesp", dot_savemem, 0 },
5228 { "savepsp", dot_savemem, 1 },
5229 { "save.g", dot_saveg, 0 },
5230 { "save.f", dot_savef, 0 },
5231 { "save.b", dot_saveb, 0 },
5232 { "save.gf", dot_savegf, 0 },
5233 { "spill", dot_spill, 0 },
5234 { "spillreg", dot_spillreg, 0 },
5235 { "spillsp", dot_spillmem, 0 },
5236 { "spillpsp", dot_spillmem, 1 },
5237 { "spillreg.p", dot_spillreg, 1 },
5238 { "spillsp.p", dot_spillmem, ~0 },
5239 { "spillpsp.p", dot_spillmem, ~1 },
5240 { "label_state", dot_label_state, 0 },
5241 { "copy_state", dot_copy_state, 0 },
5242 { "unwabi", dot_unwabi, 0 },
5243 { "personality", dot_personality, 0 },
5244 { "mii", dot_template, 0x0 },
5245 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5246 { "mlx", dot_template, 0x2 },
5247 { "mmi", dot_template, 0x4 },
5248 { "mfi", dot_template, 0x6 },
5249 { "mmf", dot_template, 0x7 },
5250 { "mib", dot_template, 0x8 },
5251 { "mbb", dot_template, 0x9 },
5252 { "bbb", dot_template, 0xb },
5253 { "mmb", dot_template, 0xc },
5254 { "mfb", dot_template, 0xe },
5255 { "align", dot_align, 0 },
5256 { "regstk", dot_regstk, 0 },
5257 { "rotr", dot_rot, DYNREG_GR },
5258 { "rotf", dot_rot, DYNREG_FR },
5259 { "rotp", dot_rot, DYNREG_PR },
5260 { "lsb", dot_byteorder, 0 },
5261 { "msb", dot_byteorder, 1 },
5262 { "psr", dot_psr, 0 },
5263 { "alias", dot_alias, 0 },
5264 { "secalias", dot_alias, 1 },
5265 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5266
5267 { "xdata1", dot_xdata, 1 },
5268 { "xdata2", dot_xdata, 2 },
5269 { "xdata4", dot_xdata, 4 },
5270 { "xdata8", dot_xdata, 8 },
5271 { "xdata16", dot_xdata, 16 },
5272 { "xreal4", dot_xfloat_cons, 'f' },
5273 { "xreal8", dot_xfloat_cons, 'd' },
5274 { "xreal10", dot_xfloat_cons, 'x' },
5275 { "xreal16", dot_xfloat_cons, 'X' },
5276 { "xstring", dot_xstringer, 8 + 0 },
5277 { "xstringz", dot_xstringer, 8 + 1 },
5278
5279 /* unaligned versions: */
5280 { "xdata2.ua", dot_xdata_ua, 2 },
5281 { "xdata4.ua", dot_xdata_ua, 4 },
5282 { "xdata8.ua", dot_xdata_ua, 8 },
5283 { "xdata16.ua", dot_xdata_ua, 16 },
5284 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5285 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5286 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5287 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5288
5289 /* annotations/DV checking support */
5290 { "entry", dot_entry, 0 },
5291 { "mem.offset", dot_mem_offset, 0 },
5292 { "pred.rel", dot_pred_rel, 0 },
5293 { "pred.rel.clear", dot_pred_rel, 'c' },
5294 { "pred.rel.imply", dot_pred_rel, 'i' },
5295 { "pred.rel.mutex", dot_pred_rel, 'm' },
5296 { "pred.safe_across_calls", dot_pred_rel, 's' },
5297 { "reg.val", dot_reg_val, 0 },
5298 { "serialize.data", dot_serialize, 0 },
5299 { "serialize.instruction", dot_serialize, 1 },
5300 { "auto", dot_dv_mode, 'a' },
5301 { "explicit", dot_dv_mode, 'e' },
5302 { "default", dot_dv_mode, 'd' },
5303
5304 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5305 IA-64 aligns data allocation pseudo-ops by default, so we have to
5306 tell it that these are supposed to be unaligned. Long term, this
5307 should be rewritten so that only IA-64 specific data allocation
5308 pseudo-ops are aligned by default. */
5309 {"2byte", stmt_cons_ua, 2},
5310 {"4byte", stmt_cons_ua, 4},
5311 {"8byte", stmt_cons_ua, 8},
5312
5313 #ifdef TE_VMS
5314 {"vms_common", obj_elf_vms_common, 0},
5315 #endif
5316
5317 { NULL, 0, 0 }
5318 };
5319
5320 static const struct pseudo_opcode
5321 {
5322 const char *name;
5323 void (*handler) (int);
5324 int arg;
5325 }
5326 pseudo_opcode[] =
5327 {
5328 /* these are more like pseudo-ops, but don't start with a dot */
5329 { "data1", cons, 1 },
5330 { "data2", cons, 2 },
5331 { "data4", cons, 4 },
5332 { "data8", cons, 8 },
5333 { "data16", cons, 16 },
5334 { "real4", stmt_float_cons, 'f' },
5335 { "real8", stmt_float_cons, 'd' },
5336 { "real10", stmt_float_cons, 'x' },
5337 { "real16", stmt_float_cons, 'X' },
5338 { "string", stringer, 8 + 0 },
5339 { "stringz", stringer, 8 + 1 },
5340
5341 /* unaligned versions: */
5342 { "data2.ua", stmt_cons_ua, 2 },
5343 { "data4.ua", stmt_cons_ua, 4 },
5344 { "data8.ua", stmt_cons_ua, 8 },
5345 { "data16.ua", stmt_cons_ua, 16 },
5346 { "real4.ua", float_cons, 'f' },
5347 { "real8.ua", float_cons, 'd' },
5348 { "real10.ua", float_cons, 'x' },
5349 { "real16.ua", float_cons, 'X' },
5350 };
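
/* Illustrative statements for the table above; unlike the dotted
   pseudo-ops, these appear where an instruction mnemonic would, e.g.

   msg:	stringz	"Hello"
   val:	data8	0x1234567890abcdef
   flt:	real10	3.1415926535
*/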
5351
5352 /* Declare a register by creating a symbol for it and entering it in
5353 the symbol table. */
5354
5355 static symbolS *
5356 declare_register (const char *name, unsigned int regnum)
5357 {
5358 symbolS *sym;
5359
5360 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5361
5362 if (str_hash_insert (md.reg_hash, S_GET_NAME (sym), sym, 0) != NULL)
5363 as_fatal (_("duplicate %s"), name);
5364
5365 return sym;
5366 }
5367
5368 static void
5369 declare_register_set (const char *prefix,
5370 unsigned int num_regs,
5371 unsigned int base_regnum)
5372 {
5373 char name[8];
5374 unsigned int i;
5375
5376 for (i = 0; i < num_regs; ++i)
5377 {
5378 snprintf (name, sizeof (name), "%s%u", prefix, i);
5379 declare_register (name, base_regnum + i);
5380 }
5381 }
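
/* For example, a (hypothetical) call mirroring how the register files are
   normally populated at startup:

	declare_register_set ("r", 128, REG_GR);

   would create the symbols r0..r127 with values REG_GR..REG_GR+127.  */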
5382
5383 static unsigned int
5384 operand_width (enum ia64_opnd opnd)
5385 {
5386 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5387 unsigned int bits = 0;
5388 int i;
5389
5390 bits = 0;
5391 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5392 bits += odesc->field[i].bits;
5393
5394 return bits;
5395 }
5396
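/* Check whether expression E matches operand RES_INDEX of opcode IDESC.
   Returns OPERAND_MATCH on success, OPERAND_OUT_OF_RANGE if the operand is
   of the right kind but its value does not fit, and OPERAND_MISMATCH
   otherwise.  As a side effect, relocatable operands append a fixup to the
   current slot.  */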
5397 static enum operand_match_result
5398 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5399 {
5400 enum ia64_opnd opnd = idesc->operands[res_index];
5401 int bits, relocatable = 0;
5402 struct insn_fix *fix;
5403 bfd_signed_vma val;
5404
5405 switch (opnd)
5406 {
5407 /* constants: */
5408
5409 case IA64_OPND_AR_CCV:
5410 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5411 return OPERAND_MATCH;
5412 break;
5413
5414 case IA64_OPND_AR_CSD:
5415 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5416 return OPERAND_MATCH;
5417 break;
5418
5419 case IA64_OPND_AR_PFS:
5420 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5421 return OPERAND_MATCH;
5422 break;
5423
5424 case IA64_OPND_GR0:
5425 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5426 return OPERAND_MATCH;
5427 break;
5428
5429 case IA64_OPND_IP:
5430 if (e->X_op == O_register && e->X_add_number == REG_IP)
5431 return OPERAND_MATCH;
5432 break;
5433
5434 case IA64_OPND_PR:
5435 if (e->X_op == O_register && e->X_add_number == REG_PR)
5436 return OPERAND_MATCH;
5437 break;
5438
5439 case IA64_OPND_PR_ROT:
5440 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5441 return OPERAND_MATCH;
5442 break;
5443
5444 case IA64_OPND_PSR:
5445 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5446 return OPERAND_MATCH;
5447 break;
5448
5449 case IA64_OPND_PSR_L:
5450 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5451 return OPERAND_MATCH;
5452 break;
5453
5454 case IA64_OPND_PSR_UM:
5455 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5456 return OPERAND_MATCH;
5457 break;
5458
5459 case IA64_OPND_C1:
5460 if (e->X_op == O_constant)
5461 {
5462 if (e->X_add_number == 1)
5463 return OPERAND_MATCH;
5464 else
5465 return OPERAND_OUT_OF_RANGE;
5466 }
5467 break;
5468
5469 case IA64_OPND_C8:
5470 if (e->X_op == O_constant)
5471 {
5472 if (e->X_add_number == 8)
5473 return OPERAND_MATCH;
5474 else
5475 return OPERAND_OUT_OF_RANGE;
5476 }
5477 break;
5478
5479 case IA64_OPND_C16:
5480 if (e->X_op == O_constant)
5481 {
5482 if (e->X_add_number == 16)
5483 return OPERAND_MATCH;
5484 else
5485 return OPERAND_OUT_OF_RANGE;
5486 }
5487 break;
5488
5489 /* register operands: */
5490
5491 case IA64_OPND_AR3:
5492 if (e->X_op == O_register && e->X_add_number >= REG_AR
5493 && e->X_add_number < REG_AR + 128)
5494 return OPERAND_MATCH;
5495 break;
5496
5497 case IA64_OPND_B1:
5498 case IA64_OPND_B2:
5499 if (e->X_op == O_register && e->X_add_number >= REG_BR
5500 && e->X_add_number < REG_BR + 8)
5501 return OPERAND_MATCH;
5502 break;
5503
5504 case IA64_OPND_CR3:
5505 if (e->X_op == O_register && e->X_add_number >= REG_CR
5506 && e->X_add_number < REG_CR + 128)
5507 return OPERAND_MATCH;
5508 break;
5509
5510 case IA64_OPND_DAHR3:
5511 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5512 && e->X_add_number < REG_DAHR + 8)
5513 return OPERAND_MATCH;
5514 break;
5515
5516 case IA64_OPND_F1:
5517 case IA64_OPND_F2:
5518 case IA64_OPND_F3:
5519 case IA64_OPND_F4:
5520 if (e->X_op == O_register && e->X_add_number >= REG_FR
5521 && e->X_add_number < REG_FR + 128)
5522 return OPERAND_MATCH;
5523 break;
5524
5525 case IA64_OPND_P1:
5526 case IA64_OPND_P2:
5527 if (e->X_op == O_register && e->X_add_number >= REG_P
5528 && e->X_add_number < REG_P + 64)
5529 return OPERAND_MATCH;
5530 break;
5531
5532 case IA64_OPND_R1:
5533 case IA64_OPND_R2:
5534 case IA64_OPND_R3:
5535 if (e->X_op == O_register && e->X_add_number >= REG_GR
5536 && e->X_add_number < REG_GR + 128)
5537 return OPERAND_MATCH;
5538 break;
5539
5540 case IA64_OPND_R3_2:
5541 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5542 {
5543 if (e->X_add_number < REG_GR + 4)
5544 return OPERAND_MATCH;
5545 else if (e->X_add_number < REG_GR + 128)
5546 return OPERAND_OUT_OF_RANGE;
5547 }
5548 break;
5549
5550 /* indirect operands: */
5551 case IA64_OPND_CPUID_R3:
5552 case IA64_OPND_DBR_R3:
5553 case IA64_OPND_DTR_R3:
5554 case IA64_OPND_ITR_R3:
5555 case IA64_OPND_IBR_R3:
5556 case IA64_OPND_MSR_R3:
5557 case IA64_OPND_PKR_R3:
5558 case IA64_OPND_PMC_R3:
5559 case IA64_OPND_PMD_R3:
5560 case IA64_OPND_DAHR_R3:
5561 case IA64_OPND_RR_R3:
5562 if (e->X_op == O_index && e->X_op_symbol
5563 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5564 == opnd - IA64_OPND_CPUID_R3))
5565 return OPERAND_MATCH;
5566 break;
5567
5568 case IA64_OPND_MR3:
5569 if (e->X_op == O_index && !e->X_op_symbol)
5570 return OPERAND_MATCH;
5571 break;
5572
5573 /* immediate operands: */
5574 case IA64_OPND_CNT2a:
5575 case IA64_OPND_LEN4:
5576 case IA64_OPND_LEN6:
5577 bits = operand_width (idesc->operands[res_index]);
5578 if (e->X_op == O_constant)
5579 {
5580 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5581 return OPERAND_MATCH;
5582 else
5583 return OPERAND_OUT_OF_RANGE;
5584 }
5585 break;
5586
5587 case IA64_OPND_CNT2b:
5588 if (e->X_op == O_constant)
5589 {
5590 if ((bfd_vma) (e->X_add_number - 1) < 3)
5591 return OPERAND_MATCH;
5592 else
5593 return OPERAND_OUT_OF_RANGE;
5594 }
5595 break;
5596
5597 case IA64_OPND_CNT2c:
5598 val = e->X_add_number;
5599 if (e->X_op == O_constant)
5600 {
5601 if ((val == 0 || val == 7 || val == 15 || val == 16))
5602 return OPERAND_MATCH;
5603 else
5604 return OPERAND_OUT_OF_RANGE;
5605 }
5606 break;
5607
5608 case IA64_OPND_SOR:
5609 /* SOR must be an integer multiple of 8 */
5610 if (e->X_op == O_constant && e->X_add_number & 0x7)
5611 return OPERAND_OUT_OF_RANGE;
5612 /* Fall through. */
5613 case IA64_OPND_SOF:
5614 case IA64_OPND_SOL:
5615 if (e->X_op == O_constant)
5616 {
5617 if ((bfd_vma) e->X_add_number <= 96)
5618 return OPERAND_MATCH;
5619 else
5620 return OPERAND_OUT_OF_RANGE;
5621 }
5622 break;
5623
5624 case IA64_OPND_IMMU62:
5625 if (e->X_op == O_constant)
5626 {
5627 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5628 return OPERAND_MATCH;
5629 else
5630 return OPERAND_OUT_OF_RANGE;
5631 }
5632 else
5633 {
5634 /* FIXME -- need 62-bit relocation type */
5635 as_bad (_("62-bit relocation not yet implemented"));
5636 }
5637 break;
5638
5639 case IA64_OPND_IMMU64:
5640 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5641 || e->X_op == O_subtract)
5642 {
5643 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5644 fix->code = BFD_RELOC_IA64_IMM64;
5645 if (e->X_op != O_subtract)
5646 {
5647 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5648 if (e->X_op == O_pseudo_fixup)
5649 e->X_op = O_symbol;
5650 }
5651
5652 fix->opnd = idesc->operands[res_index];
5653 fix->expr = *e;
5654 fix->is_pcrel = 0;
5655 ++CURR_SLOT.num_fixups;
5656 return OPERAND_MATCH;
5657 }
5658 else if (e->X_op == O_constant)
5659 return OPERAND_MATCH;
5660 break;
5661
5662 case IA64_OPND_IMMU5b:
5663 if (e->X_op == O_constant)
5664 {
5665 val = e->X_add_number;
5666 if (val >= 32 && val <= 63)
5667 return OPERAND_MATCH;
5668 else
5669 return OPERAND_OUT_OF_RANGE;
5670 }
5671 break;
5672
5673 case IA64_OPND_CCNT5:
5674 case IA64_OPND_CNT5:
5675 case IA64_OPND_CNT6:
5676 case IA64_OPND_CPOS6a:
5677 case IA64_OPND_CPOS6b:
5678 case IA64_OPND_CPOS6c:
5679 case IA64_OPND_IMMU2:
5680 case IA64_OPND_IMMU7a:
5681 case IA64_OPND_IMMU7b:
5682 case IA64_OPND_IMMU16:
5683 case IA64_OPND_IMMU19:
5684 case IA64_OPND_IMMU21:
5685 case IA64_OPND_IMMU24:
5686 case IA64_OPND_MBTYPE4:
5687 case IA64_OPND_MHTYPE8:
5688 case IA64_OPND_POS6:
5689 bits = operand_width (idesc->operands[res_index]);
5690 if (e->X_op == O_constant)
5691 {
5692 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5693 return OPERAND_MATCH;
5694 else
5695 return OPERAND_OUT_OF_RANGE;
5696 }
5697 break;
5698
5699 case IA64_OPND_IMMU9:
5700 bits = operand_width (idesc->operands[res_index]);
5701 if (e->X_op == O_constant)
5702 {
5703 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5704 {
5705 int lobits = e->X_add_number & 0x3;
5706 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5707 e->X_add_number |= (bfd_vma) 0x3;
5708 return OPERAND_MATCH;
5709 }
5710 else
5711 return OPERAND_OUT_OF_RANGE;
5712 }
5713 break;
5714
5715 case IA64_OPND_IMM44:
5716 /* The least significant 16 bits must be zero. */
5717 if ((e->X_add_number & 0xffff) != 0)
5718 /* XXX technically, this is wrong: we should not be issuing warning
5719 messages until we're sure this instruction pattern is going to
5720 be used! */
5721 as_warn (_("lower 16 bits of mask ignored"));
5722
5723 if (e->X_op == O_constant)
5724 {
5725 if (((e->X_add_number >= 0
5726 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5727 || (e->X_add_number < 0
5728 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5729 {
5730 /* sign-extend */
5731 if (e->X_add_number >= 0
5732 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5733 {
5734 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5735 }
5736 return OPERAND_MATCH;
5737 }
5738 else
5739 return OPERAND_OUT_OF_RANGE;
5740 }
5741 break;
5742
5743 case IA64_OPND_IMM17:
5744 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5745 if (e->X_op == O_constant)
5746 {
5747 if (((e->X_add_number >= 0
5748 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5749 || (e->X_add_number < 0
5750 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5751 {
5752 /* sign-extend */
5753 if (e->X_add_number >= 0
5754 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5755 {
5756 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5757 }
5758 return OPERAND_MATCH;
5759 }
5760 else
5761 return OPERAND_OUT_OF_RANGE;
5762 }
5763 break;
5764
5765 case IA64_OPND_IMM14:
5766 case IA64_OPND_IMM22:
5767 relocatable = 1;
5768 /* Fall through. */
5769 case IA64_OPND_IMM1:
5770 case IA64_OPND_IMM8:
5771 case IA64_OPND_IMM8U4:
5772 case IA64_OPND_IMM8M1:
5773 case IA64_OPND_IMM8M1U4:
5774 case IA64_OPND_IMM8M1U8:
5775 case IA64_OPND_IMM9a:
5776 case IA64_OPND_IMM9b:
5777 bits = operand_width (idesc->operands[res_index]);
5778 if (relocatable && (e->X_op == O_symbol
5779 || e->X_op == O_subtract
5780 || e->X_op == O_pseudo_fixup))
5781 {
5782 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5783
5784 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5785 fix->code = BFD_RELOC_IA64_IMM14;
5786 else
5787 fix->code = BFD_RELOC_IA64_IMM22;
5788
5789 if (e->X_op != O_subtract)
5790 {
5791 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5792 if (e->X_op == O_pseudo_fixup)
5793 e->X_op = O_symbol;
5794 }
5795
5796 fix->opnd = idesc->operands[res_index];
5797 fix->expr = *e;
5798 fix->is_pcrel = 0;
5799 ++CURR_SLOT.num_fixups;
5800 return OPERAND_MATCH;
5801 }
5802 else if (e->X_op != O_constant
5803 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5804 return OPERAND_MISMATCH;
5805
5806 if (opnd == IA64_OPND_IMM8M1U4)
5807 {
5808 /* Zero is not valid for unsigned compares that take an adjusted
5809 constant immediate range. */
5810 if (e->X_add_number == 0)
5811 return OPERAND_OUT_OF_RANGE;
5812
5813 /* Sign-extend 32-bit unsigned numbers, so that the following range
5814 checks will work. */
5815 val = e->X_add_number;
5816 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5817 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5818
5819 /* Check for 0x100000000. This is valid because
5820 0x100000000-1 is the same as ((uint32_t) -1). */
5821 if (val == ((bfd_signed_vma) 1 << 32))
5822 return OPERAND_MATCH;
5823
5824 val = val - 1;
5825 }
5826 else if (opnd == IA64_OPND_IMM8M1U8)
5827 {
5828 /* Zero is not valid for unsigned compares that take an adjusted
5829 constant immediate range. */
5830 if (e->X_add_number == 0)
5831 return OPERAND_OUT_OF_RANGE;
5832
5833 /* Check for 0x10000000000000000. */
5834 if (e->X_op == O_big)
5835 {
5836 if (generic_bignum[0] == 0
5837 && generic_bignum[1] == 0
5838 && generic_bignum[2] == 0
5839 && generic_bignum[3] == 0
5840 && generic_bignum[4] == 1)
5841 return OPERAND_MATCH;
5842 else
5843 return OPERAND_OUT_OF_RANGE;
5844 }
5845 else
5846 val = e->X_add_number - 1;
5847 }
5848 else if (opnd == IA64_OPND_IMM8M1)
5849 val = e->X_add_number - 1;
5850 else if (opnd == IA64_OPND_IMM8U4)
5851 {
5852 /* Sign-extend 32-bit unsigned numbers, so that the following range
5853 checks will work. */
5854 val = e->X_add_number;
5855 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5856 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5857 }
5858 else
5859 val = e->X_add_number;
5860
5861 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5862 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5863 return OPERAND_MATCH;
5864 else
5865 return OPERAND_OUT_OF_RANGE;
5866
5867 case IA64_OPND_INC3:
5868 /* +/- 1, 4, 8, 16 */
5869 val = e->X_add_number;
5870 if (val < 0)
5871 val = -val;
5872 if (e->X_op == O_constant)
5873 {
5874 if ((val == 1 || val == 4 || val == 8 || val == 16))
5875 return OPERAND_MATCH;
5876 else
5877 return OPERAND_OUT_OF_RANGE;
5878 }
5879 break;
5880
5881 case IA64_OPND_TGT25:
5882 case IA64_OPND_TGT25b:
5883 case IA64_OPND_TGT25c:
5884 case IA64_OPND_TGT64:
5885 if (e->X_op == O_symbol)
5886 {
5887 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5888 if (opnd == IA64_OPND_TGT25)
5889 fix->code = BFD_RELOC_IA64_PCREL21F;
5890 else if (opnd == IA64_OPND_TGT25b)
5891 fix->code = BFD_RELOC_IA64_PCREL21M;
5892 else if (opnd == IA64_OPND_TGT25c)
5893 fix->code = BFD_RELOC_IA64_PCREL21B;
5894 else if (opnd == IA64_OPND_TGT64)
5895 fix->code = BFD_RELOC_IA64_PCREL60B;
5896 else
5897 abort ();
5898
5899 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5900 fix->opnd = idesc->operands[res_index];
5901 fix->expr = *e;
5902 fix->is_pcrel = 1;
5903 ++CURR_SLOT.num_fixups;
5904 return OPERAND_MATCH;
5905 }
5906 /* Fall through. */
5907 case IA64_OPND_TAG13:
5908 case IA64_OPND_TAG13b:
5909 switch (e->X_op)
5910 {
5911 case O_constant:
5912 return OPERAND_MATCH;
5913
5914 case O_symbol:
5915 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5916 /* There are no external relocs for TAG13/TAG13b fields, so we
5917 create a dummy reloc. This will not live past md_apply_fix. */
5918 fix->code = BFD_RELOC_UNUSED;
5919 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5920 fix->opnd = idesc->operands[res_index];
5921 fix->expr = *e;
5922 fix->is_pcrel = 1;
5923 ++CURR_SLOT.num_fixups;
5924 return OPERAND_MATCH;
5925
5926 default:
5927 break;
5928 }
5929 break;
5930
5931 case IA64_OPND_LDXMOV:
5932 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5933 fix->code = BFD_RELOC_IA64_LDXMOV;
5934 fix->opnd = idesc->operands[res_index];
5935 fix->expr = *e;
5936 fix->is_pcrel = 0;
5937 ++CURR_SLOT.num_fixups;
5938 return OPERAND_MATCH;
5939
5940 case IA64_OPND_STRD5b:
5941 if (e->X_op == O_constant)
5942 {
5943 /* 5-bit signed scaled by 64 */
5944 if ((e->X_add_number <= ( 0xf << 6 ))
5945 && (e->X_add_number >= -( 0x10 << 6 )))
5946 {
5947
5948 /* Must be a multiple of 64 */
5949 if ((e->X_add_number & 0x3f) != 0)
5950 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5951
5952 e->X_add_number &= ~ 0x3f;
5953 return OPERAND_MATCH;
5954 }
5955 else
5956 return OPERAND_OUT_OF_RANGE;
5957 }
5958 break;
5959 case IA64_OPND_CNT6a:
5960 if (e->X_op == O_constant)
5961 {
5962 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5963 if ((e->X_add_number <= 64)
5964 && (e->X_add_number > 0) )
5965 {
5966 return OPERAND_MATCH;
5967 }
5968 else
5969 return OPERAND_OUT_OF_RANGE;
5970 }
5971 break;
5972
5973 default:
5974 break;
5975 }
5976 return OPERAND_MISMATCH;
5977 }
5978
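/* Parse a single operand into E, resolving register names but not folding
   the expression here.  MORE, if nonzero, is an additional separator
   character accepted besides ','.  The character at which parsing stopped
   is returned; if MORE is nonzero and that character is ',' or MORE, it is
   consumed.  parse_operand_and_eval additionally folds the expression,
   while parse_operand_maybe_eval leaves alone operand kinds that may still
   need a relocation.  */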
5979 static int
5980 parse_operand (expressionS *e, int more)
5981 {
5982 int sep = '\0';
5983
5984 memset (e, 0, sizeof (*e));
5985 e->X_op = O_absent;
5986 SKIP_WHITESPACE ();
5987 expression (e);
5988 resolve_register (e);
5989 sep = *input_line_pointer;
5990 if (more && (sep == ',' || sep == more))
5991 ++input_line_pointer;
5992 return sep;
5993 }
5994
5995 static int
5996 parse_operand_and_eval (expressionS *e, int more)
5997 {
5998 int sep = parse_operand (e, more);
5999 resolve_expression (e);
6000 return sep;
6001 }
6002
6003 static int
6004 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6005 {
6006 int sep = parse_operand (e, more);
6007 switch (op)
6008 {
6009 case IA64_OPND_IMM14:
6010 case IA64_OPND_IMM22:
6011 case IA64_OPND_IMMU64:
6012 case IA64_OPND_TGT25:
6013 case IA64_OPND_TGT25b:
6014 case IA64_OPND_TGT25c:
6015 case IA64_OPND_TGT64:
6016 case IA64_OPND_TAG13:
6017 case IA64_OPND_TAG13b:
6018 case IA64_OPND_LDXMOV:
6019 break;
6020 default:
6021 resolve_expression (e);
6022 break;
6023 }
6024 return sep;
6025 }
6026
6027 /* Returns the next entry in the opcode table that matches the one in
6028 IDESC, and frees the entry in IDESC. If no matching entry is
6029 found, NULL is returned instead. */
6030
6031 static struct ia64_opcode *
6032 get_next_opcode (struct ia64_opcode *idesc)
6033 {
6034 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6035 ia64_free_opcode (idesc);
6036 return next;
6037 }
6038
6039 /* Parse the operands for the opcode and find the opcode variant that
6040 matches the specified operands, or NULL if no match is possible. */
6041
6042 static struct ia64_opcode *
6043 parse_operands (struct ia64_opcode *idesc)
6044 {
6045 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6046 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6047 int reg1, reg2;
6048 char reg_class;
6049 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6050 enum operand_match_result result;
6051 char mnemonic[129];
6052 char *first_arg = 0, *end, *saved_input_pointer;
6053 unsigned int sof;
6054
6055 gas_assert (strlen (idesc->name) <= 128);
6056
6057 strcpy (mnemonic, idesc->name);
6058 if (idesc->operands[2] == IA64_OPND_SOF
6059 || idesc->operands[1] == IA64_OPND_SOF)
6060 {
6061 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6062 can't parse the first operand until we have parsed the
6063 remaining operands of the "alloc" instruction. */
6064 SKIP_WHITESPACE ();
6065 first_arg = input_line_pointer;
6066 end = strchr (input_line_pointer, '=');
6067 if (!end)
6068 {
6069 as_bad (_("Expected separator `='"));
6070 return 0;
6071 }
6072 input_line_pointer = end + 1;
6073 ++i;
6074 ++num_outputs;
6075 }
6076
6077 for (; ; ++i)
6078 {
6079 if (i < NELEMS (CURR_SLOT.opnd))
6080 {
6081 enum ia64_opnd op = IA64_OPND_NIL;
6082 if (i < NELEMS (idesc->operands))
6083 op = idesc->operands[i];
6084 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=', op);
6085 if (CURR_SLOT.opnd[i].X_op == O_absent)
6086 break;
6087 }
6088 else
6089 {
6090 expressionS dummy;
6091
6092 sep = parse_operand (&dummy, '=');
6093 if (dummy.X_op == O_absent)
6094 break;
6095 }
6096
6097 ++num_operands;
6098
6099 if (sep != '=' && sep != ',')
6100 break;
6101
6102 if (sep == '=')
6103 {
6104 if (num_outputs > 0)
6105 as_bad (_("Duplicate equal sign (=) in instruction"));
6106 else
6107 num_outputs = i + 1;
6108 }
6109 }
6110 if (sep != '\0')
6111 {
6112 as_bad (_("Illegal operand separator `%c'"), sep);
6113 return 0;
6114 }
6115
6116 if (idesc->operands[2] == IA64_OPND_SOF
6117 || idesc->operands[1] == IA64_OPND_SOF)
6118 {
6119 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6120 Note, however, that due to that mapping, operand numbers in error
6121 messages for any of the constant operands will not be correct. */
6122 know (strcmp (idesc->name, "alloc") == 0);
6123 /* The first operand hasn't been parsed/initialized yet (but
6124 num_operands intentionally doesn't account for that). */
6125 i = num_operands > 4 ? 2 : 1;
6126 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6127 ? CURR_SLOT.opnd[n].X_add_number \
6128 : 0)
6129 sof = set_regstack (FORCE_CONST(i),
6130 FORCE_CONST(i + 1),
6131 FORCE_CONST(i + 2),
6132 FORCE_CONST(i + 3));
6133 #undef FORCE_CONST
6134
6135 /* Now we can parse the first arg: */
6136 saved_input_pointer = input_line_pointer;
6137 input_line_pointer = first_arg;
6138 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6139 idesc->operands[0]);
6140 if (sep != '=')
6141 --num_outputs; /* force error */
6142 input_line_pointer = saved_input_pointer;
6143
6144 CURR_SLOT.opnd[i].X_add_number = sof;
6145 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6146 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6147 CURR_SLOT.opnd[i + 1].X_add_number
6148 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6149 else
6150 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6151 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6152 }
6153
6154 highest_unmatched_operand = -4;
6155 curr_out_of_range_pos = -1;
6156 error_pos = 0;
6157 for (; idesc; idesc = get_next_opcode (idesc))
6158 {
6159 if (num_outputs != idesc->num_outputs)
6160 continue; /* mismatch in # of outputs */
6161 if (highest_unmatched_operand < 0)
6162 highest_unmatched_operand |= 1;
6163 if (num_operands > NELEMS (idesc->operands)
6164 || (num_operands < NELEMS (idesc->operands)
6165 && idesc->operands[num_operands])
6166 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6167 continue; /* mismatch in number of arguments */
6168 if (highest_unmatched_operand < 0)
6169 highest_unmatched_operand |= 2;
6170
6171 CURR_SLOT.num_fixups = 0;
6172
6173 /* Try to match all operands. If we see an out-of-range operand,
6174 then continue trying to match the rest of the operands, since if
6175 the rest match, then this idesc will give the best error message. */
6176
6177 out_of_range_pos = -1;
6178 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6179 {
6180 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6181 if (result != OPERAND_MATCH)
6182 {
6183 if (result != OPERAND_OUT_OF_RANGE)
6184 break;
6185 if (out_of_range_pos < 0)
6186 /* remember position of the first out-of-range operand: */
6187 out_of_range_pos = i;
6188 }
6189 }
6190
6191 /* If we did not match all operands, or if at least one operand was
6192 out-of-range, then this idesc does not match. Keep track of which
6193 idesc matched the most operands before failing. If we have two
6194 idescs that failed at the same position, and one had an out-of-range
6195 operand, then prefer the out-of-range operand. Thus if we have
6196 "add r0=0x1000000,r1" we get an error saying the constant is out
6197 of range instead of an error saying that the constant should have been
6198 a register. */
6199
6200 if (i != num_operands || out_of_range_pos >= 0)
6201 {
6202 if (i > highest_unmatched_operand
6203 || (i == highest_unmatched_operand
6204 && out_of_range_pos > curr_out_of_range_pos))
6205 {
6206 highest_unmatched_operand = i;
6207 if (out_of_range_pos >= 0)
6208 {
6209 expected_operand = idesc->operands[out_of_range_pos];
6210 error_pos = out_of_range_pos;
6211 }
6212 else
6213 {
6214 expected_operand = idesc->operands[i];
6215 error_pos = i;
6216 }
6217 curr_out_of_range_pos = out_of_range_pos;
6218 }
6219 continue;
6220 }
6221
6222 break;
6223 }
6224 if (!idesc)
6225 {
6226 if (expected_operand)
6227 as_bad (_("Operand %u of `%s' should be %s"),
6228 error_pos + 1, mnemonic,
6229 elf64_ia64_operands[expected_operand].desc);
6230 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6231 as_bad (_("Wrong number of output operands"));
6232 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6233 as_bad (_("Wrong number of input operands"));
6234 else
6235 as_bad (_("Operand mismatch"));
6236 return 0;
6237 }
6238
6239 /* Check that the instruction doesn't use
6240 - r0, f0, or f1 as output operands
6241 - the same predicate twice as output operands
6242 - r0 as address of a base update load or store
6243 - the same GR as output and address of a base update load
6244 - two even- or two odd-numbered FRs as output operands of a floating
6245 point parallel load.
6246 At most two (conflicting) output (or output-like) operands can exist
6247 (floating point parallel loads have three outputs, but the base register,
6248 if updated, cannot conflict with the actual outputs). */
6249 reg2 = reg1 = -1;
6250 for (i = 0; i < num_operands; ++i)
6251 {
6252 int regno = 0;
6253
6254 reg_class = 0;
6255 switch (idesc->operands[i])
6256 {
6257 case IA64_OPND_R1:
6258 case IA64_OPND_R2:
6259 case IA64_OPND_R3:
6260 if (i < num_outputs)
6261 {
6262 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6263 reg_class = 'r';
6264 else if (reg1 < 0)
6265 reg1 = CURR_SLOT.opnd[i].X_add_number;
6266 else if (reg2 < 0)
6267 reg2 = CURR_SLOT.opnd[i].X_add_number;
6268 }
6269 break;
6270 case IA64_OPND_P1:
6271 case IA64_OPND_P2:
6272 if (i < num_outputs)
6273 {
6274 if (reg1 < 0)
6275 reg1 = CURR_SLOT.opnd[i].X_add_number;
6276 else if (reg2 < 0)
6277 reg2 = CURR_SLOT.opnd[i].X_add_number;
6278 }
6279 break;
6280 case IA64_OPND_F1:
6281 case IA64_OPND_F2:
6282 case IA64_OPND_F3:
6283 case IA64_OPND_F4:
6284 if (i < num_outputs)
6285 {
6286 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6287 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6288 {
6289 reg_class = 'f';
6290 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6291 }
6292 else if (reg1 < 0)
6293 reg1 = CURR_SLOT.opnd[i].X_add_number;
6294 else if (reg2 < 0)
6295 reg2 = CURR_SLOT.opnd[i].X_add_number;
6296 }
6297 break;
6298 case IA64_OPND_MR3:
6299 if (idesc->flags & IA64_OPCODE_POSTINC)
6300 {
6301 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6302 reg_class = 'm';
6303 else if (reg1 < 0)
6304 reg1 = CURR_SLOT.opnd[i].X_add_number;
6305 else if (reg2 < 0)
6306 reg2 = CURR_SLOT.opnd[i].X_add_number;
6307 }
6308 break;
6309 default:
6310 break;
6311 }
6312 switch (reg_class)
6313 {
6314 case 0:
6315 break;
6316 default:
6317 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6318 break;
6319 case 'm':
6320 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6321 break;
6322 }
6323 }
6324 if (reg1 == reg2)
6325 {
6326 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6327 {
6328 reg1 -= REG_GR;
6329 reg_class = 'r';
6330 }
6331 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6332 {
6333 reg1 -= REG_P;
6334 reg_class = 'p';
6335 }
6336 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6337 {
6338 reg1 -= REG_FR;
6339 reg_class = 'f';
6340 }
6341 else
6342 reg_class = 0;
6343 if (reg_class)
6344 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6345 }
6346 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6347 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6348 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6349 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6350 && ! ((reg1 ^ reg2) & 1))
6351 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6352 reg1 - REG_FR, reg2 - REG_FR);
6353 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6354 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6355 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6356 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6357 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6358 reg1 - REG_FR, reg2 - REG_FR);
6359 return idesc;
6360 }
6361
6362 static void
6363 build_insn (struct slot *slot, bfd_vma *insnp)
6364 {
6365 const struct ia64_operand *odesc, *o2desc;
6366 struct ia64_opcode *idesc = slot->idesc;
6367 bfd_vma insn;
6368 bfd_signed_vma val;
6369 const char *err;
6370 int i;
6371
6372 insn = idesc->opcode | slot->qp_regno;
6373
6374 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6375 {
6376 if (slot->opnd[i].X_op == O_register
6377 || slot->opnd[i].X_op == O_constant
6378 || slot->opnd[i].X_op == O_index)
6379 val = slot->opnd[i].X_add_number;
6380 else if (slot->opnd[i].X_op == O_big)
6381 {
6382 /* This must be the value 0x10000000000000000. */
6383 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6384 val = 0;
6385 }
6386 else
6387 val = 0;
6388
6389 switch (idesc->operands[i])
6390 {
6391 case IA64_OPND_IMMU64:
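	  /* Sketch of what the two statements below do (derived from this
	     code, not quoted from the ISA manual): bits 62..22 of the
	     64-bit immediate are stored into the preceding L slot via
	     *insnp++, while the remaining pieces (the usual imm7b, imm9d,
	     imm5c, ic and sign bit fields) are folded into the X-unit
	     instruction word itself.  */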
6392 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6393 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6394 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6395 | (((val >> 63) & 0x1) << 36));
6396 continue;
6397
6398 case IA64_OPND_IMMU62:
6399 val &= 0x3fffffffffffffffULL;
6400 if (val != slot->opnd[i].X_add_number)
6401 as_warn (_("Value truncated to 62 bits"));
6402 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6403 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6404 continue;
6405
6406 case IA64_OPND_TGT64:
6407 val >>= 4;
6408 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6409 insn |= ((((val >> 59) & 0x1) << 36)
6410 | (((val >> 0) & 0xfffff) << 13));
6411 continue;
6412
6413 case IA64_OPND_AR3:
6414 val -= REG_AR;
6415 break;
6416
6417 case IA64_OPND_B1:
6418 case IA64_OPND_B2:
6419 val -= REG_BR;
6420 break;
6421
6422 case IA64_OPND_CR3:
6423 val -= REG_CR;
6424 break;
6425
6426 case IA64_OPND_DAHR3:
6427 val -= REG_DAHR;
6428 break;
6429
6430 case IA64_OPND_F1:
6431 case IA64_OPND_F2:
6432 case IA64_OPND_F3:
6433 case IA64_OPND_F4:
6434 val -= REG_FR;
6435 break;
6436
6437 case IA64_OPND_P1:
6438 case IA64_OPND_P2:
6439 val -= REG_P;
6440 break;
6441
6442 case IA64_OPND_R1:
6443 case IA64_OPND_R2:
6444 case IA64_OPND_R3:
6445 case IA64_OPND_R3_2:
6446 case IA64_OPND_CPUID_R3:
6447 case IA64_OPND_DBR_R3:
6448 case IA64_OPND_DTR_R3:
6449 case IA64_OPND_ITR_R3:
6450 case IA64_OPND_IBR_R3:
6451 case IA64_OPND_MR3:
6452 case IA64_OPND_MSR_R3:
6453 case IA64_OPND_PKR_R3:
6454 case IA64_OPND_PMC_R3:
6455 case IA64_OPND_PMD_R3:
6456 case IA64_OPND_DAHR_R3:
6457 case IA64_OPND_RR_R3:
6458 val -= REG_GR;
6459 break;
6460
6461 default:
6462 break;
6463 }
6464
6465 odesc = elf64_ia64_operands + idesc->operands[i];
6466 err = (*odesc->insert) (odesc, val, &insn);
6467 if (err)
6468 as_bad_where (slot->src_file, slot->src_line,
6469 _("Bad operand value: %s"), err);
6470 if (idesc->flags & IA64_OPCODE_PSEUDO)
6471 {
6472 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6473 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6474 {
6475 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6476 (*o2desc->insert) (o2desc, val, &insn);
6477 }
6478 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6479 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6480 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6481 {
6482 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6483 (*o2desc->insert) (o2desc, 64 - val, &insn);
6484 }
6485 }
6486 }
6487 *insnp = insn;
6488 }
6489
6490 static void
6491 emit_one_bundle (void)
6492 {
6493 int manual_bundling_off = 0, manual_bundling = 0;
6494 enum ia64_unit required_unit, insn_unit = 0;
6495 enum ia64_insn_type type[3], insn_type;
6496 unsigned int template_val, orig_template;
6497 bfd_vma insn[3] = { -1, -1, -1 };
6498 struct ia64_opcode *idesc;
6499 int end_of_insn_group = 0, user_template = -1;
6500 int n, i, j, first, curr, last_slot;
6501 bfd_vma t0 = 0, t1 = 0;
6502 struct label_fix *lfix;
6503 bool mark_label;
6504 struct insn_fix *ifix;
6505 char mnemonic[16];
6506 fixS *fix;
6507 char *f;
6508 int addr_mod;
6509
6510 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6511 know (first >= 0 && first < NUM_SLOTS);
6512 n = MIN (3, md.num_slots_in_use);
6513
6514 /* Determine template: use user_template if specified, best match
6515 otherwise: */
6516
6517 if (md.slot[first].user_template >= 0)
6518 user_template = template_val = md.slot[first].user_template;
6519 else
6520 {
6521 /* Auto select appropriate template. */
6522 memset (type, 0, sizeof (type));
6523 curr = first;
6524 for (i = 0; i < n; ++i)
6525 {
6526 if (md.slot[curr].label_fixups && i != 0)
6527 break;
6528 type[i] = md.slot[curr].idesc->type;
6529 curr = (curr + 1) % NUM_SLOTS;
6530 }
6531 template_val = best_template[type[0]][type[1]][type[2]];
6532 }
6533
6534 /* initialize instructions with appropriate nops: */
6535 for (i = 0; i < 3; ++i)
6536 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6537
6538 f = frag_more (16);
6539
6540 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6541 from the start of the frag. */
6542 addr_mod = frag_now_fix () & 15;
6543 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6544 as_bad (_("instruction address is not a multiple of 16"));
6545 frag_now->insn_addr = addr_mod;
6546 frag_now->has_code = 1;
6547
6548 /* now fill in slots with as many insns as possible: */
6549 curr = first;
6550 idesc = md.slot[curr].idesc;
6551 end_of_insn_group = 0;
6552 last_slot = -1;
6553 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6554 {
6555 /* If we have unwind records, we may need to update some now. */
6556 unw_rec_list *ptr = md.slot[curr].unwind_record;
6557 unw_rec_list *end_ptr = NULL;
6558
6559 if (ptr)
6560 {
6561 /* Find the last prologue/body record in the list for the current
6562 insn, and set the slot number for all records up to that point.
6563 This needs to be done now, because prologue/body records refer to
6564 the current point, not the point after the instruction has been
6565 issued. This matters because there may have been nops emitted
6566 meanwhile. Any non-prologue non-body record followed by a
6567 prologue/body record must also refer to the current point. */
6568 unw_rec_list *last_ptr;
6569
6570 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6571 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6572 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6573 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6574 || ptr->r.type == body)
6575 last_ptr = ptr;
6576 if (last_ptr)
6577 {
6578 /* Make last_ptr point one after the last prologue/body
6579 record. */
6580 last_ptr = last_ptr->next;
6581 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6582 ptr = ptr->next)
6583 {
6584 ptr->slot_number = (unsigned long) f + i;
6585 ptr->slot_frag = frag_now;
6586 }
6587 /* Remove the initialized records, so that we won't accidentally
6588 update them again if we insert a nop and continue. */
6589 md.slot[curr].unwind_record = last_ptr;
6590 }
6591 }
6592
6593 manual_bundling_off = md.slot[curr].manual_bundling_off;
6594 if (md.slot[curr].manual_bundling_on)
6595 {
6596 if (curr == first)
6597 manual_bundling = 1;
6598 else
6599 break; /* Need to start a new bundle. */
6600 }
6601
6602 /* If this instruction specifies a template, then it must be the first
6603 instruction of a bundle. */
6604 if (curr != first && md.slot[curr].user_template >= 0)
6605 break;
6606
6607 if (idesc->flags & IA64_OPCODE_SLOT2)
6608 {
6609 if (manual_bundling && !manual_bundling_off)
6610 {
6611 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6612 _("`%s' must be last in bundle"), idesc->name);
6613 if (i < 2)
6614 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6615 }
6616 i = 2;
6617 }
6618 if (idesc->flags & IA64_OPCODE_LAST)
6619 {
6620 int required_slot;
6621 unsigned int required_template;
6622
6623 /* If we need a stop bit after an M slot, our only choice is
6624 template 5 (M;;MI). If we need a stop bit after a B
6625 slot, our only choice is to place it at the end of the
6626 bundle, because the only available templates are MIB,
6627 MBB, BBB, MMB, and MFB. We don't handle anything other
6628 than M and B slots because these are the only kinds of
6629 instructions that can have the IA64_OPCODE_LAST bit set. */
6630 required_template = template_val;
6631 switch (idesc->type)
6632 {
6633 case IA64_TYPE_M:
6634 required_slot = 0;
6635 required_template = 5;
6636 break;
6637
6638 case IA64_TYPE_B:
6639 required_slot = 2;
6640 break;
6641
6642 default:
6643 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6644 _("Internal error: don't know how to force %s to end of instruction group"),
6645 idesc->name);
6646 required_slot = i;
6647 break;
6648 }
6649 if (manual_bundling
6650 && (i > required_slot
6651 || (required_slot == 2 && !manual_bundling_off)
6652 || (user_template >= 0
6653 /* Changing from MMI to M;MI is OK. */
6654 && (template_val ^ required_template) > 1)))
6655 {
6656 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6657 _("`%s' must be last in instruction group"),
6658 idesc->name);
6659 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6660 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6661 }
6662 if (required_slot < i)
6663 /* Can't fit this instruction. */
6664 break;
6665
6666 i = required_slot;
6667 if (required_template != template_val)
6668 {
6669 /* If we switch the template, we need to reset the NOPs
6670 after slot i. The slot-types of the instructions ahead
6671 of i never change, so we don't need to worry about
6672 changing NOPs in front of this slot. */
6673 for (j = i; j < 3; ++j)
6674 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6675
6676 /* We just picked a template that includes the stop bit in the
6677 middle, so we don't need another one emitted later. */
6678 md.slot[curr].end_of_insn_group = 0;
6679 }
6680 template_val = required_template;
6681 }
6682 if (curr != first && md.slot[curr].label_fixups)
6683 {
6684 if (manual_bundling)
6685 {
6686 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6687 _("Label must be first in a bundle"));
6688 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6689 }
6690 /* This insn must go into the first slot of a bundle. */
6691 break;
6692 }
6693
6694 if (end_of_insn_group && md.num_slots_in_use >= 1)
6695 {
6696 /* We need an instruction group boundary in the middle of a
6697 bundle. See if we can switch to another template with
6698 an appropriate boundary. */
6699
6700 orig_template = template_val;
6701 if (i == 1 && (user_template == 4
6702 || (user_template < 0
6703 && (ia64_templ_desc[template_val].exec_unit[0]
6704 == IA64_UNIT_M))))
6705 {
6706 template_val = 5;
6707 end_of_insn_group = 0;
6708 }
6709 else if (i == 2 && (user_template == 0
6710 || (user_template < 0
6711 && (ia64_templ_desc[template_val].exec_unit[1]
6712 == IA64_UNIT_I)))
6713 /* This test makes sure we don't switch the template if
6714 the next instruction is one that needs to be first in
6715 an instruction group. Since all those instructions are
6716 in the M group, there is no way such an instruction can
6717 fit in this bundle even if we switch the template. The
6718 reason we have to check for this is that otherwise we
6719 may end up generating "MI;;I M.." which has the deadly
6720 effect that the second M instruction is no longer the
6721 first in the group! --davidm 99/12/16 */
6722 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6723 {
6724 template_val = 1;
6725 end_of_insn_group = 0;
6726 }
6727 else if (i == 1
6728 && user_template == 0
6729 && !(idesc->flags & IA64_OPCODE_FIRST))
6730 /* Use the next slot. */
6731 continue;
6732 else if (curr != first)
6733 /* can't fit this insn */
6734 break;
6735
6736 if (template_val != orig_template)
6737 /* if we switch the template, we need to reset the NOPs
6738 after slot i. The slot-types of the instructions ahead
6739 of i never change, so we don't need to worry about
6740 changing NOPs in front of this slot. */
6741 for (j = i; j < 3; ++j)
6742 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6743 }
6744 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6745
6746 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6747 if (idesc->type == IA64_TYPE_DYN)
6748 {
6749 enum ia64_opnd opnd1, opnd2;
6750
6751 if ((strcmp (idesc->name, "nop") == 0)
6752 || (strcmp (idesc->name, "break") == 0))
6753 insn_unit = required_unit;
6754 else if (strcmp (idesc->name, "hint") == 0)
6755 {
6756 insn_unit = required_unit;
6757 if (required_unit == IA64_UNIT_B)
6758 {
6759 switch (md.hint_b)
6760 {
6761 case hint_b_ok:
6762 break;
6763 case hint_b_warning:
6764 as_warn (_("hint in B unit may be treated as nop"));
6765 break;
6766 case hint_b_error:
6767 /* When manual bundling is off and there is no
6768 user template, we choose a different unit so
6769 that hint won't go into the current slot. We
6770 will fill the current bundle with nops and
6771 try to put hint into the next bundle. */
6772 if (!manual_bundling && user_template < 0)
6773 insn_unit = IA64_UNIT_I;
6774 else
6775 as_bad (_("hint in B unit can't be used"));
6776 break;
6777 }
6778 }
6779 }
6780 else if (strcmp (idesc->name, "chk.s") == 0
6781 || strcmp (idesc->name, "mov") == 0)
6782 {
6783 insn_unit = IA64_UNIT_M;
6784 if (required_unit == IA64_UNIT_I
6785 || (required_unit == IA64_UNIT_F && template_val == 6))
6786 insn_unit = IA64_UNIT_I;
6787 }
6788 else
6789 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6790
6791 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6792 idesc->name, "?imbfxx"[insn_unit]);
6793 opnd1 = idesc->operands[0];
6794 opnd2 = idesc->operands[1];
6795 ia64_free_opcode (idesc);
6796 idesc = ia64_find_opcode (mnemonic);
6797 /* moves to/from ARs have collisions */
6798 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6799 {
6800 while (idesc != NULL
6801 && (idesc->operands[0] != opnd1
6802 || idesc->operands[1] != opnd2))
6803 idesc = get_next_opcode (idesc);
6804 }
6805 md.slot[curr].idesc = idesc;
6806 }
6807 else
6808 {
6809 insn_type = idesc->type;
6810 insn_unit = IA64_UNIT_NIL;
6811 switch (insn_type)
6812 {
6813 case IA64_TYPE_A:
6814 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6815 insn_unit = required_unit;
6816 break;
6817 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6818 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6819 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6820 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6821 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6822 default: break;
6823 }
6824 }
6825
6826 if (insn_unit != required_unit)
6827 continue; /* Try next slot. */
6828
6829 /* Now is a good time to fix up the labels for this insn. */
6830 mark_label = false;
6831 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6832 {
6833 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6834 symbol_set_frag (lfix->sym, frag_now);
6835 mark_label |= lfix->dw2_mark_labels;
6836 }
6837 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6838 {
6839 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6840 symbol_set_frag (lfix->sym, frag_now);
6841 }
6842
6843 if (debug_type == DEBUG_DWARF2
6844 || md.slot[curr].loc_directive_seen
6845 || mark_label)
6846 {
6847 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6848
6849 md.slot[curr].loc_directive_seen = 0;
6850 if (mark_label)
6851 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6852
6853 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6854 }
6855
6856 build_insn (md.slot + curr, insn + i);
6857
6858 ptr = md.slot[curr].unwind_record;
6859 if (ptr)
6860 {
6861 /* Set slot numbers for all remaining unwind records belonging to the
6862 current insn. There can not be any prologue/body unwind records
6863 here. */
6864 for (; ptr != end_ptr; ptr = ptr->next)
6865 {
6866 ptr->slot_number = (unsigned long) f + i;
6867 ptr->slot_frag = frag_now;
6868 }
6869 md.slot[curr].unwind_record = NULL;
6870 }
6871
6872 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6873 {
6874 unsigned long where;
6875
6876 ifix = md.slot[curr].fixup + j;
6877 where = frag_now_fix () - 16 + i;
6878 #ifdef TE_HPUX
6879 /* Relocations for instructions specify the slot in the
6880 bottom two bits of r_offset. The IA64 HP-UX linker
6881 expects PCREL60B relocations to specify slot 2 of an
6882 instruction. gas generates PCREL60B against slot 1. */
6883 if (ifix->code == BFD_RELOC_IA64_PCREL60B)
6884 {
6885 know (i == 1);
6886 ++where;
6887 }
6888 #endif
6889
6890 fix = fix_new_exp (frag_now, where, 8,
6891 &ifix->expr, ifix->is_pcrel, ifix->code);
6892 fix->tc_fix_data.opnd = ifix->opnd;
6893 fix->fx_file = md.slot[curr].src_file;
6894 fix->fx_line = md.slot[curr].src_line;
6895 }
6896
6897 end_of_insn_group = md.slot[curr].end_of_insn_group;
6898
6899 /* This adjustment to "i" must occur after the fix, otherwise the fix
6900 is assigned to the wrong slot, and the VMS linker complains. */
6901 if (required_unit == IA64_UNIT_L)
6902 {
6903 know (i == 1);
6904 /* skip one slot for long/X-unit instructions */
6905 ++i;
6906 }
6907 --md.num_slots_in_use;
6908 last_slot = i;
6909
6910 /* clear slot: */
6911 ia64_free_opcode (md.slot[curr].idesc);
6912 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6913 md.slot[curr].user_template = -1;
6914
6915 if (manual_bundling_off)
6916 {
6917 manual_bundling = 0;
6918 break;
6919 }
6920 curr = (curr + 1) % NUM_SLOTS;
6921 idesc = md.slot[curr].idesc;
6922 }
6923
6924 /* A user template was specified, but the first following instruction did
6925 not fit. This can happen with or without manual bundling. */
6926 if (md.num_slots_in_use > 0 && last_slot < 0)
6927 {
6928 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6929 _("`%s' does not fit into %s template"),
6930 idesc->name, ia64_templ_desc[template_val].name);
6931 /* Drop first insn so we don't livelock. */
6932 --md.num_slots_in_use;
6933 know (curr == first);
6934 ia64_free_opcode (md.slot[curr].idesc);
6935 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6936 md.slot[curr].user_template = -1;
6937 }
6938 else if (manual_bundling > 0)
6939 {
6940 if (md.num_slots_in_use > 0)
6941 {
6942 if (last_slot >= 2)
6943 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6944 _("`%s' does not fit into bundle"), idesc->name);
6945 else
6946 {
6947 const char *where;
6948
6949 if (template_val == 2)
6950 where = "X slot";
6951 else if (last_slot == 0)
6952 where = "slots 2 or 3";
6953 else
6954 where = "slot 3";
6955 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6956 _("`%s' can't go in %s of %s template"),
6957 idesc->name, where, ia64_templ_desc[template_val].name);
6958 }
6959 }
6960 else
6961 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6962 _("Missing '}' at end of file"));
6963 }
6964
6965 know (md.num_slots_in_use < NUM_SLOTS);
6966
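  /* Layout reminder, matching the shifts below: an IA-64 bundle is 128 bits,
     a 5-bit template field (whose low bit carries the trailing stop marker
     here) followed by three 41-bit slots at bit offsets 5, 46 and 87.  t0
     holds the low 64 bits and t1 the high 64 bits, so slot 1 straddles the
     two words.  */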
6967 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6968 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6969
6970 number_to_chars_littleendian (f + 0, t0, 8);
6971 number_to_chars_littleendian (f + 8, t1, 8);
6972 }
6973
6974 int
6975 md_parse_option (int c, const char *arg)
6976 {
6977
6978 switch (c)
6979 {
6980 /* Switches from the Intel assembler. */
6981 case 'm':
6982 if (strcmp (arg, "ilp64") == 0
6983 || strcmp (arg, "lp64") == 0
6984 || strcmp (arg, "p64") == 0)
6985 {
6986 md.flags |= EF_IA_64_ABI64;
6987 }
6988 else if (strcmp (arg, "ilp32") == 0)
6989 {
6990 md.flags &= ~EF_IA_64_ABI64;
6991 }
6992 else if (strcmp (arg, "le") == 0)
6993 {
6994 md.flags &= ~EF_IA_64_BE;
6995 default_big_endian = 0;
6996 }
6997 else if (strcmp (arg, "be") == 0)
6998 {
6999 md.flags |= EF_IA_64_BE;
7000 default_big_endian = 1;
7001 }
7002 else if (startswith (arg, "unwind-check="))
7003 {
7004 arg += 13;
7005 if (strcmp (arg, "warning") == 0)
7006 md.unwind_check = unwind_check_warning;
7007 else if (strcmp (arg, "error") == 0)
7008 md.unwind_check = unwind_check_error;
7009 else
7010 return 0;
7011 }
7012 else if (startswith (arg, "hint.b="))
7013 {
7014 arg += 7;
7015 if (strcmp (arg, "ok") == 0)
7016 md.hint_b = hint_b_ok;
7017 else if (strcmp (arg, "warning") == 0)
7018 md.hint_b = hint_b_warning;
7019 else if (strcmp (arg, "error") == 0)
7020 md.hint_b = hint_b_error;
7021 else
7022 return 0;
7023 }
7024 else if (startswith (arg, "tune="))
7025 {
7026 arg += 5;
7027 if (strcmp (arg, "itanium1") == 0)
7028 md.tune = itanium1;
7029 else if (strcmp (arg, "itanium2") == 0)
7030 md.tune = itanium2;
7031 else
7032 return 0;
7033 }
7034 else
7035 return 0;
7036 break;
7037
7038 case 'N':
7039 if (strcmp (arg, "so") == 0)
7040 {
7041 /* Suppress signon message. */
7042 }
7043 else if (strcmp (arg, "pi") == 0)
7044 {
7045 /* Reject privileged instructions. FIXME */
7046 }
7047 else if (strcmp (arg, "us") == 0)
7048 {
7049 /* Allow union of signed and unsigned range. FIXME */
7050 }
7051 else if (strcmp (arg, "close_fcalls") == 0)
7052 {
7053 /* Do not resolve global function calls. */
7054 }
7055 else
7056 return 0;
7057 break;
7058
7059 case 'C':
7060 /* temp[="prefix"] Insert temporary labels into the object file
7061 symbol table prefixed by "prefix".
7062 Default prefix is ":temp:".
7063 */
7064 break;
7065
7066 case 'a':
7067 /* indirect=<tgt> Assume unannotated indirect branches behave
7068 according to <tgt> --
7069 exit: branch out from the current context (default)
7070 labels: all labels in context may be branch targets
7071 */
7072 if (!startswith (arg, "indirect="))
7073 return 0;
7074 break;
7075
7076 case 'x':
7077 /* -X conflicts with an ignored option, use -x instead */
7078 md.detect_dv = 1;
7079 if (!arg || strcmp (arg, "explicit") == 0)
7080 {
7081 /* set default mode to explicit */
7082 md.default_explicit_mode = 1;
7083 break;
7084 }
7085 else if (strcmp (arg, "auto") == 0)
7086 {
7087 md.default_explicit_mode = 0;
7088 }
7089 else if (strcmp (arg, "none") == 0)
7090 {
7091 md.detect_dv = 0;
7092 }
7093 else if (strcmp (arg, "debug") == 0)
7094 {
7095 md.debug_dv = 1;
7096 }
7097 else if (strcmp (arg, "debugx") == 0)
7098 {
7099 md.default_explicit_mode = 1;
7100 md.debug_dv = 1;
7101 }
7102 else if (strcmp (arg, "debugn") == 0)
7103 {
7104 md.debug_dv = 1;
7105 md.detect_dv = 0;
7106 }
7107 else
7108 {
7109 as_bad (_("Unrecognized option '-x%s'"), arg);
7110 }
7111 break;
7112
7113 case 'S':
7114 /* nops Print nops statistics. */
7115 break;
7116
7117 /* GNU specific switches for gcc. */
7118 case OPTION_MCONSTANT_GP:
7119 md.flags |= EF_IA_64_CONS_GP;
7120 break;
7121
7122 case OPTION_MAUTO_PIC:
7123 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7124 break;
7125
7126 default:
7127 return 0;
7128 }
7129
7130 return 1;
7131 }
7132
7133 void
7134 md_show_usage (FILE *stream)
7135 {
7136 fputs (_("\
7137 IA-64 options:\n\
7138 --mconstant-gp mark output file as using the constant-GP model\n\
7139 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7140 --mauto-pic mark output file as using the constant-GP model\n\
7141 without function descriptors (sets ELF header flag\n\
7142 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7143 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7144 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7145 -mtune=[itanium1|itanium2]\n\
7146 tune for a specific CPU (default -mtune=itanium2)\n\
7147 -munwind-check=[warning|error]\n\
7148 unwind directive check (default -munwind-check=warning)\n\
7149 -mhint.b=[ok|warning|error]\n\
7150 hint.b check (default -mhint.b=error)\n\
7151 -x | -xexplicit turn on dependency violation checking\n"), stream);
7152 /* Note for translators: "automagically" can be translated as "automatically" here. */
7153 fputs (_("\
7154 -xauto automagically remove dependency violations (default)\n\
7155 -xnone turn off dependency violation checking\n\
7156 -xdebug debug dependency violation checker\n\
7157 -xdebugn debug dependency violation checker but turn off\n\
7158 dependency violation checking\n\
7159 -xdebugx debug dependency violation checker and turn on\n\
7160 dependency violation checking\n"),
7161 stream);
7162 }
7163
7164 void
7165 ia64_after_parse_args (void)
7166 {
7167 if (debug_type == DEBUG_STABS)
7168 as_fatal (_("--gstabs is not supported for ia64"));
7169 }
7170
7171 /* Return true if TYPE fits in TEMPL at SLOT. */
7172
7173 static int
7174 match (int templ, int type, int slot)
7175 {
7176 enum ia64_unit unit;
7177 int result;
7178
7179 unit = ia64_templ_desc[templ].exec_unit[slot];
7180 switch (type)
7181 {
7182 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7183 case IA64_TYPE_A:
7184 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7185 break;
7186 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7187 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7188 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7189 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7190 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7191 default: result = 0; break;
7192 }
7193 return result;
7194 }
7195
7196 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7197 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7198 type M or I would fit in TEMPL at SLOT. */
7199
7200 static inline int
7201 extra_goodness (int templ, int slot)
7202 {
7203 switch (md.tune)
7204 {
7205 case itanium1:
7206 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7207 return 2;
7208 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7209 return 1;
7210 else
7211 return 0;
7212 break;
7213 case itanium2:
7214 if (match (templ, IA64_TYPE_M, slot)
7215 || match (templ, IA64_TYPE_I, slot))
7216 /* Favor M- and I-unit NOPs. We definitely want to avoid
7217 F-unit and B-unit NOPs, since they may cause split-issue or
7218 less-than-optimal branch-prediction. */
7219 return 2;
7220 else
7221 return 0;
7222 break;
7223 default:
7224 abort ();
7225 return 0;
7226 }
7227 }
7228
7229 /* This function is called once, at assembler startup time. It sets
7230 up all the tables, etc. that the MD part of the assembler will need
7231 that can be determined before arguments are parsed. */
7232 void
7233 md_begin (void)
7234 {
7235 int i, j, k, t, goodness, best, ok;
7236
7237 md.auto_align = 1;
7238 md.explicit_mode = md.default_explicit_mode;
7239
7240 bfd_set_section_alignment (text_section, 4);
7241
7242 /* Make sure function pointers get initialized. */
7243 target_big_endian = -1;
7244 dot_byteorder (default_big_endian);
7245
7246 alias_hash = str_htab_create ();
7247 alias_name_hash = str_htab_create ();
7248 secalias_hash = str_htab_create ();
7249 secalias_name_hash = str_htab_create ();
7250
7251 pseudo_func[FUNC_DTP_MODULE].u.sym =
7252 symbol_new (".<dtpmod>", undefined_section,
7253 &zero_address_frag, FUNC_DTP_MODULE);
7254
7255 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7256 symbol_new (".<dtprel>", undefined_section,
7257 &zero_address_frag, FUNC_DTP_RELATIVE);
7258
7259 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7260 symbol_new (".<fptr>", undefined_section,
7261 &zero_address_frag, FUNC_FPTR_RELATIVE);
7262
7263 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7264 symbol_new (".<gprel>", undefined_section,
7265 &zero_address_frag, FUNC_GP_RELATIVE);
7266
7267 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7268 symbol_new (".<ltoff>", undefined_section,
7269 &zero_address_frag, FUNC_LT_RELATIVE);
7270
7271 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7272 symbol_new (".<ltoffx>", undefined_section,
7273 &zero_address_frag, FUNC_LT_RELATIVE_X);
7274
7275 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7276 symbol_new (".<pcrel>", undefined_section,
7277 &zero_address_frag, FUNC_PC_RELATIVE);
7278
7279 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7280 symbol_new (".<pltoff>", undefined_section,
7281 &zero_address_frag, FUNC_PLT_RELATIVE);
7282
7283 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7284 symbol_new (".<secrel>", undefined_section,
7285 &zero_address_frag, FUNC_SEC_RELATIVE);
7286
7287 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7288 symbol_new (".<segrel>", undefined_section,
7289 &zero_address_frag, FUNC_SEG_RELATIVE);
7290
7291 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7292 symbol_new (".<tprel>", undefined_section,
7293 &zero_address_frag, FUNC_TP_RELATIVE);
7294
7295 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7296 symbol_new (".<ltv>", undefined_section,
7297 &zero_address_frag, FUNC_LTV_RELATIVE);
7298
7299 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7300 symbol_new (".<ltoff.fptr>", undefined_section,
7301 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7302
7303 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7304 symbol_new (".<ltoff.dtpmod>", undefined_section,
7305 &zero_address_frag, FUNC_LT_DTP_MODULE);
7306
7307 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7308 symbol_new (".<ltoff.dptrel>", undefined_section,
7309 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7310
7311 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7312 symbol_new (".<ltoff.tprel>", undefined_section,
7313 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7314
7315 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7316 symbol_new (".<iplt>", undefined_section,
7317 &zero_address_frag, FUNC_IPLT_RELOC);
7318
7319 #ifdef TE_VMS
7320 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7321 symbol_new (".<slotcount>", undefined_section,
7322 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7323 #endif
7324
7325 if (md.tune != itanium1)
7326 {
7327 /* Convert MFI NOP bundles into MMI NOP bundles. */
7328 le_nop[0] = 0x8;
7329 le_nop_stop[0] = 0x9;
7330 }
7331
7332 /* Compute the table of best templates. We compute goodness as a
7333 base 4 value, in which each match counts for 3. Match-failures
7334 result in NOPs and we use extra_goodness() to pick the execution
7335 units that are best suited for issuing the NOP. */
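  /* For example (illustrative, itanium2 tuning): an M, I, I sequence matches
     template MII in all three slots, scoring 3 + 3 + 3 = 9, while against
     MFI only the first and last instructions find a slot and the F slot adds
     no extra_goodness bonus, scoring 6; MII is therefore recorded as the
     best template for that combination.  */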
7336 for (i = 0; i < IA64_NUM_TYPES; ++i)
7337 for (j = 0; j < IA64_NUM_TYPES; ++j)
7338 for (k = 0; k < IA64_NUM_TYPES; ++k)
7339 {
7340 best = 0;
7341 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7342 {
7343 goodness = 0;
7344 if (match (t, i, 0))
7345 {
7346 if (match (t, j, 1))
7347 {
7348 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7349 goodness = 3 + 3 + 3;
7350 else
7351 goodness = 3 + 3 + extra_goodness (t, 2);
7352 }
7353 else if (match (t, j, 2))
7354 goodness = 3 + 3 + extra_goodness (t, 1);
7355 else
7356 {
7357 goodness = 3;
7358 goodness += extra_goodness (t, 1);
7359 goodness += extra_goodness (t, 2);
7360 }
7361 }
7362 else if (match (t, i, 1))
7363 {
7364 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7365 goodness = 3 + 3;
7366 else
7367 goodness = 3 + extra_goodness (t, 2);
7368 }
7369 else if (match (t, i, 2))
7370 goodness = 3 + extra_goodness (t, 1);
7371
7372 if (goodness > best)
7373 {
7374 best = goodness;
7375 best_template[i][j][k] = t;
7376 }
7377 }
7378 }
7379
7380 #ifdef DEBUG_TEMPLATES
7381 /* For debugging changes to the best_template calculations. We don't care
7382 about combinations with invalid instructions, so start the loops at 1. */
7383 for (i = 1; i < IA64_NUM_TYPES; ++i)
7384 for (j = 1; j < IA64_NUM_TYPES; ++j)
7385 for (k = 1; k < IA64_NUM_TYPES; ++k)
7386 {
7387 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7388 'x', 'd' };
7389 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7390 type_letter[k],
7391 ia64_templ_desc[best_template[i][j][k]].name);
7392 }
7393 #endif
7394
7395 for (i = 0; i < NUM_SLOTS; ++i)
7396 md.slot[i].user_template = -1;
7397
7398 md.pseudo_hash = str_htab_create ();
7399 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7400 if (str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7401 pseudo_opcode + i, 0) != NULL)
7402 as_fatal (_("duplicate %s"), pseudo_opcode[i].name);
7403
7404 md.reg_hash = str_htab_create ();
7405 md.dynreg_hash = str_htab_create ();
7406 md.const_hash = str_htab_create ();
7407 md.entry_hash = str_htab_create ();
7408
7409 /* general registers: */
7410 declare_register_set ("r", 128, REG_GR);
7411 declare_register ("gp", REG_GR + 1);
7412 declare_register ("sp", REG_GR + 12);
7413 declare_register ("tp", REG_GR + 13);
7414 declare_register_set ("ret", 4, REG_GR + 8);
7415
7416 /* floating point registers: */
7417 declare_register_set ("f", 128, REG_FR);
7418 declare_register_set ("farg", 8, REG_FR + 8);
7419 declare_register_set ("fret", 8, REG_FR + 8);
7420
7421 /* branch registers: */
7422 declare_register_set ("b", 8, REG_BR);
7423 declare_register ("rp", REG_BR + 0);
7424
7425 /* predicate registers: */
7426 declare_register_set ("p", 64, REG_P);
7427 declare_register ("pr", REG_PR);
7428 declare_register ("pr.rot", REG_PR_ROT);
7429
7430 /* application registers: */
7431 declare_register_set ("ar", 128, REG_AR);
7432 for (i = 0; i < NELEMS (ar); ++i)
7433 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7434
7435 /* control registers: */
7436 declare_register_set ("cr", 128, REG_CR);
7437 for (i = 0; i < NELEMS (cr); ++i)
7438 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7439
7440 /* dahr registers: */
7441 declare_register_set ("dahr", 8, REG_DAHR);
7442
7443 declare_register ("ip", REG_IP);
7444 declare_register ("cfm", REG_CFM);
7445 declare_register ("psr", REG_PSR);
7446 declare_register ("psr.l", REG_PSR_L);
7447 declare_register ("psr.um", REG_PSR_UM);
7448
7449 for (i = 0; i < NELEMS (indirect_reg); ++i)
7450 {
7451 unsigned int regnum = indirect_reg[i].regnum;
7452
7453 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7454 }
7455
7456 /* pseudo-registers used to specify unwind info: */
7457 declare_register ("psp", REG_PSP);
7458
7459 for (i = 0; i < NELEMS (const_bits); ++i)
7460 if (str_hash_insert (md.const_hash, const_bits[i].name, const_bits + i, 0))
7461 as_fatal (_("duplicate %s"), const_bits[i].name);
7462
7463 /* Set the architecture and machine depending on defaults and command line
7464 options. */
7465 if (md.flags & EF_IA_64_ABI64)
7466 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7467 else
7468 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7469
7470 if (! ok)
7471 as_warn (_("Could not set architecture and machine"));
7472
7473 /* Set the pointer size and pointer shift size depending on md.flags */
7474
7475 if (md.flags & EF_IA_64_ABI64)
7476 {
7477 md.pointer_size = 8; /* pointers are 8 bytes */
7478 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7479 }
7480 else
7481 {
7482 md.pointer_size = 4; /* pointers are 4 bytes */
7483 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7484 }
7485
7486 md.mem_offset.hint = 0;
7487 md.path = 0;
7488 md.maxpaths = 0;
7489 md.entry_labels = NULL;
7490 }
7491
7492 /* Set the default options in md. Cannot do this in md_begin because
7493 that is called after md_parse_option which is where we set the
7494 options in md based on command line options. */
7495
7496 void
7497 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7498 {
7499 md.flags = MD_FLAGS_DEFAULT;
7500 #ifndef TE_VMS
7501 /* Don't turn on dependency checking for VMS; it doesn't work. */
7502 md.detect_dv = 1;
7503 #endif
7504 /* FIXME: We should change it to unwind_check_error someday. */
7505 md.unwind_check = unwind_check_warning;
7506 md.hint_b = hint_b_error;
7507 md.tune = itanium2;
7508 }
7509
7510 /* Return a string for the target object file format. */
7511
7512 const char *
7513 ia64_target_format (void)
7514 {
7515 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7516 {
7517 if (md.flags & EF_IA_64_BE)
7518 {
7519 if (md.flags & EF_IA_64_ABI64)
7520 #if defined(TE_AIX50)
7521 return "elf64-ia64-aix-big";
7522 #elif defined(TE_HPUX)
7523 return "elf64-ia64-hpux-big";
7524 #else
7525 return "elf64-ia64-big";
7526 #endif
7527 else
7528 #if defined(TE_AIX50)
7529 return "elf32-ia64-aix-big";
7530 #elif defined(TE_HPUX)
7531 return "elf32-ia64-hpux-big";
7532 #else
7533 return "elf32-ia64-big";
7534 #endif
7535 }
7536 else
7537 {
7538 if (md.flags & EF_IA_64_ABI64)
7539 #if defined (TE_AIX50)
7540 return "elf64-ia64-aix-little";
7541 #elif defined (TE_VMS)
7542 {
7543 md.flags |= EF_IA_64_ARCHVER_1;
7544 return "elf64-ia64-vms";
7545 }
7546 #else
7547 return "elf64-ia64-little";
7548 #endif
7549 else
7550 #ifdef TE_AIX50
7551 return "elf32-ia64-aix-little";
7552 #else
7553 return "elf32-ia64-little";
7554 #endif
7555 }
7556 }
7557 else
7558 return "unknown-format";
7559 }
7560
7561 void
7562 ia64_md_finish (void)
7563 {
7564 /* terminate insn group upon reaching end of file: */
7565 insn_group_break (1, 0, 0);
7566
7567 /* emits slots we haven't written yet: */
7568 ia64_flush_insns ();
7569
7570 bfd_set_private_flags (stdoutput, md.flags);
7571
7572 md.mem_offset.hint = 0;
7573 }
7574
7575 void
7576 ia64_start_line (void)
7577 {
7578 static int first;
7579
7580 if (!first) {
7581 /* Make sure we don't reference input_line_pointer[-1] when that's
7582 not valid. */
7583 first = 1;
7584 return;
7585 }
7586
7587 if (md.qp.X_op == O_register)
7588 as_bad (_("qualifying predicate not followed by instruction"));
7589 md.qp.X_op = O_absent;
7590
7591 if (ignore_input ())
7592 return;
7593
7594 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7595 {
7596 if (md.detect_dv && !md.explicit_mode)
7597 {
7598 static int warned;
7599
7600 if (!warned)
7601 {
7602 warned = 1;
7603 as_warn (_("Explicit stops are ignored in auto mode"));
7604 }
7605 }
7606 else
7607 insn_group_break (1, 0, 0);
7608 }
7609 else if (input_line_pointer[-1] == '{')
7610 {
7611 if (md.manual_bundling)
7612 as_warn (_("Found '{' when manual bundling is already turned on"));
7613 else
7614 CURR_SLOT.manual_bundling_on = 1;
7615 md.manual_bundling = 1;
7616
7617 /* Bundling is only acceptable in explicit mode
7618 or when in default automatic mode. */
7619 if (md.detect_dv && !md.explicit_mode)
7620 {
7621 if (!md.mode_explicitly_set
7622 && !md.default_explicit_mode)
7623 dot_dv_mode ('E');
7624 else
7625 as_warn (_("Found '{' after explicit switch to automatic mode"));
7626 }
7627 }
7628 else if (input_line_pointer[-1] == '}')
7629 {
7630 if (!md.manual_bundling)
7631 as_warn (_("Found '}' when manual bundling is off"));
7632 else
7633 PREV_SLOT.manual_bundling_off = 1;
7634 md.manual_bundling = 0;
7635
7636 /* switch back to automatic mode, if applicable */
7637 if (md.detect_dv
7638 && md.explicit_mode
7639 && !md.mode_explicitly_set
7640 && !md.default_explicit_mode)
7641 dot_dv_mode ('A');
7642 }
7643 }
7644
7645 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7646 labels. */
7647 static int defining_tag = 0;
7648
7649 int
7650 ia64_unrecognized_line (int ch)
7651 {
7652 switch (ch)
7653 {
7654 case '(':
7655 expression_and_evaluate (&md.qp);
7656 if (*input_line_pointer++ != ')')
7657 {
7658 as_bad (_("Expected ')'"));
7659 return 0;
7660 }
7661 if (md.qp.X_op != O_register)
7662 {
7663 as_bad (_("Qualifying predicate expected"));
7664 return 0;
7665 }
7666 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7667 {
7668 as_bad (_("Predicate register expected"));
7669 return 0;
7670 }
7671 return 1;
7672
7673 case '[':
7674 {
7675 char *s;
7676 char c;
7677 symbolS *tag;
7678 int temp;
7679
7680 if (md.qp.X_op == O_register)
7681 {
7682 as_bad (_("Tag must come before qualifying predicate."));
7683 return 0;
7684 }
7685
7686 /* This implements just enough of read_a_source_file in read.c to
7687 recognize labels. */
7688 if (is_name_beginner (*input_line_pointer))
7689 {
7690 c = get_symbol_name (&s);
7691 }
7692 else if (LOCAL_LABELS_FB
7693 && ISDIGIT (*input_line_pointer))
7694 {
7695 temp = 0;
7696 while (ISDIGIT (*input_line_pointer))
7697 temp = (temp * 10) + *input_line_pointer++ - '0';
7698 fb_label_instance_inc (temp);
7699 s = fb_label_name (temp, 0);
7700 c = *input_line_pointer;
7701 }
7702 else
7703 {
7704 s = NULL;
7705 c = '\0';
7706 }
7707 if (c != ':')
7708 {
7709 /* Put ':' back for error messages' sake. */
7710 *input_line_pointer++ = ':';
7711 as_bad (_("Expected ':'"));
7712 return 0;
7713 }
7714
7715 defining_tag = 1;
7716 tag = colon (s);
7717 defining_tag = 0;
7718 /* Put ':' back for error messages' sake. */
7719 *input_line_pointer++ = ':';
7720 if (*input_line_pointer++ != ']')
7721 {
7722 as_bad (_("Expected ']'"));
7723 return 0;
7724 }
7725 if (! tag)
7726 {
7727 as_bad (_("Tag name expected"));
7728 return 0;
7729 }
7730 return 1;
7731 }
7732
7733 default:
7734 break;
7735 }
7736
7737 /* Not a valid line. */
7738 return 0;
7739 }
7740
7741 void
7742 ia64_frob_label (struct symbol *sym)
7743 {
7744 struct label_fix *fix;
7745
7746 /* Tags need special handling since they are not bundle breaks like
7747 labels. */
7748 if (defining_tag)
7749 {
7750 fix = XOBNEW (&notes, struct label_fix);
7751 fix->sym = sym;
7752 fix->next = CURR_SLOT.tag_fixups;
7753 fix->dw2_mark_labels = false;
7754 CURR_SLOT.tag_fixups = fix;
7755
7756 return;
7757 }
7758
7759 if (bfd_section_flags (now_seg) & SEC_CODE)
7760 {
7761 md.last_text_seg = now_seg;
7762 md.last_text_subseg = now_subseg;
7763 fix = XOBNEW (&notes, struct label_fix);
7764 fix->sym = sym;
7765 fix->next = CURR_SLOT.label_fixups;
7766 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7767 CURR_SLOT.label_fixups = fix;
7768
7769 /* Keep track of how many code entry points we've seen. */
7770 if (md.path == md.maxpaths)
7771 {
7772 md.maxpaths += 20;
7773 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7774 md.maxpaths);
7775 }
7776 md.entry_labels[md.path++] = S_GET_NAME (sym);
7777 }
7778 }
7779
7780 #ifdef TE_HPUX
7781 /* The HP-UX linker will give unresolved symbol errors for symbols
7782 that are declared but unused. This routine removes declared,
7783 unused symbols from an object. */
7784 int
7785 ia64_frob_symbol (struct symbol *sym)
7786 {
7787 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7788 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7789 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7790 && ! S_IS_EXTERNAL (sym)))
7791 return 1;
7792 return 0;
7793 }
7794 #endif
7795
7796 void
7797 ia64_flush_pending_output (void)
7798 {
7799 if (!md.keep_pending_output
7800 && bfd_section_flags (now_seg) & SEC_CODE)
7801 {
7802 /* ??? This causes many unnecessary stop bits to be emitted.
7803 Unfortunately, it isn't clear if it is safe to remove this. */
7804 insn_group_break (1, 0, 0);
7805 ia64_flush_insns ();
7806 }
7807 }
7808
7809 /* Do ia64-specific expression optimization. All that's done here is
7810 to transform index expressions that are either due to the indexing
7811 of rotating registers or due to the indexing of indirect register
7812 sets. */
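/* Illustrative usage (with a made-up set name "a"): after ".rotr a[4]", an
   operand written as "a[2]" is folded here into the third register of that
   rotating set, while "cpuid[r5]" becomes an O_index expression naming the
   cpuid register file with r5 as the index register.  */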
7813 int
7814 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7815 {
7816 if (op != O_index)
7817 return 0;
7818 resolve_expression (l);
7819 if (l->X_op == O_register)
7820 {
7821 unsigned num_regs = l->X_add_number >> 16;
7822
7823 resolve_expression (r);
7824 if (num_regs)
7825 {
7826 /* Left side is a .rotX-allocated register. */
7827 if (r->X_op != O_constant)
7828 {
7829 as_bad (_("Rotating register index must be a non-negative constant"));
7830 r->X_add_number = 0;
7831 }
7832 else if ((valueT) r->X_add_number >= num_regs)
7833 {
7834 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7835 r->X_add_number = 0;
7836 }
7837 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7838 return 1;
7839 }
7840 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7841 {
7842 if (r->X_op != O_register
7843 || r->X_add_number < REG_GR
7844 || r->X_add_number > REG_GR + 127)
7845 {
7846 as_bad (_("Indirect register index must be a general register"));
7847 r->X_add_number = REG_GR;
7848 }
7849 l->X_op = O_index;
7850 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7851 l->X_add_number = r->X_add_number;
7852 return 1;
7853 }
7854 }
7855 as_bad (_("Index can only be applied to rotating or indirect registers"));
7856 /* Fall back to a register whose use has as few side effects as
7857 possible, to minimize subsequent error messages. */
7858 l->X_op = O_register;
7859 l->X_add_number = REG_GR + 3;
7860 return 1;
7861 }
7862
7863 int
7864 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7865 {
7866 struct const_desc *cdesc;
7867 struct dynreg *dr = 0;
7868 unsigned int idx;
7869 struct symbol *sym;
7870 char *end;
7871
7872 if (*name == '@')
7873 {
7874 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7875
7876 /* Find what relocation pseudo-function we're dealing with. */
7877 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7878 if (pseudo_func[idx].name
7879 && pseudo_func[idx].name[0] == name[1]
7880 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7881 {
7882 pseudo_type = pseudo_func[idx].type;
7883 break;
7884 }
7885 switch (pseudo_type)
7886 {
7887 case PSEUDO_FUNC_RELOC:
7888 end = input_line_pointer;
7889 if (*nextcharP != '(')
7890 {
7891 as_bad (_("Expected '('"));
7892 break;
7893 }
7894 /* Skip '('. */
7895 ++input_line_pointer;
7896 expression (e);
7897 if (*input_line_pointer != ')')
7898 {
7899 as_bad (_("Missing ')'"));
7900 goto done;
7901 }
7902 /* Skip ')'. */
7903 ++input_line_pointer;
7904 #ifdef TE_VMS
7905 if (idx == FUNC_SLOTCOUNT_RELOC)
7906 {
7907 /* @slotcount can accept any expression. Canonicalize. */
7908 e->X_add_symbol = make_expr_symbol (e);
7909 e->X_op = O_symbol;
7910 e->X_add_number = 0;
7911 }
7912 #endif
7913 if (e->X_op != O_symbol)
7914 {
7915 if (e->X_op != O_pseudo_fixup)
7916 {
7917 as_bad (_("Not a symbolic expression"));
7918 goto done;
7919 }
7920 if (idx != FUNC_LT_RELATIVE)
7921 {
7922 as_bad (_("Illegal combination of relocation functions"));
7923 goto done;
7924 }
7925 switch (S_GET_VALUE (e->X_op_symbol))
7926 {
7927 case FUNC_FPTR_RELATIVE:
7928 idx = FUNC_LT_FPTR_RELATIVE; break;
7929 case FUNC_DTP_MODULE:
7930 idx = FUNC_LT_DTP_MODULE; break;
7931 case FUNC_DTP_RELATIVE:
7932 idx = FUNC_LT_DTP_RELATIVE; break;
7933 case FUNC_TP_RELATIVE:
7934 idx = FUNC_LT_TP_RELATIVE; break;
7935 default:
7936 as_bad (_("Illegal combination of relocation functions"));
7937 goto done;
7938 }
7939 }
7940 /* Make sure gas doesn't get rid of local symbols that are used
7941 in relocs. */
7942 e->X_op = O_pseudo_fixup;
7943 e->X_op_symbol = pseudo_func[idx].u.sym;
7944 done:
7945 *nextcharP = *input_line_pointer;
7946 break;
7947
7948 case PSEUDO_FUNC_CONST:
7949 e->X_op = O_constant;
7950 e->X_add_number = pseudo_func[idx].u.ival;
7951 break;
7952
7953 case PSEUDO_FUNC_REG:
7954 e->X_op = O_register;
7955 e->X_add_number = pseudo_func[idx].u.ival;
7956 break;
7957
7958 default:
7959 return 0;
7960 }
7961 return 1;
7962 }
7963
7964 /* first see if NAME is a known register name: */
7965 sym = str_hash_find (md.reg_hash, name);
7966 if (sym)
7967 {
7968 e->X_op = O_register;
7969 e->X_add_number = S_GET_VALUE (sym);
7970 return 1;
7971 }
7972
7973 cdesc = str_hash_find (md.const_hash, name);
7974 if (cdesc)
7975 {
7976 e->X_op = O_constant;
7977 e->X_add_number = cdesc->value;
7978 return 1;
7979 }
7980
7981 /* check for inN, locN, or outN: */
7982 idx = 0;
7983 switch (name[0])
7984 {
7985 case 'i':
7986 if (name[1] == 'n' && ISDIGIT (name[2]))
7987 {
7988 dr = &md.in;
7989 idx = 2;
7990 }
7991 break;
7992
7993 case 'l':
7994 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7995 {
7996 dr = &md.loc;
7997 idx = 3;
7998 }
7999 break;
8000
8001 case 'o':
8002 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8003 {
8004 dr = &md.out;
8005 idx = 3;
8006 }
8007 break;
8008
8009 default:
8010 break;
8011 }
8012
8013 /* Ignore register numbers with leading zeroes, except zero itself. */
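  /* E.g. (illustrative): "in0" and "loc12" are parsed here, while "out07"
     has a leading zero and falls through to the lookup code below.  */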
8014 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8015 {
8016 unsigned long regnum;
8017
8018 /* The name is inN, locN, or outN; parse the register number. */
8019 regnum = strtoul (name + idx, &end, 10);
8020 if (end > name + idx && *end == '\0' && regnum < 96)
8021 {
8022 if (regnum >= dr->num_regs)
8023 {
8024 if (!dr->num_regs)
8025 as_bad (_("No current frame"));
8026 else
8027 as_bad (_("Register number out of range 0..%u"),
8028 dr->num_regs - 1);
8029 regnum = 0;
8030 }
8031 e->X_op = O_register;
8032 e->X_add_number = dr->base + regnum;
8033 return 1;
8034 }
8035 }
8036
8037 end = xstrdup (name);
8038 name = ia64_canonicalize_symbol_name (end);
8039 if ((dr = str_hash_find (md.dynreg_hash, name)))
8040 {
8041 /* We've got ourselves the name of a rotating register set.
8042 Store the base register number in the low 16 bits of
8043 X_add_number and the size of the register set in the top 16
8044 bits. */
8045 e->X_op = O_register;
8046 e->X_add_number = dr->base | (dr->num_regs << 16);
8047 free (end);
8048 return 1;
8049 }
8050 free (end);
8051 return 0;
8052 }
8053
8054 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8055
8056 char *
8057 ia64_canonicalize_symbol_name (char *name)
8058 {
8059 size_t len = strlen (name), full = len;
8060
8061 while (len > 0 && name[len - 1] == '#')
8062 --len;
8063 if (len <= 0)
8064 {
8065 if (full > 0)
8066 as_bad (_("Standalone `#' is illegal"));
8067 }
8068 else if (len < full - 1)
8069 as_warn (_("Redundant `#' suffix operators"));
8070 name[len] = '\0';
8071 return name;
8072 }
8073
8074 /* Return true if idesc is a conditional branch instruction. This excludes
8075 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8076 because they always read/write resources regardless of the value of the
8077 qualifying predicate. br.ia must always use p0, and hence is always
8078 taken. Thus this function returns true for branches which can fall
8079 through, and which use no resources if they do fall through. */
8080
8081 static int
8082 is_conditional_branch (struct ia64_opcode *idesc)
8083 {
8084 /* br is a conditional branch. Everything that starts with br. except
8085 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8086 Everything that starts with brl is a conditional branch. */
8087 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8088 && (idesc->name[2] == '\0'
8089 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8090 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8091 || idesc->name[2] == 'l'
8092 /* br.cond, br.call, br.clr */
8093 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8094 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8095 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8096 }
8097
8098 /* Return whether the given opcode is a taken branch. If there's any doubt,
8099 returns zero. */
8100
8101 static int
8102 is_taken_branch (struct ia64_opcode *idesc)
8103 {
8104 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8105 || startswith (idesc->name, "br.ia"));
8106 }
8107
8108 /* Return whether the given opcode is an interruption or rfi. If there's any
8109 doubt, returns zero. */
8110
8111 static int
8112 is_interruption_or_rfi (struct ia64_opcode *idesc)
8113 {
8114 if (strcmp (idesc->name, "rfi") == 0)
8115 return 1;
8116 return 0;
8117 }
8118
8119 /* Returns the index of the given dependency in the opcode's list of chks, or
8120 -1 if there is no dependency. */
8121
8122 static int
8123 depends_on (int depind, struct ia64_opcode *idesc)
8124 {
8125 int i;
8126 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8127 for (i = 0; i < dep->nchks; i++)
8128 {
8129 if (depind == DEP (dep->chks[i]))
8130 return i;
8131 }
8132 return -1;
8133 }
8134
8135 /* Determine a set of specific resources used for a particular resource
8136 class. Returns the number of specific resources identified. For those
8137 cases which are not determinable statically, the resource returned is
8138 marked nonspecific.
8139
8140 Meanings of value in 'NOTE':
8141 1) only read/write when the register number is explicitly encoded in the
8142 insn.
8143 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8144 accesses CFM when qualifying predicate is in the rotating region.
8145 3) general register value is used to specify an indirect register; not
8146 determinable statically.
8147 4) only read the given resource when bits 7:0 of the indirect index
8148 register value does not match the register number of the resource; not
8149 determinable statically.
8150 5) all rules are implementation specific.
8151 6) only when both the index specified by the reader and the index specified
8152 by the writer have the same value in bits 63:61; not determinable
8153 statically.
8154 7) only access the specified resource when the corresponding mask bit is
8155 set
8156 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8157 only read when these insns reference FR2-31
8158 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8159 written when these insns write FR32-127
8160 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8161 instruction
8162 11) The target predicates are written independently of PR[qp], but source
8163 registers are only read if PR[qp] is true. Since the state of PR[qp]
8164 cannot statically be determined, all source registers are marked used.
8165 12) This insn only reads the specified predicate register when that
8166 register is the PR[qp].
8167 13) This reference to ld-c only applies to the GR whose value is loaded
8168 with data returned from memory, not the post-incremented address register.
8169 14) The RSE resource includes the implementation-specific RSE internal
8170 state resources. At least one (and possibly more) of these resources are
8171 read by each instruction listed in IC:rse-readers. At least one (and
8172 possibly more) of these resources are written by each insn listed in
8173 IC:rse-writers.
8174 15+16) Represents reserved instructions, which the assembler does not
8175 generate.
8176 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8177 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8178
8179 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8180 this code; there are no dependency violations based on memory access.
8181 */
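/* Illustrative walk-through of the mechanism below, using the AR[K] class:
   with note 1, an insn such as "mov r2 = ar.k3" encodes the AR number
   explicitly, so only index 3 is recorded; with note 0 the whole k0-k7
   range is marked; any other note value ends up in the UNHANDLED warning.  */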
8182
8183 #define MAX_SPECS 256
8184 #define DV_CHK 1
8185 #define DV_REG 0
8186
8187 static int
8188 specify_resource (const struct ia64_dependency *dep,
8189 struct ia64_opcode *idesc,
8190 /* is this a DV chk or a DV reg? */
8191 int type,
8192 /* returned specific resources */
8193 struct rsrc specs[MAX_SPECS],
8194 /* resource note for this insn's usage */
8195 int note,
8196 /* which execution path to examine */
8197 int path)
8198 {
8199 int count = 0;
8200 int i;
8201 int rsrc_write = 0;
8202 struct rsrc tmpl;
8203
8204 if (dep->mode == IA64_DV_WAW
8205 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8206 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8207 rsrc_write = 1;
8208
8209 /* template for any resources we identify */
8210 tmpl.dependency = dep;
8211 tmpl.note = note;
8212 tmpl.insn_srlz = tmpl.data_srlz = 0;
8213 tmpl.qp_regno = CURR_SLOT.qp_regno;
8214 tmpl.link_to_qp_branch = 1;
8215 tmpl.mem_offset.hint = 0;
8216 tmpl.mem_offset.offset = 0;
8217 tmpl.mem_offset.base = 0;
8218 tmpl.specific = 1;
8219 tmpl.index = -1;
8220 tmpl.cmp_type = CMP_NONE;
8221 tmpl.depind = 0;
8222 tmpl.file = NULL;
8223 tmpl.line = 0;
8224 tmpl.path = 0;
8225
8226 #define UNHANDLED \
8227 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8228 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8229 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8230
8231 /* we don't need to track these */
8232 if (dep->semantics == IA64_DVS_NONE)
8233 return 0;
8234
8235 switch (dep->specifier)
8236 {
8237 case IA64_RS_AR_K:
8238 if (note == 1)
8239 {
8240 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8241 {
8242 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8243 if (regno >= 0 && regno <= 7)
8244 {
8245 specs[count] = tmpl;
8246 specs[count++].index = regno;
8247 }
8248 }
8249 }
8250 else if (note == 0)
8251 {
8252 for (i = 0; i < 8; i++)
8253 {
8254 specs[count] = tmpl;
8255 specs[count++].index = i;
8256 }
8257 }
8258 else
8259 {
8260 UNHANDLED;
8261 }
8262 break;
8263
8264 case IA64_RS_AR_UNAT:
8265 /* This is a mov =AR or mov AR= instruction. */
8266 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8267 {
8268 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8269 if (regno == AR_UNAT)
8270 {
8271 specs[count++] = tmpl;
8272 }
8273 }
8274 else
8275 {
8276 /* This is a spill/fill, or other instruction that modifies the
8277 unat register. */
8278
8279 /* Unless we can determine the specific bits used, mark the whole
8280 thing; bits 8:3 of the memory address indicate the bit used in
8281 UNAT. The .mem.offset hint may be used to eliminate a small
8282 subset of conflicts. */
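/* For instance, a ".mem.offset 16, 0" hint ahead of an st8.spill corresponds
   to UNAT bit (16 >> 3) & 0x3F == 2 below; resources_match later compares the
   recorded offsets (for matching bases) to rule out conflicts.  */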
8283 specs[count] = tmpl;
8284 if (md.mem_offset.hint)
8285 {
8286 if (md.debug_dv)
8287 fprintf (stderr, " Using hint for spill/fill\n");
8288 /* The index isn't actually used, just set it to something
8289 approximating the bit index. */
8290 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8291 specs[count].mem_offset.hint = 1;
8292 specs[count].mem_offset.offset = md.mem_offset.offset;
8293 specs[count++].mem_offset.base = md.mem_offset.base;
8294 }
8295 else
8296 {
8297 specs[count++].specific = 0;
8298 }
8299 }
8300 break;
8301
8302 case IA64_RS_AR:
8303 if (note == 1)
8304 {
8305 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8306 {
8307 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8308 if ((regno >= 8 && regno <= 15)
8309 || (regno >= 20 && regno <= 23)
8310 || (regno >= 31 && regno <= 39)
8311 || (regno >= 41 && regno <= 47)
8312 || (regno >= 67 && regno <= 111))
8313 {
8314 specs[count] = tmpl;
8315 specs[count++].index = regno;
8316 }
8317 }
8318 }
8319 else
8320 {
8321 UNHANDLED;
8322 }
8323 break;
8324
8325 case IA64_RS_ARb:
8326 if (note == 1)
8327 {
8328 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8329 {
8330 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8331 if ((regno >= 48 && regno <= 63)
8332 || (regno >= 112 && regno <= 127))
8333 {
8334 specs[count] = tmpl;
8335 specs[count++].index = regno;
8336 }
8337 }
8338 }
8339 else if (note == 0)
8340 {
8341 for (i = 48; i < 64; i++)
8342 {
8343 specs[count] = tmpl;
8344 specs[count++].index = i;
8345 }
8346 for (i = 112; i < 128; i++)
8347 {
8348 specs[count] = tmpl;
8349 specs[count++].index = i;
8350 }
8351 }
8352 else
8353 {
8354 UNHANDLED;
8355 }
8356 break;
8357
8358 case IA64_RS_BR:
8359 if (note != 1)
8360 {
8361 UNHANDLED;
8362 }
8363 else
8364 {
8365 if (rsrc_write)
8366 {
8367 for (i = 0; i < idesc->num_outputs; i++)
8368 if (idesc->operands[i] == IA64_OPND_B1
8369 || idesc->operands[i] == IA64_OPND_B2)
8370 {
8371 specs[count] = tmpl;
8372 specs[count++].index =
8373 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8374 }
8375 }
8376 else
8377 {
8378 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8379 if (idesc->operands[i] == IA64_OPND_B1
8380 || idesc->operands[i] == IA64_OPND_B2)
8381 {
8382 specs[count] = tmpl;
8383 specs[count++].index =
8384 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8385 }
8386 }
8387 }
8388 break;
8389
8390 case IA64_RS_CPUID: /* four or more registers */
8391 if (note == 3)
8392 {
8393 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8394 {
8395 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8396 if (regno >= 0 && regno < NELEMS (gr_values)
8397 && KNOWN (regno))
8398 {
8399 specs[count] = tmpl;
8400 specs[count++].index = gr_values[regno].value & 0xFF;
8401 }
8402 else
8403 {
8404 specs[count] = tmpl;
8405 specs[count++].specific = 0;
8406 }
8407 }
8408 }
8409 else
8410 {
8411 UNHANDLED;
8412 }
8413 break;
8414
8415 case IA64_RS_DBR: /* four or more registers */
8416 if (note == 3)
8417 {
8418 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8419 {
8420 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8421 if (regno >= 0 && regno < NELEMS (gr_values)
8422 && KNOWN (regno))
8423 {
8424 specs[count] = tmpl;
8425 specs[count++].index = gr_values[regno].value & 0xFF;
8426 }
8427 else
8428 {
8429 specs[count] = tmpl;
8430 specs[count++].specific = 0;
8431 }
8432 }
8433 }
8434 else if (note == 0 && !rsrc_write)
8435 {
8436 specs[count] = tmpl;
8437 specs[count++].specific = 0;
8438 }
8439 else
8440 {
8441 UNHANDLED;
8442 }
8443 break;
8444
8445 case IA64_RS_IBR: /* four or more registers */
8446 if (note == 3)
8447 {
8448 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8449 {
8450 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8451 if (regno >= 0 && regno < NELEMS (gr_values)
8452 && KNOWN (regno))
8453 {
8454 specs[count] = tmpl;
8455 specs[count++].index = gr_values[regno].value & 0xFF;
8456 }
8457 else
8458 {
8459 specs[count] = tmpl;
8460 specs[count++].specific = 0;
8461 }
8462 }
8463 }
8464 else
8465 {
8466 UNHANDLED;
8467 }
8468 break;
8469
8470 case IA64_RS_MSR:
8471 if (note == 5)
8472 {
8473 /* These are implementation specific. Force all references to
8474 conflict with all other references. */
8475 specs[count] = tmpl;
8476 specs[count++].specific = 0;
8477 }
8478 else
8479 {
8480 UNHANDLED;
8481 }
8482 break;
8483
8484 case IA64_RS_PKR: /* 16 or more registers */
8485 if (note == 3 || note == 4)
8486 {
8487 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8488 {
8489 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8490 if (regno >= 0 && regno < NELEMS (gr_values)
8491 && KNOWN (regno))
8492 {
8493 if (note == 3)
8494 {
8495 specs[count] = tmpl;
8496 specs[count++].index = gr_values[regno].value & 0xFF;
8497 }
8498 else
8499 for (i = 0; i < NELEMS (gr_values); i++)
8500 {
8501 /* Uses all registers *except* the one in R3. */
8502 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8503 {
8504 specs[count] = tmpl;
8505 specs[count++].index = i;
8506 }
8507 }
8508 }
8509 else
8510 {
8511 specs[count] = tmpl;
8512 specs[count++].specific = 0;
8513 }
8514 }
8515 }
8516 else if (note == 0)
8517 {
8518 /* probe et al. */
8519 specs[count] = tmpl;
8520 specs[count++].specific = 0;
8521 }
8522 break;
8523
8524 case IA64_RS_PMC: /* four or more registers */
8525 if (note == 3)
8526 {
8527 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8528 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8529
8530 {
8531 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8532 ? 1 : !rsrc_write);
8533 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8534 if (regno >= 0 && regno < NELEMS (gr_values)
8535 && KNOWN (regno))
8536 {
8537 specs[count] = tmpl;
8538 specs[count++].index = gr_values[regno].value & 0xFF;
8539 }
8540 else
8541 {
8542 specs[count] = tmpl;
8543 specs[count++].specific = 0;
8544 }
8545 }
8546 }
8547 else
8548 {
8549 UNHANDLED;
8550 }
8551 break;
8552
8553 case IA64_RS_PMD: /* four or more registers */
8554 if (note == 3)
8555 {
8556 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8557 {
8558 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8559 if (regno >= 0 && regno < NELEMS (gr_values)
8560 && KNOWN (regno))
8561 {
8562 specs[count] = tmpl;
8563 specs[count++].index = gr_values[regno].value & 0xFF;
8564 }
8565 else
8566 {
8567 specs[count] = tmpl;
8568 specs[count++].specific = 0;
8569 }
8570 }
8571 }
8572 else
8573 {
8574 UNHANDLED;
8575 }
8576 break;
8577
8578 case IA64_RS_RR: /* eight registers */
8579 if (note == 6)
8580 {
8581 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8582 {
8583 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8584 if (regno >= 0 && regno < NELEMS (gr_values)
8585 && KNOWN (regno))
8586 {
8587 specs[count] = tmpl;
8588 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8589 }
8590 else
8591 {
8592 specs[count] = tmpl;
8593 specs[count++].specific = 0;
8594 }
8595 }
8596 }
8597 else if (note == 0 && !rsrc_write)
8598 {
8599 specs[count] = tmpl;
8600 specs[count++].specific = 0;
8601 }
8602 else
8603 {
8604 UNHANDLED;
8605 }
8606 break;
8607
8608 case IA64_RS_CR_IRR:
8609 if (note == 0)
8610 {
8611 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8612 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8613 if (rsrc_write
8614 && idesc->operands[1] == IA64_OPND_CR3
8615 && regno == CR_IVR)
8616 {
8617 for (i = 0; i < 4; i++)
8618 {
8619 specs[count] = tmpl;
8620 specs[count++].index = CR_IRR0 + i;
8621 }
8622 }
8623 }
8624 else if (note == 1)
8625 {
8626 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8627 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8628 && regno >= CR_IRR0
8629 && regno <= CR_IRR3)
8630 {
8631 specs[count] = tmpl;
8632 specs[count++].index = regno;
8633 }
8634 }
8635 else
8636 {
8637 UNHANDLED;
8638 }
8639 break;
8640
8641 case IA64_RS_CR_IIB:
8642 if (note != 0)
8643 {
8644 UNHANDLED;
8645 }
8646 else
8647 {
8648 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8649 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8650 && (regno == CR_IIB0 || regno == CR_IIB1))
8651 {
8652 specs[count] = tmpl;
8653 specs[count++].index = regno;
8654 }
8655 }
8656 break;
8657
8658 case IA64_RS_CR_LRR:
8659 if (note != 1)
8660 {
8661 UNHANDLED;
8662 }
8663 else
8664 {
8665 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8666 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8667 && (regno == CR_LRR0 || regno == CR_LRR1))
8668 {
8669 specs[count] = tmpl;
8670 specs[count++].index = regno;
8671 }
8672 }
8673 break;
8674
8675 case IA64_RS_CR:
8676 if (note == 1)
8677 {
8678 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8679 {
8680 specs[count] = tmpl;
8681 specs[count++].index =
8682 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8683 }
8684 }
8685 else
8686 {
8687 UNHANDLED;
8688 }
8689 break;
8690
8691 case IA64_RS_DAHR:
8692 if (note == 0)
8693 {
8694 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8695 {
8696 specs[count] = tmpl;
8697 specs[count++].index =
8698 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8699 }
8700 }
8701 else
8702 {
8703 UNHANDLED;
8704 }
8705 break;
8706
8707 case IA64_RS_FR:
8708 case IA64_RS_FRb:
8709 if (note != 1)
8710 {
8711 UNHANDLED;
8712 }
8713 else if (rsrc_write)
8714 {
8715 if (dep->specifier == IA64_RS_FRb
8716 && idesc->operands[0] == IA64_OPND_F1)
8717 {
8718 specs[count] = tmpl;
8719 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8720 }
8721 }
8722 else
8723 {
8724 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8725 {
8726 if (idesc->operands[i] == IA64_OPND_F2
8727 || idesc->operands[i] == IA64_OPND_F3
8728 || idesc->operands[i] == IA64_OPND_F4)
8729 {
8730 specs[count] = tmpl;
8731 specs[count++].index =
8732 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8733 }
8734 }
8735 }
8736 break;
8737
8738 case IA64_RS_GR:
8739 if (note == 13)
8740 {
8741 /* This reference applies only to the GR whose value is loaded with
8742 data returned from memory. */
8743 specs[count] = tmpl;
8744 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8745 }
8746 else if (note == 1)
8747 {
8748 if (rsrc_write)
8749 {
8750 for (i = 0; i < idesc->num_outputs; i++)
8751 if (idesc->operands[i] == IA64_OPND_R1
8752 || idesc->operands[i] == IA64_OPND_R2
8753 || idesc->operands[i] == IA64_OPND_R3)
8754 {
8755 specs[count] = tmpl;
8756 specs[count++].index =
8757 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8758 }
8759 if (idesc->flags & IA64_OPCODE_POSTINC)
8760 for (i = 0; i < NELEMS (idesc->operands); i++)
8761 if (idesc->operands[i] == IA64_OPND_MR3)
8762 {
8763 specs[count] = tmpl;
8764 specs[count++].index =
8765 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8766 }
8767 }
8768 else
8769 {
8770 /* Look for anything that reads a GR. */
8771 for (i = 0; i < NELEMS (idesc->operands); i++)
8772 {
8773 if (idesc->operands[i] == IA64_OPND_MR3
8774 || idesc->operands[i] == IA64_OPND_CPUID_R3
8775 || idesc->operands[i] == IA64_OPND_DBR_R3
8776 || idesc->operands[i] == IA64_OPND_IBR_R3
8777 || idesc->operands[i] == IA64_OPND_MSR_R3
8778 || idesc->operands[i] == IA64_OPND_PKR_R3
8779 || idesc->operands[i] == IA64_OPND_PMC_R3
8780 || idesc->operands[i] == IA64_OPND_PMD_R3
8781 || idesc->operands[i] == IA64_OPND_DAHR_R3
8782 || idesc->operands[i] == IA64_OPND_RR_R3
8783 || ((i >= idesc->num_outputs)
8784 && (idesc->operands[i] == IA64_OPND_R1
8785 || idesc->operands[i] == IA64_OPND_R2
8786 || idesc->operands[i] == IA64_OPND_R3
8787 /* addl source register. */
8788 || idesc->operands[i] == IA64_OPND_R3_2)))
8789 {
8790 specs[count] = tmpl;
8791 specs[count++].index =
8792 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8793 }
8794 }
8795 }
8796 }
8797 else
8798 {
8799 UNHANDLED;
8800 }
8801 break;
8802
8803 /* This is the same as IA64_RS_PRr, except that the register range is
8804 from 1 - 15, and there are no rotating register reads/writes here. */
8805 case IA64_RS_PR:
8806 if (note == 0)
8807 {
8808 for (i = 1; i < 16; i++)
8809 {
8810 specs[count] = tmpl;
8811 specs[count++].index = i;
8812 }
8813 }
8814 else if (note == 7)
8815 {
8816 valueT mask = 0;
8817 /* Mark only those registers indicated by the mask. */
8818 if (rsrc_write)
8819 {
8820 mask = CURR_SLOT.opnd[2].X_add_number;
8821 for (i = 1; i < 16; i++)
8822 if (mask & ((valueT) 1 << i))
8823 {
8824 specs[count] = tmpl;
8825 specs[count++].index = i;
8826 }
8827 }
8828 else
8829 {
8830 UNHANDLED;
8831 }
8832 }
8833 else if (note == 11) /* note 11 implies note 1 as well */
8834 {
8835 if (rsrc_write)
8836 {
8837 for (i = 0; i < idesc->num_outputs; i++)
8838 {
8839 if (idesc->operands[i] == IA64_OPND_P1
8840 || idesc->operands[i] == IA64_OPND_P2)
8841 {
8842 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8843 if (regno >= 1 && regno < 16)
8844 {
8845 specs[count] = tmpl;
8846 specs[count++].index = regno;
8847 }
8848 }
8849 }
8850 }
8851 else
8852 {
8853 UNHANDLED;
8854 }
8855 }
8856 else if (note == 12)
8857 {
8858 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8859 {
8860 specs[count] = tmpl;
8861 specs[count++].index = CURR_SLOT.qp_regno;
8862 }
8863 }
8864 else if (note == 1)
8865 {
8866 if (rsrc_write)
8867 {
8868 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8869 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8870 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8871 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8872
8873 if ((idesc->operands[0] == IA64_OPND_P1
8874 || idesc->operands[0] == IA64_OPND_P2)
8875 && p1 >= 1 && p1 < 16)
8876 {
8877 specs[count] = tmpl;
8878 specs[count].cmp_type =
8879 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8880 specs[count++].index = p1;
8881 }
8882 if ((idesc->operands[1] == IA64_OPND_P1
8883 || idesc->operands[1] == IA64_OPND_P2)
8884 && p2 >= 1 && p2 < 16)
8885 {
8886 specs[count] = tmpl;
8887 specs[count].cmp_type =
8888 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8889 specs[count++].index = p2;
8890 }
8891 }
8892 else
8893 {
8894 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8895 {
8896 specs[count] = tmpl;
8897 specs[count++].index = CURR_SLOT.qp_regno;
8898 }
8899 if (idesc->operands[1] == IA64_OPND_PR)
8900 {
8901 for (i = 1; i < 16; i++)
8902 {
8903 specs[count] = tmpl;
8904 specs[count++].index = i;
8905 }
8906 }
8907 }
8908 }
8909 else
8910 {
8911 UNHANDLED;
8912 }
8913 break;
8914
8915 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8916 simplified cases of this. */
8917 case IA64_RS_PRr:
8918 if (note == 0)
8919 {
8920 for (i = 16; i < 63; i++)
8921 {
8922 specs[count] = tmpl;
8923 specs[count++].index = i;
8924 }
8925 }
8926 else if (note == 7)
8927 {
8928 valueT mask = 0;
8929 /* Mark only those registers indicated by the mask. */
8930 if (rsrc_write
8931 && idesc->operands[0] == IA64_OPND_PR)
8932 {
8933 mask = CURR_SLOT.opnd[2].X_add_number;
8934 if (mask & ((valueT) 1 << 16))
8935 for (i = 16; i < 63; i++)
8936 {
8937 specs[count] = tmpl;
8938 specs[count++].index = i;
8939 }
8940 }
8941 else if (rsrc_write
8942 && idesc->operands[0] == IA64_OPND_PR_ROT)
8943 {
8944 for (i = 16; i < 63; i++)
8945 {
8946 specs[count] = tmpl;
8947 specs[count++].index = i;
8948 }
8949 }
8950 else
8951 {
8952 UNHANDLED;
8953 }
8954 }
8955 else if (note == 11) /* note 11 implies note 1 as well */
8956 {
8957 if (rsrc_write)
8958 {
8959 for (i = 0; i < idesc->num_outputs; i++)
8960 {
8961 if (idesc->operands[i] == IA64_OPND_P1
8962 || idesc->operands[i] == IA64_OPND_P2)
8963 {
8964 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8965 if (regno >= 16 && regno < 63)
8966 {
8967 specs[count] = tmpl;
8968 specs[count++].index = regno;
8969 }
8970 }
8971 }
8972 }
8973 else
8974 {
8975 UNHANDLED;
8976 }
8977 }
8978 else if (note == 12)
8979 {
8980 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8981 {
8982 specs[count] = tmpl;
8983 specs[count++].index = CURR_SLOT.qp_regno;
8984 }
8985 }
8986 else if (note == 1)
8987 {
8988 if (rsrc_write)
8989 {
8990 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8991 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8992 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8993 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8994
8995 if ((idesc->operands[0] == IA64_OPND_P1
8996 || idesc->operands[0] == IA64_OPND_P2)
8997 && p1 >= 16 && p1 < 63)
8998 {
8999 specs[count] = tmpl;
9000 specs[count].cmp_type =
9001 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9002 specs[count++].index = p1;
9003 }
9004 if ((idesc->operands[1] == IA64_OPND_P1
9005 || idesc->operands[1] == IA64_OPND_P2)
9006 && p2 >= 16 && p2 < 63)
9007 {
9008 specs[count] = tmpl;
9009 specs[count].cmp_type =
9010 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9011 specs[count++].index = p2;
9012 }
9013 }
9014 else
9015 {
9016 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9017 {
9018 specs[count] = tmpl;
9019 specs[count++].index = CURR_SLOT.qp_regno;
9020 }
9021 if (idesc->operands[1] == IA64_OPND_PR)
9022 {
9023 for (i = 16; i < 63; i++)
9024 {
9025 specs[count] = tmpl;
9026 specs[count++].index = i;
9027 }
9028 }
9029 }
9030 }
9031 else
9032 {
9033 UNHANDLED;
9034 }
9035 break;
9036
9037 case IA64_RS_PSR:
9038 /* Verify that the instruction is using the PSR bit indicated in
9039 dep->regindex. */
9040 if (note == 0)
9041 {
9042 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9043 {
9044 if (dep->regindex < 6)
9045 {
9046 specs[count++] = tmpl;
9047 }
9048 }
9049 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9050 {
9051 if (dep->regindex < 32
9052 || dep->regindex == 35
9053 || dep->regindex == 36
9054 || (!rsrc_write && dep->regindex == PSR_CPL))
9055 {
9056 specs[count++] = tmpl;
9057 }
9058 }
9059 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9060 {
9061 if (dep->regindex < 32
9062 || dep->regindex == 35
9063 || dep->regindex == 36
9064 || (rsrc_write && dep->regindex == PSR_CPL))
9065 {
9066 specs[count++] = tmpl;
9067 }
9068 }
9069 else
9070 {
9071 /* Several PSR bits have very specific dependencies. */
9072 switch (dep->regindex)
9073 {
9074 default:
9075 specs[count++] = tmpl;
9076 break;
9077 case PSR_IC:
9078 if (rsrc_write)
9079 {
9080 specs[count++] = tmpl;
9081 }
9082 else
9083 {
9084 /* Only certain CR accesses use PSR.ic */
9085 if (idesc->operands[0] == IA64_OPND_CR3
9086 || idesc->operands[1] == IA64_OPND_CR3)
9087 {
9088 int reg_index =
9089 ((idesc->operands[0] == IA64_OPND_CR3)
9090 ? 0 : 1);
9091 int regno =
9092 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9093
9094 switch (regno)
9095 {
9096 default:
9097 break;
9098 case CR_ITIR:
9099 case CR_IFS:
9100 case CR_IIM:
9101 case CR_IIP:
9102 case CR_IPSR:
9103 case CR_ISR:
9104 case CR_IFA:
9105 case CR_IHA:
9106 case CR_IIB0:
9107 case CR_IIB1:
9108 case CR_IIPA:
9109 specs[count++] = tmpl;
9110 break;
9111 }
9112 }
9113 }
9114 break;
9115 case PSR_CPL:
9116 if (rsrc_write)
9117 {
9118 specs[count++] = tmpl;
9119 }
9120 else
9121 {
9122 /* Only some AR accesses use cpl */
9123 if (idesc->operands[0] == IA64_OPND_AR3
9124 || idesc->operands[1] == IA64_OPND_AR3)
9125 {
9126 int reg_index =
9127 ((idesc->operands[0] == IA64_OPND_AR3)
9128 ? 0 : 1);
9129 int regno =
9130 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9131
9132 if (regno == AR_ITC
9133 || regno == AR_RUC
9134 || (reg_index == 0
9135 && (regno == AR_RSC
9136 || (regno >= AR_K0
9137 && regno <= AR_K7))))
9138 {
9139 specs[count++] = tmpl;
9140 }
9141 }
9142 else
9143 {
9144 specs[count++] = tmpl;
9145 }
9146 break;
9147 }
9148 }
9149 }
9150 }
9151 else if (note == 7)
9152 {
9153 valueT mask = 0;
9154 if (idesc->operands[0] == IA64_OPND_IMMU24)
9155 {
9156 mask = CURR_SLOT.opnd[0].X_add_number;
9157 }
9158 else
9159 {
9160 UNHANDLED;
9161 }
9162 if (mask & ((valueT) 1 << dep->regindex))
9163 {
9164 specs[count++] = tmpl;
9165 }
9166 }
9167 else if (note == 8)
9168 {
9169 int min = dep->regindex == PSR_DFL ? 2 : 32;
9170 int max = dep->regindex == PSR_DFL ? 31 : 127;
9171 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9172 for (i = 0; i < NELEMS (idesc->operands); i++)
9173 {
9174 if (idesc->operands[i] == IA64_OPND_F1
9175 || idesc->operands[i] == IA64_OPND_F2
9176 || idesc->operands[i] == IA64_OPND_F3
9177 || idesc->operands[i] == IA64_OPND_F4)
9178 {
9179 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9180 if (reg >= min && reg <= max)
9181 {
9182 specs[count++] = tmpl;
9183 }
9184 }
9185 }
9186 }
9187 else if (note == 9)
9188 {
9189 int min = dep->regindex == PSR_MFL ? 2 : 32;
9190 int max = dep->regindex == PSR_MFL ? 31 : 127;
9191 /* mfh is read on writes to FR32-127; mfl is read on writes to
9192 FR2-31 */
9193 for (i = 0; i < idesc->num_outputs; i++)
9194 {
9195 if (idesc->operands[i] == IA64_OPND_F1)
9196 {
9197 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9198 if (reg >= min && reg <= max)
9199 {
9200 specs[count++] = tmpl;
9201 }
9202 }
9203 }
9204 }
9205 else if (note == 10)
9206 {
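/* Note 10: this PSR bit is involved only when one of the banked
   registers r16-r31 is referenced by the insn.  */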
9207 for (i = 0; i < NELEMS (idesc->operands); i++)
9208 {
9209 if (idesc->operands[i] == IA64_OPND_R1
9210 || idesc->operands[i] == IA64_OPND_R2
9211 || idesc->operands[i] == IA64_OPND_R3)
9212 {
9213 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9214 if (regno >= 16 && regno <= 31)
9215 {
9216 specs[count++] = tmpl;
9217 }
9218 }
9219 }
9220 }
9221 else
9222 {
9223 UNHANDLED;
9224 }
9225 break;
9226
9227 case IA64_RS_AR_FPSR:
9228 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9229 {
9230 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9231 if (regno == AR_FPSR)
9232 {
9233 specs[count++] = tmpl;
9234 }
9235 }
9236 else
9237 {
9238 specs[count++] = tmpl;
9239 }
9240 break;
9241
9242 case IA64_RS_ARX:
9243 /* Handle all AR[REG] resources */
9244 if (note == 0 || note == 1)
9245 {
9246 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9247 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9248 && regno == dep->regindex)
9249 {
9250 specs[count++] = tmpl;
9251 }
9252 /* other AR[REG] resources may be affected by AR accesses */
9253 else if (idesc->operands[0] == IA64_OPND_AR3)
9254 {
9255 /* AR[] writes */
9256 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9257 switch (dep->regindex)
9258 {
9259 default:
9260 break;
9261 case AR_BSP:
9262 case AR_RNAT:
9263 if (regno == AR_BSPSTORE)
9264 {
9265 specs[count++] = tmpl;
9266 }
9267 /* Fall through. */
9268 case AR_RSC:
9269 if (!rsrc_write &&
9270 (regno == AR_BSPSTORE
9271 || regno == AR_RNAT))
9272 {
9273 specs[count++] = tmpl;
9274 }
9275 break;
9276 }
9277 }
9278 else if (idesc->operands[1] == IA64_OPND_AR3)
9279 {
9280 /* AR[] reads */
9281 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9282 switch (dep->regindex)
9283 {
9284 default:
9285 break;
9286 case AR_RSC:
9287 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9288 {
9289 specs[count++] = tmpl;
9290 }
9291 break;
9292 }
9293 }
9294 else
9295 {
9296 specs[count++] = tmpl;
9297 }
9298 }
9299 else
9300 {
9301 UNHANDLED;
9302 }
9303 break;
9304
9305 case IA64_RS_CRX:
9306 /* Handle all CR[REG] resources.
9307 ??? FIXME: Rule 17 isn't really handled correctly. */
9308 if (note == 0 || note == 1 || note == 17)
9309 {
9310 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9311 {
9312 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9313 if (regno == dep->regindex)
9314 {
9315 specs[count++] = tmpl;
9316 }
9317 else if (!rsrc_write)
9318 {
9319 /* Reads from CR[IVR] affect other resources. */
9320 if (regno == CR_IVR)
9321 {
9322 if ((dep->regindex >= CR_IRR0
9323 && dep->regindex <= CR_IRR3)
9324 || dep->regindex == CR_TPR)
9325 {
9326 specs[count++] = tmpl;
9327 }
9328 }
9329 }
9330 }
9331 else
9332 {
9333 specs[count++] = tmpl;
9334 }
9335 }
9336 else
9337 {
9338 UNHANDLED;
9339 }
9340 break;
9341
9342 case IA64_RS_INSERVICE:
9343 /* look for write of EOI (67) or read of IVR (65) */
9344 if ((idesc->operands[0] == IA64_OPND_CR3
9345 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9346 || (idesc->operands[1] == IA64_OPND_CR3
9347 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9348 {
9349 specs[count++] = tmpl;
9350 }
9351 break;
9352
9353 case IA64_RS_GR0:
9354 if (note == 1)
9355 {
9356 specs[count++] = tmpl;
9357 }
9358 else
9359 {
9360 UNHANDLED;
9361 }
9362 break;
9363
9364 case IA64_RS_CFM:
9365 if (note != 2)
9366 {
9367 specs[count++] = tmpl;
9368 }
9369 else
9370 {
9371 /* Check if any of the registers accessed are in the rotating region.
9372 mov to/from pr accesses CFM only when qp_regno is in the rotating
9373 region */
9374 for (i = 0; i < NELEMS (idesc->operands); i++)
9375 {
9376 if (idesc->operands[i] == IA64_OPND_R1
9377 || idesc->operands[i] == IA64_OPND_R2
9378 || idesc->operands[i] == IA64_OPND_R3)
9379 {
9380 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9381 /* Assumes that md.rot.num_regs is always valid */
9382 if (md.rot.num_regs > 0
9383 && num > 31
9384 && num < 31 + md.rot.num_regs)
9385 {
9386 specs[count] = tmpl;
9387 specs[count++].specific = 0;
9388 }
9389 }
9390 else if (idesc->operands[i] == IA64_OPND_F1
9391 || idesc->operands[i] == IA64_OPND_F2
9392 || idesc->operands[i] == IA64_OPND_F3
9393 || idesc->operands[i] == IA64_OPND_F4)
9394 {
9395 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9396 if (num > 31)
9397 {
9398 specs[count] = tmpl;
9399 specs[count++].specific = 0;
9400 }
9401 }
9402 else if (idesc->operands[i] == IA64_OPND_P1
9403 || idesc->operands[i] == IA64_OPND_P2)
9404 {
9405 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9406 if (num > 15)
9407 {
9408 specs[count] = tmpl;
9409 specs[count++].specific = 0;
9410 }
9411 }
9412 }
9413 if (CURR_SLOT.qp_regno > 15)
9414 {
9415 specs[count] = tmpl;
9416 specs[count++].specific = 0;
9417 }
9418 }
9419 break;
9420
9421 /* This is the same as IA64_RS_PRr, except simplified to account for
9422 the fact that there is only one register. */
9423 case IA64_RS_PR63:
9424 if (note == 0)
9425 {
9426 specs[count++] = tmpl;
9427 }
9428 else if (note == 7)
9429 {
9430 valueT mask = 0;
9431 if (idesc->operands[2] == IA64_OPND_IMM17)
9432 mask = CURR_SLOT.opnd[2].X_add_number;
9433 if (mask & ((valueT) 1 << 63))
9434 specs[count++] = tmpl;
9435 }
9436 else if (note == 11)
9437 {
9438 if ((idesc->operands[0] == IA64_OPND_P1
9439 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9440 || (idesc->operands[1] == IA64_OPND_P2
9441 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9442 {
9443 specs[count++] = tmpl;
9444 }
9445 }
9446 else if (note == 12)
9447 {
9448 if (CURR_SLOT.qp_regno == 63)
9449 {
9450 specs[count++] = tmpl;
9451 }
9452 }
9453 else if (note == 1)
9454 {
9455 if (rsrc_write)
9456 {
9457 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9458 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9459 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9460 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9461
9462 if (p1 == 63
9463 && (idesc->operands[0] == IA64_OPND_P1
9464 || idesc->operands[0] == IA64_OPND_P2))
9465 {
9466 specs[count] = tmpl;
9467 specs[count++].cmp_type =
9468 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9469 }
9470 if (p2 == 63
9471 && (idesc->operands[1] == IA64_OPND_P1
9472 || idesc->operands[1] == IA64_OPND_P2))
9473 {
9474 specs[count] = tmpl;
9475 specs[count++].cmp_type =
9476 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9477 }
9478 }
9479 else
9480 {
9481 if (CURR_SLOT.qp_regno == 63)
9482 {
9483 specs[count++] = tmpl;
9484 }
9485 }
9486 }
9487 else
9488 {
9489 UNHANDLED;
9490 }
9491 break;
9492
9493 case IA64_RS_RSE:
9494 /* FIXME we can identify some individual RSE written resources, but RSE
9495 read resources have not yet been completely identified, so for now
9496 treat RSE as a single resource */
9497 if (startswith (idesc->name, "mov"))
9498 {
9499 if (rsrc_write)
9500 {
9501 if (idesc->operands[0] == IA64_OPND_AR3
9502 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9503 {
9504 specs[count++] = tmpl;
9505 }
9506 }
9507 else
9508 {
9509 if (idesc->operands[0] == IA64_OPND_AR3)
9510 {
9511 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9512 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9513 {
9514 specs[count++] = tmpl;
9515 }
9516 }
9517 else if (idesc->operands[1] == IA64_OPND_AR3)
9518 {
9519 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9520 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9521 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9522 {
9523 specs[count++] = tmpl;
9524 }
9525 }
9526 }
9527 }
9528 else
9529 {
9530 specs[count++] = tmpl;
9531 }
9532 break;
9533
9534 case IA64_RS_ANY:
9535 /* FIXME -- do any of these need to be non-specific? */
9536 specs[count++] = tmpl;
9537 break;
9538
9539 default:
9540 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9541 break;
9542 }
9543
9544 return count;
9545 }
9546
9547 /* Clear branch flags on marked resources. This breaks the link between the
9548 QP of the marking instruction and a subsequent branch on the same QP. */
9549
9550 static void
9551 clear_qp_branch_flag (valueT mask)
9552 {
9553 int i;
9554 for (i = 0; i < regdepslen; i++)
9555 {
9556 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9557 if ((bit & mask) != 0)
9558 {
9559 regdeps[i].link_to_qp_branch = 0;
9560 }
9561 }
9562 }
9563
9564 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9565 any mutexes which contain one of the PRs and create new ones when
9566 needed. */
9567
9568 static int
9569 update_qp_mutex (valueT mask)
9570 {
9571 int i;
9572 int add = 0;
9573
9574 i = 0;
9575 while (i < qp_mutexeslen)
9576 {
9577 if ((qp_mutexes[i].prmask & mask) != 0)
9578 {
9579 /* If it destroys and creates the same mutex, do nothing. */
9580 if (qp_mutexes[i].prmask == mask
9581 && qp_mutexes[i].path == md.path)
9582 {
9583 i++;
9584 add = -1;
9585 }
9586 else
9587 {
9588 int keep = 0;
9589
9590 if (md.debug_dv)
9591 {
9592 fprintf (stderr, " Clearing mutex relation");
9593 print_prmask (qp_mutexes[i].prmask);
9594 fprintf (stderr, "\n");
9595 }
9596
9597 /* Deal with an old mutex containing three or more PRs only if
9598 the new mutex is on the same execution path as it.
9599
9600 FIXME: The 3+ mutex support is incomplete.
9601 dot_pred_rel () may be a better place to fix it. */
9602 if (qp_mutexes[i].path == md.path)
9603 {
9604 /* If it is a proper subset of the mutex, create a
9605 new mutex. */
9606 if (add == 0
9607 && (qp_mutexes[i].prmask & mask) == mask)
9608 add = 1;
9609
9610 qp_mutexes[i].prmask &= ~mask;
9611 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9612 {
9613 /* Keep the modified mutex if there is more than one
9614 PR left. */
9615 keep = 1;
9616 i++;
9617 }
9618 }
9619
9620 if (keep == 0)
9621 /* Remove the mutex. */
9622 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9623 }
9624 }
9625 else
9626 ++i;
9627 }
9628
9629 if (add == 1)
9630 add_qp_mutex (mask);
9631
9632 return add;
9633 }
9634
9635 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9636
9637 Any change to a PR clears the mutex relations which include that PR. */
9638
9639 static void
9640 clear_qp_mutex (valueT mask)
9641 {
9642 int i;
9643
9644 i = 0;
9645 while (i < qp_mutexeslen)
9646 {
9647 if ((qp_mutexes[i].prmask & mask) != 0)
9648 {
9649 if (md.debug_dv)
9650 {
9651 fprintf (stderr, " Clearing mutex relation");
9652 print_prmask (qp_mutexes[i].prmask);
9653 fprintf (stderr, "\n");
9654 }
9655 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9656 }
9657 else
9658 ++i;
9659 }
9660 }
9661
9662 /* Clear implies relations which contain PRs in the given masks.
9663 P1_MASK indicates the source of the implies relation, while P2_MASK
9664 indicates the implied PR. */
9665
9666 static void
9667 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9668 {
9669 int i;
9670
9671 i = 0;
9672 while (i < qp_implieslen)
9673 {
9674 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9675 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9676 {
9677 if (md.debug_dv)
9678 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9679 qp_implies[i].p1, qp_implies[i].p2);
9680 qp_implies[i] = qp_implies[--qp_implieslen];
9681 }
9682 else
9683 ++i;
9684 }
9685 }
9686
9687 /* Record that PR P1 implies PR P2 in the list of implied relations. */
9688
9689 static void
9690 add_qp_imply (int p1, int p2)
9691 {
9692 valueT mask;
9693 valueT bit;
9694 int i;
9695
9696 /* p0 is not meaningful here. */
9697 if (p1 == 0 || p2 == 0)
9698 abort ();
9699
9700 if (p1 == p2)
9701 return;
9702
9703 /* If it exists already, ignore it. */
9704 for (i = 0; i < qp_implieslen; i++)
9705 {
9706 if (qp_implies[i].p1 == p1
9707 && qp_implies[i].p2 == p2
9708 && qp_implies[i].path == md.path
9709 && !qp_implies[i].p2_branched)
9710 return;
9711 }
9712
9713 if (qp_implieslen == qp_impliestotlen)
9714 {
9715 qp_impliestotlen += 20;
9716 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9717 }
9718 if (md.debug_dv)
9719 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9720 qp_implies[qp_implieslen].p1 = p1;
9721 qp_implies[qp_implieslen].p2 = p2;
9722 qp_implies[qp_implieslen].path = md.path;
9723 qp_implies[qp_implieslen++].p2_branched = 0;
9724
9725 /* Add in the implied transitive relations; for everything that p2 implies,
9726 make p1 imply that, too; for everything that implies p1, make it imply p2
9727 as well. */
9728 for (i = 0; i < qp_implieslen; i++)
9729 {
9730 if (qp_implies[i].p1 == p2)
9731 add_qp_imply (p1, qp_implies[i].p2);
9732 if (qp_implies[i].p2 == p1)
9733 add_qp_imply (qp_implies[i].p1, p2);
9734 }
9735 /* Add in mutex relations implied by this implies relation; for each mutex
9736 relation containing p2, duplicate it and replace p2 with p1. */
9737 bit = (valueT) 1 << p1;
9738 mask = (valueT) 1 << p2;
9739 for (i = 0; i < qp_mutexeslen; i++)
9740 {
9741 if (qp_mutexes[i].prmask & mask)
9742 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9743 }
9744 }
9745
9746 /* Add the PRs specified in the mask to the mutex list; this means that only
9747 one of the PRs can be true at any time. PR0 should never be included in
9748 the mask. */
9749
9750 static void
9751 add_qp_mutex (valueT mask)
9752 {
9753 if (mask & 0x1)
9754 abort ();
9755
9756 if (qp_mutexeslen == qp_mutexestotlen)
9757 {
9758 qp_mutexestotlen += 20;
9759 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9760 }
9761 if (md.debug_dv)
9762 {
9763 fprintf (stderr, " Registering mutex on");
9764 print_prmask (mask);
9765 fprintf (stderr, "\n");
9766 }
9767 qp_mutexes[qp_mutexeslen].path = md.path;
9768 qp_mutexes[qp_mutexeslen++].prmask = mask;
9769 }
9770
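/* Return nonzero if NAME is longer than SUFFIX and ends with it.  */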
9771 static int
9772 has_suffix_p (const char *name, const char *suffix)
9773 {
9774 size_t namelen = strlen (name);
9775 size_t sufflen = strlen (suffix);
9776
9777 if (namelen <= sufflen)
9778 return 0;
9779 return strcmp (name + namelen - sufflen, suffix) == 0;
9780 }
9781
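/* Invalidate all tracked GR values; gr_values[0] is left alone, since r0 is
   always zero.  */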
9782 static void
9783 clear_register_values (void)
9784 {
9785 int i;
9786 if (md.debug_dv)
9787 fprintf (stderr, " Clearing register values\n");
9788 for (i = 1; i < NELEMS (gr_values); i++)
9789 gr_values[i].known = 0;
9790 }
9791
9792 /* Keep track of register values/changes which affect DV tracking.
9793
9794 Optimization note: we should add a flag to classes of insns where otherwise we
9795 have to examine a group of strings to identify them. */
9796
9797 static void
9798 note_register_values (struct ia64_opcode *idesc)
9799 {
9800 valueT qp_changemask = 0;
9801 int i;
9802
9803 /* Invalidate values for registers being written to. */
9804 for (i = 0; i < idesc->num_outputs; i++)
9805 {
9806 if (idesc->operands[i] == IA64_OPND_R1
9807 || idesc->operands[i] == IA64_OPND_R2
9808 || idesc->operands[i] == IA64_OPND_R3)
9809 {
9810 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9811 if (regno > 0 && regno < NELEMS (gr_values))
9812 gr_values[regno].known = 0;
9813 }
9814 else if (idesc->operands[i] == IA64_OPND_R3_2)
9815 {
9816 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9817 if (regno > 0 && regno < 4)
9818 gr_values[regno].known = 0;
9819 }
9820 else if (idesc->operands[i] == IA64_OPND_P1
9821 || idesc->operands[i] == IA64_OPND_P2)
9822 {
9823 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9824 qp_changemask |= (valueT) 1 << regno;
9825 }
9826 else if (idesc->operands[i] == IA64_OPND_PR)
9827 {
9828 if (idesc->operands[2] & (valueT) 0x10000)
9829 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9830 else
9831 qp_changemask = idesc->operands[2];
9832 break;
9833 }
9834 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9835 {
9836 if (idesc->operands[1] & ((valueT) 1 << 43))
9837 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9838 else
9839 qp_changemask = idesc->operands[1];
9840 qp_changemask &= ~(valueT) 0xFFFF;
9841 break;
9842 }
9843 }
9844
9845 /* Always clear qp branch flags on any PR change. */
9846 /* FIXME there may be exceptions for certain compares. */
9847 clear_qp_branch_flag (qp_changemask);
9848
9849 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9850 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9851 {
9852 qp_changemask |= ~(valueT) 0xFFFF;
9853 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9854 {
9855 for (i = 32; i < 32 + md.rot.num_regs; i++)
9856 gr_values[i].known = 0;
9857 }
9858 clear_qp_mutex (qp_changemask);
9859 clear_qp_implies (qp_changemask, qp_changemask);
9860 }
9861 /* After a call, all register values are undefined, except those marked
9862 as "safe". */
9863 else if (startswith (idesc->name, "br.call")
9864 || startswith (idesc->name, "brl.call"))
9865 {
9866 /* FIXME keep GR values which are marked as "safe_across_calls" */
9867 clear_register_values ();
9868 clear_qp_mutex (~qp_safe_across_calls);
9869 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9870 clear_qp_branch_flag (~qp_safe_across_calls);
9871 }
9872 else if (is_interruption_or_rfi (idesc)
9873 || is_taken_branch (idesc))
9874 {
9875 clear_register_values ();
9876 clear_qp_mutex (~(valueT) 0);
9877 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9878 }
9879 /* Look for mutex and implies relations. */
9880 else if ((idesc->operands[0] == IA64_OPND_P1
9881 || idesc->operands[0] == IA64_OPND_P2)
9882 && (idesc->operands[1] == IA64_OPND_P1
9883 || idesc->operands[1] == IA64_OPND_P2))
9884 {
9885 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9886 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9887 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9888 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9889
9890 /* If both PRs are PR0, we can't really do anything. */
9891 if (p1 == 0 && p2 == 0)
9892 {
9893 if (md.debug_dv)
9894 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9895 }
9896 /* In general, clear mutexes and implies which include P1 or P2,
9897 with the following exceptions. */
9898 else if (has_suffix_p (idesc->name, ".or.andcm")
9899 || has_suffix_p (idesc->name, ".and.orcm"))
9900 {
9901 clear_qp_implies (p2mask, p1mask);
9902 }
9903 else if (has_suffix_p (idesc->name, ".andcm")
9904 || has_suffix_p (idesc->name, ".and"))
9905 {
9906 clear_qp_implies (0, p1mask | p2mask);
9907 }
9908 else if (has_suffix_p (idesc->name, ".orcm")
9909 || has_suffix_p (idesc->name, ".or"))
9910 {
9911 clear_qp_mutex (p1mask | p2mask);
9912 clear_qp_implies (p1mask | p2mask, 0);
9913 }
9914 else
9915 {
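/* A normal or unconditional compare, e.g. "cmp.eq p6, p7 = r8, r9", writes
   complementary values into its two target PRs; when it is qualified by p0
   or uses the .unc completer, the targets therefore become mutually
   exclusive.  */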
9916 int added = 0;
9917
9918 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9919
9920 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9921 if (p1 == 0 || p2 == 0)
9922 clear_qp_mutex (p1mask | p2mask);
9923 else
9924 added = update_qp_mutex (p1mask | p2mask);
9925
9926 if (CURR_SLOT.qp_regno == 0
9927 || has_suffix_p (idesc->name, ".unc"))
9928 {
9929 if (added == 0 && p1 && p2)
9930 add_qp_mutex (p1mask | p2mask);
9931 if (CURR_SLOT.qp_regno != 0)
9932 {
9933 if (p1)
9934 add_qp_imply (p1, CURR_SLOT.qp_regno);
9935 if (p2)
9936 add_qp_imply (p2, CURR_SLOT.qp_regno);
9937 }
9938 }
9939 }
9940 }
9941 /* Look for mov imm insns into GRs. */
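/* Remembering the constant lets specify_resource later resolve an indirect
   access such as "mov pmc[r2] = r9" after "mov r2 = 5" to a specific
   register number.  */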
9942 else if (idesc->operands[0] == IA64_OPND_R1
9943 && (idesc->operands[1] == IA64_OPND_IMM22
9944 || idesc->operands[1] == IA64_OPND_IMMU64)
9945 && CURR_SLOT.opnd[1].X_op == O_constant
9946 && (strcmp (idesc->name, "mov") == 0
9947 || strcmp (idesc->name, "movl") == 0))
9948 {
9949 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9950 if (regno > 0 && regno < NELEMS (gr_values))
9951 {
9952 gr_values[regno].known = 1;
9953 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9954 gr_values[regno].path = md.path;
9955 if (md.debug_dv)
9956 fprintf (stderr, " Know gr%d = %" PRIx64 "\n",
9957 regno, gr_values[regno].value);
9958 }
9959 }
9960 /* Look for dep.z imm insns. */
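/* For "dep.z r1 = imm8, pos6, len6" the result is IMM8 masked to LEN bits and
   shifted left by POS, so the value is computable at assembly time.  */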
9961 else if (idesc->operands[0] == IA64_OPND_R1
9962 && idesc->operands[1] == IA64_OPND_IMM8
9963 && strcmp (idesc->name, "dep.z") == 0)
9964 {
9965 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9966 if (regno > 0 && regno < NELEMS (gr_values))
9967 {
9968 valueT value = CURR_SLOT.opnd[1].X_add_number;
9969
9970 if (CURR_SLOT.opnd[3].X_add_number < 64)
9971 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9972 value <<= CURR_SLOT.opnd[2].X_add_number;
9973 gr_values[regno].known = 1;
9974 gr_values[regno].value = value;
9975 gr_values[regno].path = md.path;
9976 if (md.debug_dv)
9977 fprintf (stderr, " Know gr%d = %" PRIx64 "\n",
9978 regno, gr_values[regno].value);
9979 }
9980 }
9981 else
9982 {
9983 clear_qp_mutex (qp_changemask);
9984 clear_qp_implies (qp_changemask, qp_changemask);
9985 }
9986 }
9987
9988 /* Return whether the given predicate registers are currently mutex. */
9989
9990 static int
9991 qp_mutex (int p1, int p2, int path)
9992 {
9993 int i;
9994 valueT mask;
9995
9996 if (p1 != p2)
9997 {
9998 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9999 for (i = 0; i < qp_mutexeslen; i++)
10000 {
10001 if (qp_mutexes[i].path >= path
10002 && (qp_mutexes[i].prmask & mask) == mask)
10003 return 1;
10004 }
10005 }
10006 return 0;
10007 }
10008
10009 /* Return whether the given resource is in the given insn's list of chks.
10010 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10011 conflict. */
10012
10013 static int
10014 resources_match (struct rsrc *rs,
10015 struct ia64_opcode *idesc,
10016 int note,
10017 int qp_regno,
10018 int path)
10019 {
10020 struct rsrc specs[MAX_SPECS];
10021 int count;
10022
10023 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10024 we don't need to check. One exception is note 11, which indicates that
10025 target predicates are written regardless of PR[qp]. */
10026 if (qp_mutex (rs->qp_regno, qp_regno, path)
10027 && note != 11)
10028 return 0;
10029
10030 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10031 while (count-- > 0)
10032 {
10033 /* UNAT checking is a bit more specific than for other resources. */
10034 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10035 && specs[count].mem_offset.hint
10036 && rs->mem_offset.hint)
10037 {
10038 if (rs->mem_offset.base == specs[count].mem_offset.base)
10039 {
10040 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10041 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10042 return 1;
10043 else
10044 continue;
10045 }
10046 }
10047
10048 /* Skip apparent PR write conflicts where both writes are an AND or both
10049 writes are an OR. */
10050 if (rs->dependency->specifier == IA64_RS_PR
10051 || rs->dependency->specifier == IA64_RS_PRr
10052 || rs->dependency->specifier == IA64_RS_PR63)
10053 {
10054 if (specs[count].cmp_type != CMP_NONE
10055 && specs[count].cmp_type == rs->cmp_type)
10056 {
10057 if (md.debug_dv)
10058 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10059 dv_mode[rs->dependency->mode],
10060 rs->dependency->specifier != IA64_RS_PR63 ?
10061 specs[count].index : 63);
10062 continue;
10063 }
10064 if (md.debug_dv)
10065 fprintf (stderr,
10066 " %s on parallel compare conflict %s vs %s on PR%d\n",
10067 dv_mode[rs->dependency->mode],
10068 dv_cmp_type[rs->cmp_type],
10069 dv_cmp_type[specs[count].cmp_type],
10070 rs->dependency->specifier != IA64_RS_PR63 ?
10071 specs[count].index : 63);
10072
10073 }
10074
10075 /* If either resource is not specific, conservatively assume a
10076 conflict. */
10077 if (!specs[count].specific || !rs->specific)
10078 return 2;
10079 else if (specs[count].index == rs->index)
10080 return 1;
10081 }
10082
10083 return 0;
10084 }
10085
10086 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10087 insert a stop to create the break. Update all resource dependencies
10088 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10089 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10090 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10091 instruction. */
10092
10093 static void
10094 insn_group_break (int insert_stop, int qp_regno, int save_current)
10095 {
10096 int i;
10097
10098 if (insert_stop && md.num_slots_in_use > 0)
10099 PREV_SLOT.end_of_insn_group = 1;
10100
10101 if (md.debug_dv)
10102 {
10103 fprintf (stderr, " Insn group break%s",
10104 (insert_stop ? " (w/stop)" : ""));
10105 if (qp_regno != 0)
10106 fprintf (stderr, " effective for QP=%d", qp_regno);
10107 fprintf (stderr, "\n");
10108 }
10109
10110 i = 0;
10111 while (i < regdepslen)
10112 {
10113 const struct ia64_dependency *dep = regdeps[i].dependency;
10114
10115 if (qp_regno != 0
10116 && regdeps[i].qp_regno != qp_regno)
10117 {
10118 ++i;
10119 continue;
10120 }
10121
10122 if (save_current
10123 && CURR_SLOT.src_file == regdeps[i].file
10124 && CURR_SLOT.src_line == regdeps[i].line)
10125 {
10126 ++i;
10127 continue;
10128 }
10129
10130 /* clear dependencies which are automatically cleared by a stop, or
10131 those that have reached the appropriate state of insn serialization */
10132 if (dep->semantics == IA64_DVS_IMPLIED
10133 || dep->semantics == IA64_DVS_IMPLIEDF
10134 || regdeps[i].insn_srlz == STATE_SRLZ)
10135 {
10136 print_dependency ("Removing", i);
10137 regdeps[i] = regdeps[--regdepslen];
10138 }
10139 else
10140 {
10141 if (dep->semantics == IA64_DVS_DATA
10142 || dep->semantics == IA64_DVS_INSTR
10143 || dep->semantics == IA64_DVS_SPECIFIC)
10144 {
10145 if (regdeps[i].insn_srlz == STATE_NONE)
10146 regdeps[i].insn_srlz = STATE_STOP;
10147 if (regdeps[i].data_srlz == STATE_NONE)
10148 regdeps[i].data_srlz = STATE_STOP;
10149 }
10150 ++i;
10151 }
10152 }
10153 }
10154
10155 /* Add the given resource usage spec to the list of active dependencies. */
10156
10157 static void
10158 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10159 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10160 struct rsrc *spec,
10161 int depind,
10162 int path)
10163 {
10164 if (regdepslen == regdepstotlen)
10165 {
10166 regdepstotlen += 20;
10167 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10168 }
10169
10170 regdeps[regdepslen] = *spec;
10171 regdeps[regdepslen].depind = depind;
10172 regdeps[regdepslen].path = path;
10173 regdeps[regdepslen].file = CURR_SLOT.src_file;
10174 regdeps[regdepslen].line = CURR_SLOT.src_line;
10175
10176 print_dependency ("Adding", regdepslen);
10177
10178 ++regdepslen;
10179 }
10180
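/* If DV debugging is enabled, describe the regdeps entry at index DEPIND,
   prefixed with ACTION (e.g. "Adding" or "Removing").  */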
10181 static void
10182 print_dependency (const char *action, int depind)
10183 {
10184 if (md.debug_dv)
10185 {
10186 fprintf (stderr, " %s %s '%s'",
10187 action, dv_mode[(regdeps[depind].dependency)->mode],
10188 (regdeps[depind].dependency)->name);
10189 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10190 fprintf (stderr, " (%d)", regdeps[depind].index);
10191 if (regdeps[depind].mem_offset.hint)
10192 fprintf (stderr, " %" PRIx64 "+%" PRIx64,
10193 regdeps[depind].mem_offset.base,
10194 regdeps[depind].mem_offset.offset);
10195 fprintf (stderr, "\n");
10196 }
10197 }
10198
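/* An instruction serialization (srlz.i) has been emitted or seen: advance any
   dependency that already had a stop to the fully serialized state.  */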
10199 static void
10200 instruction_serialization (void)
10201 {
10202 int i;
10203 if (md.debug_dv)
10204 fprintf (stderr, " Instruction serialization\n");
10205 for (i = 0; i < regdepslen; i++)
10206 if (regdeps[i].insn_srlz == STATE_STOP)
10207 regdeps[i].insn_srlz = STATE_SRLZ;
10208 }
10209
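/* A data serialization (srlz.d) has been emitted or seen: remove every
   dependency it satisfies (those stopped for data serialization, plus all
   IA64_DVS_OTHER dependencies).  */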
10210 static void
10211 data_serialization (void)
10212 {
10213 int i = 0;
10214 if (md.debug_dv)
10215 fprintf (stderr, " Data serialization\n");
10216 while (i < regdepslen)
10217 {
10218 if (regdeps[i].data_srlz == STATE_STOP
10219 /* Note: as of 991210, all "other" dependencies are cleared by a
10220 data serialization. This might change with new tables. */
10221 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10222 {
10223 print_dependency ("Removing", i);
10224 regdeps[i] = regdeps[--regdepslen];
10225 }
10226 else
10227 ++i;
10228 }
10229 }
10230
10231 /* Insert stops and serializations as needed to avoid DVs. */
10232
10233 static void
10234 remove_marked_resource (struct rsrc *rs)
10235 {
10236 switch (rs->dependency->semantics)
10237 {
10238 case IA64_DVS_SPECIFIC:
10239 if (md.debug_dv)
10240 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10241 /* Fall through. */
10242 case IA64_DVS_INSTR:
10243 if (md.debug_dv)
10244 fprintf (stderr, "Inserting instr serialization\n");
10245 if (rs->insn_srlz < STATE_STOP)
10246 insn_group_break (1, 0, 0);
10247 if (rs->insn_srlz < STATE_SRLZ)
10248 {
10249 struct slot oldslot = CURR_SLOT;
10250 /* Manually jam a srlz.i insn into the stream */
10251 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10252 CURR_SLOT.user_template = -1;
10253 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10254 instruction_serialization ();
10255 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10256 if (++md.num_slots_in_use >= NUM_SLOTS)
10257 emit_one_bundle ();
10258 CURR_SLOT = oldslot;
10259 }
10260 insn_group_break (1, 0, 0);
10261 break;
10262 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10263 "other" types of DV are eliminated
10264 by a data serialization */
10265 case IA64_DVS_DATA:
10266 if (md.debug_dv)
10267 fprintf (stderr, "Inserting data serialization\n");
10268 if (rs->data_srlz < STATE_STOP)
10269 insn_group_break (1, 0, 0);
10270 {
10271 struct slot oldslot = CURR_SLOT;
10272 /* Manually jam a srlz.d insn into the stream */
10273 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10274 CURR_SLOT.user_template = -1;
10275 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10276 data_serialization ();
10277 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10278 if (++md.num_slots_in_use >= NUM_SLOTS)
10279 emit_one_bundle ();
10280 CURR_SLOT = oldslot;
10281 }
10282 break;
10283 case IA64_DVS_IMPLIED:
10284 case IA64_DVS_IMPLIEDF:
10285 if (md.debug_dv)
10286 fprintf (stderr, "Inserting stop\n");
10287 insn_group_break (1, 0, 0);
10288 break;
10289 default:
10290 break;
10291 }
10292 }
10293
10294 /* Check the resources used by the given opcode against the current dependency
10295 list.
10296
10297 The check is run once for each execution path encountered. In this case,
10298 a unique execution path is the sequence of instructions following a code
10299 entry point, e.g. the following has three execution paths, one starting
10300 at L0, one at L1, and one at L2.
10301
10302 L0: nop
10303 L1: add
10304 L2: add
10305 br.ret
10306 */
10307
10308 static void
10309 check_dependencies (struct ia64_opcode *idesc)
10310 {
10311 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10312 int path;
10313 int i;
10314
10315 /* Note that the number of marked resources may change within the
10316 loop if in auto mode. */
10317 i = 0;
10318 while (i < regdepslen)
10319 {
10320 struct rsrc *rs = &regdeps[i];
10321 const struct ia64_dependency *dep = rs->dependency;
10322 int chkind;
10323 int note;
10324 int start_over = 0;
10325
10326 if (dep->semantics == IA64_DVS_NONE
10327 || (chkind = depends_on (rs->depind, idesc)) == -1)
10328 {
10329 ++i;
10330 continue;
10331 }
10332
10333 note = NOTE (opdeps->chks[chkind]);
10334
10335 /* Check this resource against each execution path seen thus far. */
10336 for (path = 0; path <= md.path; path++)
10337 {
10338 int matchtype;
10339
10340 /* If the dependency wasn't on the path being checked, ignore it. */
10341 if (rs->path < path)
10342 continue;
10343
10344 /* If the QP for this insn implies a QP which has branched, don't
10345 bother checking. Ed. NOTE: I don't think this check is terribly
10346 useful; what's the point of generating code which will only be
10347 reached if its QP is zero?
10348 This code was specifically inserted to handle the following code,
10349 based on notes from Intel's DV checking code, where p1 implies p2.
10350
10351 mov r4 = 2
10352 (p2) br.cond L
10353 (p1) mov r4 = 7
10354 */
10355 if (CURR_SLOT.qp_regno != 0)
10356 {
10357 int skip = 0;
10358 int implies;
10359 for (implies = 0; implies < qp_implieslen; implies++)
10360 {
10361 if (qp_implies[implies].path >= path
10362 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10363 && qp_implies[implies].p2_branched)
10364 {
10365 skip = 1;
10366 break;
10367 }
10368 }
10369 if (skip)
10370 continue;
10371 }
10372
10373 if ((matchtype = resources_match (rs, idesc, note,
10374 CURR_SLOT.qp_regno, path)) != 0)
10375 {
10376 char msg[1024];
10377 char pathmsg[256] = "";
10378 char indexmsg[256] = "";
10379 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10380
10381 if (path != 0)
10382 snprintf (pathmsg, sizeof (pathmsg),
10383 " when entry is at label '%s'",
10384 md.entry_labels[path - 1]);
10385 if (matchtype == 1 && rs->index >= 0)
10386 snprintf (indexmsg, sizeof (indexmsg),
10387 ", specific resource number is %d",
10388 rs->index);
10389 snprintf (msg, sizeof (msg),
10390 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10391 idesc->name,
10392 (certain ? "violates" : "may violate"),
10393 dv_mode[dep->mode], dep->name,
10394 dv_sem[dep->semantics],
10395 pathmsg, indexmsg);
10396
10397 if (md.explicit_mode)
10398 {
10399 as_warn ("%s", msg);
10400 if (path < md.path)
10401 as_warn (_("Only the first path encountering the conflict is reported"));
10402 as_warn_where (rs->file, rs->line,
10403 _("This is the location of the conflicting usage"));
10404 /* Don't bother checking other paths, to avoid duplicating
10405 the same warning */
10406 break;
10407 }
10408 else
10409 {
10410 if (md.debug_dv)
10411 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10412
10413 remove_marked_resource (rs);
10414
10415 /* since the set of dependencies has changed, start over */
10416 /* FIXME -- since we're removing dvs as we go, we
10417 probably don't really need to start over... */
10418 start_over = 1;
10419 break;
10420 }
10421 }
10422 }
10423 if (start_over)
10424 i = 0;
10425 else
10426 ++i;
10427 }
10428 }
10429
10430 /* Register new dependencies based on the given opcode. */
10431
10432 static void
10433 mark_resources (struct ia64_opcode *idesc)
10434 {
10435 int i;
10436 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10437 int add_only_qp_reads = 0;
10438
10439 /* A conditional branch only uses its resources if it is taken; if it is
10440 taken, we stop following that path. The other branch types effectively
10441 *always* write their resources. If it's not taken, register only QP
10442 reads. */
10443 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10444 {
10445 add_only_qp_reads = 1;
10446 }
10447
10448 if (md.debug_dv)
10449 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10450
10451 for (i = 0; i < opdeps->nregs; i++)
10452 {
10453 const struct ia64_dependency *dep;
10454 struct rsrc specs[MAX_SPECS];
10455 int note;
10456 int path;
10457 int count;
10458
10459 dep = ia64_find_dependency (opdeps->regs[i]);
10460 note = NOTE (opdeps->regs[i]);
10461
10462 if (add_only_qp_reads
10463 && !(dep->mode == IA64_DV_WAR
10464 && (dep->specifier == IA64_RS_PR
10465 || dep->specifier == IA64_RS_PRr
10466 || dep->specifier == IA64_RS_PR63)))
10467 continue;
10468
10469 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10470
10471 while (count-- > 0)
10472 {
10473 mark_resource (idesc, dep, &specs[count],
10474 DEP (opdeps->regs[i]), md.path);
10475 }
10476
10477 /* The execution path may affect register values, which may in turn
10478 affect which indirect-access resources are accessed. */
10479 switch (dep->specifier)
10480 {
10481 default:
10482 break;
10483 case IA64_RS_CPUID:
10484 case IA64_RS_DBR:
10485 case IA64_RS_IBR:
10486 case IA64_RS_MSR:
10487 case IA64_RS_PKR:
10488 case IA64_RS_PMC:
10489 case IA64_RS_PMD:
10490 case IA64_RS_RR:
10491 for (path = 0; path < md.path; path++)
10492 {
10493 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10494 while (count-- > 0)
10495 mark_resource (idesc, dep, &specs[count],
10496 DEP (opdeps->regs[i]), path);
10497 }
10498 break;
10499 }
10500 }
10501 }
10502
10503 /* Remove dependencies when they no longer apply. */
10504
10505 static void
10506 update_dependencies (struct ia64_opcode *idesc)
10507 {
10508 int i;
10509
10510 if (strcmp (idesc->name, "srlz.i") == 0)
10511 {
10512 instruction_serialization ();
10513 }
10514 else if (strcmp (idesc->name, "srlz.d") == 0)
10515 {
10516 data_serialization ();
10517 }
10518 else if (is_interruption_or_rfi (idesc)
10519 || is_taken_branch (idesc))
10520 {
10521 /* Although technically the taken branch doesn't clear dependencies
10522 which require a srlz.[id], we don't follow the branch; the next
10523 instruction is assumed to start with a clean slate. */
10524 regdepslen = 0;
10525 md.path = 0;
10526 }
10527 else if (is_conditional_branch (idesc)
10528 && CURR_SLOT.qp_regno != 0)
10529 {
10530 int is_call = strstr (idesc->name, ".call") != NULL;
10531
10532 for (i = 0; i < qp_implieslen; i++)
10533 {
10534 /* If the conditional branch's predicate is implied by the predicate
10535 in an existing dependency, remove that dependency. */
10536 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10537 {
10538 int depind = 0;
10539 /* Note that this implied predicate takes a branch so that if
10540 a later insn generates a DV but its predicate implies this
10541 one, we can avoid the false DV warning. */
10542 qp_implies[i].p2_branched = 1;
10543 while (depind < regdepslen)
10544 {
10545 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10546 {
10547 print_dependency ("Removing", depind);
10548 regdeps[depind] = regdeps[--regdepslen];
10549 }
10550 else
10551 ++depind;
10552 }
10553 }
10554 }
10555 /* Any marked resources which have this same predicate should be
10556 cleared, provided that the QP hasn't been modified between the
10557 marking instruction and the branch. */
10558 if (is_call)
10559 {
10560 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10561 }
10562 else
10563 {
10564 i = 0;
10565 while (i < regdepslen)
10566 {
10567 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10568 && regdeps[i].link_to_qp_branch
10569 && (regdeps[i].file != CURR_SLOT.src_file
10570 || regdeps[i].line != CURR_SLOT.src_line))
10571 {
10572 /* Treat like a taken branch */
10573 print_dependency ("Removing", i);
10574 regdeps[i] = regdeps[--regdepslen];
10575 }
10576 else
10577 ++i;
10578 }
10579 }
10580 }
10581 }
10582
10583 /* Examine the current instruction for dependency violations. */
10584
10585 static int
10586 check_dv (struct ia64_opcode *idesc)
10587 {
10588 if (md.debug_dv)
10589 {
10590 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10591 idesc->name, CURR_SLOT.src_line,
10592 idesc->dependencies->nchks,
10593 idesc->dependencies->nregs);
10594 }
10595
10596 /* Look through the list of currently marked resources; if the current
10597 instruction has a dependency in its chks list that uses one of those
10598 resources, check against the specific resources used. */
10599 check_dependencies (idesc);
10600
10601 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10602 then add them to the list of marked resources. */
10603 mark_resources (idesc);
10604
10605 /* There are several types of dependency semantics, and each has its own
10606 requirements for being cleared:
10607
10608 Instruction serialization (insns separated by interruption, rfi, or
10609 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10610
10611 Data serialization (instruction serialization, or writer + srlz.d +
10612 reader, where writer and srlz.d are in separate groups) clears
10613 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10614 always be the case).
10615
10616 Instruction group break (groups separated by stop, taken branch,
10617 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10618 */
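/* Schematically, with ";;" marking the stops between instruction groups:

   <writer> ;; srlz.i ;; <reader>   -- instruction serialization
   <writer> ;; srlz.d ;; <reader>   -- data serialization
   <writer> ;; <reader>             -- instruction group break */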
10619 update_dependencies (idesc);
10620
10621 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10622 warning. Keep track of as many as possible that are useful. */
10623 note_register_values (idesc);
10624
10625 /* We don't need or want this anymore. */
10626 md.mem_offset.hint = 0;
10627
10628 return 0;
10629 }
10630
10631 /* Translate one line of assembly. Pseudo ops and labels do not show
10632 up here. */
10633 void
10634 md_assemble (char *str)
10635 {
10636 char *saved_input_line_pointer, *temp;
10637 const char *mnemonic;
10638 const struct pseudo_opcode *pdesc;
10639 struct ia64_opcode *idesc;
10640 unsigned char qp_regno;
10641 unsigned int flags;
10642 int ch;
10643
10644 saved_input_line_pointer = input_line_pointer;
10645 input_line_pointer = str;
10646
10647 /* extract the opcode (mnemonic): */
10648
10649 ch = get_symbol_name (&temp);
10650 mnemonic = temp;
10651 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10652 if (pdesc)
10653 {
10654 (void) restore_line_pointer (ch);
10655 (*pdesc->handler) (pdesc->arg);
10656 goto done;
10657 }
10658
10659 /* Find the instruction descriptor matching the arguments. */
10660
10661 idesc = ia64_find_opcode (mnemonic);
10662 (void) restore_line_pointer (ch);
10663 if (!idesc)
10664 {
10665 as_bad (_("Unknown opcode `%s'"), mnemonic);
10666 goto done;
10667 }
10668
10669 idesc = parse_operands (idesc);
10670 if (!idesc)
10671 goto done;
10672
10673 /* Handle the dynamic ops that can be resolved now: */
10674 if (idesc->type == IA64_TYPE_DYN)
10675 {
10676 if (strcmp (idesc->name, "add") == 0)
10677 {
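/* For example (operands are illustrative): "add r8 = 0x12345, r1" can use
   addl, whose 22-bit immediate form requires the GR source to be r0-r3,
   while "add r8 = 40, r12" must fall back to adds with its 14-bit
   immediate. */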
10678 if (CURR_SLOT.opnd[2].X_op == O_register
10679 && CURR_SLOT.opnd[2].X_add_number < 4)
10680 mnemonic = "addl";
10681 else
10682 mnemonic = "adds";
10683 ia64_free_opcode (idesc);
10684 idesc = ia64_find_opcode (mnemonic);
10685 }
10686 else if (strcmp (idesc->name, "mov") == 0)
10687 {
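/* Some application registers are reachable from only one execution unit,
   so the generic "mov" must be narrowed; e.g. "mov r2 = ar.itc" becomes
   mov.m (ar.itc is M-unit only) and "mov r2 = ar.lc" becomes mov.i
   (ar.lc is I-unit only). ARs reachable from either unit keep the
   dynamic "mov". */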
10688 enum ia64_opnd opnd1, opnd2;
10689 int rop;
10690
10691 opnd1 = idesc->operands[0];
10692 opnd2 = idesc->operands[1];
10693 if (opnd1 == IA64_OPND_AR3)
10694 rop = 0;
10695 else if (opnd2 == IA64_OPND_AR3)
10696 rop = 1;
10697 else
10698 abort ();
10699 if (CURR_SLOT.opnd[rop].X_op == O_register)
10700 {
10701 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10702 mnemonic = "mov.i";
10703 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10704 mnemonic = "mov.m";
10705 else
10706 rop = -1;
10707 }
10708 else
10709 abort ();
10710 if (rop >= 0)
10711 {
10712 ia64_free_opcode (idesc);
10713 idesc = ia64_find_opcode (mnemonic);
10714 while (idesc != NULL
10715 && (idesc->operands[0] != opnd1
10716 || idesc->operands[1] != opnd2))
10717 idesc = get_next_opcode (idesc);
10718 }
10719 }
10720 }
10721 else if (strcmp (idesc->name, "mov.i") == 0
10722 || strcmp (idesc->name, "mov.m") == 0)
10723 {
10724 enum ia64_opnd opnd1, opnd2;
10725 int rop;
10726
10727 opnd1 = idesc->operands[0];
10728 opnd2 = idesc->operands[1];
10729 if (opnd1 == IA64_OPND_AR3)
10730 rop = 0;
10731 else if (opnd2 == IA64_OPND_AR3)
10732 rop = 1;
10733 else
10734 abort ();
10735 if (CURR_SLOT.opnd[rop].X_op == O_register)
10736 {
10737 char unit = 'a';
10738 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10739 unit = 'i';
10740 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10741 unit = 'm';
10742 if (unit != 'a' && unit != idesc->name [4])
10743 as_bad (_("AR %d can only be accessed by %c-unit"),
10744 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10745 TOUPPER (unit));
10746 }
10747 }
10748 else if (strcmp (idesc->name, "hint.b") == 0)
10749 {
10750 switch (md.hint_b)
10751 {
10752 case hint_b_ok:
10753 break;
10754 case hint_b_warning:
10755 as_warn (_("hint.b may be treated as nop"));
10756 break;
10757 case hint_b_error:
10758 as_bad (_("hint.b shouldn't be used"));
10759 break;
10760 }
10761 }
10762
10763 qp_regno = 0;
10764 if (md.qp.X_op == O_register)
10765 {
10766 qp_regno = md.qp.X_add_number - REG_P;
10767 md.qp.X_op = O_absent;
10768 }
10769
10770 flags = idesc->flags;
10771
10772 if ((flags & IA64_OPCODE_FIRST) != 0)
10773 {
10774 /* The alignment frag has to end with a stop bit only if the
10775 next instruction after the alignment directive has to be
10776 the first instruction in an instruction group. */
10777 if (align_frag)
10778 {
10779 while (align_frag->fr_type != rs_align_code)
10780 {
10781 align_frag = align_frag->fr_next;
10782 if (!align_frag)
10783 break;
10784 }
10785 /* align_frag can be NULL if there are directives in
10786 between. */
10787 if (align_frag && align_frag->fr_next == frag_now)
10788 align_frag->tc_frag_data = 1;
10789 }
10790
10791 insn_group_break (1, 0, 0);
10792 }
10793 align_frag = NULL;
10794
10795 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10796 {
10797 as_bad (_("`%s' cannot be predicated"), idesc->name);
10798 goto done;
10799 }
10800
10801 /* Build the instruction. */
10802 CURR_SLOT.qp_regno = qp_regno;
10803 CURR_SLOT.idesc = idesc;
10804 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10805 dwarf2_where (&CURR_SLOT.debug_line);
10806 dwarf2_consume_line_info ();
10807
10808 /* Add unwind entries, if there are any. */
10809 if (unwind.current_entry)
10810 {
10811 CURR_SLOT.unwind_record = unwind.current_entry;
10812 unwind.current_entry = NULL;
10813 }
10814 if (unwind.pending_saves)
10815 {
10816 if (unwind.pending_saves->next)
10817 {
10818 /* Attach the next pending save to the next slot so that its
10819 slot number will get set correctly. */
10820 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10821 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10822 }
10823 else
10824 unwind.pending_saves = NULL;
10825 }
10826 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10827 unwind.insn = 1;
10828
10829 /* Check for dependency violations. */
10830 if (md.detect_dv)
10831 check_dv (idesc);
10832
10833 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10834 if (++md.num_slots_in_use >= NUM_SLOTS)
10835 emit_one_bundle ();
10836
10837 if ((flags & IA64_OPCODE_LAST) != 0)
10838 insn_group_break (1, 0, 0);
10839
10840 md.last_text_seg = now_seg;
10841 md.last_text_subseg = now_subseg;
10842
10843 done:
10844 input_line_pointer = saved_input_line_pointer;
10845 }
10846
10847 /* Called when symbol NAME cannot be found in the symbol table.
10848 Should be used for dynamic valued symbols only. */
10849
10850 symbolS *
10851 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10852 {
10853 return 0;
10854 }
10855
10856 /* Called for any expression that cannot be recognized. When the
10857 function is called, `input_line_pointer' will point to the start of
10858 the expression. */
10859
10860 void
10861 md_operand (expressionS *e)
10862 {
10863 switch (*input_line_pointer)
10864 {
10865 case '[':
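/* A bracketed general register, as in the memory operand of
   "ld8 r2 = [r3]" or the index of an indirect register file such as
   "dbr[r4]". */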
10866 ++input_line_pointer;
10867 expression_and_evaluate (e);
10868 if (*input_line_pointer != ']')
10869 {
10870 as_bad (_("Closing bracket missing"));
10871 goto err;
10872 }
10873 else
10874 {
10875 if (e->X_op != O_register
10876 || e->X_add_number < REG_GR
10877 || e->X_add_number > REG_GR + 127)
10878 {
10879 as_bad (_("Index must be a general register"));
10880 e->X_add_number = REG_GR;
10881 }
10882
10883 ++input_line_pointer;
10884 e->X_op = O_index;
10885 }
10886 break;
10887
10888 default:
10889 break;
10890 }
10891 return;
10892
10893 err:
10894 ignore_rest_of_line ();
10895 }
10896
10897 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10898 a section symbol plus some offset. For relocs involving @fptr()
10899 directives, we don't want such adjustments since we need to have the
10900 original symbol's name in the reloc. */
10901 int
10902 ia64_fix_adjustable (fixS *fix)
10903 {
10904 /* Prevent all adjustments to global symbols */
10905 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10906 return 0;
10907
10908 switch (fix->fx_r_type)
10909 {
10910 case BFD_RELOC_IA64_FPTR64I:
10911 case BFD_RELOC_IA64_FPTR32MSB:
10912 case BFD_RELOC_IA64_FPTR32LSB:
10913 case BFD_RELOC_IA64_FPTR64MSB:
10914 case BFD_RELOC_IA64_FPTR64LSB:
10915 case BFD_RELOC_IA64_LTOFF_FPTR22:
10916 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10917 return 0;
10918 default:
10919 break;
10920 }
10921
10922 return 1;
10923 }
10924
10925 int
10926 ia64_force_relocation (fixS *fix)
10927 {
10928 switch (fix->fx_r_type)
10929 {
10930 case BFD_RELOC_IA64_FPTR64I:
10931 case BFD_RELOC_IA64_FPTR32MSB:
10932 case BFD_RELOC_IA64_FPTR32LSB:
10933 case BFD_RELOC_IA64_FPTR64MSB:
10934 case BFD_RELOC_IA64_FPTR64LSB:
10935
10936 case BFD_RELOC_IA64_LTOFF22:
10937 case BFD_RELOC_IA64_LTOFF64I:
10938 case BFD_RELOC_IA64_LTOFF_FPTR22:
10939 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10940 case BFD_RELOC_IA64_PLTOFF22:
10941 case BFD_RELOC_IA64_PLTOFF64I:
10942 case BFD_RELOC_IA64_PLTOFF64MSB:
10943 case BFD_RELOC_IA64_PLTOFF64LSB:
10944
10945 case BFD_RELOC_IA64_LTOFF22X:
10946 case BFD_RELOC_IA64_LDXMOV:
10947 return 1;
10948
10949 default:
10950 break;
10951 }
10952
10953 return generic_force_reloc (fix);
10954 }
10955
10956 /* Decide from what point a pc-relative relocation is relative to,
10957 relative to the pc-relative fixup. Er, relatively speaking. */
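/* In code sections the reference point is the start of the 16-byte
   bundle containing the fixup, hence the masking of the low four bits
   below. */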
10958 long
10959 ia64_pcrel_from_section (fixS *fix, segT sec)
10960 {
10961 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10962
10963 if (bfd_section_flags (sec) & SEC_CODE)
10964 off &= ~0xfUL;
10965
10966 return off;
10967 }
10968
10969
10970 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10971 void
10972 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10973 {
10974 expressionS exp;
10975
10976 exp.X_op = O_pseudo_fixup;
10977 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10978 exp.X_add_number = 0;
10979 exp.X_add_symbol = symbol;
10980 emit_expr (&exp, size);
10981 }
10982
10983 /* This is called whenever some data item (not an instruction) needs a
10984 fixup. We pick the right reloc code depending on the byteorder
10985 currently in effect. */
10986 void
10987 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
10988 bfd_reloc_code_real_type code)
10989 {
10990 fixS *fix;
10991
10992 switch (nbytes)
10993 {
10994 /* There are no relocs for 8- and 16-bit quantities, but we allow
10995 them here since they will work fine as long as the expression
10996 is fully defined at the end of the pass over the source file. */
10997 case 1: code = BFD_RELOC_8; break;
10998 case 2: code = BFD_RELOC_16; break;
10999 case 4:
11000 if (target_big_endian)
11001 code = BFD_RELOC_IA64_DIR32MSB;
11002 else
11003 code = BFD_RELOC_IA64_DIR32LSB;
11004 break;
11005
11006 case 8:
11007 /* In 32-bit mode, data8 could mean function descriptors too. */
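/* E.g. "data8 @iplt(func)" under the ILP32 ABI asks for an 8-byte
   (two 32-bit words) function descriptor, whereas under LP64 the
   descriptor is 16 bytes and is handled by the case below. */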
11008 if (exp->X_op == O_pseudo_fixup
11009 && exp->X_op_symbol
11010 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11011 && !(md.flags & EF_IA_64_ABI64))
11012 {
11013 if (target_big_endian)
11014 code = BFD_RELOC_IA64_IPLTMSB;
11015 else
11016 code = BFD_RELOC_IA64_IPLTLSB;
11017 exp->X_op = O_symbol;
11018 break;
11019 }
11020 else
11021 {
11022 if (target_big_endian)
11023 code = BFD_RELOC_IA64_DIR64MSB;
11024 else
11025 code = BFD_RELOC_IA64_DIR64LSB;
11026 break;
11027 }
11028
11029 case 16:
11030 if (exp->X_op == O_pseudo_fixup
11031 && exp->X_op_symbol
11032 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11033 {
11034 if (target_big_endian)
11035 code = BFD_RELOC_IA64_IPLTMSB;
11036 else
11037 code = BFD_RELOC_IA64_IPLTLSB;
11038 exp->X_op = O_symbol;
11039 break;
11040 }
11041 /* FALLTHRU */
11042
11043 default:
11044 as_bad (_("Unsupported fixup size %d"), nbytes);
11045 ignore_rest_of_line ();
11046 return;
11047 }
11048
11049 if (exp->X_op == O_pseudo_fixup)
11050 {
11051 exp->X_op = O_symbol;
11052 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11053 /* ??? If code unchanged, unsupported. */
11054 }
11055
11056 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11057 /* We need to store the byte order in effect in case we're going
11058 to fix an 8- or 16-bit relocation (for which there are no real
11059 relocs available). See md_apply_fix(). */
11060 fix->tc_fix_data.bigendian = target_big_endian;
11061 }
11062
11063 /* Return the actual relocation we wish to associate with the pseudo
11064 reloc described by SYM and R_TYPE. SYM should be one of the
11065 symbols in the pseudo_func array, or NULL. */
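/* For instance, @gprel(sym) applied to a 22-bit immediate slot
   (BFD_RELOC_IA64_IMM22) maps to BFD_RELOC_IA64_GPREL22, while the same
   pseudo function in a little-endian data8 (BFD_RELOC_IA64_DIR64LSB)
   maps to BFD_RELOC_IA64_GPREL64LSB. */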
11066
11067 static bfd_reloc_code_real_type
11068 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11069 {
11070 bfd_reloc_code_real_type newr = 0;
11071 const char *type = NULL, *suffix = "";
11072
11073 if (sym == NULL)
11074 {
11075 return r_type;
11076 }
11077
11078 switch (S_GET_VALUE (sym))
11079 {
11080 case FUNC_FPTR_RELATIVE:
11081 switch (r_type)
11082 {
11083 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11084 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11085 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11086 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11087 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11088 default: type = "FPTR"; break;
11089 }
11090 break;
11091
11092 case FUNC_GP_RELATIVE:
11093 switch (r_type)
11094 {
11095 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11096 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11097 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11098 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11099 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11100 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11101 default: type = "GPREL"; break;
11102 }
11103 break;
11104
11105 case FUNC_LT_RELATIVE:
11106 switch (r_type)
11107 {
11108 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11109 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11110 default: type = "LTOFF"; break;
11111 }
11112 break;
11113
11114 case FUNC_LT_RELATIVE_X:
11115 switch (r_type)
11116 {
11117 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11118 default: type = "LTOFF"; suffix = "X"; break;
11119 }
11120 break;
11121
11122 case FUNC_PC_RELATIVE:
11123 switch (r_type)
11124 {
11125 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11126 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11127 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11128 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11129 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11130 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11131 default: type = "PCREL"; break;
11132 }
11133 break;
11134
11135 case FUNC_PLT_RELATIVE:
11136 switch (r_type)
11137 {
11138 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11139 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11140 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11141 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11142 default: type = "PLTOFF"; break;
11143 }
11144 break;
11145
11146 case FUNC_SEC_RELATIVE:
11147 switch (r_type)
11148 {
11149 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11150 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11151 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11152 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11153 default: type = "SECREL"; break;
11154 }
11155 break;
11156
11157 case FUNC_SEG_RELATIVE:
11158 switch (r_type)
11159 {
11160 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11161 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11162 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11163 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11164 default: type = "SEGREL"; break;
11165 }
11166 break;
11167
11168 case FUNC_LTV_RELATIVE:
11169 switch (r_type)
11170 {
11171 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11172 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11173 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11174 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11175 default: type = "LTV"; break;
11176 }
11177 break;
11178
11179 case FUNC_LT_FPTR_RELATIVE:
11180 switch (r_type)
11181 {
11182 case BFD_RELOC_IA64_IMM22:
11183 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11184 case BFD_RELOC_IA64_IMM64:
11185 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11186 case BFD_RELOC_IA64_DIR32MSB:
11187 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11188 case BFD_RELOC_IA64_DIR32LSB:
11189 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11190 case BFD_RELOC_IA64_DIR64MSB:
11191 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11192 case BFD_RELOC_IA64_DIR64LSB:
11193 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11194 default:
11195 type = "LTOFF_FPTR"; break;
11196 }
11197 break;
11198
11199 case FUNC_TP_RELATIVE:
11200 switch (r_type)
11201 {
11202 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11203 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11204 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11205 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11206 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11207 default: type = "TPREL"; break;
11208 }
11209 break;
11210
11211 case FUNC_LT_TP_RELATIVE:
11212 switch (r_type)
11213 {
11214 case BFD_RELOC_IA64_IMM22:
11215 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11216 default:
11217 type = "LTOFF_TPREL"; break;
11218 }
11219 break;
11220
11221 case FUNC_DTP_MODULE:
11222 switch (r_type)
11223 {
11224 case BFD_RELOC_IA64_DIR64MSB:
11225 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11226 case BFD_RELOC_IA64_DIR64LSB:
11227 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11228 default:
11229 type = "DTPMOD"; break;
11230 }
11231 break;
11232
11233 case FUNC_LT_DTP_MODULE:
11234 switch (r_type)
11235 {
11236 case BFD_RELOC_IA64_IMM22:
11237 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11238 default:
11239 type = "LTOFF_DTPMOD"; break;
11240 }
11241 break;
11242
11243 case FUNC_DTP_RELATIVE:
11244 switch (r_type)
11245 {
11246 case BFD_RELOC_IA64_DIR32MSB:
11247 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11248 case BFD_RELOC_IA64_DIR32LSB:
11249 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11250 case BFD_RELOC_IA64_DIR64MSB:
11251 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11252 case BFD_RELOC_IA64_DIR64LSB:
11253 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11254 case BFD_RELOC_IA64_IMM14:
11255 newr = BFD_RELOC_IA64_DTPREL14; break;
11256 case BFD_RELOC_IA64_IMM22:
11257 newr = BFD_RELOC_IA64_DTPREL22; break;
11258 case BFD_RELOC_IA64_IMM64:
11259 newr = BFD_RELOC_IA64_DTPREL64I; break;
11260 default:
11261 type = "DTPREL"; break;
11262 }
11263 break;
11264
11265 case FUNC_LT_DTP_RELATIVE:
11266 switch (r_type)
11267 {
11268 case BFD_RELOC_IA64_IMM22:
11269 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11270 default:
11271 type = "LTOFF_DTPREL"; break;
11272 }
11273 break;
11274
11275 case FUNC_IPLT_RELOC:
11276 switch (r_type)
11277 {
11278 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11279 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11280 default: type = "IPLT"; break;
11281 }
11282 break;
11283
11284 #ifdef TE_VMS
11285 case FUNC_SLOTCOUNT_RELOC:
11286 return DUMMY_RELOC_IA64_SLOTCOUNT;
11287 #endif
11288
11289 default:
11290 abort ();
11291 }
11292
11293 if (newr)
11294 return newr;
11295 else
11296 {
11297 int width;
11298
11299 if (!type)
11300 abort ();
11301 switch (r_type)
11302 {
11303 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11304 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11305 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11306 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11307 case BFD_RELOC_UNUSED: width = 13; break;
11308 case BFD_RELOC_IA64_IMM14: width = 14; break;
11309 case BFD_RELOC_IA64_IMM22: width = 22; break;
11310 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11311 default: abort ();
11312 }
11313
11314 /* This should be an error, but since previously there wasn't any
11315 diagnostic here, don't make it fail because of this for now. */
11316 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11317 return r_type;
11318 }
11319 }
11320
11321 /* Here is where we generate the appropriate reloc for the pseudo
11322 relocation functions. */
11323 void
11324 ia64_validate_fix (fixS *fix)
11325 {
11326 switch (fix->fx_r_type)
11327 {
11328 case BFD_RELOC_IA64_FPTR64I:
11329 case BFD_RELOC_IA64_FPTR32MSB:
11330 case BFD_RELOC_IA64_FPTR64LSB:
11331 case BFD_RELOC_IA64_LTOFF_FPTR22:
11332 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11333 if (fix->fx_offset != 0)
11334 as_bad_where (fix->fx_file, fix->fx_line,
11335 _("No addend allowed in @fptr() relocation"));
11336 break;
11337 default:
11338 break;
11339 }
11340 }
11341
11342 static void
11343 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11344 {
11345 bfd_vma insn[3], t0, t1, control_bits;
11346 const char *err;
11347 char *fixpos;
11348 long slot;
11349
11350 slot = fix->fx_where & 0x3;
11351 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11352
11353 /* Bundles are always in little-endian byte order. */
11354 t0 = bfd_getl64 (fixpos);
11355 t1 = bfd_getl64 (fixpos + 8);
11356 control_bits = t0 & 0x1f;
11357 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11358 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11359 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
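/* A bundle is 128 bits: a 5-bit template followed by three 41-bit
   instruction slots. With t0 = bytes 0-7 and t1 = bytes 8-15, slot 0
   occupies t0 bits 5-45, slot 1 straddles the halves (t0 bits 46-63
   plus t1 bits 0-22), and slot 2 occupies t1 bits 23-63. */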
11360
11361 err = NULL;
11362 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11363 {
11364 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11365 insn[2] |= (((value & 0x7f) << 13)
11366 | (((value >> 7) & 0x1ff) << 27)
11367 | (((value >> 16) & 0x1f) << 22)
11368 | (((value >> 21) & 0x1) << 21)
11369 | (((value >> 63) & 0x1) << 36));
11370 }
11371 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11372 {
11373 if (value & ~0x3fffffffffffffffULL)
11374 err = _("integer operand out of range");
11375 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11376 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11377 }
11378 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11379 {
11380 value >>= 4;
11381 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11382 insn[2] |= ((((value >> 59) & 0x1) << 36)
11383 | (((value >> 0) & 0xfffff) << 13));
11384 }
11385 else
11386 err = (*odesc->insert) (odesc, value, insn + slot);
11387
11388 if (err)
11389 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11390
11391 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11392 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11393 number_to_chars_littleendian (fixpos + 0, t0, 8);
11394 number_to_chars_littleendian (fixpos + 8, t1, 8);
11395 }
11396
11397 /* Attempt to simplify or even eliminate a fixup. The return value is
11398 ignored; perhaps it was once meaningful, but now it is historical.
11399 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11400
11401 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11402 (if possible). */
11403
11404 void
11405 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11406 {
11407 char *fixpos;
11408 valueT value = *valP;
11409
11410 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11411
11412 if (fix->fx_pcrel)
11413 {
11414 switch (fix->fx_r_type)
11415 {
11416 case BFD_RELOC_IA64_PCREL21B: break;
11417 case BFD_RELOC_IA64_PCREL21BI: break;
11418 case BFD_RELOC_IA64_PCREL21F: break;
11419 case BFD_RELOC_IA64_PCREL21M: break;
11420 case BFD_RELOC_IA64_PCREL60B: break;
11421 case BFD_RELOC_IA64_PCREL22: break;
11422 case BFD_RELOC_IA64_PCREL64I: break;
11423 case BFD_RELOC_IA64_PCREL32MSB: break;
11424 case BFD_RELOC_IA64_PCREL32LSB: break;
11425 case BFD_RELOC_IA64_PCREL64MSB: break;
11426 case BFD_RELOC_IA64_PCREL64LSB: break;
11427 default:
11428 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11429 fix->fx_r_type);
11430 break;
11431 }
11432 }
11433 if (fix->fx_addsy)
11434 {
11435 switch ((unsigned) fix->fx_r_type)
11436 {
11437 case BFD_RELOC_UNUSED:
11438 /* This must be a TAG13 or TAG13b operand. There are no external
11439 relocs defined for them, so we must give an error. */
11440 as_bad_where (fix->fx_file, fix->fx_line,
11441 _("%s must have a constant value"),
11442 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11443 fix->fx_done = 1;
11444 return;
11445
11446 case BFD_RELOC_IA64_TPREL14:
11447 case BFD_RELOC_IA64_TPREL22:
11448 case BFD_RELOC_IA64_TPREL64I:
11449 case BFD_RELOC_IA64_LTOFF_TPREL22:
11450 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11451 case BFD_RELOC_IA64_DTPREL14:
11452 case BFD_RELOC_IA64_DTPREL22:
11453 case BFD_RELOC_IA64_DTPREL64I:
11454 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11455 S_SET_THREAD_LOCAL (fix->fx_addsy);
11456 break;
11457
11458 #ifdef TE_VMS
11459 case DUMMY_RELOC_IA64_SLOTCOUNT:
11460 as_bad_where (fix->fx_file, fix->fx_line,
11461 _("cannot resolve @slotcount parameter"));
11462 fix->fx_done = 1;
11463 return;
11464 #endif
11465
11466 default:
11467 break;
11468 }
11469 }
11470 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11471 {
11472 #ifdef TE_VMS
11473 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11474 {
11475 /* For @slotcount, convert an address difference into a slot
11476 difference. */
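/* There are three slots per 16-byte bundle. A low nibble of 0, 1 or 2
   means the slot numbers differ by that much in the same direction;
   0x0e and 0x0f appear when the destination slot number is smaller than
   the source's, i.e. the slot difference borrowed from the bundle part.
   E.g. from slot 1 of one bundle to slot 0 of the bundle three bundles
   later, the byte difference is 0x2f, giving (0x2 * 3) + 2 = 8 slots. */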
11477 valueT v;
11478
11479 v = (value >> 4) * 3;
11480 switch (value & 0x0f)
11481 {
11482 case 0:
11483 case 1:
11484 case 2:
11485 v += value & 0x0f;
11486 break;
11487 case 0x0f:
11488 v += 2;
11489 break;
11490 case 0x0e:
11491 v += 1;
11492 break;
11493 default:
11494 as_bad (_("invalid @slotcount value"));
11495 }
11496 value = v;
11497 }
11498 #endif
11499
11500 if (fix->tc_fix_data.bigendian)
11501 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11502 else
11503 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11504 fix->fx_done = 1;
11505 }
11506 else
11507 {
11508 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11509 fix->fx_done = 1;
11510 }
11511 }
11512
11513 /* Generate the BFD reloc to be stuck in the object file from the
11514 fixup used internally in the assembler. */
11515
11516 arelent *
11517 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11518 {
11519 arelent *reloc;
11520
11521 reloc = XNEW (arelent);
11522 reloc->sym_ptr_ptr = XNEW (asymbol *);
11523 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11524 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11525 reloc->addend = fixp->fx_offset;
11526 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11527
11528 if (!reloc->howto)
11529 {
11530 as_bad_where (fixp->fx_file, fixp->fx_line,
11531 _("Cannot represent %s relocation in object file"),
11532 bfd_get_reloc_code_name (fixp->fx_r_type));
11533 free (reloc);
11534 return NULL;
11535 }
11536 return reloc;
11537 }
11538
11539 /* Turn a string in input_line_pointer into a floating point constant
11540 of type TYPE, and store the appropriate bytes in *LIT. The number
11541 of LITTLENUMS emitted is stored in *SIZE. An error message is
11542 returned, or NULL on OK. */
11543
11544 const char *
11545 md_atof (int type, char *lit, int *size)
11546 {
11547 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11548 char *t;
11549 int prec;
11550
11551 switch (type)
11552 {
11553 /* IEEE floats */
11554 case 'f':
11555 case 'F':
11556 case 's':
11557 case 'S':
11558 prec = 2;
11559 break;
11560
11561 case 'd':
11562 case 'D':
11563 case 'r':
11564 case 'R':
11565 prec = 4;
11566 break;
11567
11568 case 'x':
11569 case 'X':
11570 case 'p':
11571 case 'P':
11572 prec = 5;
11573 break;
11574
11575 default:
11576 *size = 0;
11577 return _("Unrecognized or unsupported floating point constant");
11578 }
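/* PREC counts LITTLENUMs (2 bytes each): 2 gives a 4-byte single,
   4 an 8-byte double, and 5 a 10-byte extended value; 'X' below pads
   the 10-byte value out to 16 bytes. */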
11579 t = atof_ieee (input_line_pointer, type, words);
11580 if (t)
11581 input_line_pointer = t;
11582
11583 (*ia64_float_to_chars) (lit, words, prec);
11584
11585 if (type == 'X')
11586 {
11587 /* It is a 10-byte floating point value with 6 bytes of padding. */
11588 memset (&lit [10], 0, 6);
11589 *size = 8 * sizeof (LITTLENUM_TYPE);
11590 }
11591 else
11592 *size = prec * sizeof (LITTLENUM_TYPE);
11593
11594 return NULL;
11595 }
11596
11597 /* Handle ia64 specific semantics of the align directive. */
11598
11599 void
11600 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11601 const char *fill ATTRIBUTE_UNUSED,
11602 int len ATTRIBUTE_UNUSED,
11603 int max ATTRIBUTE_UNUSED)
11604 {
11605 if (subseg_text_p (now_seg))
11606 ia64_flush_insns ();
11607 }
11608
11609 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11610 of an rs_align_code fragment. */
11611
11612 void
11613 ia64_handle_align (fragS *fragp)
11614 {
11615 int bytes;
11616 char *p;
11617 const unsigned char *nop_type;
11618
11619 if (fragp->fr_type != rs_align_code)
11620 return;
11621
11622 /* Check if this frag has to end with a stop bit. */
11623 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11624
11625 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11626 p = fragp->fr_literal + fragp->fr_fix;
11627
11628 /* If no padding is needed, check whether we need a stop bit. */
11629 if (!bytes && fragp->tc_frag_data)
11630 {
11631 if (fragp->fr_fix < 16)
11632 #if 1
11633 /* FIXME: It won't work with
11634 .align 16
11635 alloc r32=ar.pfs,1,2,4,0
11636 */
11637 ;
11638 #else
11639 as_bad_where (fragp->fr_file, fragp->fr_line,
11640 _("Can't add stop bit to mark end of instruction group"));
11641 #endif
11642 else
11643 /* Bundles are always in little-endian byte order. Make sure
11644 the previous bundle has the stop bit. */
11645 *(p - 16) |= 1;
11646 }
11647
11648 /* Make sure we are on a 16-byte boundary, in case someone has been
11649 putting data into a text section. */
11650 if (bytes & 15)
11651 {
11652 int fix = bytes & 15;
11653 memset (p, 0, fix);
11654 p += fix;
11655 bytes -= fix;
11656 fragp->fr_fix += fix;
11657 }
11658
11659 /* Instruction bundles are always little-endian. */
11660 memcpy (p, nop_type, 16);
11661 fragp->fr_var = 16;
11662 }
11663
11664 static void
11665 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11666 int prec)
11667 {
11668 while (prec--)
11669 {
11670 number_to_chars_bigendian (lit, (long) (*words++),
11671 sizeof (LITTLENUM_TYPE));
11672 lit += sizeof (LITTLENUM_TYPE);
11673 }
11674 }
11675
11676 static void
11677 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11678 int prec)
11679 {
11680 while (prec--)
11681 {
11682 number_to_chars_littleendian (lit, (long) (words[prec]),
11683 sizeof (LITTLENUM_TYPE));
11684 lit += sizeof (LITTLENUM_TYPE);
11685 }
11686 }
11687
11688 void
11689 ia64_elf_section_change_hook (void)
11690 {
11691 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11692 && elf_linked_to_section (now_seg) == NULL)
11693 elf_linked_to_section (now_seg) = text_section;
11694 dot_byteorder (-1);
11695 }
11696
11697 /* Check if a label should be made global. */
11698 void
11699 ia64_check_label (symbolS *label)
11700 {
11701 if (*input_line_pointer == ':')
11702 {
11703 S_SET_EXTERNAL (label);
11704 input_line_pointer++;
11705 }
11706 }
11707
11708 /* Used to remember where .alias and .secalias directives are seen. We
11709 will rename symbol and section names when we are about to output
11710 the relocatable file. */
11711 struct alias
11712 {
11713 const char *file; /* The file where the directive is seen. */
11714 unsigned int line; /* The line number the directive is at. */
11715 const char *name; /* The original name of the symbol. */
11716 };
11717
11718 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11719 .secalias. Otherwise, it is .alias. */
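/* Syntax (names are illustrative):

   .alias foo, "FOO$ENTRY"
   .secalias .my_section, "MY$SECTION"

   The first operand is the assembler-level symbol or section name, the
   second a quoted string giving the name to emit in the object file. */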
11720 static void
11721 dot_alias (int section)
11722 {
11723 char *name, *alias;
11724 char delim;
11725 char *end_name;
11726 int len;
11727 struct alias *h;
11728 const char *a;
11729 htab_t ahash, nhash;
11730 const char *kind;
11731
11732 delim = get_symbol_name (&name);
11733 end_name = input_line_pointer;
11734 *end_name = delim;
11735
11736 if (name == end_name)
11737 {
11738 as_bad (_("expected symbol name"));
11739 ignore_rest_of_line ();
11740 return;
11741 }
11742
11743 SKIP_WHITESPACE_AFTER_NAME ();
11744
11745 if (*input_line_pointer != ',')
11746 {
11747 *end_name = 0;
11748 as_bad (_("expected comma after \"%s\""), name);
11749 *end_name = delim;
11750 ignore_rest_of_line ();
11751 return;
11752 }
11753
11754 input_line_pointer++;
11755 *end_name = 0;
11756 ia64_canonicalize_symbol_name (name);
11757
11758 /* We call demand_copy_C_string to check if the alias string is valid.
11759 There should be a closing `"' and no `\0' in the string. */
11760 alias = demand_copy_C_string (&len);
11761 if (alias == NULL)
11762 {
11763 ignore_rest_of_line ();
11764 return;
11765 }
11766
11767 /* Make a copy of name string. */
11768 name = notes_strdup (name);
11769
11770 if (section)
11771 {
11772 kind = "section";
11773 ahash = secalias_hash;
11774 nhash = secalias_name_hash;
11775 }
11776 else
11777 {
11778 kind = "symbol";
11779 ahash = alias_hash;
11780 nhash = alias_name_hash;
11781 }
11782
11783 /* Check if alias has been used before. */
11784
11785 h = (struct alias *) str_hash_find (ahash, alias);
11786 if (h)
11787 {
11788 if (strcmp (h->name, name))
11789 as_bad (_("`%s' is already the alias of %s `%s'"),
11790 alias, kind, h->name);
11791 notes_free (alias);
11792 goto out;
11793 }
11794
11795 /* Check if name already has an alias. */
11796 a = (const char *) str_hash_find (nhash, name);
11797 if (a)
11798 {
11799 if (strcmp (a, alias))
11800 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11801 notes_free (alias);
11802 goto out;
11803 }
11804
11805 h = notes_alloc (sizeof (*h));
11806 h->file = as_where (&h->line);
11807 h->name = name;
11808
11809 str_hash_insert (ahash, alias, h, 0);
11810 str_hash_insert (nhash, name, alias, 0);
11811
11812 out:
11813 demand_empty_rest_of_line ();
11814 }
11815
11816 /* Rename the original symbol to its alias. */
11817 static int
11818 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11819 {
11820 string_tuple_t *tuple = *((string_tuple_t **) slot);
11821 struct alias *h = (struct alias *) tuple->value;
11822 symbolS *sym = symbol_find (h->name);
11823
11824 if (sym == NULL)
11825 {
11826 #ifdef TE_VMS
11827 /* VMS uses .alias extensively to alias CRTL functions to the same
11828 names with a decc$ prefix. Sometimes a function gets optimized away
11829 and a warning results, which should be suppressed. */
11830 if (!startswith (tuple->key, "decc$"))
11831 #endif
11832 as_warn_where (h->file, h->line,
11833 _("symbol `%s' aliased to `%s' is not used"),
11834 h->name, tuple->key);
11835 }
11836 else
11837 S_SET_NAME (sym, (char *) tuple->key);
11838
11839 return 1;
11840 }
11841
11842 /* Called from write_object_file. */
11843 void
11844 ia64_adjust_symtab (void)
11845 {
11846 htab_traverse_noresize (alias_hash, do_alias, NULL);
11847 }
11848
11849 /* Rename the original section to its alias. */
11850 static int
11851 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11852 {
11853 string_tuple_t *tuple = *((string_tuple_t **) slot);
11854 struct alias *h = (struct alias *) tuple->value;
11855 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11856
11857 if (sec == NULL)
11858 as_warn_where (h->file, h->line,
11859 _("section `%s' aliased to `%s' is not used"),
11860 h->name, tuple->key);
11861 else
11862 sec->name = tuple->key;
11863
11864 return 1;
11865 }
11866
11867 /* Called from write_object_file. */
11868 void
11869 ia64_frob_file (void)
11870 {
11871 htab_traverse_noresize (secalias_hash, do_secalias, NULL);
11872 }
11873
11874 #ifdef TE_VMS
11875 #define NT_VMS_MHD 1
11876 #define NT_VMS_LNM 2
11877
11878 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11879 .note section. */
11880
11881 /* Manufacture a VMS-like time string. */
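/* E.g. a ctime() result of "Wed Jun 30 21:49:08 1993\n" is rewritten
   to "30-Jun-1993 21:49". */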
11882 static void
11883 get_vms_time (char *Now)
11884 {
11885 char *pnt;
11886 time_t timeb;
11887
11888 time (&timeb);
11889 pnt = ctime (&timeb);
11890 pnt[3] = 0;
11891 pnt[7] = 0;
11892 pnt[10] = 0;
11893 pnt[16] = 0;
11894 pnt[24] = 0;
11895 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11896 }
11897
11898 void
11899 ia64_vms_note (void)
11900 {
11901 char *p;
11902 asection *seg = now_seg;
11903 subsegT subseg = now_subseg;
11904 asection *secp = NULL;
11905 char *bname;
11906 char buf [256];
11907 symbolS *sym;
11908
11909 /* Create the .note section. */
11910
11911 secp = subseg_new (".note", 0);
11912 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11913
11914 /* Module header note (MHD). */
11915 bname = xstrdup (lbasename (out_file_name));
11916 if ((p = strrchr (bname, '.')))
11917 *p = '\0';
11918
11919 /* VMS note header is 24 bytes long. */
11920 p = frag_more (8 + 8 + 8);
11921 number_to_chars_littleendian (p + 0, 8, 8);
11922 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11923 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11924
11925 p = frag_more (8);
11926 strcpy (p, "IPF/VMS");
11927
11928 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11929 get_vms_time (p);
11930 strcpy (p + 17, "24-FEB-2005 15:00");
11931 p += 17 + 17;
11932 strcpy (p, bname);
11933 p += strlen (bname) + 1;
11934 free (bname);
11935 strcpy (p, "V1.0");
11936
11937 frag_align (3, 0, 0);
11938
11939 /* Language processor name note. */
11940 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11941 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11942
11943 p = frag_more (8 + 8 + 8);
11944 number_to_chars_littleendian (p + 0, 8, 8);
11945 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11946 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11947
11948 p = frag_more (8);
11949 strcpy (p, "IPF/VMS");
11950
11951 p = frag_more (strlen (buf) + 1);
11952 strcpy (p, buf);
11953
11954 frag_align (3, 0, 0);
11955
11956 secp = subseg_new (".vms_display_name_info", 0);
11957 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11958
11959 /* This symbol should be passed on the command line and vary
11960 according to the language. */
11961 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11962 absolute_section, &zero_address_frag, 0);
11963 symbol_table_insert (sym);
11964 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11965
11966 p = frag_more (4);
11967 /* Format 3 of VMS demangler Spec. */
11968 number_to_chars_littleendian (p, 3, 4);
11969
11970 p = frag_more (4);
11971 /* Placeholder for the symbol table index of the above symbol. */
11972 number_to_chars_littleendian (p, -1, 4);
11973
11974 frag_align (3, 0, 0);
11975
11976 /* We probably can't restore the current segment, for there likely
11977 isn't one yet... */
11978 if (seg && subseg)
11979 subseg_set (seg, subseg);
11980 }
11981
11982 #endif /* TE_VMS */