1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
 35        (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
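/* md.slot[] acts as a ring buffer of NUM_SLOTS entries; PREV_SLOT adds
   NUM_SLOTS - 1 before taking the modulus so the index stays non-negative
   when md.curr_slot is 0.  */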
68
69 #define O_pseudo_fixup (O_max + 1)
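/* Extra expression operator value (one past the generic O_max) used to tag
   expressions that must be resolved through one of the @-pseudo-function
   relocations declared in pseudo_func[] further down.  */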
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
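/* The register classes are laid out in one flat number space, so a single
   value encodes both the class and the index within it; e.g. the value for
   floating-point register f5 is REG_FR + 5.  */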
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
236
 237   /* If X_op is != O_absent, the register name for the instruction's
 238      qualifying predicate.  If absent, p0 is assumed for instructions
 239      that can be predicated.  */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
 275      track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
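/* The template field lives in the low bits of the first byte: 0x0c selects
   MFI, 0x0d the same template with a trailing stop bit.  As noted above,
   these are rewritten to the MMI templates (0x08/0x09) when not tuning for
   Itanium 1.  */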
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
 548    start with an at sign (@)).  */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
 647    habit of setting temporary sentinels).  */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
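/* gr_values[0] describes r0, which always reads as zero, hence the static
   initializer below marking it as known with value 0.  */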
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
 796 /* The input value is a negated offset from psp, i.e. it specifies the address
 797    psp - offset.  The encoded value v denotes the address psp + 16 - 4 * v, so
 798    we must add 16 and divide by 4 to get the encoded value.  */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
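/* For example, ENCODED_PSP_OFFSET (32) is (32 + 16) / 4 = 12, which per the
   note above denotes the address psp + 16 - 4 * 12 == psp - 32.  */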
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
 823 /* Determine if application register REG resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
 832 /* Determine if application register REG resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
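/* The stacked registers start at r32: the first INS are the inputs, followed
   by LOCS locals and OUTS outputs.  E.g. ins = 2, locs = 3 gives
   in.base = r32, loc.base = r34 and out.base = r37.  */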
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
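/* Compute the alignment as the number of trailing zero bits in NBYTES,
   i.e. the largest power of two dividing it.  */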
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (stdoutput, now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_section_size (stdoutput, now_seg) = size;
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
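/* Region lengths that do not fit in R1's 5-bit length field are emitted in
   the long (R3) format instead.  */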
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586     as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711        /* Don't free the first discarded record: it's being used as the
1712           terminator for (currently) br_gr and gr_gr processing, and keeping
1713           it also avoids leaving a dangling pointer to it in its
1714           predecessor.  */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = xmalloc (sizeof (*ptr));
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
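/* Split the mask into a chain of single-bit records: each iteration isolates
   the least significant set bit, leaves it in the current record and moves
   the remaining bits into a freshly allocated one.  E.g. a mask of 0x28
   yields a record with frmask 0x08 chained to a record with frmask 0x20.  */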
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a unw_rec_list process the correct format with the
2393 specified function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record_type_not_valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a unw_rec_list list, process all the records with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
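/* For illustration: the table-driven fallback above computes
   popcount (0x35) as popcnt[0x35 % 16] + popcount (0x35 / 16)
   = popcnt[0x5] + popcnt[0x3] = 2 + 2 = 4, matching the four bits
   set in 0x35.  */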
2626
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
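/* For illustration: saving three registers starting at slot T = 6
   sets the 2-bit field at bit position 2 of imask[2] (i = 6/4 + 1 = 2,
   pos = 2 * (3 - 6%4) = 2), then the field at position 0 of the same
   byte, and then wraps to bit position 6 of imask[3] -- four fields
   per byte, most significant field first.  */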
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = xmalloc (imask_size);
2654 memset (imask, 0, imask_size);
2655
2656 region->r.record.r.imask_size = imask_size;
2657 region->r.record.r.mask.i = imask;
2658 }
2659
2660 i = (t / 4) + 1;
2661 pos = 2 * (3 - t % 4);
2662 while (regmask)
2663 {
2664 if (i >= imask_size)
2665 {
2666 as_bad (_("Ignoring attempt to spill beyond end of region"));
2667 return;
2668 }
2669
2670 imask[i] |= (type & 0x3) << pos;
2671
2672 regmask &= (regmask - 1);
2673 pos -= 2;
2674 if (pos < 0)
2675 {
2676 pos = 6;
2677 ++i;
2678 }
2679 }
2680 }
2681
2682 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2683 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2684 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2685 for frag sizes. */
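/* For illustration: slot addresses are the frag literal address plus
   16 bytes per bundle plus the slot number (0..2) in the low bits.
   Within a single frag, going from slot 1 of one bundle to slot 0 of
   the bundle two ahead therefore contributes 3*2 + (0 - 1) = 5 slots.  */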
2686
2687 static unsigned long
2688 slot_index (unsigned long slot_addr,
2689 fragS *slot_frag,
2690 unsigned long first_addr,
2691 fragS *first_frag,
2692 int before_relax)
2693 {
2694 unsigned long s_index = 0;
2695
2696 /* First time we are called, the initial address and frag are invalid. */
2697 if (first_addr == 0)
2698 return 0;
2699
2700 /* If the two addresses are in different frags, then we need to add in
2701 the remaining size of this frag, and then the entire size of intermediate
2702 frags. */
2703 while (slot_frag != first_frag)
2704 {
2705 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2706
2707 if (! before_relax)
2708 {
2709 /* We can get the final addresses only during and after
2710 relaxation. */
2711 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2712 s_index += 3 * ((first_frag->fr_next->fr_address
2713 - first_frag->fr_address
2714 - first_frag->fr_fix) >> 4);
2715 }
2716 else
2717 /* We don't know what the final addresses will be. We try our
2718 best to estimate. */
2719 switch (first_frag->fr_type)
2720 {
2721 default:
2722 break;
2723
2724 case rs_space:
2725 as_fatal (_("Only constant space allocation is supported"));
2726 break;
2727
2728 case rs_align:
2729 case rs_align_code:
2730 case rs_align_test:
2731 /* Take alignment into account. Assume the worst case
2732 before relaxation. */
2733 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2734 break;
2735
2736 case rs_org:
2737 if (first_frag->fr_symbol)
2738 {
2739 as_fatal (_("Only constant offsets are supported"));
2740 break;
2741 }
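/* Fall through.  */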
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790 unresolved fields (e.g., length counts for a prologue). After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
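/* The 8-byte header word built below is therefore
   (1 << 48) | flags | length-in-pointer-size-units; e.g. for LP64
   with a personality routine and a 24-byte descriptor area it comes
   out as 0x0001000300000003.  */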
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
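/* Convert the register expression E to the (AB, REG) pair used by
   spill records: AB = 0 for the preserved GRs r4-r7, 1 for the
   preserved FRs f2-f5 and f16-f31, 2 for the preserved BRs b1-b5, and
   3 for the special registers (pr, psp, @priunat, rp, ar.bsp, etc.).
   PO and N identify the directive and operand for diagnostics.  */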
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
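/* Convert the register expression E to the (XY, REG) pair naming the
   target of a register-to-register spill: XY = 0 for general, 1 for
   floating-point, and 2 for branch registers.  PO and N identify the
   directive and operand for diagnostics.  */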
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
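/* Append PTR (which may be a chain of records) to the current
   procedure's unwind record list.  SEP is the separator returned by
   the operand parser: ',' means an optional tag still follows, and
   NOT_A_CHAR suppresses the end-of-line check.  */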
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3264
3265 /* The current entry can in fact be a chain of unwind entries. */
3266 if (unwind.current_entry == NULL)
3267 unwind.current_entry = ptr;
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
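/* Handle .fframe, which declares a fixed-size memory stack frame,
   e.g. ".fframe 32".  The operand must be a constant.  */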
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
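/* Handle .save, which records that the preserved resource named by the
   first operand (rp, pr, ar.pfs, ar.unat, ar.lc, etc.) has been copied
   to the general register given by the second operand, for example
   ".save ar.pfs, r35".  */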
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
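/* Handle .restore, e.g. ".restore sp" or ".restore sp, 1"; the
   optional second operand gives the number of additional nested
   prologue regions whose state is popped along with the current
   one.  */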
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527 text sect. unwind table sect.
3528 name: name: comments:
3529 ---------- ----------------- --------------------------------
3530 .text .IA_64.unwind
3531 .text.foo .IA_64.unwind.text.foo
3532 .foo .IA_64.unwind.foo
3533 .gnu.linkonce.t.foo
3534 .gnu.linkonce.ia64unw.foo
3535 _info .IA_64.unwind_info gas issues error message (ditto)
3536 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (strncmp (text_name, "_info", 5) == 0)
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (strncmp (text_name, ".gnu.linkonce.t.",
3577 sizeof (".gnu.linkonce.t.") - 1) == 0)
3578 {
3579 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3580 suffix += sizeof (".gnu.linkonce.t.") - 1;
3581 }
3582
3583 sec_name = concat (prefix, suffix, NULL);
3584
3585 /* Handle COMDAT group. */
3586 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3587 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3588 {
3589 char *section;
3590 const char *group_name = elf_group_name (text_seg);
3591
3592 if (group_name == NULL)
3593 {
3594 as_bad (_("Group section `%s' has no group signature"),
3595 sec_text_name);
3596 ignore_rest_of_line ();
3597 free (sec_name);
3598 return;
3599 }
3600
3601 /* We have to construct a fake section directive. */
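/* For instance, when emitting the unwind-info section for a text
   section ".gnu.linkonce.t.foo" whose group signature is "foo", the
   constructed directive operand would be roughly
   .gnu.linkonce.ia64unwi.foo,"aG",@progbits,foo,comdat  */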
3602 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3603 set_section (section);
3604 free (section);
3605 }
3606 else
3607 {
3608 set_section (sec_name);
3609 bfd_set_section_flags (stdoutput, now_seg,
3610 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3611 }
3612
3613 elf_linked_to_section (now_seg) = text_seg;
3614 free (sec_name);
3615 }
3616
3617 static void
3618 generate_unwind_image (const segT text_seg)
3619 {
3620 int size, pad;
3621 unw_rec_list *list;
3622
3623 /* Mark the end of the unwind info, so that we can compute the size of the
3624 last unwind region. */
3625 add_unwind_entry (output_endp (), NOT_A_CHAR);
3626
3627 /* Force out pending instructions, to make sure all unwind records have
3628 a valid slot_number field. */
3629 ia64_flush_insns ();
3630
3631 /* Generate the unwind record. */
3632 list = optimize_unw_records (unwind.list);
3633 fixup_unw_records (list, 1);
3634 size = calc_record_size (list);
3635
3636 if (size > 0 || unwind.force_unwind_entry)
3637 {
3638 unwind.force_unwind_entry = 0;
3639 /* pad to pointer-size boundary. */
3640 pad = size % md.pointer_size;
3641 if (pad != 0)
3642 size += md.pointer_size - pad;
3643 /* Add 8 for the header. */
3644 size += 8;
3645 /* Add a pointer for the personality offset. */
3646 if (unwind.personality_routine)
3647 size += md.pointer_size;
3648 }
3649
3650 /* If there are unwind records, switch sections, and output the info. */
3651 if (size != 0)
3652 {
3653 expressionS exp;
3654 bfd_reloc_code_real_type reloc;
3655
3656 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3657
3658 /* Make sure the section has 4 byte alignment for ILP32 and
3659 8 byte alignment for LP64. */
3660 frag_align (md.pointer_size_shift, 0, 0);
3661 record_alignment (now_seg, md.pointer_size_shift);
3662
3663 /* Set expression which points to start of unwind descriptor area. */
3664 unwind.info = expr_build_dot ();
3665
3666 frag_var (rs_machine_dependent, size, size, 0, 0,
3667 (offsetT) (long) unwind.personality_routine,
3668 (char *) list);
3669
3670 /* Add the personality address to the image. */
3671 if (unwind.personality_routine != 0)
3672 {
3673 exp.X_op = O_symbol;
3674 exp.X_add_symbol = unwind.personality_routine;
3675 exp.X_add_number = 0;
3676
3677 if (md.flags & EF_IA_64_BE)
3678 {
3679 if (md.flags & EF_IA_64_ABI64)
3680 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3681 else
3682 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3683 }
3684 else
3685 {
3686 if (md.flags & EF_IA_64_ABI64)
3687 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3688 else
3689 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3690 }
3691
3692 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3693 md.pointer_size, &exp, 0, reloc);
3694 unwind.personality_routine = 0;
3695 }
3696 }
3697
3698 free_saved_prologue_counts ();
3699 unwind.list = unwind.tail = unwind.current_entry = NULL;
3700 }
3701
3702 static void
3703 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3704 {
3705 if (!in_procedure ("handlerdata"))
3706 return;
3707 unwind.force_unwind_entry = 1;
3708
3709 /* Remember which segment we're in so we can switch back after .endp */
3710 unwind.saved_text_seg = now_seg;
3711 unwind.saved_text_subseg = now_subseg;
3712
3713 /* Generate unwind info into unwind-info section and then leave that
3714 section as the currently active one so dataXX directives go into
3715 the language specific data area of the unwind info block. */
3716 generate_unwind_image (now_seg);
3717 demand_empty_rest_of_line ();
3718 }
3719
3720 static void
3721 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3722 {
3723 if (!in_procedure ("unwentry"))
3724 return;
3725 unwind.force_unwind_entry = 1;
3726 demand_empty_rest_of_line ();
3727 }
3728
3729 static void
3730 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3731 {
3732 expressionS e;
3733 unsigned reg;
3734
3735 if (!in_prologue ("altrp"))
3736 return;
3737
3738 parse_operand_and_eval (&e, 0);
3739 reg = e.X_add_number - REG_BR;
3740 if (e.X_op != O_register || reg > 7)
3741 {
3742 as_bad (_("First operand to .altrp not a valid branch register"));
3743 reg = 0;
3744 }
3745 add_unwind_entry (output_rp_br (reg), 0);
3746 }
3747
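/* Handle .savesp and .savepsp, which record that the named preserved
   resource has been saved to memory at a constant sp-relative or
   psp-relative offset, e.g. ".savesp ar.unat, 16".  */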
3748 static void
3749 dot_savemem (int psprel)
3750 {
3751 expressionS e1, e2;
3752 int sep;
3753 int reg1, val;
3754 const char * const po = psprel ? "savepsp" : "savesp";
3755
3756 if (!in_prologue (po))
3757 return;
3758
3759 sep = parse_operand_and_eval (&e1, ',');
3760 if (sep == ',')
3761 sep = parse_operand_and_eval (&e2, ',');
3762 else
3763 e2.X_op = O_absent;
3764
3765 reg1 = e1.X_add_number;
3766 val = e2.X_add_number;
3767
3768 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3769 if (e1.X_op != O_register)
3770 {
3771 as_bad (_("First operand to .%s not a register"), po);
3772 reg1 = REG_PR; /* Anything valid is good here. */
3773 }
3774 if (e2.X_op != O_constant)
3775 {
3776 as_bad (_("Second operand to .%s not a constant"), po);
3777 val = 0;
3778 }
3779
3780 switch (reg1)
3781 {
3782 case REG_AR + AR_BSP:
3783 add_unwind_entry (output_bsp_when (), sep);
3784 add_unwind_entry ((psprel
3785 ? output_bsp_psprel
3786 : output_bsp_sprel) (val), NOT_A_CHAR);
3787 break;
3788 case REG_AR + AR_BSPSTORE:
3789 add_unwind_entry (output_bspstore_when (), sep);
3790 add_unwind_entry ((psprel
3791 ? output_bspstore_psprel
3792 : output_bspstore_sprel) (val), NOT_A_CHAR);
3793 break;
3794 case REG_AR + AR_RNAT:
3795 add_unwind_entry (output_rnat_when (), sep);
3796 add_unwind_entry ((psprel
3797 ? output_rnat_psprel
3798 : output_rnat_sprel) (val), NOT_A_CHAR);
3799 break;
3800 case REG_AR + AR_UNAT:
3801 add_unwind_entry (output_unat_when (), sep);
3802 add_unwind_entry ((psprel
3803 ? output_unat_psprel
3804 : output_unat_sprel) (val), NOT_A_CHAR);
3805 break;
3806 case REG_AR + AR_FPSR:
3807 add_unwind_entry (output_fpsr_when (), sep);
3808 add_unwind_entry ((psprel
3809 ? output_fpsr_psprel
3810 : output_fpsr_sprel) (val), NOT_A_CHAR);
3811 break;
3812 case REG_AR + AR_PFS:
3813 add_unwind_entry (output_pfs_when (), sep);
3814 add_unwind_entry ((psprel
3815 ? output_pfs_psprel
3816 : output_pfs_sprel) (val), NOT_A_CHAR);
3817 break;
3818 case REG_AR + AR_LC:
3819 add_unwind_entry (output_lc_when (), sep);
3820 add_unwind_entry ((psprel
3821 ? output_lc_psprel
3822 : output_lc_sprel) (val), NOT_A_CHAR);
3823 break;
3824 case REG_BR:
3825 add_unwind_entry (output_rp_when (), sep);
3826 add_unwind_entry ((psprel
3827 ? output_rp_psprel
3828 : output_rp_sprel) (val), NOT_A_CHAR);
3829 break;
3830 case REG_PR:
3831 add_unwind_entry (output_preds_when (), sep);
3832 add_unwind_entry ((psprel
3833 ? output_preds_psprel
3834 : output_preds_sprel) (val), NOT_A_CHAR);
3835 break;
3836 case REG_PRIUNAT:
3837 add_unwind_entry (output_priunat_when_mem (), sep);
3838 add_unwind_entry ((psprel
3839 ? output_priunat_psprel
3840 : output_priunat_sprel) (val), NOT_A_CHAR);
3841 break;
3842 default:
3843 as_bad (_("First operand to .%s not a valid register"), po);
3844 add_unwind_entry (NULL, sep);
3845 break;
3846 }
3847 }
3848
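/* Handle .save.g, whose first operand is a 4-bit mask of the preserved
   general registers r4-r7 being saved; with no second operand they go
   to the memory spill area (e.g. ".save.g 0x3" for r4 and r5), while
   ".save.g 0x3, r36" names the first of the consecutive general
   registers receiving them.  */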
3849 static void
3850 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3851 {
3852 expressionS e;
3853 unsigned grmask;
3854 int sep;
3855
3856 if (!in_prologue ("save.g"))
3857 return;
3858
3859 sep = parse_operand_and_eval (&e, ',');
3860
3861 grmask = e.X_add_number;
3862 if (e.X_op != O_constant
3863 || e.X_add_number <= 0
3864 || e.X_add_number > 0xf)
3865 {
3866 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3867 grmask = 0;
3868 }
3869
3870 if (sep == ',')
3871 {
3872 unsigned reg;
3873 int n = popcount (grmask);
3874
3875 parse_operand_and_eval (&e, 0);
3876 reg = e.X_add_number - REG_GR;
3877 if (e.X_op != O_register || reg > 127)
3878 {
3879 as_bad (_("Second operand to .save.g must be a general register"));
3880 reg = 0;
3881 }
3882 else if (reg > 128U - n)
3883 {
3884 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3885 reg = 0;
3886 }
3887 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3888 }
3889 else
3890 add_unwind_entry (output_gr_mem (grmask), 0);
3891 }
3892
3893 static void
3894 dot_savef (int dummy ATTRIBUTE_UNUSED)
3895 {
3896 expressionS e;
3897
3898 if (!in_prologue ("save.f"))
3899 return;
3900
3901 parse_operand_and_eval (&e, 0);
3902
3903 if (e.X_op != O_constant
3904 || e.X_add_number <= 0
3905 || e.X_add_number > 0xfffff)
3906 {
3907 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3908 e.X_add_number = 0;
3909 }
3910 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3911 }
3912
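/* Handle .save.b, the branch-register analogue of .save.g: the first
   operand is a 5-bit mask of the preserved branch registers b1-b5,
   e.g. ".save.b 0x1" for b1, optionally followed by the first of the
   general registers receiving them.  */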
3913 static void
3914 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3915 {
3916 expressionS e;
3917 unsigned brmask;
3918 int sep;
3919
3920 if (!in_prologue ("save.b"))
3921 return;
3922
3923 sep = parse_operand_and_eval (&e, ',');
3924
3925 brmask = e.X_add_number;
3926 if (e.X_op != O_constant
3927 || e.X_add_number <= 0
3928 || e.X_add_number > 0x1f)
3929 {
3930 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3931 brmask = 0;
3932 }
3933
3934 if (sep == ',')
3935 {
3936 unsigned reg;
3937 int n = popcount (brmask);
3938
3939 parse_operand_and_eval (&e, 0);
3940 reg = e.X_add_number - REG_GR;
3941 if (e.X_op != O_register || reg > 127)
3942 {
3943 as_bad (_("Second operand to .save.b must be a general register"));
3944 reg = 0;
3945 }
3946 else if (reg > 128U - n)
3947 {
3948 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3949 reg = 0;
3950 }
3951 add_unwind_entry (output_br_gr (brmask, reg), 0);
3952 }
3953 else
3954 add_unwind_entry (output_br_mem (brmask), 0);
3955 }
3956
3957 static void
3958 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3959 {
3960 expressionS e1, e2;
3961
3962 if (!in_prologue ("save.gf"))
3963 return;
3964
3965 if (parse_operand_and_eval (&e1, ',') == ',')
3966 parse_operand_and_eval (&e2, 0);
3967 else
3968 e2.X_op = O_absent;
3969
3970 if (e1.X_op != O_constant
3971 || e1.X_add_number < 0
3972 || e1.X_add_number > 0xf)
3973 {
3974 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3975 e1.X_op = O_absent;
3976 e1.X_add_number = 0;
3977 }
3978 if (e2.X_op != O_constant
3979 || e2.X_add_number < 0
3980 || e2.X_add_number > 0xfffff)
3981 {
3982 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3983 e2.X_op = O_absent;
3984 e2.X_add_number = 0;
3985 }
3986 if (e1.X_op == O_constant
3987 && e2.X_op == O_constant
3988 && e1.X_add_number == 0
3989 && e2.X_add_number == 0)
3990 as_bad (_("Operands to .save.gf may not be both zero"));
3991
3992 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3993 }
3994
3995 static void
3996 dot_spill (int dummy ATTRIBUTE_UNUSED)
3997 {
3998 expressionS e;
3999
4000 if (!in_prologue ("spill"))
4001 return;
4002
4003 parse_operand_and_eval (&e, 0);
4004
4005 if (e.X_op != O_constant)
4006 {
4007 as_bad (_("Operand to .spill must be a constant"));
4008 e.X_add_number = 0;
4009 }
4010 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4011 }
4012
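/* Handle .spillreg (and the predicated .spillreg.p), which records
   that a preserved register is live in another register rather than
   in memory, e.g. ".spillreg b1, r40" or ".spillreg.p p6, b1, r40".  */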
4013 static void
4014 dot_spillreg (int pred)
4015 {
4016 int sep;
4017 unsigned int qp, ab, xy, reg, treg;
4018 expressionS e;
4019 const char * const po = pred ? "spillreg.p" : "spillreg";
4020
4021 if (!in_procedure (po))
4022 return;
4023
4024 if (pred)
4025 sep = parse_predicate_and_operand (&e, &qp, po);
4026 else
4027 {
4028 sep = parse_operand_and_eval (&e, ',');
4029 qp = 0;
4030 }
4031 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4032
4033 if (sep == ',')
4034 sep = parse_operand_and_eval (&e, ',');
4035 else
4036 e.X_op = O_absent;
4037 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4038
4039 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4040 }
4041
4042 static void
4043 dot_spillmem (int psprel)
4044 {
4045 expressionS e;
4046 int pred = (psprel < 0), sep;
4047 unsigned int qp, ab, reg;
4048 const char * po;
4049
4050 if (pred)
4051 {
4052 psprel = ~psprel;
4053 po = psprel ? "spillpsp.p" : "spillsp.p";
4054 }
4055 else
4056 po = psprel ? "spillpsp" : "spillsp";
4057
4058 if (!in_procedure (po))
4059 return;
4060
4061 if (pred)
4062 sep = parse_predicate_and_operand (&e, &qp, po);
4063 else
4064 {
4065 sep = parse_operand_and_eval (&e, ',');
4066 qp = 0;
4067 }
4068 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4069
4070 if (sep == ',')
4071 sep = parse_operand_and_eval (&e, ',');
4072 else
4073 e.X_op = O_absent;
4074 if (e.X_op != O_constant)
4075 {
4076 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4077 e.X_add_number = 0;
4078 }
4079
4080 if (psprel)
4081 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4082 else
4083 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4084 }
4085
4086 static unsigned int
4087 get_saved_prologue_count (unsigned long lbl)
4088 {
4089 label_prologue_count *lpc = unwind.saved_prologue_counts;
4090
4091 while (lpc != NULL && lpc->label_number != lbl)
4092 lpc = lpc->next;
4093
4094 if (lpc != NULL)
4095 return lpc->prologue_count;
4096
4097 as_bad (_("Missing .label_state %ld"), lbl);
4098 return 1;
4099 }
4100
4101 static void
4102 save_prologue_count (unsigned long lbl, unsigned int count)
4103 {
4104 label_prologue_count *lpc = unwind.saved_prologue_counts;
4105
4106 while (lpc != NULL && lpc->label_number != lbl)
4107 lpc = lpc->next;
4108
4109 if (lpc != NULL)
4110 lpc->prologue_count = count;
4111 else
4112 {
4113 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
4114
4115 new_lpc->next = unwind.saved_prologue_counts;
4116 new_lpc->label_number = lbl;
4117 new_lpc->prologue_count = count;
4118 unwind.saved_prologue_counts = new_lpc;
4119 }
4120 }
4121
4122 static void
4123 free_saved_prologue_counts (void)
4124 {
4125 label_prologue_count *lpc = unwind.saved_prologue_counts;
4126 label_prologue_count *next;
4127
4128 while (lpc != NULL)
4129 {
4130 next = lpc->next;
4131 free (lpc);
4132 lpc = next;
4133 }
4134
4135 unwind.saved_prologue_counts = NULL;
4136 }
4137
4138 static void
4139 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4140 {
4141 expressionS e;
4142
4143 if (!in_body ("label_state"))
4144 return;
4145
4146 parse_operand_and_eval (&e, 0);
4147 if (e.X_op == O_constant)
4148 save_prologue_count (e.X_add_number, unwind.prologue_count);
4149 else
4150 {
4151 as_bad (_("Operand to .label_state must be a constant"));
4152 e.X_add_number = 0;
4153 }
4154 add_unwind_entry (output_label_state (e.X_add_number), 0);
4155 }
4156
4157 static void
4158 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4159 {
4160 expressionS e;
4161
4162 if (!in_body ("copy_state"))
4163 return;
4164
4165 parse_operand_and_eval (&e, 0);
4166 if (e.X_op == O_constant)
4167 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4168 else
4169 {
4170 as_bad (_("Operand to .copy_state must be a constant"));
4171 e.X_add_number = 0;
4172 }
4173 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4174 }
4175
4176 static void
4177 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4178 {
4179 expressionS e1, e2;
4180 unsigned char sep;
4181
4182 if (!in_prologue ("unwabi"))
4183 return;
4184
4185 sep = parse_operand_and_eval (&e1, ',');
4186 if (sep == ',')
4187 parse_operand_and_eval (&e2, 0);
4188 else
4189 e2.X_op = O_absent;
4190
4191 if (e1.X_op != O_constant)
4192 {
4193 as_bad (_("First operand to .unwabi must be a constant"));
4194 e1.X_add_number = 0;
4195 }
4196
4197 if (e2.X_op != O_constant)
4198 {
4199 as_bad (_("Second operand to .unwabi must be a constant"));
4200 e2.X_add_number = 0;
4201 }
4202
4203 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4204 }
4205
4206 static void
4207 dot_personality (int dummy ATTRIBUTE_UNUSED)
4208 {
4209 char *name, *p, c;
4210
4211 if (!in_procedure ("personality"))
4212 return;
4213 SKIP_WHITESPACE ();
4214 c = get_symbol_name (&name);
4215 p = input_line_pointer;
4216 unwind.personality_routine = symbol_find_or_make (name);
4217 unwind.force_unwind_entry = 1;
4218 *p = c;
4219 SKIP_WHITESPACE_AFTER_NAME ();
4220 demand_empty_rest_of_line ();
4221 }
4222
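/* Handle .proc, which opens an unwind region for one or more entry
   points, e.g. ".proc foo" or ".proc foo, foo_alt"; each name is
   marked as a function symbol and a matching .endp is expected.  */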
4223 static void
4224 dot_proc (int dummy ATTRIBUTE_UNUSED)
4225 {
4226 char *name, *p, c;
4227 symbolS *sym;
4228 proc_pending *pending, *last_pending;
4229
4230 if (unwind.proc_pending.sym)
4231 {
4232 (md.unwind_check == unwind_check_warning
4233 ? as_warn
4234 : as_bad) (_("Missing .endp after previous .proc"));
4235 while (unwind.proc_pending.next)
4236 {
4237 pending = unwind.proc_pending.next;
4238 unwind.proc_pending.next = pending->next;
4239 free (pending);
4240 }
4241 }
4242 last_pending = NULL;
4243
4244 /* Parse names of main and alternate entry points and mark them as
4245 function symbols: */
4246 while (1)
4247 {
4248 SKIP_WHITESPACE ();
4249 c = get_symbol_name (&name);
4250 p = input_line_pointer;
4251 if (!*name)
4252 as_bad (_("Empty argument of .proc"));
4253 else
4254 {
4255 sym = symbol_find_or_make (name);
4256 if (S_IS_DEFINED (sym))
4257 as_bad (_("`%s' was already defined"), name);
4258 else if (!last_pending)
4259 {
4260 unwind.proc_pending.sym = sym;
4261 last_pending = &unwind.proc_pending;
4262 }
4263 else
4264 {
4265 pending = xmalloc (sizeof (*pending));
4266 pending->sym = sym;
4267 last_pending = last_pending->next = pending;
4268 }
4269 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4270 }
4271 *p = c;
4272 SKIP_WHITESPACE_AFTER_NAME ();
4273 if (*input_line_pointer != ',')
4274 break;
4275 ++input_line_pointer;
4276 }
4277 if (!last_pending)
4278 {
4279 unwind.proc_pending.sym = expr_build_dot ();
4280 last_pending = &unwind.proc_pending;
4281 }
4282 last_pending->next = NULL;
4283 demand_empty_rest_of_line ();
4284 do_align (4, NULL, 0, 0);
4285
4286 unwind.prologue = 0;
4287 unwind.prologue_count = 0;
4288 unwind.body = 0;
4289 unwind.insn = 0;
4290 unwind.list = unwind.tail = unwind.current_entry = NULL;
4291 unwind.personality_routine = 0;
4292 }
4293
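/* .body
   Mark the end of the prologue region and the start of the procedure body.  */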
4294 static void
4295 dot_body (int dummy ATTRIBUTE_UNUSED)
4296 {
4297 if (!in_procedure ("body"))
4298 return;
4299 if (!unwind.prologue && !unwind.body && unwind.insn)
4300 as_warn (_("Initial .body should precede any instructions"));
4301 check_pending_save ();
4302
4303 unwind.prologue = 0;
4304 unwind.prologue_mask = 0;
4305 unwind.body = 1;
4306
4307 add_unwind_entry (output_body (), 0);
4308 }
4309
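/* .prologue [<mask>, <grsave>]
   Start a prologue region.  <mask> is a 4-bit constant selecting which
   pieces of state are saved, and <grsave> names the first of the
   consecutive general registers used to hold them.  */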
4310 static void
4311 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4312 {
4313 unsigned mask = 0, grsave = 0;
4314
4315 if (!in_procedure ("prologue"))
4316 return;
4317 if (unwind.prologue)
4318 {
4319 as_bad (_(".prologue within prologue"));
4320 ignore_rest_of_line ();
4321 return;
4322 }
4323 if (!unwind.body && unwind.insn)
4324 as_warn (_("Initial .prologue should precede any instructions"));
4325
4326 if (!is_it_end_of_statement ())
4327 {
4328 expressionS e;
4329 int n, sep = parse_operand_and_eval (&e, ',');
4330
4331 if (e.X_op != O_constant
4332 || e.X_add_number < 0
4333 || e.X_add_number > 0xf)
4334 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4335 else if (e.X_add_number == 0)
4336 as_warn (_("Pointless use of zero first operand to .prologue"));
4337 else
4338 mask = e.X_add_number;
4339
4340 n = popcount (mask);
4341
4342 if (sep == ',')
4343 parse_operand_and_eval (&e, 0);
4344 else
4345 e.X_op = O_absent;
4346
4347 if (e.X_op == O_constant
4348 && e.X_add_number >= 0
4349 && e.X_add_number < 128)
4350 {
4351 if (md.unwind_check == unwind_check_error)
4352 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4353 grsave = e.X_add_number;
4354 }
4355 else if (e.X_op != O_register
4356 || (grsave = e.X_add_number - REG_GR) > 127)
4357 {
4358 as_bad (_("Second operand to .prologue must be a general register"));
4359 grsave = 0;
4360 }
4361 else if (grsave > 128U - n)
4362 {
4363 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4364 grsave = 0;
4365 }
4366 }
4367
4368 if (mask)
4369 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4370 else
4371 add_unwind_entry (output_prologue (), 0);
4372
4373 unwind.prologue = 1;
4374 unwind.prologue_mask = mask;
4375 unwind.prologue_gr = grsave;
4376 unwind.body = 0;
4377 ++unwind.prologue_count;
4378 }
4379
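/* .endp [<name> [, <name> ...]]
   End the current procedure: emit the unwind image and unwind table
   entry if needed, and set the sizes of the procedure's entry-point
   symbols.  */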
4380 static void
4381 dot_endp (int dummy ATTRIBUTE_UNUSED)
4382 {
4383 expressionS e;
4384 int bytes_per_address;
4385 long where;
4386 segT saved_seg;
4387 subsegT saved_subseg;
4388 proc_pending *pending;
4389 int unwind_check = md.unwind_check;
4390
4391 md.unwind_check = unwind_check_error;
4392 if (!in_procedure ("endp"))
4393 return;
4394 md.unwind_check = unwind_check;
4395
4396 if (unwind.saved_text_seg)
4397 {
4398 saved_seg = unwind.saved_text_seg;
4399 saved_subseg = unwind.saved_text_subseg;
4400 unwind.saved_text_seg = NULL;
4401 }
4402 else
4403 {
4404 saved_seg = now_seg;
4405 saved_subseg = now_subseg;
4406 }
4407
4408 insn_group_break (1, 0, 0);
4409
4410 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4411 if (!unwind.info)
4412 generate_unwind_image (saved_seg);
4413
4414 if (unwind.info || unwind.force_unwind_entry)
4415 {
4416 symbolS *proc_end;
4417
4418 subseg_set (md.last_text_seg, 0);
4419 proc_end = expr_build_dot ();
4420
4421 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4422
4423 /* Make sure that section has 4 byte alignment for ILP32 and
4424 8 byte alignment for LP64. */
4425 record_alignment (now_seg, md.pointer_size_shift);
4426
4427 /* Need space for 3 pointers for procedure start, procedure end,
4428 and unwind info. */
4429 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4430 where = frag_now_fix () - (3 * md.pointer_size);
4431 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4432
4433 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4434 e.X_op = O_pseudo_fixup;
4435 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4436 e.X_add_number = 0;
4437 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4438 && S_IS_DEFINED (unwind.proc_pending.sym))
4439 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4440 S_GET_VALUE (unwind.proc_pending.sym),
4441 symbol_get_frag (unwind.proc_pending.sym));
4442 else
4443 e.X_add_symbol = unwind.proc_pending.sym;
4444 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4445 BFD_RELOC_NONE);
4446
4447 e.X_op = O_pseudo_fixup;
4448 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4449 e.X_add_number = 0;
4450 e.X_add_symbol = proc_end;
4451 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4452 bytes_per_address, &e, BFD_RELOC_NONE);
4453
4454 if (unwind.info)
4455 {
4456 e.X_op = O_pseudo_fixup;
4457 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4458 e.X_add_number = 0;
4459 e.X_add_symbol = unwind.info;
4460 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4461 bytes_per_address, &e, BFD_RELOC_NONE);
4462 }
4463 }
4464 subseg_set (saved_seg, saved_subseg);
4465
4466 /* Set symbol sizes. */
4467 pending = &unwind.proc_pending;
4468 if (S_GET_NAME (pending->sym))
4469 {
4470 do
4471 {
4472 symbolS *sym = pending->sym;
4473
4474 if (!S_IS_DEFINED (sym))
4475 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4476 else if (S_GET_SIZE (sym) == 0
4477 && symbol_get_obj (sym)->size == NULL)
4478 {
4479 fragS *frag = symbol_get_frag (sym);
4480
4481 if (frag)
4482 {
4483 if (frag == frag_now && SEG_NORMAL (now_seg))
4484 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4485 else
4486 {
4487 symbol_get_obj (sym)->size =
4488 (expressionS *) xmalloc (sizeof (expressionS));
4489 symbol_get_obj (sym)->size->X_op = O_subtract;
4490 symbol_get_obj (sym)->size->X_add_symbol
4491 = symbol_new (FAKE_LABEL_NAME, now_seg,
4492 frag_now_fix (), frag_now);
4493 symbol_get_obj (sym)->size->X_op_symbol = sym;
4494 symbol_get_obj (sym)->size->X_add_number = 0;
4495 }
4496 }
4497 }
4498 } while ((pending = pending->next) != NULL);
4499 }
4500
4501 /* Parse names of main and alternate entry points. */
4502 while (1)
4503 {
4504 char *name, *p, c;
4505
4506 SKIP_WHITESPACE ();
4507 c = get_symbol_name (&name);
4508 p = input_line_pointer;
4509 if (!*name)
4510 (md.unwind_check == unwind_check_warning
4511 ? as_warn
4512 : as_bad) (_("Empty argument of .endp"));
4513 else
4514 {
4515 symbolS *sym = symbol_find (name);
4516
4517 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4518 {
4519 if (sym == pending->sym)
4520 {
4521 pending->sym = NULL;
4522 break;
4523 }
4524 }
4525 if (!sym || !pending)
4526 as_warn (_("`%s' was not specified with previous .proc"), name);
4527 }
4528 *p = c;
4529 SKIP_WHITESPACE_AFTER_NAME ();
4530 if (*input_line_pointer != ',')
4531 break;
4532 ++input_line_pointer;
4533 }
4534 demand_empty_rest_of_line ();
4535
4536 /* Deliberately only checking for the main entry point here; the
4537 language spec even says all arguments to .endp are ignored. */
4538 if (unwind.proc_pending.sym
4539 && S_GET_NAME (unwind.proc_pending.sym)
4540 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4541 as_warn (_("`%s' should be an operand to this .endp"),
4542 S_GET_NAME (unwind.proc_pending.sym));
4543 while (unwind.proc_pending.next)
4544 {
4545 pending = unwind.proc_pending.next;
4546 unwind.proc_pending.next = pending->next;
4547 free (pending);
4548 }
4549 unwind.proc_pending.sym = unwind.info = NULL;
4550 }
4551
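/* .mii, .mlx, .mmi, etc.
   Explicitly select the bundle template to use for the current bundle.  */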
4552 static void
4553 dot_template (int template_val)
4554 {
4555 CURR_SLOT.user_template = template_val;
4556 }
4557
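/* .regstk <ins>, <locals>, <outs>, <rotators>
   Describe the register stack frame layout, e.g. ".regstk 2, 3, 1, 0".
   With no operands the frame is reset to empty.  */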
4558 static void
4559 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4560 {
4561 int ins, locs, outs, rots;
4562
4563 if (is_it_end_of_statement ())
4564 ins = locs = outs = rots = 0;
4565 else
4566 {
4567 ins = get_absolute_expression ();
4568 if (*input_line_pointer++ != ',')
4569 goto err;
4570 locs = get_absolute_expression ();
4571 if (*input_line_pointer++ != ',')
4572 goto err;
4573 outs = get_absolute_expression ();
4574 if (*input_line_pointer++ != ',')
4575 goto err;
4576 rots = get_absolute_expression ();
4577 }
4578 set_regstack (ins, locs, outs, rots);
4579 return;
4580
4581 err:
4582 as_bad (_("Comma expected"));
4583 ignore_rest_of_line ();
4584 }
4585
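/* .rotr / .rotf / .rotp <name>[<n>] [, <name>[<n>] ...]
   Define named groups of rotating general, floating-point or predicate
   registers, e.g. ".rotr in[4], out[4]".  */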
4586 static void
4587 dot_rot (int type)
4588 {
4589 offsetT num_regs;
4590 valueT num_alloced = 0;
4591 struct dynreg **drpp, *dr;
4592 int ch, base_reg = 0;
4593 char *name, *start;
4594 size_t len;
4595
4596 switch (type)
4597 {
4598 case DYNREG_GR: base_reg = REG_GR + 32; break;
4599 case DYNREG_FR: base_reg = REG_FR + 32; break;
4600 case DYNREG_PR: base_reg = REG_P + 16; break;
4601 default: break;
4602 }
4603
4604 /* First, remove existing names from hash table. */
4605 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4606 {
4607 hash_delete (md.dynreg_hash, dr->name, FALSE);
4608 /* FIXME: Free dr->name. */
4609 dr->num_regs = 0;
4610 }
4611
4612 drpp = &md.dynreg[type];
4613 while (1)
4614 {
4615 ch = get_symbol_name (&start);
4616 len = strlen (ia64_canonicalize_symbol_name (start));
4617 *input_line_pointer = ch;
4618
4619 SKIP_WHITESPACE_AFTER_NAME ();
4620 if (*input_line_pointer != '[')
4621 {
4622 as_bad (_("Expected '['"));
4623 goto err;
4624 }
4625 ++input_line_pointer; /* skip '[' */
4626
4627 num_regs = get_absolute_expression ();
4628
4629 if (*input_line_pointer++ != ']')
4630 {
4631 as_bad (_("Expected ']'"));
4632 goto err;
4633 }
4634 if (num_regs <= 0)
4635 {
4636 as_bad (_("Number of elements must be positive"));
4637 goto err;
4638 }
4639 SKIP_WHITESPACE ();
4640
4641 num_alloced += num_regs;
4642 switch (type)
4643 {
4644 case DYNREG_GR:
4645 if (num_alloced > md.rot.num_regs)
4646 {
4647 as_bad (_("Used more than the declared %d rotating registers"),
4648 md.rot.num_regs);
4649 goto err;
4650 }
4651 break;
4652 case DYNREG_FR:
4653 if (num_alloced > 96)
4654 {
4655 as_bad (_("Used more than the available 96 rotating registers"));
4656 goto err;
4657 }
4658 break;
4659 case DYNREG_PR:
4660 if (num_alloced > 48)
4661 {
4662 as_bad (_("Used more than the available 48 rotating registers"));
4663 goto err;
4664 }
4665 break;
4666
4667 default:
4668 break;
4669 }
4670
4671 if (!*drpp)
4672 {
4673 *drpp = obstack_alloc (&notes, sizeof (*dr));
4674 memset (*drpp, 0, sizeof (*dr));
4675 }
4676
4677 name = obstack_alloc (&notes, len + 1);
4678 memcpy (name, start, len);
4679 name[len] = '\0';
4680
4681 dr = *drpp;
4682 dr->name = name;
4683 dr->num_regs = num_regs;
4684 dr->base = base_reg;
4685 drpp = &dr->next;
4686 base_reg += num_regs;
4687
4688 if (hash_insert (md.dynreg_hash, name, dr))
4689 {
4690 as_bad (_("Attempt to redefine register set `%s'"), name);
4691 obstack_free (&notes, name);
4692 goto err;
4693 }
4694
4695 if (*input_line_pointer != ',')
4696 break;
4697 ++input_line_pointer; /* skip comma */
4698 SKIP_WHITESPACE ();
4699 }
4700 demand_empty_rest_of_line ();
4701 return;
4702
4703 err:
4704 ignore_rest_of_line ();
4705 }
4706
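/* .lsb / .msb
   Select little- or big-endian byte order for data emitted into the
   current section.  */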
4707 static void
4708 dot_byteorder (int byteorder)
4709 {
4710 segment_info_type *seginfo = seg_info (now_seg);
4711
4712 if (byteorder == -1)
4713 {
4714 if (seginfo->tc_segment_info_data.endian == 0)
4715 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4716 byteorder = seginfo->tc_segment_info_data.endian == 1;
4717 }
4718 else
4719 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4720
4721 if (target_big_endian != byteorder)
4722 {
4723 target_big_endian = byteorder;
4724 if (target_big_endian)
4725 {
4726 ia64_number_to_chars = number_to_chars_bigendian;
4727 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4728 }
4729 else
4730 {
4731 ia64_number_to_chars = number_to_chars_littleendian;
4732 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4733 }
4734 }
4735 }
4736
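/* .psr <option> [, <option> ...]
   <option> is one of lsb, msb, abi32 or abi64; the corresponding ELF
   header flags are updated.  */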
4737 static void
4738 dot_psr (int dummy ATTRIBUTE_UNUSED)
4739 {
4740 char *option;
4741 int ch;
4742
4743 while (1)
4744 {
4745 ch = get_symbol_name (&option);
4746 if (strcmp (option, "lsb") == 0)
4747 md.flags &= ~EF_IA_64_BE;
4748 else if (strcmp (option, "msb") == 0)
4749 md.flags |= EF_IA_64_BE;
4750 else if (strcmp (option, "abi32") == 0)
4751 md.flags &= ~EF_IA_64_ABI64;
4752 else if (strcmp (option, "abi64") == 0)
4753 md.flags |= EF_IA_64_ABI64;
4754 else
4755 as_bad (_("Unknown psr option `%s'"), option);
4756 *input_line_pointer = ch;
4757
4758 SKIP_WHITESPACE_AFTER_NAME ();
4759 if (*input_line_pointer != ',')
4760 break;
4761
4762 ++input_line_pointer;
4763 SKIP_WHITESPACE ();
4764 }
4765 demand_empty_rest_of_line ();
4766 }
4767
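/* .ln <line>
   Set the logical source line number (for debugging info).  */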
4768 static void
4769 dot_ln (int dummy ATTRIBUTE_UNUSED)
4770 {
4771 new_logical_line (0, get_absolute_expression ());
4772 demand_empty_rest_of_line ();
4773 }
4774
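/* Common helper for the .xdata/.xreal/.xstring family: parse the target
   section name, switch to that section, emit the data via BUILDER, and
   switch back.  A nonzero UA suppresses automatic alignment.  */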
4775 static void
4776 cross_section (int ref, void (*builder) (int), int ua)
4777 {
4778 char *start, *end;
4779 int saved_auto_align;
4780 unsigned int section_count;
4781 char *name;
4782 char c;
4783
4784 SKIP_WHITESPACE ();
4785 start = input_line_pointer;
4786 c = get_symbol_name (&name);
4787 if (input_line_pointer == start)
4788 {
4789 as_bad (_("Missing section name"));
4790 ignore_rest_of_line ();
4791 return;
4792 }
4793 * input_line_pointer = c;
4794 SKIP_WHITESPACE_AFTER_NAME ();
4795 end = input_line_pointer;
4796 if (*input_line_pointer != ',')
4797 {
4798 as_bad (_("Comma expected after section name"));
4799 ignore_rest_of_line ();
4800 return;
4801 }
4802 *end = '\0';
4803 end = input_line_pointer + 1; /* skip comma */
4804 input_line_pointer = start;
4805 md.keep_pending_output = 1;
4806 section_count = bfd_count_sections (stdoutput);
4807 obj_elf_section (0);
4808 if (section_count != bfd_count_sections (stdoutput))
4809 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4810 input_line_pointer = end;
4811 saved_auto_align = md.auto_align;
4812 if (ua)
4813 md.auto_align = 0;
4814 (*builder) (ref);
4815 if (ua)
4816 md.auto_align = saved_auto_align;
4817 obj_elf_previous (0);
4818 md.keep_pending_output = 0;
4819 }
4820
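/* .xdataN <section>, <value> [, <value> ...]
   Emit N-byte data into <section> without leaving the current section.  */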
4821 static void
4822 dot_xdata (int size)
4823 {
4824 cross_section (size, cons, 0);
4825 }
4826
4827 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4828
4829 static void
4830 stmt_float_cons (int kind)
4831 {
4832 size_t alignment;
4833
4834 switch (kind)
4835 {
4836 case 'd':
4837 alignment = 3;
4838 break;
4839
4840 case 'x':
4841 case 'X':
4842 alignment = 4;
4843 break;
4844
4845 case 'f':
4846 default:
4847 alignment = 2;
4848 break;
4849 }
4850 do_align (alignment, NULL, 0, 0);
4851 float_cons (kind);
4852 }
4853
4854 static void
4855 stmt_cons_ua (int size)
4856 {
4857 int saved_auto_align = md.auto_align;
4858
4859 md.auto_align = 0;
4860 cons (size);
4861 md.auto_align = saved_auto_align;
4862 }
4863
4864 static void
4865 dot_xfloat_cons (int kind)
4866 {
4867 cross_section (kind, stmt_float_cons, 0);
4868 }
4869
4870 static void
4871 dot_xstringer (int zero)
4872 {
4873 cross_section (zero, stringer, 0);
4874 }
4875
4876 static void
4877 dot_xdata_ua (int size)
4878 {
4879 cross_section (size, cons, 1);
4880 }
4881
4882 static void
4883 dot_xfloat_cons_ua (int kind)
4884 {
4885 cross_section (kind, float_cons, 1);
4886 }
4887
4888 /* .reg.val <regname>,value */
4889
4890 static void
4891 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4892 {
4893 expressionS reg;
4894
4895 expression_and_evaluate (&reg);
4896 if (reg.X_op != O_register)
4897 {
4898 as_bad (_("Register name expected"));
4899 ignore_rest_of_line ();
4900 }
4901 else if (*input_line_pointer++ != ',')
4902 {
4903 as_bad (_("Comma expected"));
4904 ignore_rest_of_line ();
4905 }
4906 else
4907 {
4908 valueT value = get_absolute_expression ();
4909 int regno = reg.X_add_number;
4910 if (regno <= REG_GR || regno > REG_GR + 127)
4911 as_warn (_("Register value annotation ignored"));
4912 else
4913 {
4914 gr_values[regno - REG_GR].known = 1;
4915 gr_values[regno - REG_GR].value = value;
4916 gr_values[regno - REG_GR].path = md.path;
4917 }
4918 }
4919 demand_empty_rest_of_line ();
4920 }
4921
4922 /*
4923 .serialize.data
4924 .serialize.instruction
4925 */
4926 static void
4927 dot_serialize (int type)
4928 {
4929 insn_group_break (0, 0, 0);
4930 if (type)
4931 instruction_serialization ();
4932 else
4933 data_serialization ();
4934 insn_group_break (0, 0, 0);
4935 demand_empty_rest_of_line ();
4936 }
4937
4938 /* Select the DV (dependency violation) checking mode:
4939 .auto
4940 .explicit
4941 .default
4942
4943 A stop is inserted when changing modes.
4944 */
4945
4946 static void
4947 dot_dv_mode (int type)
4948 {
4949 if (md.manual_bundling)
4950 as_warn (_("Directive invalid within a bundle"));
4951
4952 if (type == 'E' || type == 'A')
4953 md.mode_explicitly_set = 0;
4954 else
4955 md.mode_explicitly_set = 1;
4956
4957 md.detect_dv = 1;
4958 switch (type)
4959 {
4960 case 'A':
4961 case 'a':
4962 if (md.explicit_mode)
4963 insn_group_break (1, 0, 0);
4964 md.explicit_mode = 0;
4965 break;
4966 case 'E':
4967 case 'e':
4968 if (!md.explicit_mode)
4969 insn_group_break (1, 0, 0);
4970 md.explicit_mode = 1;
4971 break;
4972 default:
4973 case 'd':
4974 if (md.explicit_mode != md.default_explicit_mode)
4975 insn_group_break (1, 0, 0);
4976 md.explicit_mode = md.default_explicit_mode;
4977 md.mode_explicitly_set = 0;
4978 break;
4979 }
4980 }
4981
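/* Debug helper: print the predicate registers selected by MASK to stderr.  */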
4982 static void
4983 print_prmask (valueT mask)
4984 {
4985 int regno;
4986 const char *comma = "";
4987 for (regno = 0; regno < 64; regno++)
4988 {
4989 if (mask & ((valueT) 1 << regno))
4990 {
4991 fprintf (stderr, "%s p%d", comma, regno);
4992 comma = ",";
4993 }
4994 }
4995 }
4996
4997 /*
4998 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4999 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
5000 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
5001 .pred.safe_across_calls p1 [, p2 [,...]]
5002 */
5003
5004 static void
5005 dot_pred_rel (int type)
5006 {
5007 valueT mask = 0;
5008 int count = 0;
5009 int p1 = -1, p2 = -1;
5010
5011 if (type == 0)
5012 {
5013 if (*input_line_pointer == '"')
5014 {
5015 int len;
5016 char *form = demand_copy_C_string (&len);
5017
5018 if (strcmp (form, "mutex") == 0)
5019 type = 'm';
5020 else if (strcmp (form, "clear") == 0)
5021 type = 'c';
5022 else if (strcmp (form, "imply") == 0)
5023 type = 'i';
5024 obstack_free (&notes, form);
5025 }
5026 else if (*input_line_pointer == '@')
5027 {
5028 char *form;
5029 char c;
5030
5031 ++input_line_pointer;
5032 c = get_symbol_name (&form);
5033
5034 if (strcmp (form, "mutex") == 0)
5035 type = 'm';
5036 else if (strcmp (form, "clear") == 0)
5037 type = 'c';
5038 else if (strcmp (form, "imply") == 0)
5039 type = 'i';
5040 (void) restore_line_pointer (c);
5041 }
5042 else
5043 {
5044 as_bad (_("Missing predicate relation type"));
5045 ignore_rest_of_line ();
5046 return;
5047 }
5048 if (type == 0)
5049 {
5050 as_bad (_("Unrecognized predicate relation type"));
5051 ignore_rest_of_line ();
5052 return;
5053 }
5054 if (*input_line_pointer == ',')
5055 ++input_line_pointer;
5056 SKIP_WHITESPACE ();
5057 }
5058
5059 while (1)
5060 {
5061 valueT bits = 1;
5062 int sep, regno;
5063 expressionS pr, *pr1, *pr2;
5064
5065 sep = parse_operand_and_eval (&pr, ',');
5066 if (pr.X_op == O_register
5067 && pr.X_add_number >= REG_P
5068 && pr.X_add_number <= REG_P + 63)
5069 {
5070 regno = pr.X_add_number - REG_P;
5071 bits <<= regno;
5072 count++;
5073 if (p1 == -1)
5074 p1 = regno;
5075 else if (p2 == -1)
5076 p2 = regno;
5077 }
5078 else if (type != 'i'
5079 && pr.X_op == O_subtract
5080 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5081 && pr1->X_op == O_register
5082 && pr1->X_add_number >= REG_P
5083 && pr1->X_add_number <= REG_P + 63
5084 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5085 && pr2->X_op == O_register
5086 && pr2->X_add_number >= REG_P
5087 && pr2->X_add_number <= REG_P + 63)
5088 {
5089 /* It's a range. */
5090 int stop;
5091
5092 regno = pr1->X_add_number - REG_P;
5093 stop = pr2->X_add_number - REG_P;
5094 if (regno >= stop)
5095 {
5096 as_bad (_("Bad register range"));
5097 ignore_rest_of_line ();
5098 return;
5099 }
5100 bits = ((bits << stop) << 1) - (bits << regno);
5101 count += stop - regno + 1;
5102 }
5103 else
5104 {
5105 as_bad (_("Predicate register expected"));
5106 ignore_rest_of_line ();
5107 return;
5108 }
5109 if (mask & bits)
5110 as_warn (_("Duplicate predicate register ignored"));
5111 mask |= bits;
5112 if (sep != ',')
5113 break;
5114 }
5115
5116 switch (type)
5117 {
5118 case 'c':
5119 if (count == 0)
5120 mask = ~(valueT) 0;
5121 clear_qp_mutex (mask);
5122 clear_qp_implies (mask, (valueT) 0);
5123 break;
5124 case 'i':
5125 if (count != 2 || p1 == -1 || p2 == -1)
5126 as_bad (_("Predicate source and target required"));
5127 else if (p1 == 0 || p2 == 0)
5128 as_bad (_("Use of p0 is not valid in this context"));
5129 else
5130 add_qp_imply (p1, p2);
5131 break;
5132 case 'm':
5133 if (count < 2)
5134 {
5135 as_bad (_("At least two PR arguments expected"));
5136 break;
5137 }
5138 else if (mask & 1)
5139 {
5140 as_bad (_("Use of p0 is not valid in this context"));
5141 break;
5142 }
5143 add_qp_mutex (mask);
5144 break;
5145 case 's':
5146 /* Note that we don't override any existing relations.  */
5147 if (count == 0)
5148 {
5149 as_bad (_("At least one PR argument expected"));
5150 break;
5151 }
5152 if (md.debug_dv)
5153 {
5154 fprintf (stderr, "Safe across calls: ");
5155 print_prmask (mask);
5156 fprintf (stderr, "\n");
5157 }
5158 qp_safe_across_calls = mask;
5159 break;
5160 }
5161 demand_empty_rest_of_line ();
5162 }
5163
5164 /* .entry label [, label [, ...]]
5165 Hint to DV code that the given labels are to be considered entry points.
5166 Otherwise, only global labels are considered entry points. */
5167
5168 static void
5169 dot_entry (int dummy ATTRIBUTE_UNUSED)
5170 {
5171 const char *err;
5172 char *name;
5173 int c;
5174 symbolS *symbolP;
5175
5176 do
5177 {
5178 c = get_symbol_name (&name);
5179 symbolP = symbol_find_or_make (name);
5180
5181 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5182 if (err)
5183 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5184 name, err);
5185
5186 *input_line_pointer = c;
5187 SKIP_WHITESPACE_AFTER_NAME ();
5188 c = *input_line_pointer;
5189 if (c == ',')
5190 {
5191 input_line_pointer++;
5192 SKIP_WHITESPACE ();
5193 if (*input_line_pointer == '\n')
5194 c = '\n';
5195 }
5196 }
5197 while (c == ',');
5198
5199 demand_empty_rest_of_line ();
5200 }
5201
5202 /* .mem.offset offset, base
5203 "base" is used to distinguish offsets computed from different base addresses. */
5204
5205 static void
5206 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5207 {
5208 md.mem_offset.hint = 1;
5209 md.mem_offset.offset = get_absolute_expression ();
5210 if (*input_line_pointer != ',')
5211 {
5212 as_bad (_("Comma expected"));
5213 ignore_rest_of_line ();
5214 return;
5215 }
5216 ++input_line_pointer;
5217 md.mem_offset.base = get_absolute_expression ();
5218 demand_empty_rest_of_line ();
5219 }
5220
5221 /* ia64-specific pseudo-ops: */
5222 const pseudo_typeS md_pseudo_table[] =
5223 {
5224 { "radix", dot_radix, 0 },
5225 { "lcomm", s_lcomm_bytes, 1 },
5226 { "loc", dot_loc, 0 },
5227 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5228 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5229 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5230 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5231 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5232 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5233 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5234 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5235 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5236 { "proc", dot_proc, 0 },
5237 { "body", dot_body, 0 },
5238 { "prologue", dot_prologue, 0 },
5239 { "endp", dot_endp, 0 },
5240
5241 { "fframe", dot_fframe, 0 },
5242 { "vframe", dot_vframe, 0 },
5243 { "vframesp", dot_vframesp, 0 },
5244 { "vframepsp", dot_vframesp, 1 },
5245 { "save", dot_save, 0 },
5246 { "restore", dot_restore, 0 },
5247 { "restorereg", dot_restorereg, 0 },
5248 { "restorereg.p", dot_restorereg, 1 },
5249 { "handlerdata", dot_handlerdata, 0 },
5250 { "unwentry", dot_unwentry, 0 },
5251 { "altrp", dot_altrp, 0 },
5252 { "savesp", dot_savemem, 0 },
5253 { "savepsp", dot_savemem, 1 },
5254 { "save.g", dot_saveg, 0 },
5255 { "save.f", dot_savef, 0 },
5256 { "save.b", dot_saveb, 0 },
5257 { "save.gf", dot_savegf, 0 },
5258 { "spill", dot_spill, 0 },
5259 { "spillreg", dot_spillreg, 0 },
5260 { "spillsp", dot_spillmem, 0 },
5261 { "spillpsp", dot_spillmem, 1 },
5262 { "spillreg.p", dot_spillreg, 1 },
5263 { "spillsp.p", dot_spillmem, ~0 },
5264 { "spillpsp.p", dot_spillmem, ~1 },
5265 { "label_state", dot_label_state, 0 },
5266 { "copy_state", dot_copy_state, 0 },
5267 { "unwabi", dot_unwabi, 0 },
5268 { "personality", dot_personality, 0 },
5269 { "mii", dot_template, 0x0 },
5270 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5271 { "mlx", dot_template, 0x2 },
5272 { "mmi", dot_template, 0x4 },
5273 { "mfi", dot_template, 0x6 },
5274 { "mmf", dot_template, 0x7 },
5275 { "mib", dot_template, 0x8 },
5276 { "mbb", dot_template, 0x9 },
5277 { "bbb", dot_template, 0xb },
5278 { "mmb", dot_template, 0xc },
5279 { "mfb", dot_template, 0xe },
5280 { "align", dot_align, 0 },
5281 { "regstk", dot_regstk, 0 },
5282 { "rotr", dot_rot, DYNREG_GR },
5283 { "rotf", dot_rot, DYNREG_FR },
5284 { "rotp", dot_rot, DYNREG_PR },
5285 { "lsb", dot_byteorder, 0 },
5286 { "msb", dot_byteorder, 1 },
5287 { "psr", dot_psr, 0 },
5288 { "alias", dot_alias, 0 },
5289 { "secalias", dot_alias, 1 },
5290 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5291
5292 { "xdata1", dot_xdata, 1 },
5293 { "xdata2", dot_xdata, 2 },
5294 { "xdata4", dot_xdata, 4 },
5295 { "xdata8", dot_xdata, 8 },
5296 { "xdata16", dot_xdata, 16 },
5297 { "xreal4", dot_xfloat_cons, 'f' },
5298 { "xreal8", dot_xfloat_cons, 'd' },
5299 { "xreal10", dot_xfloat_cons, 'x' },
5300 { "xreal16", dot_xfloat_cons, 'X' },
5301 { "xstring", dot_xstringer, 8 + 0 },
5302 { "xstringz", dot_xstringer, 8 + 1 },
5303
5304 /* unaligned versions: */
5305 { "xdata2.ua", dot_xdata_ua, 2 },
5306 { "xdata4.ua", dot_xdata_ua, 4 },
5307 { "xdata8.ua", dot_xdata_ua, 8 },
5308 { "xdata16.ua", dot_xdata_ua, 16 },
5309 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5310 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5311 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5312 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5313
5314 /* annotations/DV checking support */
5315 { "entry", dot_entry, 0 },
5316 { "mem.offset", dot_mem_offset, 0 },
5317 { "pred.rel", dot_pred_rel, 0 },
5318 { "pred.rel.clear", dot_pred_rel, 'c' },
5319 { "pred.rel.imply", dot_pred_rel, 'i' },
5320 { "pred.rel.mutex", dot_pred_rel, 'm' },
5321 { "pred.safe_across_calls", dot_pred_rel, 's' },
5322 { "reg.val", dot_reg_val, 0 },
5323 { "serialize.data", dot_serialize, 0 },
5324 { "serialize.instruction", dot_serialize, 1 },
5325 { "auto", dot_dv_mode, 'a' },
5326 { "explicit", dot_dv_mode, 'e' },
5327 { "default", dot_dv_mode, 'd' },
5328
5329 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5330 IA-64 aligns data allocation pseudo-ops by default, so we have to
5331 tell it that these ones are supposed to be unaligned. Long term,
5332 should rewrite so that only IA-64 specific data allocation pseudo-ops
5333 are aligned by default. */
5334 {"2byte", stmt_cons_ua, 2},
5335 {"4byte", stmt_cons_ua, 4},
5336 {"8byte", stmt_cons_ua, 8},
5337
5338 #ifdef TE_VMS
5339 {"vms_common", obj_elf_vms_common, 0},
5340 #endif
5341
5342 { NULL, 0, 0 }
5343 };
5344
5345 static const struct pseudo_opcode
5346 {
5347 const char *name;
5348 void (*handler) (int);
5349 int arg;
5350 }
5351 pseudo_opcode[] =
5352 {
5353 /* These are more like pseudo-ops, but they don't start with a dot.  */
5354 { "data1", cons, 1 },
5355 { "data2", cons, 2 },
5356 { "data4", cons, 4 },
5357 { "data8", cons, 8 },
5358 { "data16", cons, 16 },
5359 { "real4", stmt_float_cons, 'f' },
5360 { "real8", stmt_float_cons, 'd' },
5361 { "real10", stmt_float_cons, 'x' },
5362 { "real16", stmt_float_cons, 'X' },
5363 { "string", stringer, 8 + 0 },
5364 { "stringz", stringer, 8 + 1 },
5365
5366 /* unaligned versions: */
5367 { "data2.ua", stmt_cons_ua, 2 },
5368 { "data4.ua", stmt_cons_ua, 4 },
5369 { "data8.ua", stmt_cons_ua, 8 },
5370 { "data16.ua", stmt_cons_ua, 16 },
5371 { "real4.ua", float_cons, 'f' },
5372 { "real8.ua", float_cons, 'd' },
5373 { "real10.ua", float_cons, 'x' },
5374 { "real16.ua", float_cons, 'X' },
5375 };
5376
5377 /* Declare a register by creating a symbol for it and entering it in
5378 the symbol table. */
5379
5380 static symbolS *
5381 declare_register (const char *name, unsigned int regnum)
5382 {
5383 const char *err;
5384 symbolS *sym;
5385
5386 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5387
5388 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5389 if (err)
5390 as_fatal (_("Inserting \"%s\" into register table failed: %s"),
5391 name, err);
5392
5393 return sym;
5394 }
5395
5396 static void
5397 declare_register_set (const char *prefix,
5398 unsigned int num_regs,
5399 unsigned int base_regnum)
5400 {
5401 char name[8];
5402 unsigned int i;
5403
5404 for (i = 0; i < num_regs; ++i)
5405 {
5406 snprintf (name, sizeof (name), "%s%u", prefix, i);
5407 declare_register (name, base_regnum + i);
5408 }
5409 }
5410
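/* Return the number of bits used to encode operand OPND.  */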
5411 static unsigned int
5412 operand_width (enum ia64_opnd opnd)
5413 {
5414 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5415 unsigned int bits = 0;
5416 int i;
5417
5419 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5420 bits += odesc->field[i].bits;
5421
5422 return bits;
5423 }
5424
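/* Check whether expression E matches operand RES_INDEX of opcode IDESC.
   Returns OPERAND_MATCH, OPERAND_OUT_OF_RANGE or OPERAND_MISMATCH; for
   relocatable operands a fixup is recorded in the current slot.  */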
5425 static enum operand_match_result
5426 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5427 {
5428 enum ia64_opnd opnd = idesc->operands[res_index];
5429 int bits, relocatable = 0;
5430 struct insn_fix *fix;
5431 bfd_signed_vma val;
5432
5433 switch (opnd)
5434 {
5435 /* constants: */
5436
5437 case IA64_OPND_AR_CCV:
5438 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5439 return OPERAND_MATCH;
5440 break;
5441
5442 case IA64_OPND_AR_CSD:
5443 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5444 return OPERAND_MATCH;
5445 break;
5446
5447 case IA64_OPND_AR_PFS:
5448 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5449 return OPERAND_MATCH;
5450 break;
5451
5452 case IA64_OPND_GR0:
5453 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5454 return OPERAND_MATCH;
5455 break;
5456
5457 case IA64_OPND_IP:
5458 if (e->X_op == O_register && e->X_add_number == REG_IP)
5459 return OPERAND_MATCH;
5460 break;
5461
5462 case IA64_OPND_PR:
5463 if (e->X_op == O_register && e->X_add_number == REG_PR)
5464 return OPERAND_MATCH;
5465 break;
5466
5467 case IA64_OPND_PR_ROT:
5468 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5469 return OPERAND_MATCH;
5470 break;
5471
5472 case IA64_OPND_PSR:
5473 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5474 return OPERAND_MATCH;
5475 break;
5476
5477 case IA64_OPND_PSR_L:
5478 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5479 return OPERAND_MATCH;
5480 break;
5481
5482 case IA64_OPND_PSR_UM:
5483 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5484 return OPERAND_MATCH;
5485 break;
5486
5487 case IA64_OPND_C1:
5488 if (e->X_op == O_constant)
5489 {
5490 if (e->X_add_number == 1)
5491 return OPERAND_MATCH;
5492 else
5493 return OPERAND_OUT_OF_RANGE;
5494 }
5495 break;
5496
5497 case IA64_OPND_C8:
5498 if (e->X_op == O_constant)
5499 {
5500 if (e->X_add_number == 8)
5501 return OPERAND_MATCH;
5502 else
5503 return OPERAND_OUT_OF_RANGE;
5504 }
5505 break;
5506
5507 case IA64_OPND_C16:
5508 if (e->X_op == O_constant)
5509 {
5510 if (e->X_add_number == 16)
5511 return OPERAND_MATCH;
5512 else
5513 return OPERAND_OUT_OF_RANGE;
5514 }
5515 break;
5516
5517 /* register operands: */
5518
5519 case IA64_OPND_AR3:
5520 if (e->X_op == O_register && e->X_add_number >= REG_AR
5521 && e->X_add_number < REG_AR + 128)
5522 return OPERAND_MATCH;
5523 break;
5524
5525 case IA64_OPND_B1:
5526 case IA64_OPND_B2:
5527 if (e->X_op == O_register && e->X_add_number >= REG_BR
5528 && e->X_add_number < REG_BR + 8)
5529 return OPERAND_MATCH;
5530 break;
5531
5532 case IA64_OPND_CR3:
5533 if (e->X_op == O_register && e->X_add_number >= REG_CR
5534 && e->X_add_number < REG_CR + 128)
5535 return OPERAND_MATCH;
5536 break;
5537
5538 case IA64_OPND_DAHR3:
5539 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5540 && e->X_add_number < REG_DAHR + 8)
5541 return OPERAND_MATCH;
5542 break;
5543
5544 case IA64_OPND_F1:
5545 case IA64_OPND_F2:
5546 case IA64_OPND_F3:
5547 case IA64_OPND_F4:
5548 if (e->X_op == O_register && e->X_add_number >= REG_FR
5549 && e->X_add_number < REG_FR + 128)
5550 return OPERAND_MATCH;
5551 break;
5552
5553 case IA64_OPND_P1:
5554 case IA64_OPND_P2:
5555 if (e->X_op == O_register && e->X_add_number >= REG_P
5556 && e->X_add_number < REG_P + 64)
5557 return OPERAND_MATCH;
5558 break;
5559
5560 case IA64_OPND_R1:
5561 case IA64_OPND_R2:
5562 case IA64_OPND_R3:
5563 if (e->X_op == O_register && e->X_add_number >= REG_GR
5564 && e->X_add_number < REG_GR + 128)
5565 return OPERAND_MATCH;
5566 break;
5567
5568 case IA64_OPND_R3_2:
5569 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5570 {
5571 if (e->X_add_number < REG_GR + 4)
5572 return OPERAND_MATCH;
5573 else if (e->X_add_number < REG_GR + 128)
5574 return OPERAND_OUT_OF_RANGE;
5575 }
5576 break;
5577
5578 /* indirect operands: */
5579 case IA64_OPND_CPUID_R3:
5580 case IA64_OPND_DBR_R3:
5581 case IA64_OPND_DTR_R3:
5582 case IA64_OPND_ITR_R3:
5583 case IA64_OPND_IBR_R3:
5584 case IA64_OPND_MSR_R3:
5585 case IA64_OPND_PKR_R3:
5586 case IA64_OPND_PMC_R3:
5587 case IA64_OPND_PMD_R3:
5588 case IA64_OPND_DAHR_R3:
5589 case IA64_OPND_RR_R3:
5590 if (e->X_op == O_index && e->X_op_symbol
5591 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5592 == opnd - IA64_OPND_CPUID_R3))
5593 return OPERAND_MATCH;
5594 break;
5595
5596 case IA64_OPND_MR3:
5597 if (e->X_op == O_index && !e->X_op_symbol)
5598 return OPERAND_MATCH;
5599 break;
5600
5601 /* immediate operands: */
5602 case IA64_OPND_CNT2a:
5603 case IA64_OPND_LEN4:
5604 case IA64_OPND_LEN6:
5605 bits = operand_width (idesc->operands[res_index]);
5606 if (e->X_op == O_constant)
5607 {
5608 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5609 return OPERAND_MATCH;
5610 else
5611 return OPERAND_OUT_OF_RANGE;
5612 }
5613 break;
5614
5615 case IA64_OPND_CNT2b:
5616 if (e->X_op == O_constant)
5617 {
5618 if ((bfd_vma) (e->X_add_number - 1) < 3)
5619 return OPERAND_MATCH;
5620 else
5621 return OPERAND_OUT_OF_RANGE;
5622 }
5623 break;
5624
5625 case IA64_OPND_CNT2c:
5626 val = e->X_add_number;
5627 if (e->X_op == O_constant)
5628 {
5629 if ((val == 0 || val == 7 || val == 15 || val == 16))
5630 return OPERAND_MATCH;
5631 else
5632 return OPERAND_OUT_OF_RANGE;
5633 }
5634 break;
5635
5636 case IA64_OPND_SOR:
5637 /* SOR must be an integer multiple of 8 */
5638 if (e->X_op == O_constant && e->X_add_number & 0x7)
5639 return OPERAND_OUT_OF_RANGE;
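/* Fall through.  */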
5640 case IA64_OPND_SOF:
5641 case IA64_OPND_SOL:
5642 if (e->X_op == O_constant)
5643 {
5644 if ((bfd_vma) e->X_add_number <= 96)
5645 return OPERAND_MATCH;
5646 else
5647 return OPERAND_OUT_OF_RANGE;
5648 }
5649 break;
5650
5651 case IA64_OPND_IMMU62:
5652 if (e->X_op == O_constant)
5653 {
5654 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5655 return OPERAND_MATCH;
5656 else
5657 return OPERAND_OUT_OF_RANGE;
5658 }
5659 else
5660 {
5661 /* FIXME -- need 62-bit relocation type */
5662 as_bad (_("62-bit relocation not yet implemented"));
5663 }
5664 break;
5665
5666 case IA64_OPND_IMMU64:
5667 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5668 || e->X_op == O_subtract)
5669 {
5670 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5671 fix->code = BFD_RELOC_IA64_IMM64;
5672 if (e->X_op != O_subtract)
5673 {
5674 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5675 if (e->X_op == O_pseudo_fixup)
5676 e->X_op = O_symbol;
5677 }
5678
5679 fix->opnd = idesc->operands[res_index];
5680 fix->expr = *e;
5681 fix->is_pcrel = 0;
5682 ++CURR_SLOT.num_fixups;
5683 return OPERAND_MATCH;
5684 }
5685 else if (e->X_op == O_constant)
5686 return OPERAND_MATCH;
5687 break;
5688
5689 case IA64_OPND_IMMU5b:
5690 if (e->X_op == O_constant)
5691 {
5692 val = e->X_add_number;
5693 if (val >= 32 && val <= 63)
5694 return OPERAND_MATCH;
5695 else
5696 return OPERAND_OUT_OF_RANGE;
5697 }
5698 break;
5699
5700 case IA64_OPND_CCNT5:
5701 case IA64_OPND_CNT5:
5702 case IA64_OPND_CNT6:
5703 case IA64_OPND_CPOS6a:
5704 case IA64_OPND_CPOS6b:
5705 case IA64_OPND_CPOS6c:
5706 case IA64_OPND_IMMU2:
5707 case IA64_OPND_IMMU7a:
5708 case IA64_OPND_IMMU7b:
5709 case IA64_OPND_IMMU16:
5710 case IA64_OPND_IMMU19:
5711 case IA64_OPND_IMMU21:
5712 case IA64_OPND_IMMU24:
5713 case IA64_OPND_MBTYPE4:
5714 case IA64_OPND_MHTYPE8:
5715 case IA64_OPND_POS6:
5716 bits = operand_width (idesc->operands[res_index]);
5717 if (e->X_op == O_constant)
5718 {
5719 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5720 return OPERAND_MATCH;
5721 else
5722 return OPERAND_OUT_OF_RANGE;
5723 }
5724 break;
5725
5726 case IA64_OPND_IMMU9:
5727 bits = operand_width (idesc->operands[res_index]);
5728 if (e->X_op == O_constant)
5729 {
5730 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5731 {
5732 int lobits = e->X_add_number & 0x3;
5733 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5734 e->X_add_number |= (bfd_vma) 0x3;
5735 return OPERAND_MATCH;
5736 }
5737 else
5738 return OPERAND_OUT_OF_RANGE;
5739 }
5740 break;
5741
5742 case IA64_OPND_IMM44:
5743 /* The least significant 16 bits must be zero.  */
5744 if ((e->X_add_number & 0xffff) != 0)
5745 /* XXX technically, this is wrong: we should not be issuing warning
5746 messages until we're sure this instruction pattern is going to
5747 be used! */
5748 as_warn (_("lower 16 bits of mask ignored"));
5749
5750 if (e->X_op == O_constant)
5751 {
5752 if (((e->X_add_number >= 0
5753 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5754 || (e->X_add_number < 0
5755 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5756 {
5757 /* sign-extend */
5758 if (e->X_add_number >= 0
5759 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5760 {
5761 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5762 }
5763 return OPERAND_MATCH;
5764 }
5765 else
5766 return OPERAND_OUT_OF_RANGE;
5767 }
5768 break;
5769
5770 case IA64_OPND_IMM17:
5771 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5772 if (e->X_op == O_constant)
5773 {
5774 if (((e->X_add_number >= 0
5775 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5776 || (e->X_add_number < 0
5777 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5778 {
5779 /* sign-extend */
5780 if (e->X_add_number >= 0
5781 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5782 {
5783 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5784 }
5785 return OPERAND_MATCH;
5786 }
5787 else
5788 return OPERAND_OUT_OF_RANGE;
5789 }
5790 break;
5791
5792 case IA64_OPND_IMM14:
5793 case IA64_OPND_IMM22:
5794 relocatable = 1;
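/* Fall through.  */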
5795 case IA64_OPND_IMM1:
5796 case IA64_OPND_IMM8:
5797 case IA64_OPND_IMM8U4:
5798 case IA64_OPND_IMM8M1:
5799 case IA64_OPND_IMM8M1U4:
5800 case IA64_OPND_IMM8M1U8:
5801 case IA64_OPND_IMM9a:
5802 case IA64_OPND_IMM9b:
5803 bits = operand_width (idesc->operands[res_index]);
5804 if (relocatable && (e->X_op == O_symbol
5805 || e->X_op == O_subtract
5806 || e->X_op == O_pseudo_fixup))
5807 {
5808 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5809
5810 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5811 fix->code = BFD_RELOC_IA64_IMM14;
5812 else
5813 fix->code = BFD_RELOC_IA64_IMM22;
5814
5815 if (e->X_op != O_subtract)
5816 {
5817 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5818 if (e->X_op == O_pseudo_fixup)
5819 e->X_op = O_symbol;
5820 }
5821
5822 fix->opnd = idesc->operands[res_index];
5823 fix->expr = *e;
5824 fix->is_pcrel = 0;
5825 ++CURR_SLOT.num_fixups;
5826 return OPERAND_MATCH;
5827 }
5828 else if (e->X_op != O_constant
5829 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5830 return OPERAND_MISMATCH;
5831
5832 if (opnd == IA64_OPND_IMM8M1U4)
5833 {
5834 /* Zero is not valid for unsigned compares that take an adjusted
5835 constant immediate range. */
5836 if (e->X_add_number == 0)
5837 return OPERAND_OUT_OF_RANGE;
5838
5839 /* Sign-extend 32-bit unsigned numbers, so that the following range
5840 checks will work. */
5841 val = e->X_add_number;
5842 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5843 && ((val & ((bfd_vma) 1 << 31)) != 0))
5844 val = ((val << 32) >> 32);
5845
5846 /* Check for 0x100000000. This is valid because
5847 0x100000000-1 is the same as ((uint32_t) -1). */
5848 if (val == ((bfd_signed_vma) 1 << 32))
5849 return OPERAND_MATCH;
5850
5851 val = val - 1;
5852 }
5853 else if (opnd == IA64_OPND_IMM8M1U8)
5854 {
5855 /* Zero is not valid for unsigned compares that take an adjusted
5856 constant immediate range. */
5857 if (e->X_add_number == 0)
5858 return OPERAND_OUT_OF_RANGE;
5859
5860 /* Check for 0x10000000000000000. */
5861 if (e->X_op == O_big)
5862 {
5863 if (generic_bignum[0] == 0
5864 && generic_bignum[1] == 0
5865 && generic_bignum[2] == 0
5866 && generic_bignum[3] == 0
5867 && generic_bignum[4] == 1)
5868 return OPERAND_MATCH;
5869 else
5870 return OPERAND_OUT_OF_RANGE;
5871 }
5872 else
5873 val = e->X_add_number - 1;
5874 }
5875 else if (opnd == IA64_OPND_IMM8M1)
5876 val = e->X_add_number - 1;
5877 else if (opnd == IA64_OPND_IMM8U4)
5878 {
5879 /* Sign-extend 32-bit unsigned numbers, so that the following range
5880 checks will work. */
5881 val = e->X_add_number;
5882 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5883 && ((val & ((bfd_vma) 1 << 31)) != 0))
5884 val = ((val << 32) >> 32);
5885 }
5886 else
5887 val = e->X_add_number;
5888
5889 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5890 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5891 return OPERAND_MATCH;
5892 else
5893 return OPERAND_OUT_OF_RANGE;
5894
5895 case IA64_OPND_INC3:
5896 /* +/- 1, 4, 8, 16 */
5897 val = e->X_add_number;
5898 if (val < 0)
5899 val = -val;
5900 if (e->X_op == O_constant)
5901 {
5902 if ((val == 1 || val == 4 || val == 8 || val == 16))
5903 return OPERAND_MATCH;
5904 else
5905 return OPERAND_OUT_OF_RANGE;
5906 }
5907 break;
5908
5909 case IA64_OPND_TGT25:
5910 case IA64_OPND_TGT25b:
5911 case IA64_OPND_TGT25c:
5912 case IA64_OPND_TGT64:
5913 if (e->X_op == O_symbol)
5914 {
5915 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5916 if (opnd == IA64_OPND_TGT25)
5917 fix->code = BFD_RELOC_IA64_PCREL21F;
5918 else if (opnd == IA64_OPND_TGT25b)
5919 fix->code = BFD_RELOC_IA64_PCREL21M;
5920 else if (opnd == IA64_OPND_TGT25c)
5921 fix->code = BFD_RELOC_IA64_PCREL21B;
5922 else if (opnd == IA64_OPND_TGT64)
5923 fix->code = BFD_RELOC_IA64_PCREL60B;
5924 else
5925 abort ();
5926
5927 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5928 fix->opnd = idesc->operands[res_index];
5929 fix->expr = *e;
5930 fix->is_pcrel = 1;
5931 ++CURR_SLOT.num_fixups;
5932 return OPERAND_MATCH;
5933 }
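/* Fall through for non-symbol targets.  */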
5934 case IA64_OPND_TAG13:
5935 case IA64_OPND_TAG13b:
5936 switch (e->X_op)
5937 {
5938 case O_constant:
5939 return OPERAND_MATCH;
5940
5941 case O_symbol:
5942 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5943 /* There are no external relocs for TAG13/TAG13b fields, so we
5944 create a dummy reloc. This will not live past md_apply_fix. */
5945 fix->code = BFD_RELOC_UNUSED;
5946 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5947 fix->opnd = idesc->operands[res_index];
5948 fix->expr = *e;
5949 fix->is_pcrel = 1;
5950 ++CURR_SLOT.num_fixups;
5951 return OPERAND_MATCH;
5952
5953 default:
5954 break;
5955 }
5956 break;
5957
5958 case IA64_OPND_LDXMOV:
5959 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5960 fix->code = BFD_RELOC_IA64_LDXMOV;
5961 fix->opnd = idesc->operands[res_index];
5962 fix->expr = *e;
5963 fix->is_pcrel = 0;
5964 ++CURR_SLOT.num_fixups;
5965 return OPERAND_MATCH;
5966
5967 case IA64_OPND_STRD5b:
5968 if (e->X_op == O_constant)
5969 {
5970 /* 5-bit signed scaled by 64 */
5971 if ((e->X_add_number <= ( 0xf << 6 ))
5972 && (e->X_add_number >= -( 0x10 << 6 )))
5973 {
5974
5975 /* Must be a multiple of 64 */
5976 if ((e->X_add_number & 0x3f) != 0)
5977 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5978
5979 e->X_add_number &= ~ 0x3f;
5980 return OPERAND_MATCH;
5981 }
5982 else
5983 return OPERAND_OUT_OF_RANGE;
5984 }
5985 break;
5986 case IA64_OPND_CNT6a:
5987 if (e->X_op == O_constant)
5988 {
5989 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5990 if ((e->X_add_number <= 64)
5991 && (e->X_add_number > 0) )
5992 {
5993 return OPERAND_MATCH;
5994 }
5995 else
5996 return OPERAND_OUT_OF_RANGE;
5997 }
5998 break;
5999
6000 default:
6001 break;
6002 }
6003 return OPERAND_MISMATCH;
6004 }
6005
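/* Parse a single operand into E.  If MORE is nonzero, a following ','
   or MORE separator is consumed.  Returns the separator character seen.  */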
6006 static int
6007 parse_operand (expressionS *e, int more)
6008 {
6009 int sep = '\0';
6010
6011 memset (e, 0, sizeof (*e));
6012 e->X_op = O_absent;
6013 SKIP_WHITESPACE ();
6014 expression (e);
6015 sep = *input_line_pointer;
6016 if (more && (sep == ',' || sep == more))
6017 ++input_line_pointer;
6018 return sep;
6019 }
6020
6021 static int
6022 parse_operand_and_eval (expressionS *e, int more)
6023 {
6024 int sep = parse_operand (e, more);
6025 resolve_expression (e);
6026 return sep;
6027 }
6028
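/* Like parse_operand, but resolve the expression only for operand kinds
   that cannot carry a relocation; relocatable operands are left alone so
   that operand_match can still attach fixups to them.  */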
6029 static int
6030 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6031 {
6032 int sep = parse_operand (e, more);
6033 switch (op)
6034 {
6035 case IA64_OPND_IMM14:
6036 case IA64_OPND_IMM22:
6037 case IA64_OPND_IMMU64:
6038 case IA64_OPND_TGT25:
6039 case IA64_OPND_TGT25b:
6040 case IA64_OPND_TGT25c:
6041 case IA64_OPND_TGT64:
6042 case IA64_OPND_TAG13:
6043 case IA64_OPND_TAG13b:
6044 case IA64_OPND_LDXMOV:
6045 break;
6046 default:
6047 resolve_expression (e);
6048 break;
6049 }
6050 return sep;
6051 }
6052
6053 /* Returns the next entry in the opcode table that matches the one in
6054 IDESC, and frees the entry in IDESC. If no matching entry is
6055 found, NULL is returned instead. */
6056
6057 static struct ia64_opcode *
6058 get_next_opcode (struct ia64_opcode *idesc)
6059 {
6060 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6061 ia64_free_opcode (idesc);
6062 return next;
6063 }
6064
6065 /* Parse the operands for the opcode and find the opcode variant that
6066 matches the specified operands, or NULL if no match is possible. */
6067
6068 static struct ia64_opcode *
6069 parse_operands (struct ia64_opcode *idesc)
6070 {
6071 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6072 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6073 int reg1, reg2;
6074 char reg_class;
6075 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6076 enum operand_match_result result;
6077 char mnemonic[129];
6078 char *first_arg = 0, *end, *saved_input_pointer;
6079 unsigned int sof;
6080
6081 gas_assert (strlen (idesc->name) <= 128);
6082
6083 strcpy (mnemonic, idesc->name);
6084 if (idesc->operands[2] == IA64_OPND_SOF
6085 || idesc->operands[1] == IA64_OPND_SOF)
6086 {
6087 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6088 can't parse the first operand until we have parsed the
6089 remaining operands of the "alloc" instruction. */
6090 SKIP_WHITESPACE ();
6091 first_arg = input_line_pointer;
6092 end = strchr (input_line_pointer, '=');
6093 if (!end)
6094 {
6095 as_bad (_("Expected separator `='"));
6096 return 0;
6097 }
6098 input_line_pointer = end + 1;
6099 ++i;
6100 ++num_outputs;
6101 }
6102
6103 for (; ; ++i)
6104 {
6105 if (i < NELEMS (CURR_SLOT.opnd))
6106 {
6107 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6108 idesc->operands[i]);
6109 if (CURR_SLOT.opnd[i].X_op == O_absent)
6110 break;
6111 }
6112 else
6113 {
6114 expressionS dummy;
6115
6116 sep = parse_operand (&dummy, '=');
6117 if (dummy.X_op == O_absent)
6118 break;
6119 }
6120
6121 ++num_operands;
6122
6123 if (sep != '=' && sep != ',')
6124 break;
6125
6126 if (sep == '=')
6127 {
6128 if (num_outputs > 0)
6129 as_bad (_("Duplicate equal sign (=) in instruction"));
6130 else
6131 num_outputs = i + 1;
6132 }
6133 }
6134 if (sep != '\0')
6135 {
6136 as_bad (_("Illegal operand separator `%c'"), sep);
6137 return 0;
6138 }
6139
6140 if (idesc->operands[2] == IA64_OPND_SOF
6141 || idesc->operands[1] == IA64_OPND_SOF)
6142 {
6143 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6144 Note, however, that due to that mapping operand numbers in error
6145 messages for any of the constant operands will not be correct. */
6146 know (strcmp (idesc->name, "alloc") == 0);
6147 /* The first operand hasn't been parsed/initialized, yet (but
6148 num_operands intentionally doesn't account for that). */
6149 i = num_operands > 4 ? 2 : 1;
6150 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6151 ? CURR_SLOT.opnd[n].X_add_number \
6152 : 0)
6153 sof = set_regstack (FORCE_CONST(i),
6154 FORCE_CONST(i + 1),
6155 FORCE_CONST(i + 2),
6156 FORCE_CONST(i + 3));
6157 #undef FORCE_CONST
6158
6159 /* Now we can parse the first arg.  */
6160 saved_input_pointer = input_line_pointer;
6161 input_line_pointer = first_arg;
6162 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6163 idesc->operands[0]);
6164 if (sep != '=')
6165 --num_outputs; /* force error */
6166 input_line_pointer = saved_input_pointer;
6167
6168 CURR_SLOT.opnd[i].X_add_number = sof;
6169 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6170 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6171 CURR_SLOT.opnd[i + 1].X_add_number
6172 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6173 else
6174 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6175 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6176 }
6177
6178 highest_unmatched_operand = -4;
6179 curr_out_of_range_pos = -1;
6180 error_pos = 0;
6181 for (; idesc; idesc = get_next_opcode (idesc))
6182 {
6183 if (num_outputs != idesc->num_outputs)
6184 continue; /* mismatch in # of outputs */
6185 if (highest_unmatched_operand < 0)
6186 highest_unmatched_operand |= 1;
6187 if (num_operands > NELEMS (idesc->operands)
6188 || (num_operands < NELEMS (idesc->operands)
6189 && idesc->operands[num_operands])
6190 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6191 continue; /* mismatch in number of arguments */
6192 if (highest_unmatched_operand < 0)
6193 highest_unmatched_operand |= 2;
6194
6195 CURR_SLOT.num_fixups = 0;
6196
6197 /* Try to match all operands. If we see an out-of-range operand,
6198 then continue trying to match the rest of the operands, since if
6199 the rest match, then this idesc will give the best error message. */
6200
6201 out_of_range_pos = -1;
6202 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6203 {
6204 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6205 if (result != OPERAND_MATCH)
6206 {
6207 if (result != OPERAND_OUT_OF_RANGE)
6208 break;
6209 if (out_of_range_pos < 0)
6210 /* remember position of the first out-of-range operand: */
6211 out_of_range_pos = i;
6212 }
6213 }
6214
6215 /* If we did not match all operands, or if at least one operand was
6216 out-of-range, then this idesc does not match. Keep track of which
6217 idesc matched the most operands before failing. If we have two
6218 idescs that failed at the same position, and one had an out-of-range
6219 operand, then prefer the out-of-range operand. Thus if we have
6220 "add r0=0x1000000,r1" we get an error saying the constant is out
6221 of range instead of an error saying that the constant should have been
6222 a register. */
6223
6224 if (i != num_operands || out_of_range_pos >= 0)
6225 {
6226 if (i > highest_unmatched_operand
6227 || (i == highest_unmatched_operand
6228 && out_of_range_pos > curr_out_of_range_pos))
6229 {
6230 highest_unmatched_operand = i;
6231 if (out_of_range_pos >= 0)
6232 {
6233 expected_operand = idesc->operands[out_of_range_pos];
6234 error_pos = out_of_range_pos;
6235 }
6236 else
6237 {
6238 expected_operand = idesc->operands[i];
6239 error_pos = i;
6240 }
6241 curr_out_of_range_pos = out_of_range_pos;
6242 }
6243 continue;
6244 }
6245
6246 break;
6247 }
6248 if (!idesc)
6249 {
6250 if (expected_operand)
6251 as_bad (_("Operand %u of `%s' should be %s"),
6252 error_pos + 1, mnemonic,
6253 elf64_ia64_operands[expected_operand].desc);
6254 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6255 as_bad (_("Wrong number of output operands"));
6256 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6257 as_bad (_("Wrong number of input operands"));
6258 else
6259 as_bad (_("Operand mismatch"));
6260 return 0;
6261 }
6262
6263 /* Check that the instruction doesn't use
6264 - r0, f0, or f1 as output operands
6265 - the same predicate twice as output operands
6266 - r0 as address of a base update load or store
6267 - the same GR as output and address of a base update load
6268 - two even- or two odd-numbered FRs as output operands of a floating
6269 point parallel load.
6270 At most two (conflicting) output (or output-like) operands can exist,
6271 (floating point parallel loads have three outputs, but the base register,
6272 if updated, cannot conflict with the actual outputs). */
6273 reg2 = reg1 = -1;
6274 for (i = 0; i < num_operands; ++i)
6275 {
6276 int regno = 0;
6277
6278 reg_class = 0;
6279 switch (idesc->operands[i])
6280 {
6281 case IA64_OPND_R1:
6282 case IA64_OPND_R2:
6283 case IA64_OPND_R3:
6284 if (i < num_outputs)
6285 {
6286 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6287 reg_class = 'r';
6288 else if (reg1 < 0)
6289 reg1 = CURR_SLOT.opnd[i].X_add_number;
6290 else if (reg2 < 0)
6291 reg2 = CURR_SLOT.opnd[i].X_add_number;
6292 }
6293 break;
6294 case IA64_OPND_P1:
6295 case IA64_OPND_P2:
6296 if (i < num_outputs)
6297 {
6298 if (reg1 < 0)
6299 reg1 = CURR_SLOT.opnd[i].X_add_number;
6300 else if (reg2 < 0)
6301 reg2 = CURR_SLOT.opnd[i].X_add_number;
6302 }
6303 break;
6304 case IA64_OPND_F1:
6305 case IA64_OPND_F2:
6306 case IA64_OPND_F3:
6307 case IA64_OPND_F4:
6308 if (i < num_outputs)
6309 {
6310 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6311 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6312 {
6313 reg_class = 'f';
6314 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6315 }
6316 else if (reg1 < 0)
6317 reg1 = CURR_SLOT.opnd[i].X_add_number;
6318 else if (reg2 < 0)
6319 reg2 = CURR_SLOT.opnd[i].X_add_number;
6320 }
6321 break;
6322 case IA64_OPND_MR3:
6323 if (idesc->flags & IA64_OPCODE_POSTINC)
6324 {
6325 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6326 reg_class = 'm';
6327 else if (reg1 < 0)
6328 reg1 = CURR_SLOT.opnd[i].X_add_number;
6329 else if (reg2 < 0)
6330 reg2 = CURR_SLOT.opnd[i].X_add_number;
6331 }
6332 break;
6333 default:
6334 break;
6335 }
6336 switch (reg_class)
6337 {
6338 case 0:
6339 break;
6340 default:
6341 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6342 break;
6343 case 'm':
6344 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6345 break;
6346 }
6347 }
6348 if (reg1 == reg2)
6349 {
6350 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6351 {
6352 reg1 -= REG_GR;
6353 reg_class = 'r';
6354 }
6355 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6356 {
6357 reg1 -= REG_P;
6358 reg_class = 'p';
6359 }
6360 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6361 {
6362 reg1 -= REG_FR;
6363 reg_class = 'f';
6364 }
6365 else
6366 reg_class = 0;
6367 if (reg_class)
6368 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6369 }
6370 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6371 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6372 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6373 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6374 && ! ((reg1 ^ reg2) & 1))
6375 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6376 reg1 - REG_FR, reg2 - REG_FR);
6377 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6378 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6379 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6380 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6381 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6382 reg1 - REG_FR, reg2 - REG_FR);
6383 return idesc;
6384 }
6385
6386 static void
6387 build_insn (struct slot *slot, bfd_vma *insnp)
6388 {
6389 const struct ia64_operand *odesc, *o2desc;
6390 struct ia64_opcode *idesc = slot->idesc;
6391 bfd_vma insn;
6392 bfd_signed_vma val;
6393 const char *err;
6394 int i;
6395
6396 insn = idesc->opcode | slot->qp_regno;
6397
6398 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6399 {
6400 if (slot->opnd[i].X_op == O_register
6401 || slot->opnd[i].X_op == O_constant
6402 || slot->opnd[i].X_op == O_index)
6403 val = slot->opnd[i].X_add_number;
6404 else if (slot->opnd[i].X_op == O_big)
6405 {
6406 /* This must be the value 0x10000000000000000. */
6407 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6408 val = 0;
6409 }
6410 else
6411 val = 0;
6412
6413 switch (idesc->operands[i])
6414 {
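/* Illustrative note (not part of the original sources): the first three
   cases handle X-unit long forms whose immediate spans two slots. For
   IA64_OPND_IMMU64 (movl), bits 62:22 of the value go into the adjacent
   L slot via *insnp++, while bits 6:0, 15:7, 20:16, 21 and 63 are
   scattered into fields of the X-unit instruction word itself
   (presumably the imm7b/imm9d/imm5c/ic/i fields of the manual). */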
6415 case IA64_OPND_IMMU64:
6416 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6417 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6418 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6419 | (((val >> 63) & 0x1) << 36));
6420 continue;
6421
6422 case IA64_OPND_IMMU62:
6423 val &= 0x3fffffffffffffffULL;
6424 if (val != slot->opnd[i].X_add_number)
6425 as_warn (_("Value truncated to 62 bits"));
6426 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6427 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6428 continue;
6429
6430 case IA64_OPND_TGT64:
6431 val >>= 4;
6432 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6433 insn |= ((((val >> 59) & 0x1) << 36)
6434 | (((val >> 0) & 0xfffff) << 13));
6435 continue;
6436
6437 case IA64_OPND_AR3:
6438 val -= REG_AR;
6439 break;
6440
6441 case IA64_OPND_B1:
6442 case IA64_OPND_B2:
6443 val -= REG_BR;
6444 break;
6445
6446 case IA64_OPND_CR3:
6447 val -= REG_CR;
6448 break;
6449
6450 case IA64_OPND_DAHR3:
6451 val -= REG_DAHR;
6452 break;
6453
6454 case IA64_OPND_F1:
6455 case IA64_OPND_F2:
6456 case IA64_OPND_F3:
6457 case IA64_OPND_F4:
6458 val -= REG_FR;
6459 break;
6460
6461 case IA64_OPND_P1:
6462 case IA64_OPND_P2:
6463 val -= REG_P;
6464 break;
6465
6466 case IA64_OPND_R1:
6467 case IA64_OPND_R2:
6468 case IA64_OPND_R3:
6469 case IA64_OPND_R3_2:
6470 case IA64_OPND_CPUID_R3:
6471 case IA64_OPND_DBR_R3:
6472 case IA64_OPND_DTR_R3:
6473 case IA64_OPND_ITR_R3:
6474 case IA64_OPND_IBR_R3:
6475 case IA64_OPND_MR3:
6476 case IA64_OPND_MSR_R3:
6477 case IA64_OPND_PKR_R3:
6478 case IA64_OPND_PMC_R3:
6479 case IA64_OPND_PMD_R3:
6480 case IA64_OPND_DAHR_R3:
6481 case IA64_OPND_RR_R3:
6482 val -= REG_GR;
6483 break;
6484
6485 default:
6486 break;
6487 }
6488
6489 odesc = elf64_ia64_operands + idesc->operands[i];
6490 err = (*odesc->insert) (odesc, val, &insn);
6491 if (err)
6492 as_bad_where (slot->src_file, slot->src_line,
6493 _("Bad operand value: %s"), err);
6494 if (idesc->flags & IA64_OPCODE_PSEUDO)
6495 {
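/* Illustrative note (not part of the original sources): some pseudo
   mnemonics leave a machine field implied. "mov f1 = f3", for example,
   is encoded with F2 duplicated from F3, and the shift-immediate
   pseudos encode a deposit/extract length of 64 minus the given bit
   position; the two fix-ups below supply those implied fields. */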
6496 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6497 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6498 {
6499 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6500 (*o2desc->insert) (o2desc, val, &insn);
6501 }
6502 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6503 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6504 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6505 {
6506 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6507 (*o2desc->insert) (o2desc, 64 - val, &insn);
6508 }
6509 }
6510 }
6511 *insnp = insn;
6512 }
6513
6514 static void
6515 emit_one_bundle (void)
6516 {
6517 int manual_bundling_off = 0, manual_bundling = 0;
6518 enum ia64_unit required_unit, insn_unit = 0;
6519 enum ia64_insn_type type[3], insn_type;
6520 unsigned int template_val, orig_template;
6521 bfd_vma insn[3] = { -1, -1, -1 };
6522 struct ia64_opcode *idesc;
6523 int end_of_insn_group = 0, user_template = -1;
6524 int n, i, j, first, curr, last_slot;
6525 bfd_vma t0 = 0, t1 = 0;
6526 struct label_fix *lfix;
6527 bfd_boolean mark_label;
6528 struct insn_fix *ifix;
6529 char mnemonic[16];
6530 fixS *fix;
6531 char *f;
6532 int addr_mod;
6533
6534 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6535 know (first >= 0 && first < NUM_SLOTS);
6536 n = MIN (3, md.num_slots_in_use);
6537
6538 /* Determine template: use user_template if specified, best match
6539 otherwise: */
6540
6541 if (md.slot[first].user_template >= 0)
6542 user_template = template_val = md.slot[first].user_template;
6543 else
6544 {
6545 /* Auto select appropriate template. */
6546 memset (type, 0, sizeof (type));
6547 curr = first;
6548 for (i = 0; i < n; ++i)
6549 {
6550 if (md.slot[curr].label_fixups && i != 0)
6551 break;
6552 type[i] = md.slot[curr].idesc->type;
6553 curr = (curr + 1) % NUM_SLOTS;
6554 }
6555 template_val = best_template[type[0]][type[1]][type[2]];
6556 }
6557
6558 /* initialize instructions with appropriate nops: */
6559 for (i = 0; i < 3; ++i)
6560 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6561
6562 f = frag_more (16);
6563
6564 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6565 from the start of the frag. */
6566 addr_mod = frag_now_fix () & 15;
6567 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6568 as_bad (_("instruction address is not a multiple of 16"));
6569 frag_now->insn_addr = addr_mod;
6570 frag_now->has_code = 1;
6571
6572 /* now fill in slots with as many insns as possible: */
6573 curr = first;
6574 idesc = md.slot[curr].idesc;
6575 end_of_insn_group = 0;
6576 last_slot = -1;
6577 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6578 {
6579 /* If we have unwind records, we may need to update some now. */
6580 unw_rec_list *ptr = md.slot[curr].unwind_record;
6581 unw_rec_list *end_ptr = NULL;
6582
6583 if (ptr)
6584 {
6585 /* Find the last prologue/body record in the list for the current
6586 insn, and set the slot number for all records up to that point.
6587 This needs to be done now, because prologue/body records refer to
6588 the current point, not the point after the instruction has been
6589 issued. This matters because there may have been nops emitted
6590 meanwhile. Any non-prologue non-body record followed by a
6591 prologue/body record must also refer to the current point. */
6592 unw_rec_list *last_ptr;
6593
6594 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6595 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6596 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6597 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6598 || ptr->r.type == body)
6599 last_ptr = ptr;
6600 if (last_ptr)
6601 {
6602 /* Make last_ptr point one after the last prologue/body
6603 record. */
6604 last_ptr = last_ptr->next;
6605 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6606 ptr = ptr->next)
6607 {
6608 ptr->slot_number = (unsigned long) f + i;
6609 ptr->slot_frag = frag_now;
6610 }
6611 /* Remove the initialized records, so that we won't accidentally
6612 update them again if we insert a nop and continue. */
6613 md.slot[curr].unwind_record = last_ptr;
6614 }
6615 }
6616
6617 manual_bundling_off = md.slot[curr].manual_bundling_off;
6618 if (md.slot[curr].manual_bundling_on)
6619 {
6620 if (curr == first)
6621 manual_bundling = 1;
6622 else
6623 break; /* Need to start a new bundle. */
6624 }
6625
6626 /* If this instruction specifies a template, then it must be the first
6627 instruction of a bundle. */
6628 if (curr != first && md.slot[curr].user_template >= 0)
6629 break;
6630
6631 if (idesc->flags & IA64_OPCODE_SLOT2)
6632 {
6633 if (manual_bundling && !manual_bundling_off)
6634 {
6635 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6636 _("`%s' must be last in bundle"), idesc->name);
6637 if (i < 2)
6638 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6639 }
6640 i = 2;
6641 }
6642 if (idesc->flags & IA64_OPCODE_LAST)
6643 {
6644 int required_slot;
6645 unsigned int required_template;
6646
6647 /* If we need a stop bit after an M slot, our only choice is
6648 template 5 (M;;MI). If we need a stop bit after a B
6649 slot, our only choice is to place it at the end of the
6650 bundle, because the only available templates are MIB,
6651 MBB, BBB, MMB, and MFB. We don't handle anything other
6652 than M and B slots because these are the only kind of
6653 instructions that can have the IA64_OPCODE_LAST bit set. */
6654 required_template = template_val;
6655 switch (idesc->type)
6656 {
6657 case IA64_TYPE_M:
6658 required_slot = 0;
6659 required_template = 5;
6660 break;
6661
6662 case IA64_TYPE_B:
6663 required_slot = 2;
6664 break;
6665
6666 default:
6667 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6668 _("Internal error: don't know how to force %s to end of instruction group"),
6669 idesc->name);
6670 required_slot = i;
6671 break;
6672 }
6673 if (manual_bundling
6674 && (i > required_slot
6675 || (required_slot == 2 && !manual_bundling_off)
6676 || (user_template >= 0
6677 /* Changing from MMI to M;MI is OK. */
6678 && (template_val ^ required_template) > 1)))
6679 {
6680 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6681 _("`%s' must be last in instruction group"),
6682 idesc->name);
6683 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6684 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6685 }
6686 if (required_slot < i)
6687 /* Can't fit this instruction. */
6688 break;
6689
6690 i = required_slot;
6691 if (required_template != template_val)
6692 {
6693 /* If we switch the template, we need to reset the NOPs
6694 after slot i. The slot-types of the instructions ahead
6695 of i never change, so we don't need to worry about
6696 changing NOPs in front of this slot. */
6697 for (j = i; j < 3; ++j)
6698 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6699
6700 /* We just picked a template that includes the stop bit in the
6701 middle, so we don't need another one emitted later. */
6702 md.slot[curr].end_of_insn_group = 0;
6703 }
6704 template_val = required_template;
6705 }
6706 if (curr != first && md.slot[curr].label_fixups)
6707 {
6708 if (manual_bundling)
6709 {
6710 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6711 _("Label must be first in a bundle"));
6712 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6713 }
6714 /* This insn must go into the first slot of a bundle. */
6715 break;
6716 }
6717
6718 if (end_of_insn_group && md.num_slots_in_use >= 1)
6719 {
6720 /* We need an instruction group boundary in the middle of a
6721 bundle. See if we can switch to another template with
6722 an appropriate boundary. */
6723
6724 orig_template = template_val;
6725 if (i == 1 && (user_template == 4
6726 || (user_template < 0
6727 && (ia64_templ_desc[template_val].exec_unit[0]
6728 == IA64_UNIT_M))))
6729 {
6730 template_val = 5;
6731 end_of_insn_group = 0;
6732 }
6733 else if (i == 2 && (user_template == 0
6734 || (user_template < 0
6735 && (ia64_templ_desc[template_val].exec_unit[1]
6736 == IA64_UNIT_I)))
6737 /* This test makes sure we don't switch the template if
6738 the next instruction is one that needs to be first in
6739 an instruction group. Since all those instructions are
6740 in the M group, there is no way such an instruction can
6741 fit in this bundle even if we switch the template. The
6742 reason we have to check for this is that otherwise we
6743 may end up generating "MI;;I M.." which has the deadly
6744 effect that the second M instruction is no longer the
6745 first in the group! --davidm 99/12/16 */
6746 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6747 {
6748 template_val = 1;
6749 end_of_insn_group = 0;
6750 }
6751 else if (i == 1
6752 && user_template == 0
6753 && !(idesc->flags & IA64_OPCODE_FIRST))
6754 /* Use the next slot. */
6755 continue;
6756 else if (curr != first)
6757 /* can't fit this insn */
6758 break;
6759
6760 if (template_val != orig_template)
6761 /* if we switch the template, we need to reset the NOPs
6762 after slot i. The slot-types of the instructions ahead
6763 of i never change, so we don't need to worry about
6764 changing NOPs in front of this slot. */
6765 for (j = i; j < 3; ++j)
6766 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6767 }
6768 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6769
6770 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6771 if (idesc->type == IA64_TYPE_DYN)
6772 {
6773 enum ia64_opnd opnd1, opnd2;
6774
6775 if ((strcmp (idesc->name, "nop") == 0)
6776 || (strcmp (idesc->name, "break") == 0))
6777 insn_unit = required_unit;
6778 else if (strcmp (idesc->name, "hint") == 0)
6779 {
6780 insn_unit = required_unit;
6781 if (required_unit == IA64_UNIT_B)
6782 {
6783 switch (md.hint_b)
6784 {
6785 case hint_b_ok:
6786 break;
6787 case hint_b_warning:
6788 as_warn (_("hint in B unit may be treated as nop"));
6789 break;
6790 case hint_b_error:
6791 /* When manual bundling is off and there is no
6792 user template, we choose a different unit so
6793 that hint won't go into the current slot. We
6794 will fill the current bundle with nops and
6795 try to put hint into the next bundle. */
6796 if (!manual_bundling && user_template < 0)
6797 insn_unit = IA64_UNIT_I;
6798 else
6799 as_bad (_("hint in B unit can't be used"));
6800 break;
6801 }
6802 }
6803 }
6804 else if (strcmp (idesc->name, "chk.s") == 0
6805 || strcmp (idesc->name, "mov") == 0)
6806 {
6807 insn_unit = IA64_UNIT_M;
6808 if (required_unit == IA64_UNIT_I
6809 || (required_unit == IA64_UNIT_F && template_val == 6))
6810 insn_unit = IA64_UNIT_I;
6811 }
6812 else
6813 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6814
6815 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6816 idesc->name, "?imbfxx"[insn_unit]);
6817 opnd1 = idesc->operands[0];
6818 opnd2 = idesc->operands[1];
6819 ia64_free_opcode (idesc);
6820 idesc = ia64_find_opcode (mnemonic);
6821 /* moves to/from ARs have collisions */
6822 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6823 {
6824 while (idesc != NULL
6825 && (idesc->operands[0] != opnd1
6826 || idesc->operands[1] != opnd2))
6827 idesc = get_next_opcode (idesc);
6828 }
6829 md.slot[curr].idesc = idesc;
6830 }
6831 else
6832 {
6833 insn_type = idesc->type;
6834 insn_unit = IA64_UNIT_NIL;
6835 switch (insn_type)
6836 {
6837 case IA64_TYPE_A:
6838 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6839 insn_unit = required_unit;
6840 break;
6841 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6842 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6843 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6844 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6845 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6846 default: break;
6847 }
6848 }
6849
6850 if (insn_unit != required_unit)
6851 continue; /* Try next slot. */
6852
6853 /* Now is a good time to fix up the labels for this insn. */
6854 mark_label = FALSE;
6855 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6856 {
6857 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6858 symbol_set_frag (lfix->sym, frag_now);
6859 mark_label |= lfix->dw2_mark_labels;
6860 }
6861 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6862 {
6863 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6864 symbol_set_frag (lfix->sym, frag_now);
6865 }
6866
6867 if (debug_type == DEBUG_DWARF2
6868 || md.slot[curr].loc_directive_seen
6869 || mark_label)
6870 {
6871 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6872
6873 md.slot[curr].loc_directive_seen = 0;
6874 if (mark_label)
6875 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6876
6877 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6878 }
6879
6880 build_insn (md.slot + curr, insn + i);
6881
6882 ptr = md.slot[curr].unwind_record;
6883 if (ptr)
6884 {
6885 /* Set slot numbers for all remaining unwind records belonging to the
6886 current insn. There can not be any prologue/body unwind records
6887 here. */
6888 for (; ptr != end_ptr; ptr = ptr->next)
6889 {
6890 ptr->slot_number = (unsigned long) f + i;
6891 ptr->slot_frag = frag_now;
6892 }
6893 md.slot[curr].unwind_record = NULL;
6894 }
6895
6896 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6897 {
6898 ifix = md.slot[curr].fixup + j;
6899 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6900 &ifix->expr, ifix->is_pcrel, ifix->code);
6901 fix->tc_fix_data.opnd = ifix->opnd;
6902 fix->fx_file = md.slot[curr].src_file;
6903 fix->fx_line = md.slot[curr].src_line;
6904 }
6905
6906 end_of_insn_group = md.slot[curr].end_of_insn_group;
6907
6908 /* This adjustment to "i" must occur after the fix, otherwise the fix
6909 is assigned to the wrong slot, and the VMS linker complains. */
6910 if (required_unit == IA64_UNIT_L)
6911 {
6912 know (i == 1);
6913 /* skip one slot for long/X-unit instructions */
6914 ++i;
6915 }
6916 --md.num_slots_in_use;
6917 last_slot = i;
6918
6919 /* clear slot: */
6920 ia64_free_opcode (md.slot[curr].idesc);
6921 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6922 md.slot[curr].user_template = -1;
6923
6924 if (manual_bundling_off)
6925 {
6926 manual_bundling = 0;
6927 break;
6928 }
6929 curr = (curr + 1) % NUM_SLOTS;
6930 idesc = md.slot[curr].idesc;
6931 }
6932
6933 /* A user template was specified, but the first following instruction did
6934 not fit. This can happen with or without manual bundling. */
6935 if (md.num_slots_in_use > 0 && last_slot < 0)
6936 {
6937 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6938 _("`%s' does not fit into %s template"),
6939 idesc->name, ia64_templ_desc[template_val].name);
6940 /* Drop first insn so we don't livelock. */
6941 --md.num_slots_in_use;
6942 know (curr == first);
6943 ia64_free_opcode (md.slot[curr].idesc);
6944 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6945 md.slot[curr].user_template = -1;
6946 }
6947 else if (manual_bundling > 0)
6948 {
6949 if (md.num_slots_in_use > 0)
6950 {
6951 if (last_slot >= 2)
6952 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6953 _("`%s' does not fit into bundle"), idesc->name);
6954 else
6955 {
6956 const char *where;
6957
6958 if (template_val == 2)
6959 where = "X slot";
6960 else if (last_slot == 0)
6961 where = "slots 2 or 3";
6962 else
6963 where = "slot 3";
6964 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6965 _("`%s' can't go in %s of %s template"),
6966 idesc->name, where, ia64_templ_desc[template_val].name);
6967 }
6968 }
6969 else
6970 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6971 _("Missing '}' at end of file"));
6972 }
6973
6974 know (md.num_slots_in_use < NUM_SLOTS);
6975
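/* Illustrative note (not part of the original sources): an IA-64 bundle
   is 128 bits. As the packing below shows, bit 0 carries the
   end-of-insn-group (stop) flag, bits 1-4 the template number, and the
   three 41-bit slots occupy bits 5-45, 46-86 and 87-127. t0 holds
   bundle bits 0-63, t1 bits 64-127, both written little-endian. */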
6976 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6977 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6978
6979 number_to_chars_littleendian (f + 0, t0, 8);
6980 number_to_chars_littleendian (f + 8, t1, 8);
6981 }
6982
6983 int
6984 md_parse_option (int c, const char *arg)
6985 {
6986
6987 switch (c)
6988 {
6989 /* Switches from the Intel assembler. */
6990 case 'm':
6991 if (strcmp (arg, "ilp64") == 0
6992 || strcmp (arg, "lp64") == 0
6993 || strcmp (arg, "p64") == 0)
6994 {
6995 md.flags |= EF_IA_64_ABI64;
6996 }
6997 else if (strcmp (arg, "ilp32") == 0)
6998 {
6999 md.flags &= ~EF_IA_64_ABI64;
7000 }
7001 else if (strcmp (arg, "le") == 0)
7002 {
7003 md.flags &= ~EF_IA_64_BE;
7004 default_big_endian = 0;
7005 }
7006 else if (strcmp (arg, "be") == 0)
7007 {
7008 md.flags |= EF_IA_64_BE;
7009 default_big_endian = 1;
7010 }
7011 else if (strncmp (arg, "unwind-check=", 13) == 0)
7012 {
7013 arg += 13;
7014 if (strcmp (arg, "warning") == 0)
7015 md.unwind_check = unwind_check_warning;
7016 else if (strcmp (arg, "error") == 0)
7017 md.unwind_check = unwind_check_error;
7018 else
7019 return 0;
7020 }
7021 else if (strncmp (arg, "hint.b=", 7) == 0)
7022 {
7023 arg += 7;
7024 if (strcmp (arg, "ok") == 0)
7025 md.hint_b = hint_b_ok;
7026 else if (strcmp (arg, "warning") == 0)
7027 md.hint_b = hint_b_warning;
7028 else if (strcmp (arg, "error") == 0)
7029 md.hint_b = hint_b_error;
7030 else
7031 return 0;
7032 }
7033 else if (strncmp (arg, "tune=", 5) == 0)
7034 {
7035 arg += 5;
7036 if (strcmp (arg, "itanium1") == 0)
7037 md.tune = itanium1;
7038 else if (strcmp (arg, "itanium2") == 0)
7039 md.tune = itanium2;
7040 else
7041 return 0;
7042 }
7043 else
7044 return 0;
7045 break;
7046
7047 case 'N':
7048 if (strcmp (arg, "so") == 0)
7049 {
7050 /* Suppress signon message. */
7051 }
7052 else if (strcmp (arg, "pi") == 0)
7053 {
7054 /* Reject privileged instructions. FIXME */
7055 }
7056 else if (strcmp (arg, "us") == 0)
7057 {
7058 /* Allow union of signed and unsigned range. FIXME */
7059 }
7060 else if (strcmp (arg, "close_fcalls") == 0)
7061 {
7062 /* Do not resolve global function calls. */
7063 }
7064 else
7065 return 0;
7066 break;
7067
7068 case 'C':
7069 /* temp[="prefix"] Insert temporary labels into the object file
7070 symbol table prefixed by "prefix".
7071 Default prefix is ":temp:".
7072 */
7073 break;
7074
7075 case 'a':
7076 /* indirect=<tgt> Assume unannotated indirect branches behavior
7077 according to <tgt> --
7078 exit: branch out from the current context (default)
7079 labels: all labels in context may be branch targets
7080 */
7081 if (strncmp (arg, "indirect=", 9) != 0)
7082 return 0;
7083 break;
7084
7085 case 'x':
7086 /* -X conflicts with an ignored option, use -x instead */
7087 md.detect_dv = 1;
7088 if (!arg || strcmp (arg, "explicit") == 0)
7089 {
7090 /* set default mode to explicit */
7091 md.default_explicit_mode = 1;
7092 break;
7093 }
7094 else if (strcmp (arg, "auto") == 0)
7095 {
7096 md.default_explicit_mode = 0;
7097 }
7098 else if (strcmp (arg, "none") == 0)
7099 {
7100 md.detect_dv = 0;
7101 }
7102 else if (strcmp (arg, "debug") == 0)
7103 {
7104 md.debug_dv = 1;
7105 }
7106 else if (strcmp (arg, "debugx") == 0)
7107 {
7108 md.default_explicit_mode = 1;
7109 md.debug_dv = 1;
7110 }
7111 else if (strcmp (arg, "debugn") == 0)
7112 {
7113 md.debug_dv = 1;
7114 md.detect_dv = 0;
7115 }
7116 else
7117 {
7118 as_bad (_("Unrecognized option '-x%s'"), arg);
7119 }
7120 break;
7121
7122 case 'S':
7123 /* nops Print nops statistics. */
7124 break;
7125
7126 /* GNU specific switches for gcc. */
7127 case OPTION_MCONSTANT_GP:
7128 md.flags |= EF_IA_64_CONS_GP;
7129 break;
7130
7131 case OPTION_MAUTO_PIC:
7132 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7133 break;
7134
7135 default:
7136 return 0;
7137 }
7138
7139 return 1;
7140 }
7141
7142 void
7143 md_show_usage (FILE *stream)
7144 {
7145 fputs (_("\
7146 IA-64 options:\n\
7147 --mconstant-gp mark output file as using the constant-GP model\n\
7148 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7149 --mauto-pic mark output file as using the constant-GP model\n\
7150 without function descriptors (sets ELF header flag\n\
7151 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7152 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7153 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7154 -mtune=[itanium1|itanium2]\n\
7155 tune for a specific CPU (default -mtune=itanium2)\n\
7156 -munwind-check=[warning|error]\n\
7157 unwind directive check (default -munwind-check=warning)\n\
7158 -mhint.b=[ok|warning|error]\n\
7159 hint.b check (default -mhint.b=error)\n\
7160 -x | -xexplicit turn on dependency violation checking\n"), stream);
7161 /* Note for translators: "automagically" can be translated as "automatically" here. */
7162 fputs (_("\
7163 -xauto automagically remove dependency violations (default)\n\
7164 -xnone turn off dependency violation checking\n\
7165 -xdebug debug dependency violation checker\n\
7166 -xdebugn debug dependency violation checker but turn off\n\
7167 dependency violation checking\n\
7168 -xdebugx debug dependency violation checker and turn on\n\
7169 dependency violation checking\n"),
7170 stream);
7171 }
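/* Example invocation (illustrative only, using the options listed above):

     as -mlp64 -mle -munwind-check=error -xexplicit foo.s -o foo.o

   selects the LP64 data model and little-endian output, treats unwind
   directive problems as errors, and enables explicit dependency
   violation checking. */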
7172
7173 void
7174 ia64_after_parse_args (void)
7175 {
7176 if (debug_type == DEBUG_STABS)
7177 as_fatal (_("--gstabs is not supported for ia64"));
7178 }
7179
7180 /* Return true if TYPE fits in TEMPL at SLOT. */
7181
7182 static int
7183 match (int templ, int type, int slot)
7184 {
7185 enum ia64_unit unit;
7186 int result;
7187
7188 unit = ia64_templ_desc[templ].exec_unit[slot];
7189 switch (type)
7190 {
7191 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7192 case IA64_TYPE_A:
7193 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7194 break;
7195 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7196 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7197 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7198 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7199 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7200 default: result = 0; break;
7201 }
7202 return result;
7203 }
7204
7205 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7206 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7207 type M or I would fit in TEMPL at SLOT. */
7208
7209 static inline int
7210 extra_goodness (int templ, int slot)
7211 {
7212 switch (md.tune)
7213 {
7214 case itanium1:
7215 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7216 return 2;
7217 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7218 return 1;
7219 else
7220 return 0;
7221 break;
7222 case itanium2:
7223 if (match (templ, IA64_TYPE_M, slot)
7224 || match (templ, IA64_TYPE_I, slot))
7225 /* Favor M- and I-unit NOPs. We definitely want to avoid
7226 F-unit and B-unit NOPs, which may cause split-issue or
7227 less-than-optimal branch prediction. */
7228 return 2;
7229 else
7230 return 0;
7231 break;
7232 default:
7233 abort ();
7234 return 0;
7235 }
7236 }
7237
7238 /* This function is called once, at assembler startup time. It sets
7239 up all the tables, etc. that the MD part of the assembler will need
7240 that can be determined before arguments are parsed. */
7241 void
7242 md_begin (void)
7243 {
7244 int i, j, k, t, goodness, best, ok;
7245 const char *err;
7247
7248 md.auto_align = 1;
7249 md.explicit_mode = md.default_explicit_mode;
7250
7251 bfd_set_section_alignment (stdoutput, text_section, 4);
7252
7253 /* Make sure function pointers get initialized. */
7254 target_big_endian = -1;
7255 dot_byteorder (default_big_endian);
7256
7257 alias_hash = hash_new ();
7258 alias_name_hash = hash_new ();
7259 secalias_hash = hash_new ();
7260 secalias_name_hash = hash_new ();
7261
7262 pseudo_func[FUNC_DTP_MODULE].u.sym =
7263 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7264 &zero_address_frag);
7265
7266 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7267 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7268 &zero_address_frag);
7269
7270 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7271 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7272 &zero_address_frag);
7273
7274 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7275 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7276 &zero_address_frag);
7277
7278 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7279 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7280 &zero_address_frag);
7281
7282 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7283 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7284 &zero_address_frag);
7285
7286 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7287 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7288 &zero_address_frag);
7289
7290 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7291 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7292 &zero_address_frag);
7293
7294 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7295 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7296 &zero_address_frag);
7297
7298 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7299 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7300 &zero_address_frag);
7301
7302 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7303 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7304 &zero_address_frag);
7305
7306 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7307 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7308 &zero_address_frag);
7309
7310 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7311 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7312 &zero_address_frag);
7313
7314 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7315 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7316 &zero_address_frag);
7317
7318 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7319 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7320 &zero_address_frag);
7321
7322 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7323 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7324 &zero_address_frag);
7325
7326 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7327 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7328 &zero_address_frag);
7329
7330 #ifdef TE_VMS
7331 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7332 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7333 &zero_address_frag);
7334 #endif
7335
7336 if (md.tune != itanium1)
7337 {
7338 /* Convert MFI NOP bundles into MMI NOP bundles. */
7339 le_nop[0] = 0x8;
7340 le_nop_stop[0] = 0x9;
7341 }
7342
7343 /* Compute the table of best templates. We compute goodness as a
7344 base 4 value, in which each match counts for 3. Match-failures
7345 result in NOPs and we use extra_goodness() to pick the execution
7346 units that are best suited for issuing the NOP. */
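/* Worked example (illustrative, not part of the original sources): for
   the type sequence (M, I, I), a template with units M/I/I matches all
   three slots and scores 3 + 3 + 3 = 9, while an M/F/I template matches
   only slots 0 and 2 and scores at most 3 + 3 + 2, the 2 coming from
   extra_goodness() for the NOP in slot 1, so the full match wins. */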
7347 for (i = 0; i < IA64_NUM_TYPES; ++i)
7348 for (j = 0; j < IA64_NUM_TYPES; ++j)
7349 for (k = 0; k < IA64_NUM_TYPES; ++k)
7350 {
7351 best = 0;
7352 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7353 {
7354 goodness = 0;
7355 if (match (t, i, 0))
7356 {
7357 if (match (t, j, 1))
7358 {
7359 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7360 goodness = 3 + 3 + 3;
7361 else
7362 goodness = 3 + 3 + extra_goodness (t, 2);
7363 }
7364 else if (match (t, j, 2))
7365 goodness = 3 + 3 + extra_goodness (t, 1);
7366 else
7367 {
7368 goodness = 3;
7369 goodness += extra_goodness (t, 1);
7370 goodness += extra_goodness (t, 2);
7371 }
7372 }
7373 else if (match (t, i, 1))
7374 {
7375 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7376 goodness = 3 + 3;
7377 else
7378 goodness = 3 + extra_goodness (t, 2);
7379 }
7380 else if (match (t, i, 2))
7381 goodness = 3 + extra_goodness (t, 1);
7382
7383 if (goodness > best)
7384 {
7385 best = goodness;
7386 best_template[i][j][k] = t;
7387 }
7388 }
7389 }
7390
7391 #ifdef DEBUG_TEMPLATES
7392 /* For debugging changes to the best_template calculations. We don't care
7393 about combinations with invalid instructions, so start the loops at 1. */
7394 for (i = 0; i < IA64_NUM_TYPES; ++i)
7395 for (j = 0; j < IA64_NUM_TYPES; ++j)
7396 for (k = 0; k < IA64_NUM_TYPES; ++k)
7397 {
7398 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7399 'x', 'd' };
7400 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7401 type_letter[k],
7402 ia64_templ_desc[best_template[i][j][k]].name);
7403 }
7404 #endif
7405
7406 for (i = 0; i < NUM_SLOTS; ++i)
7407 md.slot[i].user_template = -1;
7408
7409 md.pseudo_hash = hash_new ();
7410 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7411 {
7412 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7413 (void *) (pseudo_opcode + i));
7414 if (err)
7415 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7416 pseudo_opcode[i].name, err);
7417 }
7418
7419 md.reg_hash = hash_new ();
7420 md.dynreg_hash = hash_new ();
7421 md.const_hash = hash_new ();
7422 md.entry_hash = hash_new ();
7423
7424 /* general registers: */
7425 declare_register_set ("r", 128, REG_GR);
7426 declare_register ("gp", REG_GR + 1);
7427 declare_register ("sp", REG_GR + 12);
7428 declare_register ("tp", REG_GR + 13);
7429 declare_register_set ("ret", 4, REG_GR + 8);
7430
7431 /* floating point registers: */
7432 declare_register_set ("f", 128, REG_FR);
7433 declare_register_set ("farg", 8, REG_FR + 8);
7434 declare_register_set ("fret", 8, REG_FR + 8);
7435
7436 /* branch registers: */
7437 declare_register_set ("b", 8, REG_BR);
7438 declare_register ("rp", REG_BR + 0);
7439
7440 /* predicate registers: */
7441 declare_register_set ("p", 64, REG_P);
7442 declare_register ("pr", REG_PR);
7443 declare_register ("pr.rot", REG_PR_ROT);
7444
7445 /* application registers: */
7446 declare_register_set ("ar", 128, REG_AR);
7447 for (i = 0; i < NELEMS (ar); ++i)
7448 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7449
7450 /* control registers: */
7451 declare_register_set ("cr", 128, REG_CR);
7452 for (i = 0; i < NELEMS (cr); ++i)
7453 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7454
7455 /* dahr registers: */
7456 declare_register_set ("dahr", 8, REG_DAHR);
7457
7458 declare_register ("ip", REG_IP);
7459 declare_register ("cfm", REG_CFM);
7460 declare_register ("psr", REG_PSR);
7461 declare_register ("psr.l", REG_PSR_L);
7462 declare_register ("psr.um", REG_PSR_UM);
7463
7464 for (i = 0; i < NELEMS (indirect_reg); ++i)
7465 {
7466 unsigned int regnum = indirect_reg[i].regnum;
7467
7468 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7469 }
7470
7471 /* pseudo-registers used to specify unwind info: */
7472 declare_register ("psp", REG_PSP);
7473
7474 for (i = 0; i < NELEMS (const_bits); ++i)
7475 {
7476 err = hash_insert (md.const_hash, const_bits[i].name,
7477 (void *) (const_bits + i));
7478 if (err)
7479 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7480 const_bits[i].name, err);
7481 }
7482
7483 /* Set the architecture and machine depending on defaults and command line
7484 options. */
7485 if (md.flags & EF_IA_64_ABI64)
7486 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7487 else
7488 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7489
7490 if (! ok)
7491 as_warn (_("Could not set architecture and machine"));
7492
7493 /* Set the pointer size and pointer shift size depending on md.flags */
7494
7495 if (md.flags & EF_IA_64_ABI64)
7496 {
7497 md.pointer_size = 8; /* pointers are 8 bytes */
7498 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7499 }
7500 else
7501 {
7502 md.pointer_size = 4; /* pointers are 4 bytes */
7503 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7504 }
7505
7506 md.mem_offset.hint = 0;
7507 md.path = 0;
7508 md.maxpaths = 0;
7509 md.entry_labels = NULL;
7510 }
7511
7512 /* Set the default options in md. Cannot do this in md_begin because
7513 that is called after md_parse_option which is where we set the
7514 options in md based on command line options. */
7515
7516 void
7517 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7518 {
7519 md.flags = MD_FLAGS_DEFAULT;
7520 #ifndef TE_VMS
7521 /* Don't turn on dependency checking for VMS, doesn't work. */
7522 md.detect_dv = 1;
7523 #endif
7524 /* FIXME: We should change it to unwind_check_error someday. */
7525 md.unwind_check = unwind_check_warning;
7526 md.hint_b = hint_b_error;
7527 md.tune = itanium2;
7528 }
7529
7530 /* Return a string for the target object file format. */
7531
7532 const char *
7533 ia64_target_format (void)
7534 {
7535 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7536 {
7537 if (md.flags & EF_IA_64_BE)
7538 {
7539 if (md.flags & EF_IA_64_ABI64)
7540 #if defined(TE_AIX50)
7541 return "elf64-ia64-aix-big";
7542 #elif defined(TE_HPUX)
7543 return "elf64-ia64-hpux-big";
7544 #else
7545 return "elf64-ia64-big";
7546 #endif
7547 else
7548 #if defined(TE_AIX50)
7549 return "elf32-ia64-aix-big";
7550 #elif defined(TE_HPUX)
7551 return "elf32-ia64-hpux-big";
7552 #else
7553 return "elf32-ia64-big";
7554 #endif
7555 }
7556 else
7557 {
7558 if (md.flags & EF_IA_64_ABI64)
7559 #if defined (TE_AIX50)
7560 return "elf64-ia64-aix-little";
7561 #elif defined (TE_VMS)
7562 {
7563 md.flags |= EF_IA_64_ARCHVER_1;
7564 return "elf64-ia64-vms";
7565 }
7566 #else
7567 return "elf64-ia64-little";
7568 #endif
7569 else
7570 #ifdef TE_AIX50
7571 return "elf32-ia64-aix-little";
7572 #else
7573 return "elf32-ia64-little";
7574 #endif
7575 }
7576 }
7577 else
7578 return "unknown-format";
7579 }
7580
7581 void
7582 ia64_end_of_source (void)
7583 {
7584 /* terminate insn group upon reaching end of file: */
7585 insn_group_break (1, 0, 0);
7586
7587 /* emits slots we haven't written yet: */
7588 ia64_flush_insns ();
7589
7590 bfd_set_private_flags (stdoutput, md.flags);
7591
7592 md.mem_offset.hint = 0;
7593 }
7594
7595 void
7596 ia64_start_line (void)
7597 {
7598 static int first;
7599
7600 if (!first) {
7601 /* Make sure we don't reference input_line_pointer[-1] when that's
7602 not valid. */
7603 first = 1;
7604 return;
7605 }
7606
7607 if (md.qp.X_op == O_register)
7608 as_bad (_("qualifying predicate not followed by instruction"));
7609 md.qp.X_op = O_absent;
7610
7611 if (ignore_input ())
7612 return;
7613
7614 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7615 {
7616 if (md.detect_dv && !md.explicit_mode)
7617 {
7618 static int warned;
7619
7620 if (!warned)
7621 {
7622 warned = 1;
7623 as_warn (_("Explicit stops are ignored in auto mode"));
7624 }
7625 }
7626 else
7627 insn_group_break (1, 0, 0);
7628 }
7629 else if (input_line_pointer[-1] == '{')
7630 {
7631 if (md.manual_bundling)
7632 as_warn (_("Found '{' when manual bundling is already turned on"));
7633 else
7634 CURR_SLOT.manual_bundling_on = 1;
7635 md.manual_bundling = 1;
7636
7637 /* Bundling is only acceptable in explicit mode
7638 or when in default automatic mode. */
7639 if (md.detect_dv && !md.explicit_mode)
7640 {
7641 if (!md.mode_explicitly_set
7642 && !md.default_explicit_mode)
7643 dot_dv_mode ('E');
7644 else
7645 as_warn (_("Found '{' after explicit switch to automatic mode"));
7646 }
7647 }
7648 else if (input_line_pointer[-1] == '}')
7649 {
7650 if (!md.manual_bundling)
7651 as_warn (_("Found '}' when manual bundling is off"));
7652 else
7653 PREV_SLOT.manual_bundling_off = 1;
7654 md.manual_bundling = 0;
7655
7656 /* switch back to automatic mode, if applicable */
7657 if (md.detect_dv
7658 && md.explicit_mode
7659 && !md.mode_explicitly_set
7660 && !md.default_explicit_mode)
7661 dot_dv_mode ('A');
7662 }
7663 }
7664
7665 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7666 labels. */
7667 static int defining_tag = 0;
7668
7669 int
7670 ia64_unrecognized_line (int ch)
7671 {
7672 switch (ch)
7673 {
7674 case '(':
7675 expression_and_evaluate (&md.qp);
7676 if (*input_line_pointer++ != ')')
7677 {
7678 as_bad (_("Expected ')'"));
7679 return 0;
7680 }
7681 if (md.qp.X_op != O_register)
7682 {
7683 as_bad (_("Qualifying predicate expected"));
7684 return 0;
7685 }
7686 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7687 {
7688 as_bad (_("Predicate register expected"));
7689 return 0;
7690 }
7691 return 1;
7692
7693 case '[':
7694 {
7695 char *s;
7696 char c;
7697 symbolS *tag;
7698 int temp;
7699
7700 if (md.qp.X_op == O_register)
7701 {
7702 as_bad (_("Tag must come before qualifying predicate."));
7703 return 0;
7704 }
7705
7706 /* This implements just enough of read_a_source_file in read.c to
7707 recognize labels. */
7708 if (is_name_beginner (*input_line_pointer))
7709 {
7710 c = get_symbol_name (&s);
7711 }
7712 else if (LOCAL_LABELS_FB
7713 && ISDIGIT (*input_line_pointer))
7714 {
7715 temp = 0;
7716 while (ISDIGIT (*input_line_pointer))
7717 temp = (temp * 10) + *input_line_pointer++ - '0';
7718 fb_label_instance_inc (temp);
7719 s = fb_label_name (temp, 0);
7720 c = *input_line_pointer;
7721 }
7722 else
7723 {
7724 s = NULL;
7725 c = '\0';
7726 }
7727 if (c != ':')
7728 {
7729 /* Put ':' back for error messages' sake. */
7730 *input_line_pointer++ = ':';
7731 as_bad (_("Expected ':'"));
7732 return 0;
7733 }
7734
7735 defining_tag = 1;
7736 tag = colon (s);
7737 defining_tag = 0;
7738 /* Put ':' back for error messages' sake. */
7739 *input_line_pointer++ = ':';
7740 if (*input_line_pointer++ != ']')
7741 {
7742 as_bad (_("Expected ']'"));
7743 return 0;
7744 }
7745 if (! tag)
7746 {
7747 as_bad (_("Tag name expected"));
7748 return 0;
7749 }
7750 return 1;
7751 }
7752
7753 default:
7754 break;
7755 }
7756
7757 /* Not a valid line. */
7758 return 0;
7759 }
7760
7761 void
7762 ia64_frob_label (struct symbol *sym)
7763 {
7764 struct label_fix *fix;
7765
7766 /* Tags need special handling since they are not bundle breaks like
7767 labels. */
7768 if (defining_tag)
7769 {
7770 fix = obstack_alloc (&notes, sizeof (*fix));
7771 fix->sym = sym;
7772 fix->next = CURR_SLOT.tag_fixups;
7773 fix->dw2_mark_labels = FALSE;
7774 CURR_SLOT.tag_fixups = fix;
7775
7776 return;
7777 }
7778
7779 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7780 {
7781 md.last_text_seg = now_seg;
7782 fix = obstack_alloc (&notes, sizeof (*fix));
7783 fix->sym = sym;
7784 fix->next = CURR_SLOT.label_fixups;
7785 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7786 CURR_SLOT.label_fixups = fix;
7787
7788 /* Keep track of how many code entry points we've seen. */
7789 if (md.path == md.maxpaths)
7790 {
7791 md.maxpaths += 20;
7792 md.entry_labels = (const char **)
7793 xrealloc ((void *) md.entry_labels,
7794 md.maxpaths * sizeof (char *));
7795 }
7796 md.entry_labels[md.path++] = S_GET_NAME (sym);
7797 }
7798 }
7799
7800 #ifdef TE_HPUX
7801 /* The HP-UX linker will give unresolved symbol errors for symbols
7802 that are declared but unused. This routine removes declared,
7803 unused symbols from an object. */
7804 int
7805 ia64_frob_symbol (struct symbol *sym)
7806 {
7807 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7808 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7809 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7810 && ! S_IS_EXTERNAL (sym)))
7811 return 1;
7812 return 0;
7813 }
7814 #endif
7815
7816 void
7817 ia64_flush_pending_output (void)
7818 {
7819 if (!md.keep_pending_output
7820 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7821 {
7822 /* ??? This causes many unnecessary stop bits to be emitted.
7823 Unfortunately, it isn't clear if it is safe to remove this. */
7824 insn_group_break (1, 0, 0);
7825 ia64_flush_insns ();
7826 }
7827 }
7828
7829 /* Do ia64-specific expression optimization. All that's done here is
7830 to transform index expressions that are either due to the indexing
7831 of rotating registers or due to the indexing of indirect register
7832 sets. */
7833 int
7834 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7835 {
7836 if (op != O_index)
7837 return 0;
7838 resolve_expression (l);
7839 if (l->X_op == O_register)
7840 {
7841 unsigned num_regs = l->X_add_number >> 16;
7842
7843 resolve_expression (r);
7844 if (num_regs)
7845 {
7846 /* Left side is a .rotX-allocated register. */
7847 if (r->X_op != O_constant)
7848 {
7849 as_bad (_("Rotating register index must be a non-negative constant"));
7850 r->X_add_number = 0;
7851 }
7852 else if ((valueT) r->X_add_number >= num_regs)
7853 {
7854 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7855 r->X_add_number = 0;
7856 }
7857 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7858 return 1;
7859 }
7860 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7861 {
7862 if (r->X_op != O_register
7863 || r->X_add_number < REG_GR
7864 || r->X_add_number > REG_GR + 127)
7865 {
7866 as_bad (_("Indirect register index must be a general register"));
7867 r->X_add_number = REG_GR;
7868 }
7869 l->X_op = O_index;
7870 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7871 l->X_add_number = r->X_add_number;
7872 return 1;
7873 }
7874 }
7875 as_bad (_("Index can only be applied to rotating or indirect registers"));
7876 /* Fall back to some register the use of which has as few side effects
7877 as possible, to minimize subsequent error messages. */
7878 l->X_op = O_register;
7879 l->X_add_number = REG_GR + 3;
7880 return 1;
7881 }
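/* Illustrative example (not part of the original sources): after a
   ".rotr x[4]" directive, "x[2]" is folded above into the plain
   register base + 2, since the set size 4 is kept in the upper 16 bits
   of X_add_number; "dbr[r5]" instead becomes an O_index expression
   whose X_op_symbol identifies the dbr indirect register file and whose
   X_add_number is the general register r5. */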
7882
7883 int
7884 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7885 {
7886 struct const_desc *cdesc;
7887 struct dynreg *dr = 0;
7888 unsigned int idx;
7889 struct symbol *sym;
7890 char *end;
7891
7892 if (*name == '@')
7893 {
7894 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7895
7896 /* Find what relocation pseudo-function we're dealing with. */
7897 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7898 if (pseudo_func[idx].name
7899 && pseudo_func[idx].name[0] == name[1]
7900 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7901 {
7902 pseudo_type = pseudo_func[idx].type;
7903 break;
7904 }
7905 switch (pseudo_type)
7906 {
7907 case PSEUDO_FUNC_RELOC:
7908 end = input_line_pointer;
7909 if (*nextcharP != '(')
7910 {
7911 as_bad (_("Expected '('"));
7912 break;
7913 }
7914 /* Skip '('. */
7915 ++input_line_pointer;
7916 expression (e);
7917 if (*input_line_pointer != ')')
7918 {
7919 as_bad (_("Missing ')'"));
7920 goto done;
7921 }
7922 /* Skip ')'. */
7923 ++input_line_pointer;
7924 #ifdef TE_VMS
7925 if (idx == FUNC_SLOTCOUNT_RELOC)
7926 {
7927 /* @slotcount can accept any expression. Canonicalize. */
7928 e->X_add_symbol = make_expr_symbol (e);
7929 e->X_op = O_symbol;
7930 e->X_add_number = 0;
7931 }
7932 #endif
7933 if (e->X_op != O_symbol)
7934 {
7935 if (e->X_op != O_pseudo_fixup)
7936 {
7937 as_bad (_("Not a symbolic expression"));
7938 goto done;
7939 }
7940 if (idx != FUNC_LT_RELATIVE)
7941 {
7942 as_bad (_("Illegal combination of relocation functions"));
7943 goto done;
7944 }
7945 switch (S_GET_VALUE (e->X_op_symbol))
7946 {
7947 case FUNC_FPTR_RELATIVE:
7948 idx = FUNC_LT_FPTR_RELATIVE; break;
7949 case FUNC_DTP_MODULE:
7950 idx = FUNC_LT_DTP_MODULE; break;
7951 case FUNC_DTP_RELATIVE:
7952 idx = FUNC_LT_DTP_RELATIVE; break;
7953 case FUNC_TP_RELATIVE:
7954 idx = FUNC_LT_TP_RELATIVE; break;
7955 default:
7956 as_bad (_("Illegal combination of relocation functions"));
7957 goto done;
7958 }
7959 }
7960 /* Make sure gas doesn't get rid of local symbols that are used
7961 in relocs. */
7962 e->X_op = O_pseudo_fixup;
7963 e->X_op_symbol = pseudo_func[idx].u.sym;
7964 done:
7965 *nextcharP = *input_line_pointer;
7966 break;
7967
7968 case PSEUDO_FUNC_CONST:
7969 e->X_op = O_constant;
7970 e->X_add_number = pseudo_func[idx].u.ival;
7971 break;
7972
7973 case PSEUDO_FUNC_REG:
7974 e->X_op = O_register;
7975 e->X_add_number = pseudo_func[idx].u.ival;
7976 break;
7977
7978 default:
7979 return 0;
7980 }
7981 return 1;
7982 }
7983
7984 /* first see if NAME is a known register name: */
7985 sym = hash_find (md.reg_hash, name);
7986 if (sym)
7987 {
7988 e->X_op = O_register;
7989 e->X_add_number = S_GET_VALUE (sym);
7990 return 1;
7991 }
7992
7993 cdesc = hash_find (md.const_hash, name);
7994 if (cdesc)
7995 {
7996 e->X_op = O_constant;
7997 e->X_add_number = cdesc->value;
7998 return 1;
7999 }
8000
8001 /* check for inN, locN, or outN: */
8002 idx = 0;
8003 switch (name[0])
8004 {
8005 case 'i':
8006 if (name[1] == 'n' && ISDIGIT (name[2]))
8007 {
8008 dr = &md.in;
8009 idx = 2;
8010 }
8011 break;
8012
8013 case 'l':
8014 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8015 {
8016 dr = &md.loc;
8017 idx = 3;
8018 }
8019 break;
8020
8021 case 'o':
8022 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8023 {
8024 dr = &md.out;
8025 idx = 3;
8026 }
8027 break;
8028
8029 default:
8030 break;
8031 }
8032
8033 /* Ignore register numbers with leading zeroes, except zero itself. */
8034 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8035 {
8036 unsigned long regnum;
8037
8038 /* The name is inN, locN, or outN; parse the register number. */
8039 regnum = strtoul (name + idx, &end, 10);
8040 if (end > name + idx && *end == '\0' && regnum < 96)
8041 {
8042 if (regnum >= dr->num_regs)
8043 {
8044 if (!dr->num_regs)
8045 as_bad (_("No current frame"));
8046 else
8047 as_bad (_("Register number out of range 0..%u"),
8048 dr->num_regs - 1);
8049 regnum = 0;
8050 }
8051 e->X_op = O_register;
8052 e->X_add_number = dr->base + regnum;
8053 return 1;
8054 }
8055 }
8056
8057 end = xstrdup (name);
8058 name = ia64_canonicalize_symbol_name (end);
8059 if ((dr = hash_find (md.dynreg_hash, name)))
8060 {
8061 /* We've got ourselves the name of a rotating register set.
8062 Store the base register number in the low 16 bits of
8063 X_add_number and the size of the register set in the top 16
8064 bits. */
8065 e->X_op = O_register;
8066 e->X_add_number = dr->base | (dr->num_regs << 16);
8067 free (end);
8068 return 1;
8069 }
8070 free (end);
8071 return 0;
8072 }
8073
8074 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8075
8076 char *
8077 ia64_canonicalize_symbol_name (char *name)
8078 {
8079 size_t len = strlen (name), full = len;
8080
8081 while (len > 0 && name[len - 1] == '#')
8082 --len;
8083 if (len <= 0)
8084 {
8085 if (full > 0)
8086 as_bad (_("Standalone `#' is illegal"));
8087 }
8088 else if (len < full - 1)
8089 as_warn (_("Redundant `#' suffix operators"));
8090 name[len] = '\0';
8091 return name;
8092 }
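/* For example (illustrative): "foo#" is canonicalized to "foo"; "foo##"
   additionally triggers the "Redundant `#' suffix operators" warning; a
   bare "#" is rejected as "Standalone `#' is illegal". */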
8093
8094 /* Return true if idesc is a conditional branch instruction. This excludes
8095 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8096 because they always read/write resources regardless of the value of the
8097 qualifying predicate. br.ia must always use p0, and hence is always
8098 taken. Thus this function returns true for branches which can fall
8099 through, and which use no resources if they do fall through. */
8100
8101 static int
8102 is_conditional_branch (struct ia64_opcode *idesc)
8103 {
8104 /* br is a conditional branch. Everything that starts with br. except
8105 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8106 Everything that starts with brl is a conditional branch. */
8107 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8108 && (idesc->name[2] == '\0'
8109 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8110 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8111 || idesc->name[2] == 'l'
8112 /* br.cond, br.call, br.clr */
8113 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8114 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8115 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8116 }
8117
8118 /* Return whether the given opcode is a taken branch. If there's any doubt,
8119 returns zero. */
8120
8121 static int
8122 is_taken_branch (struct ia64_opcode *idesc)
8123 {
8124 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8125 || strncmp (idesc->name, "br.ia", 5) == 0);
8126 }
8127
8128 /* Return whether the given opcode is an interruption or rfi. If there's any
8129 doubt, returns zero. */
8130
8131 static int
8132 is_interruption_or_rfi (struct ia64_opcode *idesc)
8133 {
8134 if (strcmp (idesc->name, "rfi") == 0)
8135 return 1;
8136 return 0;
8137 }
8138
8139 /* Returns the index of the given dependency in the opcode's list of chks, or
8140 -1 if there is no dependency. */
8141
8142 static int
8143 depends_on (int depind, struct ia64_opcode *idesc)
8144 {
8145 int i;
8146 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8147 for (i = 0; i < dep->nchks; i++)
8148 {
8149 if (depind == DEP (dep->chks[i]))
8150 return i;
8151 }
8152 return -1;
8153 }
8154
8155 /* Determine a set of specific resources used for a particular resource
8156 class. Returns the number of specific resources identified. For those
8157 cases which are not determinable statically, the resource returned is
8158 marked nonspecific.
8159
8160 Meanings of value in 'NOTE':
8161 1) only read/write when the register number is explicitly encoded in the
8162 insn.
8163 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8164 accesses CFM when qualifying predicate is in the rotating region.
8165 3) general register value is used to specify an indirect register; not
8166 determinable statically.
8167 4) only read the given resource when bits 7:0 of the indirect index
8168 register value does not match the register number of the resource; not
8169 determinable statically.
8170 5) all rules are implementation specific.
8171 6) only when both the index specified by the reader and the index specified
8172 by the writer have the same value in bits 63:61; not determinable
8173 statically.
8174 7) only access the specified resource when the corresponding mask bit is
8175 set
8176 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8177 only read when these insns reference FR2-31
8178 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8179 written when these insns write FR32-127
8180 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8181 instruction
8182 11) The target predicates are written independently of PR[qp], but source
8183 registers are only read if PR[qp] is true. Since the state of PR[qp]
8184 cannot statically be determined, all source registers are marked used.
8185 12) This insn only reads the specified predicate register when that
8186 register is the PR[qp].
8187 13) This reference to ld-c only applies to the GR whose value is loaded
8188 with data returned from memory, not the post-incremented address register.
8189 14) The RSE resource includes the implementation-specific RSE internal
8190 state resources. At least one (and possibly more) of these resources are
8191 read by each instruction listed in IC:rse-readers. At least one (and
8192 possibly more) of these resources are written by each insn listed in
8193 IC:rse-writers.
8194 15+16) Represents reserved instructions, which the assembler does not
8195 generate.
8196 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8197 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8198
8199 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8200 this code; there are no dependency violations based on memory access.
8201 */
8202
8203 #define MAX_SPECS 256
8204 #define DV_CHK 1
8205 #define DV_REG 0
8206
8207 static int
8208 specify_resource (const struct ia64_dependency *dep,
8209 struct ia64_opcode *idesc,
8210 /* is this a DV chk or a DV reg? */
8211 int type,
8212 /* returned specific resources */
8213 struct rsrc specs[MAX_SPECS],
8214 /* resource note for this insn's usage */
8215 int note,
8216 /* which execution path to examine */
8217 int path)
8218 {
8219 int count = 0;
8220 int i;
8221 int rsrc_write = 0;
8222 struct rsrc tmpl;
8223
8224 if (dep->mode == IA64_DV_WAW
8225 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8226 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8227 rsrc_write = 1;
8228
8229 /* template for any resources we identify */
8230 tmpl.dependency = dep;
8231 tmpl.note = note;
8232 tmpl.insn_srlz = tmpl.data_srlz = 0;
8233 tmpl.qp_regno = CURR_SLOT.qp_regno;
8234 tmpl.link_to_qp_branch = 1;
8235 tmpl.mem_offset.hint = 0;
8236 tmpl.mem_offset.offset = 0;
8237 tmpl.mem_offset.base = 0;
8238 tmpl.specific = 1;
8239 tmpl.index = -1;
8240 tmpl.cmp_type = CMP_NONE;
8241 tmpl.depind = 0;
8242 tmpl.file = NULL;
8243 tmpl.line = 0;
8244 tmpl.path = 0;
8245
8246 #define UNHANDLED \
8247 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8248 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8249 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8250
8251 /* we don't need to track these */
8252 if (dep->semantics == IA64_DVS_NONE)
8253 return 0;
8254
8255 switch (dep->specifier)
8256 {
8257 case IA64_RS_AR_K:
8258 if (note == 1)
8259 {
8260 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8261 {
8262 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8263 if (regno >= 0 && regno <= 7)
8264 {
8265 specs[count] = tmpl;
8266 specs[count++].index = regno;
8267 }
8268 }
8269 }
8270 else if (note == 0)
8271 {
8272 for (i = 0; i < 8; i++)
8273 {
8274 specs[count] = tmpl;
8275 specs[count++].index = i;
8276 }
8277 }
8278 else
8279 {
8280 UNHANDLED;
8281 }
8282 break;
8283
8284 case IA64_RS_AR_UNAT:
8285 /* This is a mov =AR or mov AR= instruction. */
8286 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8287 {
8288 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8289 if (regno == AR_UNAT)
8290 {
8291 specs[count++] = tmpl;
8292 }
8293 }
8294 else
8295 {
8296 /* This is a spill/fill, or other instruction that modifies the
8297 unat register. */
8298
8299 /* Unless we can determine the specific bits used, mark the whole
8300 thing; bits 8:3 of the memory address indicate the bit used in
8301 UNAT. The .mem.offset hint may be used to eliminate a small
8302 subset of conflicts. */
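/* Illustrative example: with ".mem.offset 40, 0" in effect, a following
st8.spill is taken to use UNAT bit (40 >> 3) & 0x3F == 5, so only that bit
is marked instead of the whole register. */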
8303 specs[count] = tmpl;
8304 if (md.mem_offset.hint)
8305 {
8306 if (md.debug_dv)
8307 fprintf (stderr, " Using hint for spill/fill\n");
8308 /* The index isn't actually used, just set it to something
8309 approximating the bit index. */
8310 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8311 specs[count].mem_offset.hint = 1;
8312 specs[count].mem_offset.offset = md.mem_offset.offset;
8313 specs[count++].mem_offset.base = md.mem_offset.base;
8314 }
8315 else
8316 {
8317 specs[count++].specific = 0;
8318 }
8319 }
8320 break;
8321
8322 case IA64_RS_AR:
8323 if (note == 1)
8324 {
8325 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8326 {
8327 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8328 if ((regno >= 8 && regno <= 15)
8329 || (regno >= 20 && regno <= 23)
8330 || (regno >= 31 && regno <= 39)
8331 || (regno >= 41 && regno <= 47)
8332 || (regno >= 67 && regno <= 111))
8333 {
8334 specs[count] = tmpl;
8335 specs[count++].index = regno;
8336 }
8337 }
8338 }
8339 else
8340 {
8341 UNHANDLED;
8342 }
8343 break;
8344
8345 case IA64_RS_ARb:
8346 if (note == 1)
8347 {
8348 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8349 {
8350 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8351 if ((regno >= 48 && regno <= 63)
8352 || (regno >= 112 && regno <= 127))
8353 {
8354 specs[count] = tmpl;
8355 specs[count++].index = regno;
8356 }
8357 }
8358 }
8359 else if (note == 0)
8360 {
8361 for (i = 48; i < 64; i++)
8362 {
8363 specs[count] = tmpl;
8364 specs[count++].index = i;
8365 }
8366 for (i = 112; i < 128; i++)
8367 {
8368 specs[count] = tmpl;
8369 specs[count++].index = i;
8370 }
8371 }
8372 else
8373 {
8374 UNHANDLED;
8375 }
8376 break;
8377
8378 case IA64_RS_BR:
8379 if (note != 1)
8380 {
8381 UNHANDLED;
8382 }
8383 else
8384 {
8385 if (rsrc_write)
8386 {
8387 for (i = 0; i < idesc->num_outputs; i++)
8388 if (idesc->operands[i] == IA64_OPND_B1
8389 || idesc->operands[i] == IA64_OPND_B2)
8390 {
8391 specs[count] = tmpl;
8392 specs[count++].index =
8393 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8394 }
8395 }
8396 else
8397 {
8398 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8399 if (idesc->operands[i] == IA64_OPND_B1
8400 || idesc->operands[i] == IA64_OPND_B2)
8401 {
8402 specs[count] = tmpl;
8403 specs[count++].index =
8404 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8405 }
8406 }
8407 }
8408 break;
8409
8410 case IA64_RS_CPUID: /* four or more registers */
8411 if (note == 3)
8412 {
8413 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8414 {
8415 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8416 if (regno >= 0 && regno < NELEMS (gr_values)
8417 && KNOWN (regno))
8418 {
8419 specs[count] = tmpl;
8420 specs[count++].index = gr_values[regno].value & 0xFF;
8421 }
8422 else
8423 {
8424 specs[count] = tmpl;
8425 specs[count++].specific = 0;
8426 }
8427 }
8428 }
8429 else
8430 {
8431 UNHANDLED;
8432 }
8433 break;
8434
8435 case IA64_RS_DBR: /* four or more registers */
8436 if (note == 3)
8437 {
8438 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8439 {
8440 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8441 if (regno >= 0 && regno < NELEMS (gr_values)
8442 && KNOWN (regno))
8443 {
8444 specs[count] = tmpl;
8445 specs[count++].index = gr_values[regno].value & 0xFF;
8446 }
8447 else
8448 {
8449 specs[count] = tmpl;
8450 specs[count++].specific = 0;
8451 }
8452 }
8453 }
8454 else if (note == 0 && !rsrc_write)
8455 {
8456 specs[count] = tmpl;
8457 specs[count++].specific = 0;
8458 }
8459 else
8460 {
8461 UNHANDLED;
8462 }
8463 break;
8464
8465 case IA64_RS_IBR: /* four or more registers */
8466 if (note == 3)
8467 {
8468 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8469 {
8470 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8471 if (regno >= 0 && regno < NELEMS (gr_values)
8472 && KNOWN (regno))
8473 {
8474 specs[count] = tmpl;
8475 specs[count++].index = gr_values[regno].value & 0xFF;
8476 }
8477 else
8478 {
8479 specs[count] = tmpl;
8480 specs[count++].specific = 0;
8481 }
8482 }
8483 }
8484 else
8485 {
8486 UNHANDLED;
8487 }
8488 break;
8489
8490 case IA64_RS_MSR:
8491 if (note == 5)
8492 {
8493 /* These are implementation specific. Force all references to
8494 conflict with all other references. */
8495 specs[count] = tmpl;
8496 specs[count++].specific = 0;
8497 }
8498 else
8499 {
8500 UNHANDLED;
8501 }
8502 break;
8503
8504 case IA64_RS_PKR: /* 16 or more registers */
8505 if (note == 3 || note == 4)
8506 {
8507 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8508 {
8509 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8510 if (regno >= 0 && regno < NELEMS (gr_values)
8511 && KNOWN (regno))
8512 {
8513 if (note == 3)
8514 {
8515 specs[count] = tmpl;
8516 specs[count++].index = gr_values[regno].value & 0xFF;
8517 }
8518 else
8519 for (i = 0; i < NELEMS (gr_values); i++)
8520 {
8521 /* Uses all registers *except* the one in R3. */
8522 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8523 {
8524 specs[count] = tmpl;
8525 specs[count++].index = i;
8526 }
8527 }
8528 }
8529 else
8530 {
8531 specs[count] = tmpl;
8532 specs[count++].specific = 0;
8533 }
8534 }
8535 }
8536 else if (note == 0)
8537 {
8538 /* probe et al. */
8539 specs[count] = tmpl;
8540 specs[count++].specific = 0;
8541 }
8542 break;
8543
8544 case IA64_RS_PMC: /* four or more registers */
8545 if (note == 3)
8546 {
8547 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8548 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8550 {
8551 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8552 ? 1 : !rsrc_write);
8553 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8554 if (regno >= 0 && regno < NELEMS (gr_values)
8555 && KNOWN (regno))
8556 {
8557 specs[count] = tmpl;
8558 specs[count++].index = gr_values[regno].value & 0xFF;
8559 }
8560 else
8561 {
8562 specs[count] = tmpl;
8563 specs[count++].specific = 0;
8564 }
8565 }
8566 }
8567 else
8568 {
8569 UNHANDLED;
8570 }
8571 break;
8572
8573 case IA64_RS_PMD: /* four or more registers */
8574 if (note == 3)
8575 {
8576 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8577 {
8578 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8579 if (regno >= 0 && regno < NELEMS (gr_values)
8580 && KNOWN (regno))
8581 {
8582 specs[count] = tmpl;
8583 specs[count++].index = gr_values[regno].value & 0xFF;
8584 }
8585 else
8586 {
8587 specs[count] = tmpl;
8588 specs[count++].specific = 0;
8589 }
8590 }
8591 }
8592 else
8593 {
8594 UNHANDLED;
8595 }
8596 break;
8597
8598 case IA64_RS_RR: /* eight registers */
8599 if (note == 6)
8600 {
8601 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8602 {
8603 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8604 if (regno >= 0 && regno < NELEMS (gr_values)
8605 && KNOWN (regno))
8606 {
8607 specs[count] = tmpl;
8608 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8609 }
8610 else
8611 {
8612 specs[count] = tmpl;
8613 specs[count++].specific = 0;
8614 }
8615 }
8616 }
8617 else if (note == 0 && !rsrc_write)
8618 {
8619 specs[count] = tmpl;
8620 specs[count++].specific = 0;
8621 }
8622 else
8623 {
8624 UNHANDLED;
8625 }
8626 break;
8627
8628 case IA64_RS_CR_IRR:
8629 if (note == 0)
8630 {
8631 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8632 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8633 if (rsrc_write
8634 && idesc->operands[1] == IA64_OPND_CR3
8635 && regno == CR_IVR)
8636 {
8637 for (i = 0; i < 4; i++)
8638 {
8639 specs[count] = tmpl;
8640 specs[count++].index = CR_IRR0 + i;
8641 }
8642 }
8643 }
8644 else if (note == 1)
8645 {
8646 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8647 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8648 && regno >= CR_IRR0
8649 && regno <= CR_IRR3)
8650 {
8651 specs[count] = tmpl;
8652 specs[count++].index = regno;
8653 }
8654 }
8655 else
8656 {
8657 UNHANDLED;
8658 }
8659 break;
8660
8661 case IA64_RS_CR_IIB:
8662 if (note != 0)
8663 {
8664 UNHANDLED;
8665 }
8666 else
8667 {
8668 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8669 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8670 && (regno == CR_IIB0 || regno == CR_IIB1))
8671 {
8672 specs[count] = tmpl;
8673 specs[count++].index = regno;
8674 }
8675 }
8676 break;
8677
8678 case IA64_RS_CR_LRR:
8679 if (note != 1)
8680 {
8681 UNHANDLED;
8682 }
8683 else
8684 {
8685 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8686 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8687 && (regno == CR_LRR0 || regno == CR_LRR1))
8688 {
8689 specs[count] = tmpl;
8690 specs[count++].index = regno;
8691 }
8692 }
8693 break;
8694
8695 case IA64_RS_CR:
8696 if (note == 1)
8697 {
8698 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8699 {
8700 specs[count] = tmpl;
8701 specs[count++].index =
8702 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8703 }
8704 }
8705 else
8706 {
8707 UNHANDLED;
8708 }
8709 break;
8710
8711 case IA64_RS_DAHR:
8712 if (note == 0)
8713 {
8714 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8715 {
8716 specs[count] = tmpl;
8717 specs[count++].index =
8718 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8719 }
8720 }
8721 else
8722 {
8723 UNHANDLED;
8724 }
8725 break;
8726
8727 case IA64_RS_FR:
8728 case IA64_RS_FRb:
8729 if (note != 1)
8730 {
8731 UNHANDLED;
8732 }
8733 else if (rsrc_write)
8734 {
8735 if (dep->specifier == IA64_RS_FRb
8736 && idesc->operands[0] == IA64_OPND_F1)
8737 {
8738 specs[count] = tmpl;
8739 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8740 }
8741 }
8742 else
8743 {
8744 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8745 {
8746 if (idesc->operands[i] == IA64_OPND_F2
8747 || idesc->operands[i] == IA64_OPND_F3
8748 || idesc->operands[i] == IA64_OPND_F4)
8749 {
8750 specs[count] = tmpl;
8751 specs[count++].index =
8752 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8753 }
8754 }
8755 }
8756 break;
8757
8758 case IA64_RS_GR:
8759 if (note == 13)
8760 {
8761 /* This reference applies only to the GR whose value is loaded with
8762 data returned from memory. */
8763 specs[count] = tmpl;
8764 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8765 }
8766 else if (note == 1)
8767 {
8768 if (rsrc_write)
8769 {
8770 for (i = 0; i < idesc->num_outputs; i++)
8771 if (idesc->operands[i] == IA64_OPND_R1
8772 || idesc->operands[i] == IA64_OPND_R2
8773 || idesc->operands[i] == IA64_OPND_R3)
8774 {
8775 specs[count] = tmpl;
8776 specs[count++].index =
8777 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8778 }
8779 if (idesc->flags & IA64_OPCODE_POSTINC)
8780 for (i = 0; i < NELEMS (idesc->operands); i++)
8781 if (idesc->operands[i] == IA64_OPND_MR3)
8782 {
8783 specs[count] = tmpl;
8784 specs[count++].index =
8785 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8786 }
8787 }
8788 else
8789 {
8790 /* Look for anything that reads a GR. */
8791 for (i = 0; i < NELEMS (idesc->operands); i++)
8792 {
8793 if (idesc->operands[i] == IA64_OPND_MR3
8794 || idesc->operands[i] == IA64_OPND_CPUID_R3
8795 || idesc->operands[i] == IA64_OPND_DBR_R3
8796 || idesc->operands[i] == IA64_OPND_IBR_R3
8797 || idesc->operands[i] == IA64_OPND_MSR_R3
8798 || idesc->operands[i] == IA64_OPND_PKR_R3
8799 || idesc->operands[i] == IA64_OPND_PMC_R3
8800 || idesc->operands[i] == IA64_OPND_PMD_R3
8801 || idesc->operands[i] == IA64_OPND_DAHR_R3
8802 || idesc->operands[i] == IA64_OPND_RR_R3
8803 || ((i >= idesc->num_outputs)
8804 && (idesc->operands[i] == IA64_OPND_R1
8805 || idesc->operands[i] == IA64_OPND_R2
8806 || idesc->operands[i] == IA64_OPND_R3
8807 /* addl source register. */
8808 || idesc->operands[i] == IA64_OPND_R3_2)))
8809 {
8810 specs[count] = tmpl;
8811 specs[count++].index =
8812 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8813 }
8814 }
8815 }
8816 }
8817 else
8818 {
8819 UNHANDLED;
8820 }
8821 break;
8822
8823 /* This is the same as IA64_RS_PRr, except that the register range is
8824 from 1 - 15, and there are no rotating register reads/writes here. */
8825 case IA64_RS_PR:
8826 if (note == 0)
8827 {
8828 for (i = 1; i < 16; i++)
8829 {
8830 specs[count] = tmpl;
8831 specs[count++].index = i;
8832 }
8833 }
8834 else if (note == 7)
8835 {
8836 valueT mask = 0;
8837 /* Mark only those registers indicated by the mask. */
8838 if (rsrc_write)
8839 {
8840 mask = CURR_SLOT.opnd[2].X_add_number;
8841 for (i = 1; i < 16; i++)
8842 if (mask & ((valueT) 1 << i))
8843 {
8844 specs[count] = tmpl;
8845 specs[count++].index = i;
8846 }
8847 }
8848 else
8849 {
8850 UNHANDLED;
8851 }
8852 }
8853 else if (note == 11) /* note 11 implies note 1 as well */
8854 {
8855 if (rsrc_write)
8856 {
8857 for (i = 0; i < idesc->num_outputs; i++)
8858 {
8859 if (idesc->operands[i] == IA64_OPND_P1
8860 || idesc->operands[i] == IA64_OPND_P2)
8861 {
8862 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8863 if (regno >= 1 && regno < 16)
8864 {
8865 specs[count] = tmpl;
8866 specs[count++].index = regno;
8867 }
8868 }
8869 }
8870 }
8871 else
8872 {
8873 UNHANDLED;
8874 }
8875 }
8876 else if (note == 12)
8877 {
8878 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8879 {
8880 specs[count] = tmpl;
8881 specs[count++].index = CURR_SLOT.qp_regno;
8882 }
8883 }
8884 else if (note == 1)
8885 {
8886 if (rsrc_write)
8887 {
8888 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8889 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8890 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8891 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8892
8893 if ((idesc->operands[0] == IA64_OPND_P1
8894 || idesc->operands[0] == IA64_OPND_P2)
8895 && p1 >= 1 && p1 < 16)
8896 {
8897 specs[count] = tmpl;
8898 specs[count].cmp_type =
8899 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8900 specs[count++].index = p1;
8901 }
8902 if ((idesc->operands[1] == IA64_OPND_P1
8903 || idesc->operands[1] == IA64_OPND_P2)
8904 && p2 >= 1 && p2 < 16)
8905 {
8906 specs[count] = tmpl;
8907 specs[count].cmp_type =
8908 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8909 specs[count++].index = p2;
8910 }
8911 }
8912 else
8913 {
8914 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8915 {
8916 specs[count] = tmpl;
8917 specs[count++].index = CURR_SLOT.qp_regno;
8918 }
8919 if (idesc->operands[1] == IA64_OPND_PR)
8920 {
8921 for (i = 1; i < 16; i++)
8922 {
8923 specs[count] = tmpl;
8924 specs[count++].index = i;
8925 }
8926 }
8927 }
8928 }
8929 else
8930 {
8931 UNHANDLED;
8932 }
8933 break;
8934
8935 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8936 simplified cases of this. */
8937 case IA64_RS_PRr:
8938 if (note == 0)
8939 {
8940 for (i = 16; i < 63; i++)
8941 {
8942 specs[count] = tmpl;
8943 specs[count++].index = i;
8944 }
8945 }
8946 else if (note == 7)
8947 {
8948 valueT mask = 0;
8949 /* Mark only those registers indicated by the mask. */
8950 if (rsrc_write
8951 && idesc->operands[0] == IA64_OPND_PR)
8952 {
8953 mask = CURR_SLOT.opnd[2].X_add_number;
8954 if (mask & ((valueT) 1 << 16))
8955 for (i = 16; i < 63; i++)
8956 {
8957 specs[count] = tmpl;
8958 specs[count++].index = i;
8959 }
8960 }
8961 else if (rsrc_write
8962 && idesc->operands[0] == IA64_OPND_PR_ROT)
8963 {
8964 for (i = 16; i < 63; i++)
8965 {
8966 specs[count] = tmpl;
8967 specs[count++].index = i;
8968 }
8969 }
8970 else
8971 {
8972 UNHANDLED;
8973 }
8974 }
8975 else if (note == 11) /* note 11 implies note 1 as well */
8976 {
8977 if (rsrc_write)
8978 {
8979 for (i = 0; i < idesc->num_outputs; i++)
8980 {
8981 if (idesc->operands[i] == IA64_OPND_P1
8982 || idesc->operands[i] == IA64_OPND_P2)
8983 {
8984 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8985 if (regno >= 16 && regno < 63)
8986 {
8987 specs[count] = tmpl;
8988 specs[count++].index = regno;
8989 }
8990 }
8991 }
8992 }
8993 else
8994 {
8995 UNHANDLED;
8996 }
8997 }
8998 else if (note == 12)
8999 {
9000 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9001 {
9002 specs[count] = tmpl;
9003 specs[count++].index = CURR_SLOT.qp_regno;
9004 }
9005 }
9006 else if (note == 1)
9007 {
9008 if (rsrc_write)
9009 {
9010 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9011 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9012 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9013 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9014
9015 if ((idesc->operands[0] == IA64_OPND_P1
9016 || idesc->operands[0] == IA64_OPND_P2)
9017 && p1 >= 16 && p1 < 63)
9018 {
9019 specs[count] = tmpl;
9020 specs[count].cmp_type =
9021 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9022 specs[count++].index = p1;
9023 }
9024 if ((idesc->operands[1] == IA64_OPND_P1
9025 || idesc->operands[1] == IA64_OPND_P2)
9026 && p2 >= 16 && p2 < 63)
9027 {
9028 specs[count] = tmpl;
9029 specs[count].cmp_type =
9030 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9031 specs[count++].index = p2;
9032 }
9033 }
9034 else
9035 {
9036 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9037 {
9038 specs[count] = tmpl;
9039 specs[count++].index = CURR_SLOT.qp_regno;
9040 }
9041 if (idesc->operands[1] == IA64_OPND_PR)
9042 {
9043 for (i = 16; i < 63; i++)
9044 {
9045 specs[count] = tmpl;
9046 specs[count++].index = i;
9047 }
9048 }
9049 }
9050 }
9051 else
9052 {
9053 UNHANDLED;
9054 }
9055 break;
9056
9057 case IA64_RS_PSR:
9058 /* Verify that the instruction is using the PSR bit indicated in
9059 dep->regindex. */
9060 if (note == 0)
9061 {
9062 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9063 {
9064 if (dep->regindex < 6)
9065 {
9066 specs[count++] = tmpl;
9067 }
9068 }
9069 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9070 {
9071 if (dep->regindex < 32
9072 || dep->regindex == 35
9073 || dep->regindex == 36
9074 || (!rsrc_write && dep->regindex == PSR_CPL))
9075 {
9076 specs[count++] = tmpl;
9077 }
9078 }
9079 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9080 {
9081 if (dep->regindex < 32
9082 || dep->regindex == 35
9083 || dep->regindex == 36
9084 || (rsrc_write && dep->regindex == PSR_CPL))
9085 {
9086 specs[count++] = tmpl;
9087 }
9088 }
9089 else
9090 {
9091 /* Several PSR bits have very specific dependencies. */
9092 switch (dep->regindex)
9093 {
9094 default:
9095 specs[count++] = tmpl;
9096 break;
9097 case PSR_IC:
9098 if (rsrc_write)
9099 {
9100 specs[count++] = tmpl;
9101 }
9102 else
9103 {
9104 /* Only certain CR accesses use PSR.ic */
9105 if (idesc->operands[0] == IA64_OPND_CR3
9106 || idesc->operands[1] == IA64_OPND_CR3)
9107 {
9108 int reg_index =
9109 ((idesc->operands[0] == IA64_OPND_CR3)
9110 ? 0 : 1);
9111 int regno =
9112 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9113
9114 switch (regno)
9115 {
9116 default:
9117 break;
9118 case CR_ITIR:
9119 case CR_IFS:
9120 case CR_IIM:
9121 case CR_IIP:
9122 case CR_IPSR:
9123 case CR_ISR:
9124 case CR_IFA:
9125 case CR_IHA:
9126 case CR_IIB0:
9127 case CR_IIB1:
9128 case CR_IIPA:
9129 specs[count++] = tmpl;
9130 break;
9131 }
9132 }
9133 }
9134 break;
9135 case PSR_CPL:
9136 if (rsrc_write)
9137 {
9138 specs[count++] = tmpl;
9139 }
9140 else
9141 {
9142 /* Only some AR accesses use cpl */
9143 if (idesc->operands[0] == IA64_OPND_AR3
9144 || idesc->operands[1] == IA64_OPND_AR3)
9145 {
9146 int reg_index =
9147 ((idesc->operands[0] == IA64_OPND_AR3)
9148 ? 0 : 1);
9149 int regno =
9150 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9151
9152 if (regno == AR_ITC
9153 || regno == AR_RUC
9154 || (reg_index == 0
9155 && (regno == AR_RSC
9156 || (regno >= AR_K0
9157 && regno <= AR_K7))))
9158 {
9159 specs[count++] = tmpl;
9160 }
9161 }
9162 else
9163 {
9164 specs[count++] = tmpl;
9165 }
9166 break;
9167 }
9168 }
9169 }
9170 }
9171 else if (note == 7)
9172 {
9173 valueT mask = 0;
9174 if (idesc->operands[0] == IA64_OPND_IMMU24)
9175 {
9176 mask = CURR_SLOT.opnd[0].X_add_number;
9177 }
9178 else
9179 {
9180 UNHANDLED;
9181 }
9182 if (mask & ((valueT) 1 << dep->regindex))
9183 {
9184 specs[count++] = tmpl;
9185 }
9186 }
9187 else if (note == 8)
9188 {
9189 int min = dep->regindex == PSR_DFL ? 2 : 32;
9190 int max = dep->regindex == PSR_DFL ? 31 : 127;
9191 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9192 for (i = 0; i < NELEMS (idesc->operands); i++)
9193 {
9194 if (idesc->operands[i] == IA64_OPND_F1
9195 || idesc->operands[i] == IA64_OPND_F2
9196 || idesc->operands[i] == IA64_OPND_F3
9197 || idesc->operands[i] == IA64_OPND_F4)
9198 {
9199 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9200 if (reg >= min && reg <= max)
9201 {
9202 specs[count++] = tmpl;
9203 }
9204 }
9205 }
9206 }
9207 else if (note == 9)
9208 {
9209 int min = dep->regindex == PSR_MFL ? 2 : 32;
9210 int max = dep->regindex == PSR_MFL ? 31 : 127;
9211 /* mfh is written on writes to FR32-127; mfl is written on writes to
9212 FR2-31 (see note 9 above) */
9213 for (i = 0; i < idesc->num_outputs; i++)
9214 {
9215 if (idesc->operands[i] == IA64_OPND_F1)
9216 {
9217 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9218 if (reg >= min && reg <= max)
9219 {
9220 specs[count++] = tmpl;
9221 }
9222 }
9223 }
9224 }
9225 else if (note == 10)
9226 {
9227 for (i = 0; i < NELEMS (idesc->operands); i++)
9228 {
9229 if (idesc->operands[i] == IA64_OPND_R1
9230 || idesc->operands[i] == IA64_OPND_R2
9231 || idesc->operands[i] == IA64_OPND_R3)
9232 {
9233 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9234 if (regno >= 16 && regno <= 31)
9235 {
9236 specs[count++] = tmpl;
9237 }
9238 }
9239 }
9240 }
9241 else
9242 {
9243 UNHANDLED;
9244 }
9245 break;
9246
9247 case IA64_RS_AR_FPSR:
9248 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9249 {
9250 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9251 if (regno == AR_FPSR)
9252 {
9253 specs[count++] = tmpl;
9254 }
9255 }
9256 else
9257 {
9258 specs[count++] = tmpl;
9259 }
9260 break;
9261
9262 case IA64_RS_ARX:
9263 /* Handle all AR[REG] resources */
9264 if (note == 0 || note == 1)
9265 {
9266 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9267 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9268 && regno == dep->regindex)
9269 {
9270 specs[count++] = tmpl;
9271 }
9272 /* other AR[REG] resources may be affected by AR accesses */
9273 else if (idesc->operands[0] == IA64_OPND_AR3)
9274 {
9275 /* AR[] writes */
9276 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9277 switch (dep->regindex)
9278 {
9279 default:
9280 break;
9281 case AR_BSP:
9282 case AR_RNAT:
9283 if (regno == AR_BSPSTORE)
9284 {
9285 specs[count++] = tmpl;
9286 }
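/* Note: control falls through to the AR_RSC checks below. */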
9287 case AR_RSC:
9288 if (!rsrc_write &&
9289 (regno == AR_BSPSTORE
9290 || regno == AR_RNAT))
9291 {
9292 specs[count++] = tmpl;
9293 }
9294 break;
9295 }
9296 }
9297 else if (idesc->operands[1] == IA64_OPND_AR3)
9298 {
9299 /* AR[] reads */
9300 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9301 switch (dep->regindex)
9302 {
9303 default:
9304 break;
9305 case AR_RSC:
9306 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9307 {
9308 specs[count++] = tmpl;
9309 }
9310 break;
9311 }
9312 }
9313 else
9314 {
9315 specs[count++] = tmpl;
9316 }
9317 }
9318 else
9319 {
9320 UNHANDLED;
9321 }
9322 break;
9323
9324 case IA64_RS_CRX:
9325 /* Handle all CR[REG] resources.
9326 ??? FIXME: Rule 17 isn't really handled correctly. */
9327 if (note == 0 || note == 1 || note == 17)
9328 {
9329 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9330 {
9331 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9332 if (regno == dep->regindex)
9333 {
9334 specs[count++] = tmpl;
9335 }
9336 else if (!rsrc_write)
9337 {
9338 /* Reads from CR[IVR] affect other resources. */
9339 if (regno == CR_IVR)
9340 {
9341 if ((dep->regindex >= CR_IRR0
9342 && dep->regindex <= CR_IRR3)
9343 || dep->regindex == CR_TPR)
9344 {
9345 specs[count++] = tmpl;
9346 }
9347 }
9348 }
9349 }
9350 else
9351 {
9352 specs[count++] = tmpl;
9353 }
9354 }
9355 else
9356 {
9357 UNHANDLED;
9358 }
9359 break;
9360
9361 case IA64_RS_INSERVICE:
9362 /* look for write of EOI (67) or read of IVR (65) */
9363 if ((idesc->operands[0] == IA64_OPND_CR3
9364 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9365 || (idesc->operands[1] == IA64_OPND_CR3
9366 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9367 {
9368 specs[count++] = tmpl;
9369 }
9370 break;
9371
9372 case IA64_RS_GR0:
9373 if (note == 1)
9374 {
9375 specs[count++] = tmpl;
9376 }
9377 else
9378 {
9379 UNHANDLED;
9380 }
9381 break;
9382
9383 case IA64_RS_CFM:
9384 if (note != 2)
9385 {
9386 specs[count++] = tmpl;
9387 }
9388 else
9389 {
9390 /* Check if any of the registers accessed are in the rotating region.
9391 mov to/from pr accesses CFM only when qp_regno is in the rotating
9392 region */
9393 for (i = 0; i < NELEMS (idesc->operands); i++)
9394 {
9395 if (idesc->operands[i] == IA64_OPND_R1
9396 || idesc->operands[i] == IA64_OPND_R2
9397 || idesc->operands[i] == IA64_OPND_R3)
9398 {
9399 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9400 /* Assumes that md.rot.num_regs is always valid */
9401 if (md.rot.num_regs > 0
9402 && num > 31
9403 && num < 31 + md.rot.num_regs)
9404 {
9405 specs[count] = tmpl;
9406 specs[count++].specific = 0;
9407 }
9408 }
9409 else if (idesc->operands[i] == IA64_OPND_F1
9410 || idesc->operands[i] == IA64_OPND_F2
9411 || idesc->operands[i] == IA64_OPND_F3
9412 || idesc->operands[i] == IA64_OPND_F4)
9413 {
9414 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9415 if (num > 31)
9416 {
9417 specs[count] = tmpl;
9418 specs[count++].specific = 0;
9419 }
9420 }
9421 else if (idesc->operands[i] == IA64_OPND_P1
9422 || idesc->operands[i] == IA64_OPND_P2)
9423 {
9424 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9425 if (num > 15)
9426 {
9427 specs[count] = tmpl;
9428 specs[count++].specific = 0;
9429 }
9430 }
9431 }
9432 if (CURR_SLOT.qp_regno > 15)
9433 {
9434 specs[count] = tmpl;
9435 specs[count++].specific = 0;
9436 }
9437 }
9438 break;
9439
9440 /* This is the same as IA64_RS_PRr, except simplified to account for
9441 the fact that there is only one register. */
9442 case IA64_RS_PR63:
9443 if (note == 0)
9444 {
9445 specs[count++] = tmpl;
9446 }
9447 else if (note == 7)
9448 {
9449 valueT mask = 0;
9450 if (idesc->operands[2] == IA64_OPND_IMM17)
9451 mask = CURR_SLOT.opnd[2].X_add_number;
9452 if (mask & ((valueT) 1 << 63))
9453 specs[count++] = tmpl;
9454 }
9455 else if (note == 11)
9456 {
9457 if ((idesc->operands[0] == IA64_OPND_P1
9458 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9459 || (idesc->operands[1] == IA64_OPND_P2
9460 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9461 {
9462 specs[count++] = tmpl;
9463 }
9464 }
9465 else if (note == 12)
9466 {
9467 if (CURR_SLOT.qp_regno == 63)
9468 {
9469 specs[count++] = tmpl;
9470 }
9471 }
9472 else if (note == 1)
9473 {
9474 if (rsrc_write)
9475 {
9476 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9477 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9478 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9479 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9480
9481 if (p1 == 63
9482 && (idesc->operands[0] == IA64_OPND_P1
9483 || idesc->operands[0] == IA64_OPND_P2))
9484 {
9485 specs[count] = tmpl;
9486 specs[count++].cmp_type =
9487 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9488 }
9489 if (p2 == 63
9490 && (idesc->operands[1] == IA64_OPND_P1
9491 || idesc->operands[1] == IA64_OPND_P2))
9492 {
9493 specs[count] = tmpl;
9494 specs[count++].cmp_type =
9495 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9496 }
9497 }
9498 else
9499 {
9500 if (CURR_SLOT.qp_regno == 63)
9501 {
9502 specs[count++] = tmpl;
9503 }
9504 }
9505 }
9506 else
9507 {
9508 UNHANDLED;
9509 }
9510 break;
9511
9512 case IA64_RS_RSE:
9513 /* FIXME we can identify some individual RSE written resources, but RSE
9514 read resources have not yet been completely identified, so for now
9515 treat RSE as a single resource */
9516 if (strncmp (idesc->name, "mov", 3) == 0)
9517 {
9518 if (rsrc_write)
9519 {
9520 if (idesc->operands[0] == IA64_OPND_AR3
9521 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9522 {
9523 specs[count++] = tmpl;
9524 }
9525 }
9526 else
9527 {
9528 if (idesc->operands[0] == IA64_OPND_AR3)
9529 {
9530 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9531 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9532 {
9533 specs[count++] = tmpl;
9534 }
9535 }
9536 else if (idesc->operands[1] == IA64_OPND_AR3)
9537 {
9538 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9539 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9540 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9541 {
9542 specs[count++] = tmpl;
9543 }
9544 }
9545 }
9546 }
9547 else
9548 {
9549 specs[count++] = tmpl;
9550 }
9551 break;
9552
9553 case IA64_RS_ANY:
9554 /* FIXME -- do any of these need to be non-specific? */
9555 specs[count++] = tmpl;
9556 break;
9557
9558 default:
9559 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9560 break;
9561 }
9562
9563 return count;
9564 }
9565
9566 /* Clear branch flags on marked resources. This breaks the link between the
9567 QP of the marking instruction and a subsequent branch on the same QP. */
9568
9569 static void
9570 clear_qp_branch_flag (valueT mask)
9571 {
9572 int i;
9573 for (i = 0; i < regdepslen; i++)
9574 {
9575 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9576 if ((bit & mask) != 0)
9577 {
9578 regdeps[i].link_to_qp_branch = 0;
9579 }
9580 }
9581 }
9582
9583 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9584 any mutexes which contain one of the PRs and create new ones when
9585 needed. */
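/* Illustrative example: if {p1,p2,p3} are currently mutex and a new compare
makes {p2,p3} mutex on the same path, p2 and p3 are removed from the old
relation, leaving only p1, so the old relation is dropped and a new {p2,p3}
mutex is recorded. */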
9586
9587 static int
9588 update_qp_mutex (valueT mask)
9589 {
9590 int i;
9591 int add = 0;
9592
9593 i = 0;
9594 while (i < qp_mutexeslen)
9595 {
9596 if ((qp_mutexes[i].prmask & mask) != 0)
9597 {
9598 /* If it destroys and creates the same mutex, do nothing. */
9599 if (qp_mutexes[i].prmask == mask
9600 && qp_mutexes[i].path == md.path)
9601 {
9602 i++;
9603 add = -1;
9604 }
9605 else
9606 {
9607 int keep = 0;
9608
9609 if (md.debug_dv)
9610 {
9611 fprintf (stderr, " Clearing mutex relation");
9612 print_prmask (qp_mutexes[i].prmask);
9613 fprintf (stderr, "\n");
9614 }
9615
9616 /* Deal with an old mutex containing three or more PRs only if
9617 the new mutex is on the same execution path as it.
9618
9619 FIXME: The 3+ mutex support is incomplete.
9620 dot_pred_rel () may be a better place to fix it. */
9621 if (qp_mutexes[i].path == md.path)
9622 {
9623 /* If it is a proper subset of the mutex, create a
9624 new mutex. */
9625 if (add == 0
9626 && (qp_mutexes[i].prmask & mask) == mask)
9627 add = 1;
9628
9629 qp_mutexes[i].prmask &= ~mask;
9630 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9631 {
9632 /* Keep the modified mutex if more than one
9633 PR is left. */
9634 keep = 1;
9635 i++;
9636 }
9637 }
9638
9639 if (keep == 0)
9640 /* Remove the mutex. */
9641 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9642 }
9643 }
9644 else
9645 ++i;
9646 }
9647
9648 if (add == 1)
9649 add_qp_mutex (mask);
9650
9651 return add;
9652 }
9653
9654 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9655
9656 Any changes to a PR clears the mutex relations which include that PR. */
9657
9658 static void
9659 clear_qp_mutex (valueT mask)
9660 {
9661 int i;
9662
9663 i = 0;
9664 while (i < qp_mutexeslen)
9665 {
9666 if ((qp_mutexes[i].prmask & mask) != 0)
9667 {
9668 if (md.debug_dv)
9669 {
9670 fprintf (stderr, " Clearing mutex relation");
9671 print_prmask (qp_mutexes[i].prmask);
9672 fprintf (stderr, "\n");
9673 }
9674 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9675 }
9676 else
9677 ++i;
9678 }
9679 }
9680
9681 /* Clear implies relations which contain PRs in the given masks.
9682 P1_MASK indicates the source of the implies relation, while P2_MASK
9683 indicates the implied PR. */
9684
9685 static void
9686 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9687 {
9688 int i;
9689
9690 i = 0;
9691 while (i < qp_implieslen)
9692 {
9693 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9694 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9695 {
9696 if (md.debug_dv)
9697 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9698 qp_implies[i].p1, qp_implies[i].p2);
9699 qp_implies[i] = qp_implies[--qp_implieslen];
9700 }
9701 else
9702 ++i;
9703 }
9704 }
9705
9706 /* Add the PRs specified to the list of implied relations. */
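/* For example, "(p2) cmp.eq.unc p3, p4 = r5, r6" establishes p3->p2 and
p4->p2: an unconditional compare clears both targets when its qualifying
predicate is false, so either target can only be 1 if p2 was 1. */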
9707
9708 static void
9709 add_qp_imply (int p1, int p2)
9710 {
9711 valueT mask;
9712 valueT bit;
9713 int i;
9714
9715 /* p0 is not meaningful here. */
9716 if (p1 == 0 || p2 == 0)
9717 abort ();
9718
9719 if (p1 == p2)
9720 return;
9721
9722 /* If it exists already, ignore it. */
9723 for (i = 0; i < qp_implieslen; i++)
9724 {
9725 if (qp_implies[i].p1 == p1
9726 && qp_implies[i].p2 == p2
9727 && qp_implies[i].path == md.path
9728 && !qp_implies[i].p2_branched)
9729 return;
9730 }
9731
9732 if (qp_implieslen == qp_impliestotlen)
9733 {
9734 qp_impliestotlen += 20;
9735 qp_implies = (struct qp_imply *)
9736 xrealloc ((void *) qp_implies,
9737 qp_impliestotlen * sizeof (struct qp_imply));
9738 }
9739 if (md.debug_dv)
9740 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9741 qp_implies[qp_implieslen].p1 = p1;
9742 qp_implies[qp_implieslen].p2 = p2;
9743 qp_implies[qp_implieslen].path = md.path;
9744 qp_implies[qp_implieslen++].p2_branched = 0;
9745
9746 /* Add in the implied transitive relations; for everything that p2 implies,
9747 make p1 imply that, too; for everything that implies p1, make it imply p2
9748 as well. */
9749 for (i = 0; i < qp_implieslen; i++)
9750 {
9751 if (qp_implies[i].p1 == p2)
9752 add_qp_imply (p1, qp_implies[i].p2);
9753 if (qp_implies[i].p2 == p1)
9754 add_qp_imply (qp_implies[i].p1, p2);
9755 }
9756 /* Add in mutex relations implied by this implies relation; for each mutex
9757 relation containing p2, duplicate it and replace p2 with p1. */
9758 bit = (valueT) 1 << p1;
9759 mask = (valueT) 1 << p2;
9760 for (i = 0; i < qp_mutexeslen; i++)
9761 {
9762 if (qp_mutexes[i].prmask & mask)
9763 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9764 }
9765 }
9766
9767 /* Add the PRs specified in the mask to the mutex list; this means that only
9768 one of the PRs can be true at any time. PR0 should never be included in
9769 the mask. */
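/* For example, an unqualified "cmp.eq p6, p7 = r8, r9" sets exactly one of
p6 and p7, so note_register_values () records the pair as mutex through this
function. */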
9770
9771 static void
9772 add_qp_mutex (valueT mask)
9773 {
9774 if (mask & 0x1)
9775 abort ();
9776
9777 if (qp_mutexeslen == qp_mutexestotlen)
9778 {
9779 qp_mutexestotlen += 20;
9780 qp_mutexes = (struct qpmutex *)
9781 xrealloc ((void *) qp_mutexes,
9782 qp_mutexestotlen * sizeof (struct qpmutex));
9783 }
9784 if (md.debug_dv)
9785 {
9786 fprintf (stderr, " Registering mutex on");
9787 print_prmask (mask);
9788 fprintf (stderr, "\n");
9789 }
9790 qp_mutexes[qp_mutexeslen].path = md.path;
9791 qp_mutexes[qp_mutexeslen++].prmask = mask;
9792 }
9793
9794 static int
9795 has_suffix_p (const char *name, const char *suffix)
9796 {
9797 size_t namelen = strlen (name);
9798 size_t sufflen = strlen (suffix);
9799
9800 if (namelen <= sufflen)
9801 return 0;
9802 return strcmp (name + namelen - sufflen, suffix) == 0;
9803 }
9804
9805 static void
9806 clear_register_values (void)
9807 {
9808 int i;
9809 if (md.debug_dv)
9810 fprintf (stderr, " Clearing register values\n");
9811 for (i = 1; i < NELEMS (gr_values); i++)
9812 gr_values[i].known = 0;
9813 }
9814
9815 /* Keep track of register values/changes which affect DV tracking.
9816
9817 optimization note: should add a flag to classes of insns where otherwise we
9818 have to examine a group of strings to identify them. */
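/* Illustrative example: after "mov r2 = 3", a later "mov r5 = cpuid[r2]" can
be recorded as touching CPUID[3] specifically rather than the whole CPUID
register file (see the IA64_RS_CPUID handling in specify_resource). */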
9819
9820 static void
9821 note_register_values (struct ia64_opcode *idesc)
9822 {
9823 valueT qp_changemask = 0;
9824 int i;
9825
9826 /* Invalidate values for registers being written to. */
9827 for (i = 0; i < idesc->num_outputs; i++)
9828 {
9829 if (idesc->operands[i] == IA64_OPND_R1
9830 || idesc->operands[i] == IA64_OPND_R2
9831 || idesc->operands[i] == IA64_OPND_R3)
9832 {
9833 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9834 if (regno > 0 && regno < NELEMS (gr_values))
9835 gr_values[regno].known = 0;
9836 }
9837 else if (idesc->operands[i] == IA64_OPND_R3_2)
9838 {
9839 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9840 if (regno > 0 && regno < 4)
9841 gr_values[regno].known = 0;
9842 }
9843 else if (idesc->operands[i] == IA64_OPND_P1
9844 || idesc->operands[i] == IA64_OPND_P2)
9845 {
9846 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9847 qp_changemask |= (valueT) 1 << regno;
9848 }
9849 else if (idesc->operands[i] == IA64_OPND_PR)
9850 {
9851 if (idesc->operands[2] & (valueT) 0x10000)
9852 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9853 else
9854 qp_changemask = idesc->operands[2];
9855 break;
9856 }
9857 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9858 {
9859 if (idesc->operands[1] & ((valueT) 1 << 43))
9860 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9861 else
9862 qp_changemask = idesc->operands[1];
9863 qp_changemask &= ~(valueT) 0xFFFF;
9864 break;
9865 }
9866 }
9867
9868 /* Always clear qp branch flags on any PR change. */
9869 /* FIXME there may be exceptions for certain compares. */
9870 clear_qp_branch_flag (qp_changemask);
9871
9872 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9873 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9874 {
9875 qp_changemask |= ~(valueT) 0xFFFF;
9876 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9877 {
9878 for (i = 32; i < 32 + md.rot.num_regs; i++)
9879 gr_values[i].known = 0;
9880 }
9881 clear_qp_mutex (qp_changemask);
9882 clear_qp_implies (qp_changemask, qp_changemask);
9883 }
9884 /* After a call, all register values are undefined, except those marked
9885 as "safe". */
9886 else if (strncmp (idesc->name, "br.call", 6) == 0
9887 || strncmp (idesc->name, "brl.call", 7) == 0)
9888 {
9889 /* FIXME keep GR values which are marked as "safe_across_calls" */
9890 clear_register_values ();
9891 clear_qp_mutex (~qp_safe_across_calls);
9892 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9893 clear_qp_branch_flag (~qp_safe_across_calls);
9894 }
9895 else if (is_interruption_or_rfi (idesc)
9896 || is_taken_branch (idesc))
9897 {
9898 clear_register_values ();
9899 clear_qp_mutex (~(valueT) 0);
9900 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9901 }
9902 /* Look for mutex and implies relations. */
9903 else if ((idesc->operands[0] == IA64_OPND_P1
9904 || idesc->operands[0] == IA64_OPND_P2)
9905 && (idesc->operands[1] == IA64_OPND_P1
9906 || idesc->operands[1] == IA64_OPND_P2))
9907 {
9908 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9909 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9910 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9911 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9912
9913 /* If both PRs are PR0, we can't really do anything. */
9914 if (p1 == 0 && p2 == 0)
9915 {
9916 if (md.debug_dv)
9917 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9918 }
9919 /* In general, clear mutexes and implies which include P1 or P2,
9920 with the following exceptions. */
9921 else if (has_suffix_p (idesc->name, ".or.andcm")
9922 || has_suffix_p (idesc->name, ".and.orcm"))
9923 {
9924 clear_qp_implies (p2mask, p1mask);
9925 }
9926 else if (has_suffix_p (idesc->name, ".andcm")
9927 || has_suffix_p (idesc->name, ".and"))
9928 {
9929 clear_qp_implies (0, p1mask | p2mask);
9930 }
9931 else if (has_suffix_p (idesc->name, ".orcm")
9932 || has_suffix_p (idesc->name, ".or"))
9933 {
9934 clear_qp_mutex (p1mask | p2mask);
9935 clear_qp_implies (p1mask | p2mask, 0);
9936 }
9937 else
9938 {
9939 int added = 0;
9940
9941 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9942
9943 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9944 if (p1 == 0 || p2 == 0)
9945 clear_qp_mutex (p1mask | p2mask);
9946 else
9947 added = update_qp_mutex (p1mask | p2mask);
9948
9949 if (CURR_SLOT.qp_regno == 0
9950 || has_suffix_p (idesc->name, ".unc"))
9951 {
9952 if (added == 0 && p1 && p2)
9953 add_qp_mutex (p1mask | p2mask);
9954 if (CURR_SLOT.qp_regno != 0)
9955 {
9956 if (p1)
9957 add_qp_imply (p1, CURR_SLOT.qp_regno);
9958 if (p2)
9959 add_qp_imply (p2, CURR_SLOT.qp_regno);
9960 }
9961 }
9962 }
9963 }
9964 /* Look for mov imm insns into GRs. */
9965 else if (idesc->operands[0] == IA64_OPND_R1
9966 && (idesc->operands[1] == IA64_OPND_IMM22
9967 || idesc->operands[1] == IA64_OPND_IMMU64)
9968 && CURR_SLOT.opnd[1].X_op == O_constant
9969 && (strcmp (idesc->name, "mov") == 0
9970 || strcmp (idesc->name, "movl") == 0))
9971 {
9972 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9973 if (regno > 0 && regno < NELEMS (gr_values))
9974 {
9975 gr_values[regno].known = 1;
9976 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9977 gr_values[regno].path = md.path;
9978 if (md.debug_dv)
9979 {
9980 fprintf (stderr, " Know gr%d = ", regno);
9981 fprintf_vma (stderr, gr_values[regno].value);
9982 fputs ("\n", stderr);
9983 }
9984 }
9985 }
9986 /* Look for dep.z imm insns. */
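/* Illustrative example: "dep.z r4 = 5, 3, 8" deposits the low 8 bits of the
immediate 5 at bit position 3, so the value recorded for r4 below is
(5 & 0xff) << 3 == 0x28. */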
9987 else if (idesc->operands[0] == IA64_OPND_R1
9988 && idesc->operands[1] == IA64_OPND_IMM8
9989 && strcmp (idesc->name, "dep.z") == 0)
9990 {
9991 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9992 if (regno > 0 && regno < NELEMS (gr_values))
9993 {
9994 valueT value = CURR_SLOT.opnd[1].X_add_number;
9995
9996 if (CURR_SLOT.opnd[3].X_add_number < 64)
9997 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9998 value <<= CURR_SLOT.opnd[2].X_add_number;
9999 gr_values[regno].known = 1;
10000 gr_values[regno].value = value;
10001 gr_values[regno].path = md.path;
10002 if (md.debug_dv)
10003 {
10004 fprintf (stderr, " Know gr%d = ", regno);
10005 fprintf_vma (stderr, gr_values[regno].value);
10006 fputs ("\n", stderr);
10007 }
10008 }
10009 }
10010 else
10011 {
10012 clear_qp_mutex (qp_changemask);
10013 clear_qp_implies (qp_changemask, qp_changemask);
10014 }
10015 }
10016
10017 /* Return whether the given predicate registers are currently mutex. */
10018
10019 static int
10020 qp_mutex (int p1, int p2, int path)
10021 {
10022 int i;
10023 valueT mask;
10024
10025 if (p1 != p2)
10026 {
10027 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10028 for (i = 0; i < qp_mutexeslen; i++)
10029 {
10030 if (qp_mutexes[i].path >= path
10031 && (qp_mutexes[i].prmask & mask) == mask)
10032 return 1;
10033 }
10034 }
10035 return 0;
10036 }
10037
10038 /* Return whether the given resource is in the given insn's list of chks.
10039 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10040 conflict. */
10041
10042 static int
10043 resources_match (struct rsrc *rs,
10044 struct ia64_opcode *idesc,
10045 int note,
10046 int qp_regno,
10047 int path)
10048 {
10049 struct rsrc specs[MAX_SPECS];
10050 int count;
10051
10052 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10053 we don't need to check. One exception is note 11, which indicates that
10054 target predicates are written regardless of PR[qp]. */
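/* For example, when p6 and p7 are known to be mutex, "(p6) mov r4 = 1"
followed by "(p7) mov r4 = 2" is not flagged, since at most one of the two
writes can actually occur. */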
10055 if (qp_mutex (rs->qp_regno, qp_regno, path)
10056 && note != 11)
10057 return 0;
10058
10059 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10060 while (count-- > 0)
10061 {
10062 /* UNAT checking is a bit more specific than other resources */
10063 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10064 && specs[count].mem_offset.hint
10065 && rs->mem_offset.hint)
10066 {
10067 if (rs->mem_offset.base == specs[count].mem_offset.base)
10068 {
10069 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10070 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10071 return 1;
10072 else
10073 continue;
10074 }
10075 }
10076
10077 /* Skip apparent PR write conflicts where both writes are an AND or both
10078 writes are an OR. */
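/* The parallel-compare rules allow multiple "and"-type (or multiple
"or"-type) compares to write the same predicate in one instruction group,
e.g. two cmp.eq.and instructions both targeting p6; mixing the two types is
still a conflict. */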
10079 if (rs->dependency->specifier == IA64_RS_PR
10080 || rs->dependency->specifier == IA64_RS_PRr
10081 || rs->dependency->specifier == IA64_RS_PR63)
10082 {
10083 if (specs[count].cmp_type != CMP_NONE
10084 && specs[count].cmp_type == rs->cmp_type)
10085 {
10086 if (md.debug_dv)
10087 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10088 dv_mode[rs->dependency->mode],
10089 rs->dependency->specifier != IA64_RS_PR63 ?
10090 specs[count].index : 63);
10091 continue;
10092 }
10093 if (md.debug_dv)
10094 fprintf (stderr,
10095 " %s on parallel compare conflict %s vs %s on PR%d\n",
10096 dv_mode[rs->dependency->mode],
10097 dv_cmp_type[rs->cmp_type],
10098 dv_cmp_type[specs[count].cmp_type],
10099 rs->dependency->specifier != IA64_RS_PR63 ?
10100 specs[count].index : 63);
10101
10102 }
10103
10104 /* If either resource is not specific, conservatively assume a conflict. */
10106 if (!specs[count].specific || !rs->specific)
10107 return 2;
10108 else if (specs[count].index == rs->index)
10109 return 1;
10110 }
10111
10112 return 0;
10113 }
10114
10115 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10116 insert a stop to create the break. Update all resource dependencies
10117 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10118 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10119 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10120 instruction. */
10121
10122 static void
10123 insn_group_break (int insert_stop, int qp_regno, int save_current)
10124 {
10125 int i;
10126
10127 if (insert_stop && md.num_slots_in_use > 0)
10128 PREV_SLOT.end_of_insn_group = 1;
10129
10130 if (md.debug_dv)
10131 {
10132 fprintf (stderr, " Insn group break%s",
10133 (insert_stop ? " (w/stop)" : ""));
10134 if (qp_regno != 0)
10135 fprintf (stderr, " effective for QP=%d", qp_regno);
10136 fprintf (stderr, "\n");
10137 }
10138
10139 i = 0;
10140 while (i < regdepslen)
10141 {
10142 const struct ia64_dependency *dep = regdeps[i].dependency;
10143
10144 if (qp_regno != 0
10145 && regdeps[i].qp_regno != qp_regno)
10146 {
10147 ++i;
10148 continue;
10149 }
10150
10151 if (save_current
10152 && CURR_SLOT.src_file == regdeps[i].file
10153 && CURR_SLOT.src_line == regdeps[i].line)
10154 {
10155 ++i;
10156 continue;
10157 }
10158
10159 /* clear dependencies which are automatically cleared by a stop, or
10160 those that have reached the appropriate state of insn serialization */
10161 if (dep->semantics == IA64_DVS_IMPLIED
10162 || dep->semantics == IA64_DVS_IMPLIEDF
10163 || regdeps[i].insn_srlz == STATE_SRLZ)
10164 {
10165 print_dependency ("Removing", i);
10166 regdeps[i] = regdeps[--regdepslen];
10167 }
10168 else
10169 {
10170 if (dep->semantics == IA64_DVS_DATA
10171 || dep->semantics == IA64_DVS_INSTR
10172 || dep->semantics == IA64_DVS_SPECIFIC)
10173 {
10174 if (regdeps[i].insn_srlz == STATE_NONE)
10175 regdeps[i].insn_srlz = STATE_STOP;
10176 if (regdeps[i].data_srlz == STATE_NONE)
10177 regdeps[i].data_srlz = STATE_STOP;
10178 }
10179 ++i;
10180 }
10181 }
10182 }
10183
10184 /* Add the given resource usage spec to the list of active dependencies. */
10185
10186 static void
10187 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10188 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10189 struct rsrc *spec,
10190 int depind,
10191 int path)
10192 {
10193 if (regdepslen == regdepstotlen)
10194 {
10195 regdepstotlen += 20;
10196 regdeps = (struct rsrc *)
10197 xrealloc ((void *) regdeps,
10198 regdepstotlen * sizeof (struct rsrc));
10199 }
10200
10201 regdeps[regdepslen] = *spec;
10202 regdeps[regdepslen].depind = depind;
10203 regdeps[regdepslen].path = path;
10204 regdeps[regdepslen].file = CURR_SLOT.src_file;
10205 regdeps[regdepslen].line = CURR_SLOT.src_line;
10206
10207 print_dependency ("Adding", regdepslen);
10208
10209 ++regdepslen;
10210 }
10211
10212 static void
10213 print_dependency (const char *action, int depind)
10214 {
10215 if (md.debug_dv)
10216 {
10217 fprintf (stderr, " %s %s '%s'",
10218 action, dv_mode[(regdeps[depind].dependency)->mode],
10219 (regdeps[depind].dependency)->name);
10220 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10221 fprintf (stderr, " (%d)", regdeps[depind].index);
10222 if (regdeps[depind].mem_offset.hint)
10223 {
10224 fputs (" ", stderr);
10225 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10226 fputs ("+", stderr);
10227 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10228 }
10229 fprintf (stderr, "\n");
10230 }
10231 }
10232
10233 static void
10234 instruction_serialization (void)
10235 {
10236 int i;
10237 if (md.debug_dv)
10238 fprintf (stderr, " Instruction serialization\n");
10239 for (i = 0; i < regdepslen; i++)
10240 if (regdeps[i].insn_srlz == STATE_STOP)
10241 regdeps[i].insn_srlz = STATE_SRLZ;
10242 }
10243
10244 static void
10245 data_serialization (void)
10246 {
10247 int i = 0;
10248 if (md.debug_dv)
10249 fprintf (stderr, " Data serialization\n");
10250 while (i < regdepslen)
10251 {
10252 if (regdeps[i].data_srlz == STATE_STOP
10253 /* Note: as of 991210, all "other" dependencies are cleared by a
10254 data serialization. This might change with new tables. */
10255 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10256 {
10257 print_dependency ("Removing", i);
10258 regdeps[i] = regdeps[--regdepslen];
10259 }
10260 else
10261 ++i;
10262 }
10263 }
10264
10265 /* Insert stops and serializations as needed to avoid DVs. */
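/* Roughly: IA64_DVS_INSTR (and implementation-specific) violations get a
srlz.i plus stops, IA64_DVS_DATA and IA64_DVS_OTHER get a srlz.d, and
IA64_DVS_IMPLIED/IMPLIEDF need only a stop bit. */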
10266
10267 static void
10268 remove_marked_resource (struct rsrc *rs)
10269 {
10270 switch (rs->dependency->semantics)
10271 {
10272 case IA64_DVS_SPECIFIC:
10273 if (md.debug_dv)
10274 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10275 /* ...fall through... */
10276 case IA64_DVS_INSTR:
10277 if (md.debug_dv)
10278 fprintf (stderr, "Inserting instr serialization\n");
10279 if (rs->insn_srlz < STATE_STOP)
10280 insn_group_break (1, 0, 0);
10281 if (rs->insn_srlz < STATE_SRLZ)
10282 {
10283 struct slot oldslot = CURR_SLOT;
10284 /* Manually jam a srlz.i insn into the stream */
10285 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10286 CURR_SLOT.user_template = -1;
10287 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10288 instruction_serialization ();
10289 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10290 if (++md.num_slots_in_use >= NUM_SLOTS)
10291 emit_one_bundle ();
10292 CURR_SLOT = oldslot;
10293 }
10294 insn_group_break (1, 0, 0);
10295 break;
10296 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10297 "other" types of DV are eliminated
10298 by a data serialization */
10299 case IA64_DVS_DATA:
10300 if (md.debug_dv)
10301 fprintf (stderr, "Inserting data serialization\n");
10302 if (rs->data_srlz < STATE_STOP)
10303 insn_group_break (1, 0, 0);
10304 {
10305 struct slot oldslot = CURR_SLOT;
10306 /* Manually jam a srlz.d insn into the stream */
10307 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10308 CURR_SLOT.user_template = -1;
10309 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10310 data_serialization ();
10311 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10312 if (++md.num_slots_in_use >= NUM_SLOTS)
10313 emit_one_bundle ();
10314 CURR_SLOT = oldslot;
10315 }
10316 break;
10317 case IA64_DVS_IMPLIED:
10318 case IA64_DVS_IMPLIEDF:
10319 if (md.debug_dv)
10320 fprintf (stderr, "Inserting stop\n");
10321 insn_group_break (1, 0, 0);
10322 break;
10323 default:
10324 break;
10325 }
10326 }
10327
10328 /* Check the resources used by the given opcode against the current dependency
10329 list.
10330
10331 The check is run once for each execution path encountered. In this case,
10332 a unique execution path is the sequence of instructions following a code
10333 entry point, e.g. the following has three execution paths, one starting
10334 at L0, one at L1, and one at L2.
10335
10336 L0: nop
10337 L1: add
10338 L2: add
10339 br.ret
10340 */
10341
10342 static void
10343 check_dependencies (struct ia64_opcode *idesc)
10344 {
10345 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10346 int path;
10347 int i;
10348
10349 /* Note that the number of marked resources may change within the
10350 loop if in auto mode. */
10351 i = 0;
10352 while (i < regdepslen)
10353 {
10354 struct rsrc *rs = &regdeps[i];
10355 const struct ia64_dependency *dep = rs->dependency;
10356 int chkind;
10357 int note;
10358 int start_over = 0;
10359
10360 if (dep->semantics == IA64_DVS_NONE
10361 || (chkind = depends_on (rs->depind, idesc)) == -1)
10362 {
10363 ++i;
10364 continue;
10365 }
10366
10367 note = NOTE (opdeps->chks[chkind]);
10368
10369 /* Check this resource against each execution path seen thus far. */
10370 for (path = 0; path <= md.path; path++)
10371 {
10372 int matchtype;
10373
10374 /* If the dependency wasn't on the path being checked, ignore it. */
10375 if (rs->path < path)
10376 continue;
10377
10378 /* If the QP for this insn implies a QP which has branched, don't
10379 bother checking. Ed. NOTE: I don't think this check is terribly
10380 useful; what's the point of generating code which will only be
10381 reached if its QP is zero?
10382 This code was specifically inserted to handle the following code,
10383 based on notes from Intel's DV checking code, where p1 implies p2.
10384
10385 mov r4 = 2
10386 (p2) br.cond L
10387 (p1) mov r4 = 7
10388 */
10389 if (CURR_SLOT.qp_regno != 0)
10390 {
10391 int skip = 0;
10392 int implies;
10393 for (implies = 0; implies < qp_implieslen; implies++)
10394 {
10395 if (qp_implies[implies].path >= path
10396 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10397 && qp_implies[implies].p2_branched)
10398 {
10399 skip = 1;
10400 break;
10401 }
10402 }
10403 if (skip)
10404 continue;
10405 }
10406
10407 if ((matchtype = resources_match (rs, idesc, note,
10408 CURR_SLOT.qp_regno, path)) != 0)
10409 {
10410 char msg[1024];
10411 char pathmsg[256] = "";
10412 char indexmsg[256] = "";
10413 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10414
10415 if (path != 0)
10416 snprintf (pathmsg, sizeof (pathmsg),
10417 " when entry is at label '%s'",
10418 md.entry_labels[path - 1]);
10419 if (matchtype == 1 && rs->index >= 0)
10420 snprintf (indexmsg, sizeof (indexmsg),
10421 ", specific resource number is %d",
10422 rs->index);
10423 snprintf (msg, sizeof (msg),
10424 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10425 idesc->name,
10426 (certain ? "violates" : "may violate"),
10427 dv_mode[dep->mode], dep->name,
10428 dv_sem[dep->semantics],
10429 pathmsg, indexmsg);
10430
10431 if (md.explicit_mode)
10432 {
10433 as_warn ("%s", msg);
10434 if (path < md.path)
10435 as_warn (_("Only the first path encountering the conflict is reported"));
10436 as_warn_where (rs->file, rs->line,
10437 _("This is the location of the conflicting usage"));
10438 /* Don't bother checking other paths, to avoid duplicating
10439 the same warning */
10440 break;
10441 }
10442 else
10443 {
10444 if (md.debug_dv)
10445 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10446
10447 remove_marked_resource (rs);
10448
10449 /* since the set of dependencies has changed, start over */
10450 /* FIXME -- since we're removing dvs as we go, we
10451 probably don't really need to start over... */
10452 start_over = 1;
10453 break;
10454 }
10455 }
10456 }
10457 if (start_over)
10458 i = 0;
10459 else
10460 ++i;
10461 }
10462 }
10463
10464 /* Register new dependencies based on the given opcode. */
10465
10466 static void
10467 mark_resources (struct ia64_opcode *idesc)
10468 {
10469 int i;
10470 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10471 int add_only_qp_reads = 0;
10472
10473 /* A conditional branch only uses its resources if it is taken; if it is
10474 taken, we stop following that path. The other branch types effectively
10475 *always* write their resources. If it's not taken, register only QP
10476 reads. */
10477 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10478 {
10479 add_only_qp_reads = 1;
10480 }
10481
10482 if (md.debug_dv)
10483 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10484
10485 for (i = 0; i < opdeps->nregs; i++)
10486 {
10487 const struct ia64_dependency *dep;
10488 struct rsrc specs[MAX_SPECS];
10489 int note;
10490 int path;
10491 int count;
10492
10493 dep = ia64_find_dependency (opdeps->regs[i]);
10494 note = NOTE (opdeps->regs[i]);
10495
10496 if (add_only_qp_reads
10497 && !(dep->mode == IA64_DV_WAR
10498 && (dep->specifier == IA64_RS_PR
10499 || dep->specifier == IA64_RS_PRr
10500 || dep->specifier == IA64_RS_PR63)))
10501 continue;
10502
10503 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10504
10505 while (count-- > 0)
10506 {
10507 mark_resource (idesc, dep, &specs[count],
10508 DEP (opdeps->regs[i]), md.path);
10509 }
10510
10511 /* The execution path may affect register values, which may in turn
10512 affect which indirect-access resources are accessed. */
10513 switch (dep->specifier)
10514 {
10515 default:
10516 break;
10517 case IA64_RS_CPUID:
10518 case IA64_RS_DBR:
10519 case IA64_RS_IBR:
10520 case IA64_RS_MSR:
10521 case IA64_RS_PKR:
10522 case IA64_RS_PMC:
10523 case IA64_RS_PMD:
10524 case IA64_RS_RR:
10525 for (path = 0; path < md.path; path++)
10526 {
10527 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10528 while (count-- > 0)
10529 mark_resource (idesc, dep, &specs[count],
10530 DEP (opdeps->regs[i]), path);
10531 }
10532 break;
10533 }
10534 }
10535 }
10536
10537 /* Remove dependencies when they no longer apply. */
10538
10539 static void
10540 update_dependencies (struct ia64_opcode *idesc)
10541 {
10542 int i;
10543
10544 if (strcmp (idesc->name, "srlz.i") == 0)
10545 {
10546 instruction_serialization ();
10547 }
10548 else if (strcmp (idesc->name, "srlz.d") == 0)
10549 {
10550 data_serialization ();
10551 }
10552 else if (is_interruption_or_rfi (idesc)
10553 || is_taken_branch (idesc))
10554 {
10555 /* Although technically the taken branch doesn't clear dependencies
10556 which require a srlz.[id], we don't follow the branch; the next
10557 instruction is assumed to start with a clean slate. */
10558 regdepslen = 0;
10559 md.path = 0;
10560 }
10561 else if (is_conditional_branch (idesc)
10562 && CURR_SLOT.qp_regno != 0)
10563 {
10564 int is_call = strstr (idesc->name, ".call") != NULL;
10565
10566 for (i = 0; i < qp_implieslen; i++)
10567 {
10568 /* If the conditional branch's predicate is implied by the predicate
10569 in an existing dependency, remove that dependency. */
10570 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10571 {
10572 int depind = 0;
10573 /* Note that this implied predicate takes a branch so that if
10574 a later insn generates a DV but its predicate implies this
10575 one, we can avoid the false DV warning. */
10576 qp_implies[i].p2_branched = 1;
10577 while (depind < regdepslen)
10578 {
10579 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10580 {
10581 print_dependency ("Removing", depind);
10582 regdeps[depind] = regdeps[--regdepslen];
10583 }
10584 else
10585 ++depind;
10586 }
10587 }
10588 }
10589 /* Any marked resources which have this same predicate should be
10590 cleared, provided that the QP hasn't been modified between the
10591 marking instruction and the branch. */
10592 if (is_call)
10593 {
10594 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10595 }
10596 else
10597 {
10598 i = 0;
10599 while (i < regdepslen)
10600 {
10601 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10602 && regdeps[i].link_to_qp_branch
10603 && (regdeps[i].file != CURR_SLOT.src_file
10604 || regdeps[i].line != CURR_SLOT.src_line))
10605 {
10606 /* Treat like a taken branch */
10607 print_dependency ("Removing", i);
10608 regdeps[i] = regdeps[--regdepslen];
10609 }
10610 else
10611 ++i;
10612 }
10613 }
10614 }
10615 }
10616
10617 /* Examine the current instruction for dependency violations. */
10618
10619 static int
10620 check_dv (struct ia64_opcode *idesc)
10621 {
10622 if (md.debug_dv)
10623 {
10624 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10625 idesc->name, CURR_SLOT.src_line,
10626 idesc->dependencies->nchks,
10627 idesc->dependencies->nregs);
10628 }
10629
10630 /* Look through the list of currently marked resources; if the current
10631 instruction has a dependency in its chks list which uses that resource,
10632 check against the specific resources used. */
10633 check_dependencies (idesc);
10634
10635 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10636 then add them to the list of marked resources. */
10637 mark_resources (idesc);
10638
10639 /* There are several types of dependency semantics, and each has its own
10640 requirements for being cleared
10641
10642 Instruction serialization (insns separated by interruption, rfi, or
10643 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10644
10645 Data serialization (instruction serialization, or writer + srlz.d +
10646 reader, where writer and srlz.d are in separate groups) clears
10647 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10648 always be the case).
10649
10650 Instruction group break (groups separated by stop, taken branch,
10651 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10652 */
10653 update_dependencies (idesc);
10654
10655 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10656 warning. Keep track of as many as possible that are useful. */
10657 note_register_values (idesc);
10658
10659 /* We don't need or want this anymore. */
10660 md.mem_offset.hint = 0;
10661
10662 return 0;
10663 }
10664
10665 /* Translate one line of assembly. Pseudo ops and labels do not appear
10666 here. */
10667 void
10668 md_assemble (char *str)
10669 {
10670 char *saved_input_line_pointer, *temp;
10671 const char *mnemonic;
10672 const struct pseudo_opcode *pdesc;
10673 struct ia64_opcode *idesc;
10674 unsigned char qp_regno;
10675 unsigned int flags;
10676 int ch;
10677
10678 saved_input_line_pointer = input_line_pointer;
10679 input_line_pointer = str;
10680
10681 /* extract the opcode (mnemonic): */
10682
10683 ch = get_symbol_name (&temp);
10684 mnemonic = temp;
10685 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10686 if (pdesc)
10687 {
10688 (void) restore_line_pointer (ch);
10689 (*pdesc->handler) (pdesc->arg);
10690 goto done;
10691 }
10692
10693 /* Find the instruction descriptor matching the arguments. */
10694
10695 idesc = ia64_find_opcode (mnemonic);
10696 (void) restore_line_pointer (ch);
10697 if (!idesc)
10698 {
10699 as_bad (_("Unknown opcode `%s'"), mnemonic);
10700 goto done;
10701 }
10702
10703 idesc = parse_operands (idesc);
10704 if (!idesc)
10705 goto done;
10706
10707 /* Resolve the dynamic ops we can handle now: */
10708 if (idesc->type == IA64_TYPE_DYN)
10709 {
10710 if (strcmp (idesc->name, "add") == 0)
10711 {
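/* addl encodes a 22-bit immediate but requires its GR source to be one of
   r0-r3; adds accepts any GR but only a 14-bit immediate.  Hence the
   register-number test below.  */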
10712 if (CURR_SLOT.opnd[2].X_op == O_register
10713 && CURR_SLOT.opnd[2].X_add_number < 4)
10714 mnemonic = "addl";
10715 else
10716 mnemonic = "adds";
10717 ia64_free_opcode (idesc);
10718 idesc = ia64_find_opcode (mnemonic);
10719 }
10720 else if (strcmp (idesc->name, "mov") == 0)
10721 {
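/* Some application registers are reachable only from the I-unit and others
   only from the M-unit, so rewrite the generic mov as mov.i or mov.m when
   the AR operand forces a particular unit; otherwise (rop left at -1) keep
   the dynamic mov opcode.  */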
10722 enum ia64_opnd opnd1, opnd2;
10723 int rop;
10724
10725 opnd1 = idesc->operands[0];
10726 opnd2 = idesc->operands[1];
10727 if (opnd1 == IA64_OPND_AR3)
10728 rop = 0;
10729 else if (opnd2 == IA64_OPND_AR3)
10730 rop = 1;
10731 else
10732 abort ();
10733 if (CURR_SLOT.opnd[rop].X_op == O_register)
10734 {
10735 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10736 mnemonic = "mov.i";
10737 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10738 mnemonic = "mov.m";
10739 else
10740 rop = -1;
10741 }
10742 else
10743 abort ();
10744 if (rop >= 0)
10745 {
10746 ia64_free_opcode (idesc);
10747 idesc = ia64_find_opcode (mnemonic);
10748 while (idesc != NULL
10749 && (idesc->operands[0] != opnd1
10750 || idesc->operands[1] != opnd2))
10751 idesc = get_next_opcode (idesc);
10752 }
10753 }
10754 }
10755 else if (strcmp (idesc->name, "mov.i") == 0
10756 || strcmp (idesc->name, "mov.m") == 0)
10757 {
10758 enum ia64_opnd opnd1, opnd2;
10759 int rop;
10760
10761 opnd1 = idesc->operands[0];
10762 opnd2 = idesc->operands[1];
10763 if (opnd1 == IA64_OPND_AR3)
10764 rop = 0;
10765 else if (opnd2 == IA64_OPND_AR3)
10766 rop = 1;
10767 else
10768 abort ();
10769 if (CURR_SLOT.opnd[rop].X_op == O_register)
10770 {
10771 char unit = 'a';
10772 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10773 unit = 'i';
10774 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10775 unit = 'm';
10776 if (unit != 'a' && unit != idesc->name [4])
10777 as_bad (_("AR %d can only be accessed by %c-unit"),
10778 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10779 TOUPPER (unit));
10780 }
10781 }
10782 else if (strcmp (idesc->name, "hint.b") == 0)
10783 {
10784 switch (md.hint_b)
10785 {
10786 case hint_b_ok:
10787 break;
10788 case hint_b_warning:
10789 as_warn (_("hint.b may be treated as nop"));
10790 break;
10791 case hint_b_error:
10792 as_bad (_("hint.b shouldn't be used"));
10793 break;
10794 }
10795 }
10796
10797 qp_regno = 0;
10798 if (md.qp.X_op == O_register)
10799 {
10800 qp_regno = md.qp.X_add_number - REG_P;
10801 md.qp.X_op = O_absent;
10802 }
10803
10804 flags = idesc->flags;
10805
10806 if ((flags & IA64_OPCODE_FIRST) != 0)
10807 {
10808 /* The alignment frag has to end with a stop bit only if the
10809 next instruction after the alignment directive has to be
10810 the first instruction in an instruction group. */
10811 if (align_frag)
10812 {
10813 while (align_frag->fr_type != rs_align_code)
10814 {
10815 align_frag = align_frag->fr_next;
10816 if (!align_frag)
10817 break;
10818 }
10819 /* align_frag can be NULL if there are directives in
10820 between. */
10821 if (align_frag && align_frag->fr_next == frag_now)
10822 align_frag->tc_frag_data = 1;
10823 }
10824
10825 insn_group_break (1, 0, 0);
10826 }
10827 align_frag = NULL;
10828
10829 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10830 {
10831 as_bad (_("`%s' cannot be predicated"), idesc->name);
10832 goto done;
10833 }
10834
10835 /* Build the instruction. */
10836 CURR_SLOT.qp_regno = qp_regno;
10837 CURR_SLOT.idesc = idesc;
10838 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10839 dwarf2_where (&CURR_SLOT.debug_line);
10840 dwarf2_consume_line_info ();
10841
10842 /* Add unwind entries, if there are any. */
10843 if (unwind.current_entry)
10844 {
10845 CURR_SLOT.unwind_record = unwind.current_entry;
10846 unwind.current_entry = NULL;
10847 }
10848 if (unwind.pending_saves)
10849 {
10850 if (unwind.pending_saves->next)
10851 {
10852 /* Attach the next pending save to the next slot so that its
10853 slot number will get set correctly. */
10854 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10855 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10856 }
10857 else
10858 unwind.pending_saves = NULL;
10859 }
10860 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10861 unwind.insn = 1;
10862
10863 /* Check for dependency violations. */
10864 if (md.detect_dv)
10865 check_dv (idesc);
10866
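/* Advance to the next slot; once the slot buffer is full, emit a bundle.  */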
10867 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10868 if (++md.num_slots_in_use >= NUM_SLOTS)
10869 emit_one_bundle ();
10870
10871 if ((flags & IA64_OPCODE_LAST) != 0)
10872 insn_group_break (1, 0, 0);
10873
10874 md.last_text_seg = now_seg;
10875
10876 done:
10877 input_line_pointer = saved_input_line_pointer;
10878 }
10879
10880 /* Called when symbol NAME cannot be found in the symbol table.
10881 Should be used for dynamically valued symbols only. */
10882
10883 symbolS *
10884 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10885 {
10886 return 0;
10887 }
10888
10889 /* Called for any expression that cannot be recognized. When the
10890 function is called, `input_line_pointer' will point to the start of
10891 the expression. */
10892
10893 void
10894 md_operand (expressionS *e)
10895 {
10896 switch (*input_line_pointer)
10897 {
10898 case '[':
10899 ++input_line_pointer;
10900 expression_and_evaluate (e);
10901 if (*input_line_pointer != ']')
10902 {
10903 as_bad (_("Closing bracket missing"));
10904 goto err;
10905 }
10906 else
10907 {
10908 if (e->X_op != O_register
10909 || e->X_add_number < REG_GR
10910 || e->X_add_number > REG_GR + 127)
10911 {
10912 as_bad (_("Index must be a general register"));
10913 e->X_add_number = REG_GR;
10914 }
10915
10916 ++input_line_pointer;
10917 e->X_op = O_index;
10918 }
10919 break;
10920
10921 default:
10922 break;
10923 }
10924 return;
10925
10926 err:
10927 ignore_rest_of_line ();
10928 }
10929
10930 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10931 a section symbol plus some offset. For relocs involving @fptr()
10932 directives, we don't want such adjustments since we need to have the
10933 original symbol's name in the reloc. */
10934 int
10935 ia64_fix_adjustable (fixS *fix)
10936 {
10937 /* Prevent all adjustments to global symbols */
10938 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10939 return 0;
10940
10941 switch (fix->fx_r_type)
10942 {
10943 case BFD_RELOC_IA64_FPTR64I:
10944 case BFD_RELOC_IA64_FPTR32MSB:
10945 case BFD_RELOC_IA64_FPTR32LSB:
10946 case BFD_RELOC_IA64_FPTR64MSB:
10947 case BFD_RELOC_IA64_FPTR64LSB:
10948 case BFD_RELOC_IA64_LTOFF_FPTR22:
10949 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10950 return 0;
10951 default:
10952 break;
10953 }
10954
10955 return 1;
10956 }
10957
10958 int
10959 ia64_force_relocation (fixS *fix)
10960 {
10961 switch (fix->fx_r_type)
10962 {
10963 case BFD_RELOC_IA64_FPTR64I:
10964 case BFD_RELOC_IA64_FPTR32MSB:
10965 case BFD_RELOC_IA64_FPTR32LSB:
10966 case BFD_RELOC_IA64_FPTR64MSB:
10967 case BFD_RELOC_IA64_FPTR64LSB:
10968
10969 case BFD_RELOC_IA64_LTOFF22:
10970 case BFD_RELOC_IA64_LTOFF64I:
10971 case BFD_RELOC_IA64_LTOFF_FPTR22:
10972 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10973 case BFD_RELOC_IA64_PLTOFF22:
10974 case BFD_RELOC_IA64_PLTOFF64I:
10975 case BFD_RELOC_IA64_PLTOFF64MSB:
10976 case BFD_RELOC_IA64_PLTOFF64LSB:
10977
10978 case BFD_RELOC_IA64_LTOFF22X:
10979 case BFD_RELOC_IA64_LDXMOV:
10980 return 1;
10981
10982 default:
10983 break;
10984 }
10985
10986 return generic_force_reloc (fix);
10987 }
10988
10989 /* Decide what point a pc-relative relocation is relative to,
10990 relative to the pc-relative fixup. Er, relatively speaking. */
10991 long
10992 ia64_pcrel_from_section (fixS *fix, segT sec)
10993 {
10994 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10995
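/* In code sections the pc-relative value is measured from the start of the
   16-byte bundle containing the fixup, so mask off the slot bits.  */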
10996 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10997 off &= ~0xfUL;
10998
10999 return off;
11000 }
11001
11002
11003 /* Used to emit section-relative relocs for the dwarf2 debug data. */
11004 void
11005 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11006 {
11007 expressionS exp;
11008
11009 exp.X_op = O_pseudo_fixup;
11010 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11011 exp.X_add_number = 0;
11012 exp.X_add_symbol = symbol;
11013 emit_expr (&exp, size);
11014 }
11015
11016 /* This is called whenever some data item (not an instruction) needs a
11017 fixup. We pick the right reloc code depending on the byte order
11018 currently in effect. */
11019 void
11020 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11021 bfd_reloc_code_real_type code)
11022 {
11023 fixS *fix;
11024
11025 switch (nbytes)
11026 {
11027 /* There are no relocs for 8- and 16-bit quantities, but we allow
11028 them here since they will work fine as long as the expression
11029 is fully defined at the end of the pass over the source file. */
11030 case 1: code = BFD_RELOC_8; break;
11031 case 2: code = BFD_RELOC_16; break;
11032 case 4:
11033 if (target_big_endian)
11034 code = BFD_RELOC_IA64_DIR32MSB;
11035 else
11036 code = BFD_RELOC_IA64_DIR32LSB;
11037 break;
11038
11039 case 8:
11040 /* In 32-bit mode, data8 could mean function descriptors too. */
11041 if (exp->X_op == O_pseudo_fixup
11042 && exp->X_op_symbol
11043 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11044 && !(md.flags & EF_IA_64_ABI64))
11045 {
11046 if (target_big_endian)
11047 code = BFD_RELOC_IA64_IPLTMSB;
11048 else
11049 code = BFD_RELOC_IA64_IPLTLSB;
11050 exp->X_op = O_symbol;
11051 break;
11052 }
11053 else
11054 {
11055 if (target_big_endian)
11056 code = BFD_RELOC_IA64_DIR64MSB;
11057 else
11058 code = BFD_RELOC_IA64_DIR64LSB;
11059 break;
11060 }
11061
11062 case 16:
11063 if (exp->X_op == O_pseudo_fixup
11064 && exp->X_op_symbol
11065 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11066 {
11067 if (target_big_endian)
11068 code = BFD_RELOC_IA64_IPLTMSB;
11069 else
11070 code = BFD_RELOC_IA64_IPLTLSB;
11071 exp->X_op = O_symbol;
11072 break;
11073 }
11074 /* FALLTHRU */
11075
11076 default:
11077 as_bad (_("Unsupported fixup size %d"), nbytes);
11078 ignore_rest_of_line ();
11079 return;
11080 }
11081
11082 if (exp->X_op == O_pseudo_fixup)
11083 {
11084 exp->X_op = O_symbol;
11085 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11086 /* ??? If code unchanged, unsupported. */
11087 }
11088
11089 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11090 /* We need to store the byte order in effect in case we're going
11091 to fix an 8- or 16-bit relocation (for which there are no real
11092 relocs available). See md_apply_fix(). */
11093 fix->tc_fix_data.bigendian = target_big_endian;
11094 }
11095
11096 /* Return the actual relocation we wish to associate with the pseudo
11097 reloc described by SYM and R_TYPE. SYM should be one of the
11098 symbols in the pseudo_func array, or NULL. */
11099
11100 static bfd_reloc_code_real_type
11101 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11102 {
11103 bfd_reloc_code_real_type newr = 0;
11104 const char *type = NULL, *suffix = "";
11105
11106 if (sym == NULL)
11107 {
11108 return r_type;
11109 }
11110
11111 switch (S_GET_VALUE (sym))
11112 {
11113 case FUNC_FPTR_RELATIVE:
11114 switch (r_type)
11115 {
11116 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11117 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11118 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11119 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11120 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11121 default: type = "FPTR"; break;
11122 }
11123 break;
11124
11125 case FUNC_GP_RELATIVE:
11126 switch (r_type)
11127 {
11128 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11129 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11130 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11131 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11132 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11133 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11134 default: type = "GPREL"; break;
11135 }
11136 break;
11137
11138 case FUNC_LT_RELATIVE:
11139 switch (r_type)
11140 {
11141 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11142 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11143 default: type = "LTOFF"; break;
11144 }
11145 break;
11146
11147 case FUNC_LT_RELATIVE_X:
11148 switch (r_type)
11149 {
11150 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11151 default: type = "LTOFF"; suffix = "X"; break;
11152 }
11153 break;
11154
11155 case FUNC_PC_RELATIVE:
11156 switch (r_type)
11157 {
11158 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11159 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11160 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11161 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11162 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11163 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11164 default: type = "PCREL"; break;
11165 }
11166 break;
11167
11168 case FUNC_PLT_RELATIVE:
11169 switch (r_type)
11170 {
11171 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11172 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11173 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11174 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11175 default: type = "PLTOFF"; break;
11176 }
11177 break;
11178
11179 case FUNC_SEC_RELATIVE:
11180 switch (r_type)
11181 {
11182 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11183 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11184 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11185 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11186 default: type = "SECREL"; break;
11187 }
11188 break;
11189
11190 case FUNC_SEG_RELATIVE:
11191 switch (r_type)
11192 {
11193 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11194 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11195 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11196 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11197 default: type = "SEGREL"; break;
11198 }
11199 break;
11200
11201 case FUNC_LTV_RELATIVE:
11202 switch (r_type)
11203 {
11204 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11205 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11206 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11207 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11208 default: type = "LTV"; break;
11209 }
11210 break;
11211
11212 case FUNC_LT_FPTR_RELATIVE:
11213 switch (r_type)
11214 {
11215 case BFD_RELOC_IA64_IMM22:
11216 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11217 case BFD_RELOC_IA64_IMM64:
11218 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11219 case BFD_RELOC_IA64_DIR32MSB:
11220 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11221 case BFD_RELOC_IA64_DIR32LSB:
11222 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11223 case BFD_RELOC_IA64_DIR64MSB:
11224 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11225 case BFD_RELOC_IA64_DIR64LSB:
11226 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11227 default:
11228 type = "LTOFF_FPTR"; break;
11229 }
11230 break;
11231
11232 case FUNC_TP_RELATIVE:
11233 switch (r_type)
11234 {
11235 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11236 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11237 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11238 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11239 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11240 default: type = "TPREL"; break;
11241 }
11242 break;
11243
11244 case FUNC_LT_TP_RELATIVE:
11245 switch (r_type)
11246 {
11247 case BFD_RELOC_IA64_IMM22:
11248 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11249 default:
11250 type = "LTOFF_TPREL"; break;
11251 }
11252 break;
11253
11254 case FUNC_DTP_MODULE:
11255 switch (r_type)
11256 {
11257 case BFD_RELOC_IA64_DIR64MSB:
11258 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11259 case BFD_RELOC_IA64_DIR64LSB:
11260 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11261 default:
11262 type = "DTPMOD"; break;
11263 }
11264 break;
11265
11266 case FUNC_LT_DTP_MODULE:
11267 switch (r_type)
11268 {
11269 case BFD_RELOC_IA64_IMM22:
11270 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11271 default:
11272 type = "LTOFF_DTPMOD"; break;
11273 }
11274 break;
11275
11276 case FUNC_DTP_RELATIVE:
11277 switch (r_type)
11278 {
11279 case BFD_RELOC_IA64_DIR32MSB:
11280 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11281 case BFD_RELOC_IA64_DIR32LSB:
11282 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11283 case BFD_RELOC_IA64_DIR64MSB:
11284 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11285 case BFD_RELOC_IA64_DIR64LSB:
11286 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11287 case BFD_RELOC_IA64_IMM14:
11288 newr = BFD_RELOC_IA64_DTPREL14; break;
11289 case BFD_RELOC_IA64_IMM22:
11290 newr = BFD_RELOC_IA64_DTPREL22; break;
11291 case BFD_RELOC_IA64_IMM64:
11292 newr = BFD_RELOC_IA64_DTPREL64I; break;
11293 default:
11294 type = "DTPREL"; break;
11295 }
11296 break;
11297
11298 case FUNC_LT_DTP_RELATIVE:
11299 switch (r_type)
11300 {
11301 case BFD_RELOC_IA64_IMM22:
11302 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11303 default:
11304 type = "LTOFF_DTPREL"; break;
11305 }
11306 break;
11307
11308 case FUNC_IPLT_RELOC:
11309 switch (r_type)
11310 {
11311 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11312 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11313 default: type = "IPLT"; break;
11314 }
11315 break;
11316
11317 #ifdef TE_VMS
11318 case FUNC_SLOTCOUNT_RELOC:
11319 return DUMMY_RELOC_IA64_SLOTCOUNT;
11320 #endif
11321
11322 default:
11323 abort ();
11324 }
11325
11326 if (newr)
11327 return newr;
11328 else
11329 {
11330 int width;
11331
11332 if (!type)
11333 abort ();
11334 switch (r_type)
11335 {
11336 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11337 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11338 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11339 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11340 case BFD_RELOC_UNUSED: width = 13; break;
11341 case BFD_RELOC_IA64_IMM14: width = 14; break;
11342 case BFD_RELOC_IA64_IMM22: width = 22; break;
11343 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11344 default: abort ();
11345 }
11346
11347 /* This should be an error, but since previously there wasn't any
11348 diagnostic here, don't make it fail because of this for now. */
11349 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11350 return r_type;
11351 }
11352 }
11353
11354 /* Here is where we generate the appropriate reloc for pseudo relocation
11355 functions. */
11356 void
11357 ia64_validate_fix (fixS *fix)
11358 {
11359 switch (fix->fx_r_type)
11360 {
11361 case BFD_RELOC_IA64_FPTR64I:
11362 case BFD_RELOC_IA64_FPTR32MSB:
11363 case BFD_RELOC_IA64_FPTR64LSB:
11364 case BFD_RELOC_IA64_LTOFF_FPTR22:
11365 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11366 if (fix->fx_offset != 0)
11367 as_bad_where (fix->fx_file, fix->fx_line,
11368 _("No addend allowed in @fptr() relocation"));
11369 break;
11370 default:
11371 break;
11372 }
11373 }
11374
11375 static void
11376 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11377 {
11378 bfd_vma insn[3], t0, t1, control_bits;
11379 const char *err;
11380 char *fixpos;
11381 long slot;
11382
11383 slot = fix->fx_where & 0x3;
11384 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11385
11386 /* Bundles are always in little-endian byte order */
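/* A bundle is 128 bits: a 5-bit template in the low-order bits of the first
   64-bit word followed by three 41-bit instruction slots.  Unpack the
   template into CONTROL_BITS and the slots into INSN[0..2].  */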
11387 t0 = bfd_getl64 (fixpos);
11388 t1 = bfd_getl64 (fixpos + 8);
11389 control_bits = t0 & 0x1f;
11390 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11391 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11392 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11393
11394 err = NULL;
11395 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11396 {
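/* IMMU64 is movl's 64-bit immediate: bits 22..62 fill the whole L slot,
   and the remaining bits are scattered over the imm7b, imm9d, imm5c, ic
   and i fields of the X-unit slot, as encoded below.  */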
11397 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11398 insn[2] |= (((value & 0x7f) << 13)
11399 | (((value >> 7) & 0x1ff) << 27)
11400 | (((value >> 16) & 0x1f) << 22)
11401 | (((value >> 21) & 0x1) << 21)
11402 | (((value >> 63) & 0x1) << 36));
11403 }
11404 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11405 {
11406 if (value & ~0x3fffffffffffffffULL)
11407 err = _("integer operand out of range");
11408 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11409 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11410 }
11411 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11412 {
11413 value >>= 4;
11414 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11415 insn[2] |= ((((value >> 59) & 0x1) << 36)
11416 | (((value >> 0) & 0xfffff) << 13));
11417 }
11418 else
11419 err = (*odesc->insert) (odesc, value, insn + slot);
11420
11421 if (err)
11422 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11423
11424 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11425 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11426 number_to_chars_littleendian (fixpos + 0, t0, 8);
11427 number_to_chars_littleendian (fixpos + 8, t1, 8);
11428 }
11429
11430 /* Attempt to simplify or even eliminate a fixup. The return value is
11431 ignored; perhaps it was once meaningful, but now it is historical.
11432 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11433
11434 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11435 (if possible). */
11436
11437 void
11438 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11439 {
11440 char *fixpos;
11441 valueT value = *valP;
11442
11443 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11444
11445 if (fix->fx_pcrel)
11446 {
11447 switch (fix->fx_r_type)
11448 {
11449 case BFD_RELOC_IA64_PCREL21B: break;
11450 case BFD_RELOC_IA64_PCREL21BI: break;
11451 case BFD_RELOC_IA64_PCREL21F: break;
11452 case BFD_RELOC_IA64_PCREL21M: break;
11453 case BFD_RELOC_IA64_PCREL60B: break;
11454 case BFD_RELOC_IA64_PCREL22: break;
11455 case BFD_RELOC_IA64_PCREL64I: break;
11456 case BFD_RELOC_IA64_PCREL32MSB: break;
11457 case BFD_RELOC_IA64_PCREL32LSB: break;
11458 case BFD_RELOC_IA64_PCREL64MSB: break;
11459 case BFD_RELOC_IA64_PCREL64LSB: break;
11460 default:
11461 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11462 fix->fx_r_type);
11463 break;
11464 }
11465 }
11466 if (fix->fx_addsy)
11467 {
11468 switch ((unsigned) fix->fx_r_type)
11469 {
11470 case BFD_RELOC_UNUSED:
11471 /* This must be a TAG13 or TAG13b operand. There are no external
11472 relocs defined for them, so we must give an error. */
11473 as_bad_where (fix->fx_file, fix->fx_line,
11474 _("%s must have a constant value"),
11475 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11476 fix->fx_done = 1;
11477 return;
11478
11479 case BFD_RELOC_IA64_TPREL14:
11480 case BFD_RELOC_IA64_TPREL22:
11481 case BFD_RELOC_IA64_TPREL64I:
11482 case BFD_RELOC_IA64_LTOFF_TPREL22:
11483 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11484 case BFD_RELOC_IA64_DTPREL14:
11485 case BFD_RELOC_IA64_DTPREL22:
11486 case BFD_RELOC_IA64_DTPREL64I:
11487 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11488 S_SET_THREAD_LOCAL (fix->fx_addsy);
11489 break;
11490
11491 #ifdef TE_VMS
11492 case DUMMY_RELOC_IA64_SLOTCOUNT:
11493 as_bad_where (fix->fx_file, fix->fx_line,
11494 _("cannot resolve @slotcount parameter"));
11495 fix->fx_done = 1;
11496 return;
11497 #endif
11498
11499 default:
11500 break;
11501 }
11502 }
11503 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11504 {
11505 #ifdef TE_VMS
11506 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11507 {
11508 /* For @slotcount, convert an address difference into a slot-count
11509 difference. */
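/* Each 16-byte bundle holds three instruction slots, so each whole bundle
   in the difference contributes three slots; the low nibble selects the
   remaining slot offset.  The 0x0e and 0x0f cases add one and two slots
   respectively.  */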
11510 valueT v;
11511
11512 v = (value >> 4) * 3;
11513 switch (value & 0x0f)
11514 {
11515 case 0:
11516 case 1:
11517 case 2:
11518 v += value & 0x0f;
11519 break;
11520 case 0x0f:
11521 v += 2;
11522 break;
11523 case 0x0e:
11524 v += 1;
11525 break;
11526 default:
11527 as_bad (_("invalid @slotcount value"));
11528 }
11529 value = v;
11530 }
11531 #endif
11532
11533 if (fix->tc_fix_data.bigendian)
11534 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11535 else
11536 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11537 fix->fx_done = 1;
11538 }
11539 else
11540 {
11541 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11542 fix->fx_done = 1;
11543 }
11544 }
11545
11546 /* Generate the BFD reloc to be stuck in the object file from the
11547 fixup used internally in the assembler. */
11548
11549 arelent *
11550 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11551 {
11552 arelent *reloc;
11553
11554 reloc = xmalloc (sizeof (*reloc));
11555 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
11556 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11557 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11558 reloc->addend = fixp->fx_offset;
11559 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11560
11561 if (!reloc->howto)
11562 {
11563 as_bad_where (fixp->fx_file, fixp->fx_line,
11564 _("Cannot represent %s relocation in object file"),
11565 bfd_get_reloc_code_name (fixp->fx_r_type));
11566 free (reloc);
11567 return NULL;
11568 }
11569 return reloc;
11570 }
11571
11572 /* Turn a string in input_line_pointer into a floating point constant
11573 of type TYPE, and store the appropriate bytes in *LIT. The number
11574 of LITTLENUMS emitted is stored in *SIZE. An error message is
11575 returned, or NULL on OK. */
11576
11577 #define MAX_LITTLENUMS 5
11578
11579 const char *
11580 md_atof (int type, char *lit, int *size)
11581 {
11582 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11583 char *t;
11584 int prec;
11585
11586 switch (type)
11587 {
11588 /* IEEE floats */
11589 case 'f':
11590 case 'F':
11591 case 's':
11592 case 'S':
11593 prec = 2;
11594 break;
11595
11596 case 'd':
11597 case 'D':
11598 case 'r':
11599 case 'R':
11600 prec = 4;
11601 break;
11602
11603 case 'x':
11604 case 'X':
11605 case 'p':
11606 case 'P':
11607 prec = 5;
11608 break;
11609
11610 default:
11611 *size = 0;
11612 return _("Unrecognized or unsupported floating point constant");
11613 }
11614 t = atof_ieee (input_line_pointer, type, words);
11615 if (t)
11616 input_line_pointer = t;
11617
11618 (*ia64_float_to_chars) (lit, words, prec);
11619
11620 if (type == 'X')
11621 {
11622 /* It is 10 byte floating point with 6 byte padding. */
11623 memset (&lit [10], 0, 6);
11624 *size = 8 * sizeof (LITTLENUM_TYPE);
11625 }
11626 else
11627 *size = prec * sizeof (LITTLENUM_TYPE);
11628
11629 return NULL;
11630 }
11631
11632 /* Handle ia64-specific semantics of the align directive. */
11633
11634 void
11635 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11636 const char *fill ATTRIBUTE_UNUSED,
11637 int len ATTRIBUTE_UNUSED,
11638 int max ATTRIBUTE_UNUSED)
11639 {
11640 if (subseg_text_p (now_seg))
11641 ia64_flush_insns ();
11642 }
11643
11644 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11645 of an rs_align_code fragment. */
11646
11647 void
11648 ia64_handle_align (fragS *fragp)
11649 {
11650 int bytes;
11651 char *p;
11652 const unsigned char *nop_type;
11653
11654 if (fragp->fr_type != rs_align_code)
11655 return;
11656
11657 /* Check if this frag has to end with a stop bit. */
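/* le_nop and le_nop_stop are the pre-encoded little-endian 16-byte all-nop
   bundles used as alignment filler; the latter has the stop bit set.  */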
11658 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11659
11660 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11661 p = fragp->fr_literal + fragp->fr_fix;
11662
11663 /* If no padding is needed, check whether we still need a stop bit. */
11664 if (!bytes && fragp->tc_frag_data)
11665 {
11666 if (fragp->fr_fix < 16)
11667 #if 1
11668 /* FIXME: It won't work with
11669 .align 16
11670 alloc r32=ar.pfs,1,2,4,0
11671 */
11672 ;
11673 #else
11674 as_bad_where (fragp->fr_file, fragp->fr_line,
11675 _("Can't add stop bit to mark end of instruction group"));
11676 #endif
11677 else
11678 /* Bundles are always in little-endian byte order. Make sure
11679 the previous bundle has the stop bit. */
11680 *(p - 16) |= 1;
11681 }
11682
11683 /* Make sure we are on a 16-byte boundary, in case someone has been
11684 putting data into a text section. */
11685 if (bytes & 15)
11686 {
11687 int fix = bytes & 15;
11688 memset (p, 0, fix);
11689 p += fix;
11690 bytes -= fix;
11691 fragp->fr_fix += fix;
11692 }
11693
11694 /* Instruction bundles are always little-endian. */
11695 memcpy (p, nop_type, 16);
11696 fragp->fr_var = 16;
11697 }
11698
11699 static void
11700 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11701 int prec)
11702 {
11703 while (prec--)
11704 {
11705 number_to_chars_bigendian (lit, (long) (*words++),
11706 sizeof (LITTLENUM_TYPE));
11707 lit += sizeof (LITTLENUM_TYPE);
11708 }
11709 }
11710
11711 static void
11712 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11713 int prec)
11714 {
11715 while (prec--)
11716 {
11717 number_to_chars_littleendian (lit, (long) (words[prec]),
11718 sizeof (LITTLENUM_TYPE));
11719 lit += sizeof (LITTLENUM_TYPE);
11720 }
11721 }
11722
11723 void
11724 ia64_elf_section_change_hook (void)
11725 {
11726 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11727 && elf_linked_to_section (now_seg) == NULL)
11728 elf_linked_to_section (now_seg) = text_section;
11729 dot_byteorder (-1);
11730 }
11731
11732 /* Check if a label should be made global. */
11733 void
11734 ia64_check_label (symbolS *label)
11735 {
11736 if (*input_line_pointer == ':')
11737 {
11738 S_SET_EXTERNAL (label);
11739 input_line_pointer++;
11740 }
11741 }
11742
11743 /* Used to remember where .alias and .secalias directives are seen. We
11744 will rename symbol and section names when we are about to output
11745 the relocatable file. */
11746 struct alias
11747 {
11748 const char *file; /* The file where the directive is seen. */
11749 unsigned int line; /* The line number the directive is at. */
11750 const char *name; /* The original name of the symbol. */
11751 };
11752
11753 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11754 .secalias. Otherwise, it is .alias. */
11755 static void
11756 dot_alias (int section)
11757 {
11758 char *name, *alias;
11759 char delim;
11760 char *end_name;
11761 int len;
11762 const char *error_string;
11763 struct alias *h;
11764 const char *a;
11765 struct hash_control *ahash, *nhash;
11766 const char *kind;
11767
11768 delim = get_symbol_name (&name);
11769 end_name = input_line_pointer;
11770 *end_name = delim;
11771
11772 if (name == end_name)
11773 {
11774 as_bad (_("expected symbol name"));
11775 ignore_rest_of_line ();
11776 return;
11777 }
11778
11779 SKIP_WHITESPACE_AFTER_NAME ();
11780
11781 if (*input_line_pointer != ',')
11782 {
11783 *end_name = 0;
11784 as_bad (_("expected comma after \"%s\""), name);
11785 *end_name = delim;
11786 ignore_rest_of_line ();
11787 return;
11788 }
11789
11790 input_line_pointer++;
11791 *end_name = 0;
11792 ia64_canonicalize_symbol_name (name);
11793
11794 /* We call demand_copy_C_string to check whether the alias string is valid.
11795 There should be a closing `"' and no `\0' in the string. */
11796 alias = demand_copy_C_string (&len);
11797 if (alias == NULL)
11798 {
11799 ignore_rest_of_line ();
11800 return;
11801 }
11802
11803 /* Make a copy of name string. */
11804 len = strlen (name) + 1;
11805 obstack_grow (&notes, name, len);
11806 name = obstack_finish (&notes);
11807
11808 if (section)
11809 {
11810 kind = "section";
11811 ahash = secalias_hash;
11812 nhash = secalias_name_hash;
11813 }
11814 else
11815 {
11816 kind = "symbol";
11817 ahash = alias_hash;
11818 nhash = alias_name_hash;
11819 }
11820
11821 /* Check if alias has been used before. */
11822 h = (struct alias *) hash_find (ahash, alias);
11823 if (h)
11824 {
11825 if (strcmp (h->name, name))
11826 as_bad (_("`%s' is already the alias of %s `%s'"),
11827 alias, kind, h->name);
11828 goto out;
11829 }
11830
11831 /* Check if name already has an alias. */
11832 a = (const char *) hash_find (nhash, name);
11833 if (a)
11834 {
11835 if (strcmp (a, alias))
11836 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11837 goto out;
11838 }
11839
11840 h = (struct alias *) xmalloc (sizeof (struct alias));
11841 h->file = as_where (&h->line);
11842 h->name = name;
11843
11844 error_string = hash_jam (ahash, alias, (void *) h);
11845 if (error_string)
11846 {
11847 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11848 alias, kind, error_string);
11849 goto out;
11850 }
11851
11852 error_string = hash_jam (nhash, name, (void *) alias);
11853 if (error_string)
11854 {
11855 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11856 alias, kind, error_string);
11857 out:
11858 obstack_free (&notes, name);
11859 obstack_free (&notes, alias);
11860 }
11861
11862 demand_empty_rest_of_line ();
11863 }
11864
11865 /* Rename the original symbol to its alias. */
11866 static void
11867 do_alias (const char *alias, void *value)
11868 {
11869 struct alias *h = (struct alias *) value;
11870 symbolS *sym = symbol_find (h->name);
11871
11872 if (sym == NULL)
11873 {
11874 #ifdef TE_VMS
11875 /* VMS uses .alias extensively to alias CRTL functions to the same
11876 names with a decc$ prefix. Sometimes the function gets optimized away
11877 and a warning results, which should be suppressed. */
11878 if (strncmp (alias, "decc$", 5) != 0)
11879 #endif
11880 as_warn_where (h->file, h->line,
11881 _("symbol `%s' aliased to `%s' is not used"),
11882 h->name, alias);
11883 }
11884 else
11885 S_SET_NAME (sym, (char *) alias);
11886 }
11887
11888 /* Called from write_object_file. */
11889 void
11890 ia64_adjust_symtab (void)
11891 {
11892 hash_traverse (alias_hash, do_alias);
11893 }
11894
11895 /* Rename the original section name to its alias. */
11896 static void
11897 do_secalias (const char *alias, void *value)
11898 {
11899 struct alias *h = (struct alias *) value;
11900 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11901
11902 if (sec == NULL)
11903 as_warn_where (h->file, h->line,
11904 _("section `%s' aliased to `%s' is not used"),
11905 h->name, alias);
11906 else
11907 sec->name = alias;
11908 }
11909
11910 /* Called from write_object_file. */
11911 void
11912 ia64_frob_file (void)
11913 {
11914 hash_traverse (secalias_hash, do_secalias);
11915 }
11916
11917 #ifdef TE_VMS
11918 #define NT_VMS_MHD 1
11919 #define NT_VMS_LNM 2
11920
11921 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11922 .note section. */
11923
11924 /* Manufacture a VMS-like time string. */
11925 static void
11926 get_vms_time (char *Now)
11927 {
11928 char *pnt;
11929 time_t timeb;
11930
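/* ctime yields a fixed-format string such as "Thu Feb 24 15:00:00 2005\n";
   NUL-terminate the individual fields in place and reassemble them below
   as DD-MMM-YYYY HH:MM.  */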
11931 time (&timeb);
11932 pnt = ctime (&timeb);
11933 pnt[3] = 0;
11934 pnt[7] = 0;
11935 pnt[10] = 0;
11936 pnt[16] = 0;
11937 pnt[24] = 0;
11938 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11939 }
11940
11941 void
11942 ia64_vms_note (void)
11943 {
11944 char *p;
11945 asection *seg = now_seg;
11946 subsegT subseg = now_subseg;
11947 asection *secp = NULL;
11948 char *bname;
11949 char buf [256];
11950 symbolS *sym;
11951
11952 /* Create the .note section. */
11953
11954 secp = subseg_new (".note", 0);
11955 bfd_set_section_flags (stdoutput,
11956 secp,
11957 SEC_HAS_CONTENTS | SEC_READONLY);
11958
11959 /* Module header note (MHD). */
11960 bname = xstrdup (lbasename (out_file_name));
11961 if ((p = strrchr (bname, '.')))
11962 *p = '\0';
11963
11964 /* VMS note header is 24 bytes long. */
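/* The header holds the name size, descriptor size and note type; the VMS
   toolchain uses 64-bit little-endian fields here (hence 24 bytes) rather
   than the 4-byte fields of a generic ELF note.  */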
11965 p = frag_more (8 + 8 + 8);
11966 number_to_chars_littleendian (p + 0, 8, 8);
11967 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11968 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11969
11970 p = frag_more (8);
11971 strcpy (p, "IPF/VMS");
11972
11973 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11974 get_vms_time (p);
11975 strcpy (p + 17, "24-FEB-2005 15:00");
11976 p += 17 + 17;
11977 strcpy (p, bname);
11978 p += strlen (bname) + 1;
11979 free (bname);
11980 strcpy (p, "V1.0");
11981
11982 frag_align (3, 0, 0);
11983
11984 /* Language processor name note. */
11985 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11986 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11987
11988 p = frag_more (8 + 8 + 8);
11989 number_to_chars_littleendian (p + 0, 8, 8);
11990 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11991 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11992
11993 p = frag_more (8);
11994 strcpy (p, "IPF/VMS");
11995
11996 p = frag_more (strlen (buf) + 1);
11997 strcpy (p, buf);
11998
11999 frag_align (3, 0, 0);
12000
12001 secp = subseg_new (".vms_display_name_info", 0);
12002 bfd_set_section_flags (stdoutput,
12003 secp,
12004 SEC_HAS_CONTENTS | SEC_READONLY);
12005
12006 /* This symbol should be passed on the command line and should vary
12007 according to the language. */
12008 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
12009 absolute_section, 0, &zero_address_frag);
12010 symbol_table_insert (sym);
12011 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
12012
12013 p = frag_more (4);
12014 /* Format 3 of VMS demangler Spec. */
12015 number_to_chars_littleendian (p, 3, 4);
12016
12017 p = frag_more (4);
12018 /* Placeholder for the symbol table index of the above symbol. */
12019 number_to_chars_littleendian (p, -1, 4);
12020
12021 frag_align (3, 0, 0);
12022
12023 /* We probably can't restore the current segment, for there likely
12024 isn't one yet... */
12025 if (seg && subseg)
12026 subseg_set (seg, subseg);
12027 }
12028
12029 #endif /* TE_VMS */