1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
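/* For example: with NUM_SLOTS == 4 and md.curr_slot == 0, CURR_SLOT is
   md.slot[0] and PREV_SLOT wraps around to md.slot[3]; the slot array is
   thus used as a small ring buffer of the most recently parsed insns.  */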
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
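/* For example: with the layout above a concrete register is its class base
   plus its index, so f32 is REG_FR + 32, p6 is REG_P + 6 and b0 is REG_BR.
   The IND_* entries are the gas-only pseudo-registers mentioned above, used
   when parsing indirect register files such as "pmc[r3]".  */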
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
236
237 /* If X_op is not O_absent, the register name for the instruction's
238 qualifying predicate. If no predicate is given, p0 is assumed for
239 instructions that can be predicated. */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
275 track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
548 start with an at sign (@)). */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
647 habit of setting temporary sentinels). */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
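/* For illustration: only gr_values[0], i.e. r0, starts out as known, with
   value 0 (r0 is architecturally hard-wired to zero); its path field is
   initialized to the largest positive int, using INT_MAX when <limits.h>
   provides it and otherwise computing it from the field's size.  */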
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
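/* For illustration: an insn placed in the middle slot of a bundle whose frag
   address is A gets slot_number A + 1; distances between two such records
   are later computed from their frags, so insns emitted into another section
   or nops added for alignment do not distort the count.  */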
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
796 /* The input value is a negated offset from psp, and specifies an address
797 psp - offset. An encoded value E stands for the address psp + 16 - 4 * E,
798 so we must add 16 to the offset and divide by 4 to get the encoded value. */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
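/* Worked example: a save location at psp - 16 comes in as OFFSET = 16 and
   encodes as (16 + 16) / 4 = 8; decoding E = 8 via psp + 16 - 4 * E yields
   psp - 16 again.  */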
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
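/* For example: ar.rsc (16), ar.ccv (32) and ar.fpsr (40) fall in the
   memory-unit-only range 0-47, while ar.pfs (64), ar.lc (65) and ar.ec (66)
   fall in the integer-unit-only range 64-111 tested above.  */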
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
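/* For example (hypothetical section name): '.section .mydata, "aws", @progbits'
   uses the 's' letter handled above to request SHF_IA_64_SHORT, which
   ia64_elf_section_flags below then maps to SEC_SMALL_DATA.  */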
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
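/* For example: a ".regstk 2, 3, 2, 0" directive (ins = 2, locs = 3, outs = 2,
   rots = 0) yields in.base = REG_GR + 32 (r32), loc.base = r34,
   out.base = r37 and a frame size (sof) of 7.  */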
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
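/* For example: an 8-byte constant arrives here with nbytes = 8, the loop
   computes log = 3, and do_align then enforces 1 << 3 = 8 byte alignment;
   for sizes that are not a power of two the loop stops at the lowest set
   bit.  */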
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS, 0,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (stdoutput, now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_section_size (stdoutput, now_seg) = size;
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
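/* For example: a prologue region of length 5 fits this short form and emits
   the single byte UNW_R1 | (0 << 5) | 5; a body region sets r to 1, and any
   length above 0x1f is handed to the long R3 format instead.  */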
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586 as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
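/* For example: ab = 1, reg = 3 packs into (1 << 5) | 3 == 0x23; the two ab
   bits select the register class of the spilled register and the five reg
   bits its number within that class, per the unwind descriptor format.  */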
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711 /* Don't free the first discarded record: it's being used as a
1712 terminator for (currently) br_gr and gr_gr processing, and
1713 keeping it also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
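/* For illustration: a mask of 0x16 (binary 10110) is split into a chain of
   three records whose frmask fields are 00010, 00100 and 10000, one record
   per set bit (presumably so that each saved register's save point can later
   be recorded individually as the pending-save list is consumed).  */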
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a unw_rec_list, emit the record in its correct format using the
2393 specified function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record type not valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a unw_rec_list list, process all the records with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
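/* Editorial note (illustration only): calc_record_size above and
   ia64_convert_frag below drive the very same record walk through two
   different vbyte callbacks -- one that merely counts bytes and one that
   writes them -- so the byte layout is defined in a single place.  A toy
   version of that two-pass pattern; the names below are made up for the
   sketch and are not gas APIs: */
#if 0
#include <stdio.h>
#include <string.h>

typedef void (*emit_func) (const char *bytes, int len);

static int toy_count;
static char toy_buf[64], *toy_ptr;

static void
count_bytes (const char *bytes, int len)
{
  (void) bytes;
  toy_count += len;
}

static void
write_bytes (const char *bytes, int len)
{
  memcpy (toy_ptr, bytes, len);
  toy_ptr += len;
}

/* Stand-in for process_unw_records: emit two fixed "records".  */
static void
walk_records (emit_func f)
{
  f ("\x01\x02", 2);
  f ("\x03", 1);
}

int
main (void)
{
  toy_count = 0;
  walk_records (count_bytes);    /* First pass: size the image.  */
  toy_ptr = toy_buf;
  walk_records (write_bytes);    /* Second pass: emit the same bytes.  */
  printf ("%d bytes emitted\n", toy_count);
  return 0;
}
#endif
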
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
2626
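/* Editorial note (illustration only): the fallback popcount above looks each
   4-bit nibble up in a 16-entry table and recurses on the remaining high
   bits, e.g. popcount (0xb3) = popcnt[0x3] + popcount (0xb) = 2 + 3 = 5.
   A standalone check of that claim, assuming the definitions above are in
   scope and GCC's __builtin_popcount is available for comparison: */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned int x;

  for (x = 0; x < 0x10000; x++)
    assert (popcount (x) == (unsigned) __builtin_popcount (x));
  return 0;
}
#endif
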
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 pos = 0;
2676 ++i;
2677 }
2678 }
2679 }
2680
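/* Editorial note (illustration only): set_imask above packs a 2-bit save
   type for each prologue instruction slot, four slots per byte with slot 0
   in the top two bits; the "+ 1" skips the first byte of the buffer (which
   imask_size allocates as an extra byte).  A minimal sketch of that packing
   for one slot: */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned char imask[8] = { 0 };
  unsigned long t = 5;       /* Sixth instruction slot of the region.  */
  unsigned int type = 2;     /* 2 = instruction saves next general reg.  */

  imask[t / 4 + 1] |= (type & 0x3) << (2 * (3 - t % 4));
  printf ("byte %lu = 0x%02x\n", t / 4 + 1, imask[t / 4 + 1]);  /* byte 2 = 0x20 */
  return 0;
}
#endif
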
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
2741 /* Fall through. */
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
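/* Editorial note (illustration only): slot_index above relies on IA-64
   bundles being 16 bytes wide with three instruction slots, and on gas
   encoding a slot address as the bundle address plus the slot number in the
   low bits.  Within a single frag the computation reduces to the helper
   sketched below (a hypothetical name, not a gas function): */
#if 0
#include <stdio.h>

static unsigned long
slots_between (unsigned long first_addr, unsigned long slot_addr)
{
  return 3 * ((slot_addr >> 4) - (first_addr >> 4))
         + ((slot_addr & 0x3) - (first_addr & 0x3));
}

int
main (void)
{
  /* From slot 1 of bundle 0 to slot 2 of bundle 3: 3 * 3 + (2 - 1) = 10.  */
  printf ("%lu slots\n", slots_between (0x01, 0x32));
  return 0;
}
#endif
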
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790 unresolved fields (i.e., length counts for a prologue). After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
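/* Editorial note (illustration only): the size computed above (and again in
   ia64_convert_frag below) is the raw descriptor byte count rounded up to a
   pointer-size boundary, plus the 8-byte header, plus one pointer if a
   personality routine is present.  Worked example assuming an LP64 target,
   i.e. md.pointer_size == 8: */
#if 0
#include <stdio.h>

int
main (void)
{
  int pointer_size = 8;         /* LP64 assumption for this example.  */
  int len = 13;                 /* Raw unwind descriptor bytes.  */
  int have_personality = 1;
  int pad = len % pointer_size;
  int size;

  if (pad != 0)
    len += pointer_size - pad;  /* 13 -> 16 */
  size = len + 8;               /* + header -> 24 */
  if (have_personality)
    size += pointer_size;       /* + personality slot -> 32 */
  printf ("unwind image size = %d\n", size);
  return 0;
}
#endif
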
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
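/* Editorial note (illustration only): the 8-byte word built by
   md_number_to_chars above packs the format version into bits 48 and up,
   the U/E handler flags into bits 32 and up, and the descriptor length in
   pointer-size words into the low bits.  Sketch of the same packing for an
   LP64 image with a personality routine, using the values seen above: */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t version = 1;
  uint64_t flags = 3;              /* EF_IA_64_ABI64 case above.  */
  uint64_t len_words = 16 / 8;     /* 16 descriptor bytes, LP64.  */
  uint64_t header = (version << 48) | (flags << 32) | len_words;

  printf ("header = 0x%016llx\n", (unsigned long long) header);  /* 0x0001000300000002 */
  return 0;
}
#endif
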
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
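/* Editorial note (illustration only): convert_expr_to_ab_reg above maps a
   preserved register onto the (ab, reg) pair used by the X-format spill
   records: ab 0 covers r4-r7, ab 1 the preserved FRs, ab 2 b1-b5, and ab 3
   the special registers.  The inverse mapping, as a small decoder with a
   hypothetical name (not a gas function): */
#if 0
#include <stdio.h>

static const char *
ab_reg_name (unsigned int ab, unsigned int reg, char *buf)
{
  static const char *const special[] =
    {
      "pr", "psp", "priunat", "b0", "ar.bsp", "ar.bspstore",
      "ar.rnat", "ar.unat", "ar.fpsr", "ar.pfs", "ar.lc"
    };

  switch (ab)
    {
    case 0: sprintf (buf, "r%u", reg); return buf;   /* r4-r7 */
    case 1: sprintf (buf, "f%u", reg); return buf;   /* f2-f5, f16-f31 */
    case 2: sprintf (buf, "b%u", reg); return buf;   /* b1-b5 */
    default: return reg < 11 ? special[reg] : "?";
    }
}

int
main (void)
{
  char buf[16];

  printf ("%s\n", ab_reg_name (3, 9, buf));   /* ar.pfs */
  printf ("%s\n", ab_reg_name (0, 5, buf));   /* r5 */
  return 0;
}
#endif
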
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3264
3265 /* The current entry can in fact be a chain of unwind entries. */
3266 if (unwind.current_entry == NULL)
3267 unwind.current_entry = ptr;
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
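/* Editorial note (illustration only): the consistency checks in dot_vframe
   and dot_save above imply that ".prologue mask, grsave" assigns rp (mask
   bit 8), ar.pfs (4), psp (2) and preds (1), in that order, to consecutive
   general registers starting at grsave; the expected register for a given
   bit is grsave plus the count of higher mask bits.  A sketch of that
   arithmetic (the reading of the mask bits is inferred from the checks
   above, not stated elsewhere in this file): */
#if 0
#include <stdio.h>

static unsigned
expected_gr (unsigned mask, unsigned bit, unsigned grsave)
{
  /* Count the mask bits numerically above BIT, as the checks above do.  */
  return grsave + (unsigned) __builtin_popcount (mask & ~((bit << 1) - 1));
}

int
main (void)
{
  unsigned mask = 0xe, grsave = 32;   /* e.g. ".prologue 14, 32"  */

  printf ("rp     -> r%u\n", expected_gr (mask, 8, grsave));   /* r32 */
  printf ("ar.pfs -> r%u\n", expected_gr (mask, 4, grsave));   /* r33 */
  printf ("psp    -> r%u\n", expected_gr (mask, 2, grsave));   /* r34 */
  return 0;
}
#endif
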
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
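/* Editorial note (illustration only): per the bookkeeping in dot_restore
   above, ".restore sp, N" pops N + 1 nested prologue regions and a bare
   ".restore sp" defaults to popping all of them.  A toy model of the
   resulting prologue_count (hypothetical helper, not a gas function): */
#if 0
#include <stdio.h>

static unsigned
count_after_restore (unsigned prologue_count, int operand /* -1 if absent */)
{
  unsigned long ecount = operand < 0 ? prologue_count - 1UL
                                     : (unsigned long) operand;

  if (ecount >= prologue_count)
    ecount = 0;                 /* dot_restore also reports an error here.  */
  return ecount < prologue_count ? prologue_count - (ecount + 1) : 0;
}

int
main (void)
{
  printf ("%u\n", count_after_restore (3, -1));   /* 0: all three popped.  */
  printf ("%u\n", count_after_restore (3, 1));    /* 1: two popped.  */
  return 0;
}
#endif
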
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527 text sect.           unwind table sect.
3528 name:                name:                        comments:
3529 ----------           -----------------            --------------------------------
3530 .text                .IA_64.unwind
3531 .text.foo            .IA_64.unwind.text.foo
3532 .foo                 .IA_64.unwind.foo
3533 .gnu.linkonce.t.foo
3534                      .gnu.linkonce.ia64unw.foo
3535 _info                .IA_64.unwind_info           gas issues error message (ditto)
3536 _infoFOO             .IA_64.unwind_infoFOO        gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (strncmp (text_name, "_info", 5) == 0)
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (strncmp (text_name, ".gnu.linkonce.t.",
3577 sizeof (".gnu.linkonce.t.") - 1) == 0)
3578 {
3579 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3580 suffix += sizeof (".gnu.linkonce.t.") - 1;
3581 }
3582
3583 sec_name = concat (prefix, suffix, NULL);
3584
3585 /* Handle COMDAT group. */
3586 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3587 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3588 {
3589 char *section;
3590 const char *group_name = elf_group_name (text_seg);
3591
3592 if (group_name == NULL)
3593 {
3594 as_bad (_("Group section `%s' has no group signature"),
3595 sec_text_name);
3596 ignore_rest_of_line ();
3597 free (sec_name);
3598 return;
3599 }
3600
3601 /* We have to construct a fake section directive. */
3602 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3603 set_section (section);
3604 free (section);
3605 }
3606 else
3607 {
3608 set_section (sec_name);
3609 bfd_set_section_flags (stdoutput, now_seg,
3610 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3611 }
3612
3613 elf_linked_to_section (now_seg) = text_seg;
3614 free (sec_name);
3615 }
3616
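/* Editorial note (illustration only): the name mapping performed by
   start_unwind_section above, shown for the unwind-table prefixes from the
   comment, using plain snprintf instead of gas's concat/set_section
   machinery (unwind_table_name is a made-up helper): */
#if 0
#include <stdio.h>
#include <string.h>

static void
unwind_table_name (const char *text, char *out, size_t n)
{
  static const char lo_t[] = ".gnu.linkonce.t.";

  if (strcmp (text, ".text") == 0)
    snprintf (out, n, ".IA_64.unwind");
  else if (strncmp (text, lo_t, sizeof lo_t - 1) == 0)
    snprintf (out, n, ".gnu.linkonce.ia64unw.%s", text + sizeof lo_t - 1);
  else
    snprintf (out, n, ".IA_64.unwind%s", text);
}

int
main (void)
{
  char buf[128];

  unwind_table_name (".text.foo", buf, sizeof buf);
  printf ("%s\n", buf);                 /* .IA_64.unwind.text.foo */
  unwind_table_name (".gnu.linkonce.t.foo", buf, sizeof buf);
  printf ("%s\n", buf);                 /* .gnu.linkonce.ia64unw.foo */
  return 0;
}
#endif
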
3617 static void
3618 generate_unwind_image (const segT text_seg)
3619 {
3620 int size, pad;
3621 unw_rec_list *list;
3622
3623 /* Mark the end of the unwind info, so that we can compute the size of the
3624 last unwind region. */
3625 add_unwind_entry (output_endp (), NOT_A_CHAR);
3626
3627 /* Force out pending instructions, to make sure all unwind records have
3628 a valid slot_number field. */
3629 ia64_flush_insns ();
3630
3631 /* Generate the unwind record. */
3632 list = optimize_unw_records (unwind.list);
3633 fixup_unw_records (list, 1);
3634 size = calc_record_size (list);
3635
3636 if (size > 0 || unwind.force_unwind_entry)
3637 {
3638 unwind.force_unwind_entry = 0;
3639 /* pad to pointer-size boundary. */
3640 pad = size % md.pointer_size;
3641 if (pad != 0)
3642 size += md.pointer_size - pad;
3643 /* Add 8 for the header. */
3644 size += 8;
3645 /* Add a pointer for the personality offset. */
3646 if (unwind.personality_routine)
3647 size += md.pointer_size;
3648 }
3649
3650 /* If there are unwind records, switch sections, and output the info. */
3651 if (size != 0)
3652 {
3653 expressionS exp;
3654 bfd_reloc_code_real_type reloc;
3655
3656 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3657
3658 /* Make sure the section has 4 byte alignment for ILP32 and
3659 8 byte alignment for LP64. */
3660 frag_align (md.pointer_size_shift, 0, 0);
3661 record_alignment (now_seg, md.pointer_size_shift);
3662
3663 /* Set expression which points to start of unwind descriptor area. */
3664 unwind.info = expr_build_dot ();
3665
3666 frag_var (rs_machine_dependent, size, size, 0, 0,
3667 (offsetT) (long) unwind.personality_routine,
3668 (char *) list);
3669
3670 /* Add the personality address to the image. */
3671 if (unwind.personality_routine != 0)
3672 {
3673 exp.X_op = O_symbol;
3674 exp.X_add_symbol = unwind.personality_routine;
3675 exp.X_add_number = 0;
3676
3677 if (md.flags & EF_IA_64_BE)
3678 {
3679 if (md.flags & EF_IA_64_ABI64)
3680 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3681 else
3682 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3683 }
3684 else
3685 {
3686 if (md.flags & EF_IA_64_ABI64)
3687 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3688 else
3689 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3690 }
3691
3692 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3693 md.pointer_size, &exp, 0, reloc);
3694 unwind.personality_routine = 0;
3695 }
3696 }
3697
3698 free_saved_prologue_counts ();
3699 unwind.list = unwind.tail = unwind.current_entry = NULL;
3700 }
3701
3702 static void
3703 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3704 {
3705 if (!in_procedure ("handlerdata"))
3706 return;
3707 unwind.force_unwind_entry = 1;
3708
3709 /* Remember which segment we're in so we can switch back after .endp */
3710 unwind.saved_text_seg = now_seg;
3711 unwind.saved_text_subseg = now_subseg;
3712
3713 /* Generate unwind info into unwind-info section and then leave that
3714 section as the currently active one so dataXX directives go into
3715 the language specific data area of the unwind info block. */
3716 generate_unwind_image (now_seg);
3717 demand_empty_rest_of_line ();
3718 }
3719
3720 static void
3721 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3722 {
3723 if (!in_procedure ("unwentry"))
3724 return;
3725 unwind.force_unwind_entry = 1;
3726 demand_empty_rest_of_line ();
3727 }
3728
3729 static void
3730 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3731 {
3732 expressionS e;
3733 unsigned reg;
3734
3735 if (!in_prologue ("altrp"))
3736 return;
3737
3738 parse_operand_and_eval (&e, 0);
3739 reg = e.X_add_number - REG_BR;
3740 if (e.X_op != O_register || reg > 7)
3741 {
3742 as_bad (_("First operand to .altrp not a valid branch register"));
3743 reg = 0;
3744 }
3745 add_unwind_entry (output_rp_br (reg), 0);
3746 }
3747
3748 static void
3749 dot_savemem (int psprel)
3750 {
3751 expressionS e1, e2;
3752 int sep;
3753 int reg1, val;
3754 const char * const po = psprel ? "savepsp" : "savesp";
3755
3756 if (!in_prologue (po))
3757 return;
3758
3759 sep = parse_operand_and_eval (&e1, ',');
3760 if (sep == ',')
3761 sep = parse_operand_and_eval (&e2, ',');
3762 else
3763 e2.X_op = O_absent;
3764
3765 reg1 = e1.X_add_number;
3766 val = e2.X_add_number;
3767
3768 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3769 if (e1.X_op != O_register)
3770 {
3771 as_bad (_("First operand to .%s not a register"), po);
3772 reg1 = REG_PR; /* Anything valid is good here. */
3773 }
3774 if (e2.X_op != O_constant)
3775 {
3776 as_bad (_("Second operand to .%s not a constant"), po);
3777 val = 0;
3778 }
3779
3780 switch (reg1)
3781 {
3782 case REG_AR + AR_BSP:
3783 add_unwind_entry (output_bsp_when (), sep);
3784 add_unwind_entry ((psprel
3785 ? output_bsp_psprel
3786 : output_bsp_sprel) (val), NOT_A_CHAR);
3787 break;
3788 case REG_AR + AR_BSPSTORE:
3789 add_unwind_entry (output_bspstore_when (), sep);
3790 add_unwind_entry ((psprel
3791 ? output_bspstore_psprel
3792 : output_bspstore_sprel) (val), NOT_A_CHAR);
3793 break;
3794 case REG_AR + AR_RNAT:
3795 add_unwind_entry (output_rnat_when (), sep);
3796 add_unwind_entry ((psprel
3797 ? output_rnat_psprel
3798 : output_rnat_sprel) (val), NOT_A_CHAR);
3799 break;
3800 case REG_AR + AR_UNAT:
3801 add_unwind_entry (output_unat_when (), sep);
3802 add_unwind_entry ((psprel
3803 ? output_unat_psprel
3804 : output_unat_sprel) (val), NOT_A_CHAR);
3805 break;
3806 case REG_AR + AR_FPSR:
3807 add_unwind_entry (output_fpsr_when (), sep);
3808 add_unwind_entry ((psprel
3809 ? output_fpsr_psprel
3810 : output_fpsr_sprel) (val), NOT_A_CHAR);
3811 break;
3812 case REG_AR + AR_PFS:
3813 add_unwind_entry (output_pfs_when (), sep);
3814 add_unwind_entry ((psprel
3815 ? output_pfs_psprel
3816 : output_pfs_sprel) (val), NOT_A_CHAR);
3817 break;
3818 case REG_AR + AR_LC:
3819 add_unwind_entry (output_lc_when (), sep);
3820 add_unwind_entry ((psprel
3821 ? output_lc_psprel
3822 : output_lc_sprel) (val), NOT_A_CHAR);
3823 break;
3824 case REG_BR:
3825 add_unwind_entry (output_rp_when (), sep);
3826 add_unwind_entry ((psprel
3827 ? output_rp_psprel
3828 : output_rp_sprel) (val), NOT_A_CHAR);
3829 break;
3830 case REG_PR:
3831 add_unwind_entry (output_preds_when (), sep);
3832 add_unwind_entry ((psprel
3833 ? output_preds_psprel
3834 : output_preds_sprel) (val), NOT_A_CHAR);
3835 break;
3836 case REG_PRIUNAT:
3837 add_unwind_entry (output_priunat_when_mem (), sep);
3838 add_unwind_entry ((psprel
3839 ? output_priunat_psprel
3840 : output_priunat_sprel) (val), NOT_A_CHAR);
3841 break;
3842 default:
3843 as_bad (_("First operand to .%s not a valid register"), po);
3844 add_unwind_entry (NULL, sep);
3845 break;
3846 }
3847 }
3848
3849 static void
3850 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3851 {
3852 expressionS e;
3853 unsigned grmask;
3854 int sep;
3855
3856 if (!in_prologue ("save.g"))
3857 return;
3858
3859 sep = parse_operand_and_eval (&e, ',');
3860
3861 grmask = e.X_add_number;
3862 if (e.X_op != O_constant
3863 || e.X_add_number <= 0
3864 || e.X_add_number > 0xf)
3865 {
3866 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3867 grmask = 0;
3868 }
3869
3870 if (sep == ',')
3871 {
3872 unsigned reg;
3873 int n = popcount (grmask);
3874
3875 parse_operand_and_eval (&e, 0);
3876 reg = e.X_add_number - REG_GR;
3877 if (e.X_op != O_register || reg > 127)
3878 {
3879 as_bad (_("Second operand to .save.g must be a general register"));
3880 reg = 0;
3881 }
3882 else if (reg > 128U - n)
3883 {
3884 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3885 reg = 0;
3886 }
3887 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3888 }
3889 else
3890 add_unwind_entry (output_gr_mem (grmask), 0);
3891 }
3892
3893 static void
3894 dot_savef (int dummy ATTRIBUTE_UNUSED)
3895 {
3896 expressionS e;
3897
3898 if (!in_prologue ("save.f"))
3899 return;
3900
3901 parse_operand_and_eval (&e, 0);
3902
3903 if (e.X_op != O_constant
3904 || e.X_add_number <= 0
3905 || e.X_add_number > 0xfffff)
3906 {
3907 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3908 e.X_add_number = 0;
3909 }
3910 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3911 }
3912
3913 static void
3914 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3915 {
3916 expressionS e;
3917 unsigned brmask;
3918 int sep;
3919
3920 if (!in_prologue ("save.b"))
3921 return;
3922
3923 sep = parse_operand_and_eval (&e, ',');
3924
3925 brmask = e.X_add_number;
3926 if (e.X_op != O_constant
3927 || e.X_add_number <= 0
3928 || e.X_add_number > 0x1f)
3929 {
3930 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3931 brmask = 0;
3932 }
3933
3934 if (sep == ',')
3935 {
3936 unsigned reg;
3937 int n = popcount (brmask);
3938
3939 parse_operand_and_eval (&e, 0);
3940 reg = e.X_add_number - REG_GR;
3941 if (e.X_op != O_register || reg > 127)
3942 {
3943 as_bad (_("Second operand to .save.b must be a general register"));
3944 reg = 0;
3945 }
3946 else if (reg > 128U - n)
3947 {
3948 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3949 reg = 0;
3950 }
3951 add_unwind_entry (output_br_gr (brmask, reg), 0);
3952 }
3953 else
3954 add_unwind_entry (output_br_mem (brmask), 0);
3955 }
3956
3957 static void
3958 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3959 {
3960 expressionS e1, e2;
3961
3962 if (!in_prologue ("save.gf"))
3963 return;
3964
3965 if (parse_operand_and_eval (&e1, ',') == ',')
3966 parse_operand_and_eval (&e2, 0);
3967 else
3968 e2.X_op = O_absent;
3969
3970 if (e1.X_op != O_constant
3971 || e1.X_add_number < 0
3972 || e1.X_add_number > 0xf)
3973 {
3974 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3975 e1.X_op = O_absent;
3976 e1.X_add_number = 0;
3977 }
3978 if (e2.X_op != O_constant
3979 || e2.X_add_number < 0
3980 || e2.X_add_number > 0xfffff)
3981 {
3982 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3983 e2.X_op = O_absent;
3984 e2.X_add_number = 0;
3985 }
3986 if (e1.X_op == O_constant
3987 && e2.X_op == O_constant
3988 && e1.X_add_number == 0
3989 && e2.X_add_number == 0)
3990 as_bad (_("Operands to .save.gf may not be both zero"));
3991
3992 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3993 }
3994
3995 static void
3996 dot_spill (int dummy ATTRIBUTE_UNUSED)
3997 {
3998 expressionS e;
3999
4000 if (!in_prologue ("spill"))
4001 return;
4002
4003 parse_operand_and_eval (&e, 0);
4004
4005 if (e.X_op != O_constant)
4006 {
4007 as_bad (_("Operand to .spill must be a constant"));
4008 e.X_add_number = 0;
4009 }
4010 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4011 }
4012
4013 static void
4014 dot_spillreg (int pred)
4015 {
4016 int sep;
4017 unsigned int qp, ab, xy, reg, treg;
4018 expressionS e;
4019 const char * const po = pred ? "spillreg.p" : "spillreg";
4020
4021 if (!in_procedure (po))
4022 return;
4023
4024 if (pred)
4025 sep = parse_predicate_and_operand (&e, &qp, po);
4026 else
4027 {
4028 sep = parse_operand_and_eval (&e, ',');
4029 qp = 0;
4030 }
4031 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4032
4033 if (sep == ',')
4034 sep = parse_operand_and_eval (&e, ',');
4035 else
4036 e.X_op = O_absent;
4037 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4038
4039 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4040 }
4041
4042 static void
4043 dot_spillmem (int psprel)
4044 {
4045 expressionS e;
4046 int pred = (psprel < 0), sep;
4047 unsigned int qp, ab, reg;
4048 const char * po;
4049
4050 if (pred)
4051 {
4052 psprel = ~psprel;
4053 po = psprel ? "spillpsp.p" : "spillsp.p";
4054 }
4055 else
4056 po = psprel ? "spillpsp" : "spillsp";
4057
4058 if (!in_procedure (po))
4059 return;
4060
4061 if (pred)
4062 sep = parse_predicate_and_operand (&e, &qp, po);
4063 else
4064 {
4065 sep = parse_operand_and_eval (&e, ',');
4066 qp = 0;
4067 }
4068 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4069
4070 if (sep == ',')
4071 sep = parse_operand_and_eval (&e, ',');
4072 else
4073 e.X_op = O_absent;
4074 if (e.X_op != O_constant)
4075 {
4076 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4077 e.X_add_number = 0;
4078 }
4079
4080 if (psprel)
4081 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4082 else
4083 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4084 }
4085
4086 static unsigned int
4087 get_saved_prologue_count (unsigned long lbl)
4088 {
4089 label_prologue_count *lpc = unwind.saved_prologue_counts;
4090
4091 while (lpc != NULL && lpc->label_number != lbl)
4092 lpc = lpc->next;
4093
4094 if (lpc != NULL)
4095 return lpc->prologue_count;
4096
4097 as_bad (_("Missing .label_state %ld"), lbl);
4098 return 1;
4099 }
4100
4101 static void
4102 save_prologue_count (unsigned long lbl, unsigned int count)
4103 {
4104 label_prologue_count *lpc = unwind.saved_prologue_counts;
4105
4106 while (lpc != NULL && lpc->label_number != lbl)
4107 lpc = lpc->next;
4108
4109 if (lpc != NULL)
4110 lpc->prologue_count = count;
4111 else
4112 {
4113 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4114
4115 new_lpc->next = unwind.saved_prologue_counts;
4116 new_lpc->label_number = lbl;
4117 new_lpc->prologue_count = count;
4118 unwind.saved_prologue_counts = new_lpc;
4119 }
4120 }
4121
4122 static void
4123 free_saved_prologue_counts (void)
4124 {
4125 label_prologue_count *lpc = unwind.saved_prologue_counts;
4126 label_prologue_count *next;
4127
4128 while (lpc != NULL)
4129 {
4130 next = lpc->next;
4131 free (lpc);
4132 lpc = next;
4133 }
4134
4135 unwind.saved_prologue_counts = NULL;
4136 }
4137
4138 static void
4139 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4140 {
4141 expressionS e;
4142
4143 if (!in_body ("label_state"))
4144 return;
4145
4146 parse_operand_and_eval (&e, 0);
4147 if (e.X_op == O_constant)
4148 save_prologue_count (e.X_add_number, unwind.prologue_count);
4149 else
4150 {
4151 as_bad (_("Operand to .label_state must be a constant"));
4152 e.X_add_number = 0;
4153 }
4154 add_unwind_entry (output_label_state (e.X_add_number), 0);
4155 }
4156
4157 static void
4158 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4159 {
4160 expressionS e;
4161
4162 if (!in_body ("copy_state"))
4163 return;
4164
4165 parse_operand_and_eval (&e, 0);
4166 if (e.X_op == O_constant)
4167 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4168 else
4169 {
4170 as_bad (_("Operand to .copy_state must be a constant"));
4171 e.X_add_number = 0;
4172 }
4173 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4174 }
4175
4176 static void
4177 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4178 {
4179 expressionS e1, e2;
4180 unsigned char sep;
4181
4182 if (!in_prologue ("unwabi"))
4183 return;
4184
4185 sep = parse_operand_and_eval (&e1, ',');
4186 if (sep == ',')
4187 parse_operand_and_eval (&e2, 0);
4188 else
4189 e2.X_op = O_absent;
4190
4191 if (e1.X_op != O_constant)
4192 {
4193 as_bad (_("First operand to .unwabi must be a constant"));
4194 e1.X_add_number = 0;
4195 }
4196
4197 if (e2.X_op != O_constant)
4198 {
4199 as_bad (_("Second operand to .unwabi must be a constant"));
4200 e2.X_add_number = 0;
4201 }
4202
4203 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4204 }
4205
4206 static void
4207 dot_personality (int dummy ATTRIBUTE_UNUSED)
4208 {
4209 char *name, *p, c;
4210
4211 if (!in_procedure ("personality"))
4212 return;
4213 SKIP_WHITESPACE ();
4214 c = get_symbol_name (&name);
4215 p = input_line_pointer;
4216 unwind.personality_routine = symbol_find_or_make (name);
4217 unwind.force_unwind_entry = 1;
4218 *p = c;
4219 SKIP_WHITESPACE_AFTER_NAME ();
4220 demand_empty_rest_of_line ();
4221 }
4222
4223 static void
4224 dot_proc (int dummy ATTRIBUTE_UNUSED)
4225 {
4226 char *name, *p, c;
4227 symbolS *sym;
4228 proc_pending *pending, *last_pending;
4229
4230 if (unwind.proc_pending.sym)
4231 {
4232 (md.unwind_check == unwind_check_warning
4233 ? as_warn
4234 : as_bad) (_("Missing .endp after previous .proc"));
4235 while (unwind.proc_pending.next)
4236 {
4237 pending = unwind.proc_pending.next;
4238 unwind.proc_pending.next = pending->next;
4239 free (pending);
4240 }
4241 }
4242 last_pending = NULL;
4243
4244 /* Parse names of main and alternate entry points and mark them as
4245 function symbols: */
4246 while (1)
4247 {
4248 SKIP_WHITESPACE ();
4249 c = get_symbol_name (&name);
4250 p = input_line_pointer;
4251 if (!*name)
4252 as_bad (_("Empty argument of .proc"));
4253 else
4254 {
4255 sym = symbol_find_or_make (name);
4256 if (S_IS_DEFINED (sym))
4257 as_bad (_("`%s' was already defined"), name);
4258 else if (!last_pending)
4259 {
4260 unwind.proc_pending.sym = sym;
4261 last_pending = &unwind.proc_pending;
4262 }
4263 else
4264 {
4265 pending = XNEW (proc_pending);
4266 pending->sym = sym;
4267 last_pending = last_pending->next = pending;
4268 }
4269 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4270 }
4271 *p = c;
4272 SKIP_WHITESPACE_AFTER_NAME ();
4273 if (*input_line_pointer != ',')
4274 break;
4275 ++input_line_pointer;
4276 }
4277 if (!last_pending)
4278 {
4279 unwind.proc_pending.sym = expr_build_dot ();
4280 last_pending = &unwind.proc_pending;
4281 }
4282 last_pending->next = NULL;
4283 demand_empty_rest_of_line ();
4284 do_align (4, NULL, 0, 0);
4285
4286 unwind.prologue = 0;
4287 unwind.prologue_count = 0;
4288 unwind.body = 0;
4289 unwind.insn = 0;
4290 unwind.list = unwind.tail = unwind.current_entry = NULL;
4291 unwind.personality_routine = 0;
4292 }
4293
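/* .body
   Mark the start of the procedure body, i.e. the end of the current
   prologue region, and emit a body unwind record. */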
4294 static void
4295 dot_body (int dummy ATTRIBUTE_UNUSED)
4296 {
4297 if (!in_procedure ("body"))
4298 return;
4299 if (!unwind.prologue && !unwind.body && unwind.insn)
4300 as_warn (_("Initial .body should precede any instructions"));
4301 check_pending_save ();
4302
4303 unwind.prologue = 0;
4304 unwind.prologue_mask = 0;
4305 unwind.body = 1;
4306
4307 add_unwind_entry (output_body (), 0);
4308 }
4309
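/* .prologue [<mask>, <grsave>]
   Start a new prologue region.  <mask> is a 4-bit constant indicating which
   of the special registers the prologue saves, and <grsave> is the first of
   the consecutive general registers that receive them. */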
4310 static void
4311 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4312 {
4313 unsigned mask = 0, grsave = 0;
4314
4315 if (!in_procedure ("prologue"))
4316 return;
4317 if (unwind.prologue)
4318 {
4319 as_bad (_(".prologue within prologue"));
4320 ignore_rest_of_line ();
4321 return;
4322 }
4323 if (!unwind.body && unwind.insn)
4324 as_warn (_("Initial .prologue should precede any instructions"));
4325
4326 if (!is_it_end_of_statement ())
4327 {
4328 expressionS e;
4329 int n, sep = parse_operand_and_eval (&e, ',');
4330
4331 if (e.X_op != O_constant
4332 || e.X_add_number < 0
4333 || e.X_add_number > 0xf)
4334 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4335 else if (e.X_add_number == 0)
4336 as_warn (_("Pointless use of zero first operand to .prologue"));
4337 else
4338 mask = e.X_add_number;
4339
4340 n = popcount (mask);
4341
4342 if (sep == ',')
4343 parse_operand_and_eval (&e, 0);
4344 else
4345 e.X_op = O_absent;
4346
4347 if (e.X_op == O_constant
4348 && e.X_add_number >= 0
4349 && e.X_add_number < 128)
4350 {
4351 if (md.unwind_check == unwind_check_error)
4352 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4353 grsave = e.X_add_number;
4354 }
4355 else if (e.X_op != O_register
4356 || (grsave = e.X_add_number - REG_GR) > 127)
4357 {
4358 as_bad (_("Second operand to .prologue must be a general register"));
4359 grsave = 0;
4360 }
4361 else if (grsave > 128U - n)
4362 {
4363 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4364 grsave = 0;
4365 }
4366 }
4367
4368 if (mask)
4369 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4370 else
4371 add_unwind_entry (output_prologue (), 0);
4372
4373 unwind.prologue = 1;
4374 unwind.prologue_mask = mask;
4375 unwind.prologue_gr = grsave;
4376 unwind.body = 0;
4377 ++unwind.prologue_count;
4378 }
4379
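/* .endp [<name> [, <name> ...]]
   End the current procedure: generate the unwind image if it has not been
   generated yet, emit the unwind table entry (procedure start, procedure
   end, unwind info pointer), set the sizes of the entry point symbols, and
   check that the listed names match the preceding .proc. */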
4380 static void
4381 dot_endp (int dummy ATTRIBUTE_UNUSED)
4382 {
4383 expressionS e;
4384 int bytes_per_address;
4385 long where;
4386 segT saved_seg;
4387 subsegT saved_subseg;
4388 proc_pending *pending;
4389 int unwind_check = md.unwind_check;
4390
4391 md.unwind_check = unwind_check_error;
4392 if (!in_procedure ("endp"))
4393 return;
4394 md.unwind_check = unwind_check;
4395
4396 if (unwind.saved_text_seg)
4397 {
4398 saved_seg = unwind.saved_text_seg;
4399 saved_subseg = unwind.saved_text_subseg;
4400 unwind.saved_text_seg = NULL;
4401 }
4402 else
4403 {
4404 saved_seg = now_seg;
4405 saved_subseg = now_subseg;
4406 }
4407
4408 insn_group_break (1, 0, 0);
4409
4410 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4411 if (!unwind.info)
4412 generate_unwind_image (saved_seg);
4413
4414 if (unwind.info || unwind.force_unwind_entry)
4415 {
4416 symbolS *proc_end;
4417
4418 subseg_set (md.last_text_seg, 0);
4419 proc_end = expr_build_dot ();
4420
4421 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4422
4423 /* Make sure the section has 4-byte alignment for ILP32 and
4424 8-byte alignment for LP64. */
4425 record_alignment (now_seg, md.pointer_size_shift);
4426
4427 /* Need space for 3 pointers for procedure start, procedure end,
4428 and unwind info. */
4429 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4430 where = frag_now_fix () - (3 * md.pointer_size);
4431 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4432
4433 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4434 e.X_op = O_pseudo_fixup;
4435 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4436 e.X_add_number = 0;
4437 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4438 && S_IS_DEFINED (unwind.proc_pending.sym))
4439 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4440 S_GET_VALUE (unwind.proc_pending.sym),
4441 symbol_get_frag (unwind.proc_pending.sym));
4442 else
4443 e.X_add_symbol = unwind.proc_pending.sym;
4444 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4445 BFD_RELOC_NONE);
4446
4447 e.X_op = O_pseudo_fixup;
4448 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4449 e.X_add_number = 0;
4450 e.X_add_symbol = proc_end;
4451 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4452 bytes_per_address, &e, BFD_RELOC_NONE);
4453
4454 if (unwind.info)
4455 {
4456 e.X_op = O_pseudo_fixup;
4457 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4458 e.X_add_number = 0;
4459 e.X_add_symbol = unwind.info;
4460 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4461 bytes_per_address, &e, BFD_RELOC_NONE);
4462 }
4463 }
4464 subseg_set (saved_seg, saved_subseg);
4465
4466 /* Set symbol sizes. */
4467 pending = &unwind.proc_pending;
4468 if (S_GET_NAME (pending->sym))
4469 {
4470 do
4471 {
4472 symbolS *sym = pending->sym;
4473
4474 if (!S_IS_DEFINED (sym))
4475 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4476 else if (S_GET_SIZE (sym) == 0
4477 && symbol_get_obj (sym)->size == NULL)
4478 {
4479 fragS *frag = symbol_get_frag (sym);
4480
4481 if (frag)
4482 {
4483 if (frag == frag_now && SEG_NORMAL (now_seg))
4484 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4485 else
4486 {
4487 symbol_get_obj (sym)->size = XNEW (expressionS);
4488 symbol_get_obj (sym)->size->X_op = O_subtract;
4489 symbol_get_obj (sym)->size->X_add_symbol
4490 = symbol_new (FAKE_LABEL_NAME, now_seg,
4491 frag_now_fix (), frag_now);
4492 symbol_get_obj (sym)->size->X_op_symbol = sym;
4493 symbol_get_obj (sym)->size->X_add_number = 0;
4494 }
4495 }
4496 }
4497 } while ((pending = pending->next) != NULL);
4498 }
4499
4500 /* Parse names of main and alternate entry points. */
4501 while (1)
4502 {
4503 char *name, *p, c;
4504
4505 SKIP_WHITESPACE ();
4506 c = get_symbol_name (&name);
4507 p = input_line_pointer;
4508 if (!*name)
4509 (md.unwind_check == unwind_check_warning
4510 ? as_warn
4511 : as_bad) (_("Empty argument of .endp"));
4512 else
4513 {
4514 symbolS *sym = symbol_find (name);
4515
4516 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4517 {
4518 if (sym == pending->sym)
4519 {
4520 pending->sym = NULL;
4521 break;
4522 }
4523 }
4524 if (!sym || !pending)
4525 as_warn (_("`%s' was not specified with previous .proc"), name);
4526 }
4527 *p = c;
4528 SKIP_WHITESPACE_AFTER_NAME ();
4529 if (*input_line_pointer != ',')
4530 break;
4531 ++input_line_pointer;
4532 }
4533 demand_empty_rest_of_line ();
4534
4535 /* Deliberately only checking for the main entry point here; the
4536 language spec even says all arguments to .endp are ignored. */
4537 if (unwind.proc_pending.sym
4538 && S_GET_NAME (unwind.proc_pending.sym)
4539 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4540 as_warn (_("`%s' should be an operand to this .endp"),
4541 S_GET_NAME (unwind.proc_pending.sym));
4542 while (unwind.proc_pending.next)
4543 {
4544 pending = unwind.proc_pending.next;
4545 unwind.proc_pending.next = pending->next;
4546 free (pending);
4547 }
4548 unwind.proc_pending.sym = unwind.info = NULL;
4549 }
4550
4551 static void
4552 dot_template (int template_val)
4553 {
4554 CURR_SLOT.user_template = template_val;
4555 }
4556
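/* .regstk <ins>, <locals>, <outs>, <rotating>
   Declare the layout of the current register stack frame: the number of
   input, local, output and rotating registers.  With no operands all four
   counts are reset to zero. */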
4557 static void
4558 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4559 {
4560 int ins, locs, outs, rots;
4561
4562 if (is_it_end_of_statement ())
4563 ins = locs = outs = rots = 0;
4564 else
4565 {
4566 ins = get_absolute_expression ();
4567 if (*input_line_pointer++ != ',')
4568 goto err;
4569 locs = get_absolute_expression ();
4570 if (*input_line_pointer++ != ',')
4571 goto err;
4572 outs = get_absolute_expression ();
4573 if (*input_line_pointer++ != ',')
4574 goto err;
4575 rots = get_absolute_expression ();
4576 }
4577 set_regstack (ins, locs, outs, rots);
4578 return;
4579
4580 err:
4581 as_bad (_("Comma expected"));
4582 ignore_rest_of_line ();
4583 }
4584
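/* .rotr <name>[<n>] [, ...]   .rotf <name>[<n>] [, ...]   .rotp <name>[<n>] [, ...]
   Define named groups of rotating general, floating-point or predicate
   registers.  Each name covers <n> consecutive registers, allocated upward
   from the first rotating register of the corresponding file (r32, f32 or
   p16); previously defined names for that file are discarded first, and the
   new names can then be referred to as <name>[<index>]. */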
4585 static void
4586 dot_rot (int type)
4587 {
4588 offsetT num_regs;
4589 valueT num_alloced = 0;
4590 struct dynreg **drpp, *dr;
4591 int ch, base_reg = 0;
4592 char *name, *start;
4593 size_t len;
4594
4595 switch (type)
4596 {
4597 case DYNREG_GR: base_reg = REG_GR + 32; break;
4598 case DYNREG_FR: base_reg = REG_FR + 32; break;
4599 case DYNREG_PR: base_reg = REG_P + 16; break;
4600 default: break;
4601 }
4602
4603 /* First, remove existing names from hash table. */
4604 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4605 {
4606 hash_delete (md.dynreg_hash, dr->name, FALSE);
4607 /* FIXME: Free dr->name. */
4608 dr->num_regs = 0;
4609 }
4610
4611 drpp = &md.dynreg[type];
4612 while (1)
4613 {
4614 ch = get_symbol_name (&start);
4615 len = strlen (ia64_canonicalize_symbol_name (start));
4616 *input_line_pointer = ch;
4617
4618 SKIP_WHITESPACE_AFTER_NAME ();
4619 if (*input_line_pointer != '[')
4620 {
4621 as_bad (_("Expected '['"));
4622 goto err;
4623 }
4624 ++input_line_pointer; /* skip '[' */
4625
4626 num_regs = get_absolute_expression ();
4627
4628 if (*input_line_pointer++ != ']')
4629 {
4630 as_bad (_("Expected ']'"));
4631 goto err;
4632 }
4633 if (num_regs <= 0)
4634 {
4635 as_bad (_("Number of elements must be positive"));
4636 goto err;
4637 }
4638 SKIP_WHITESPACE ();
4639
4640 num_alloced += num_regs;
4641 switch (type)
4642 {
4643 case DYNREG_GR:
4644 if (num_alloced > md.rot.num_regs)
4645 {
4646 as_bad (_("Used more than the declared %d rotating registers"),
4647 md.rot.num_regs);
4648 goto err;
4649 }
4650 break;
4651 case DYNREG_FR:
4652 if (num_alloced > 96)
4653 {
4654 as_bad (_("Used more than the available 96 rotating registers"));
4655 goto err;
4656 }
4657 break;
4658 case DYNREG_PR:
4659 if (num_alloced > 48)
4660 {
4661 as_bad (_("Used more than the available 48 rotating registers"));
4662 goto err;
4663 }
4664 break;
4665
4666 default:
4667 break;
4668 }
4669
4670 if (!*drpp)
4671 {
4672 *drpp = XOBNEW (&notes, struct dynreg);
4673 memset (*drpp, 0, sizeof (*dr));
4674 }
4675
4676 name = XOBNEWVEC (&notes, char, len + 1);
4677 memcpy (name, start, len);
4678 name[len] = '\0';
4679
4680 dr = *drpp;
4681 dr->name = name;
4682 dr->num_regs = num_regs;
4683 dr->base = base_reg;
4684 drpp = &dr->next;
4685 base_reg += num_regs;
4686
4687 if (hash_insert (md.dynreg_hash, name, dr))
4688 {
4689 as_bad (_("Attempt to redefine register set `%s'"), name);
4690 obstack_free (&notes, name);
4691 goto err;
4692 }
4693
4694 if (*input_line_pointer != ',')
4695 break;
4696 ++input_line_pointer; /* skip comma */
4697 SKIP_WHITESPACE ();
4698 }
4699 demand_empty_rest_of_line ();
4700 return;
4701
4702 err:
4703 ignore_rest_of_line ();
4704 }
4705
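/* .lsb / .msb
   Select little-endian (0) or big-endian (1) data emission for the current
   section; a BYTEORDER argument of -1 means re-apply the byte order already
   recorded for the section, establishing it from the global default first
   if none was recorded.  The number and float output routines are switched
   to match. */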
4706 static void
4707 dot_byteorder (int byteorder)
4708 {
4709 segment_info_type *seginfo = seg_info (now_seg);
4710
4711 if (byteorder == -1)
4712 {
4713 if (seginfo->tc_segment_info_data.endian == 0)
4714 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4715 byteorder = seginfo->tc_segment_info_data.endian == 1;
4716 }
4717 else
4718 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4719
4720 if (target_big_endian != byteorder)
4721 {
4722 target_big_endian = byteorder;
4723 if (target_big_endian)
4724 {
4725 ia64_number_to_chars = number_to_chars_bigendian;
4726 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4727 }
4728 else
4729 {
4730 ia64_number_to_chars = number_to_chars_littleendian;
4731 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4732 }
4733 }
4734 }
4735
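/* .psr <option> [, <option> ...]
   where <option> is one of lsb, msb, abi32 or abi64; sets or clears the
   corresponding EF_IA_64_BE / EF_IA_64_ABI64 ELF header flags. */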
4736 static void
4737 dot_psr (int dummy ATTRIBUTE_UNUSED)
4738 {
4739 char *option;
4740 int ch;
4741
4742 while (1)
4743 {
4744 ch = get_symbol_name (&option);
4745 if (strcmp (option, "lsb") == 0)
4746 md.flags &= ~EF_IA_64_BE;
4747 else if (strcmp (option, "msb") == 0)
4748 md.flags |= EF_IA_64_BE;
4749 else if (strcmp (option, "abi32") == 0)
4750 md.flags &= ~EF_IA_64_ABI64;
4751 else if (strcmp (option, "abi64") == 0)
4752 md.flags |= EF_IA_64_ABI64;
4753 else
4754 as_bad (_("Unknown psr option `%s'"), option);
4755 *input_line_pointer = ch;
4756
4757 SKIP_WHITESPACE_AFTER_NAME ();
4758 if (*input_line_pointer != ',')
4759 break;
4760
4761 ++input_line_pointer;
4762 SKIP_WHITESPACE ();
4763 }
4764 demand_empty_rest_of_line ();
4765 }
4766
4767 static void
4768 dot_ln (int dummy ATTRIBUTE_UNUSED)
4769 {
4770 new_logical_line (0, get_absolute_expression ());
4771 demand_empty_rest_of_line ();
4772 }
4773
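/* Common helper for the .xdata/.xreal/.xstring family: switch to the
   section named by the first operand, emit the remaining operands via
   BUILDER (with automatic alignment suppressed if UA is nonzero), then
   return to the previous section. */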
4774 static void
4775 cross_section (int ref, void (*builder) (int), int ua)
4776 {
4777 char *start, *end;
4778 int saved_auto_align;
4779 unsigned int section_count;
4780 char *name;
4781 char c;
4782
4783 SKIP_WHITESPACE ();
4784 start = input_line_pointer;
4785 c = get_symbol_name (&name);
4786 if (input_line_pointer == start)
4787 {
4788 as_bad (_("Missing section name"));
4789 ignore_rest_of_line ();
4790 return;
4791 }
4792 * input_line_pointer = c;
4793 SKIP_WHITESPACE_AFTER_NAME ();
4794 end = input_line_pointer;
4795 if (*input_line_pointer != ',')
4796 {
4797 as_bad (_("Comma expected after section name"));
4798 ignore_rest_of_line ();
4799 return;
4800 }
4801 *end = '\0';
4802 end = input_line_pointer + 1; /* skip comma */
4803 input_line_pointer = start;
4804 md.keep_pending_output = 1;
4805 section_count = bfd_count_sections (stdoutput);
4806 obj_elf_section (0);
4807 if (section_count != bfd_count_sections (stdoutput))
4808 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4809 input_line_pointer = end;
4810 saved_auto_align = md.auto_align;
4811 if (ua)
4812 md.auto_align = 0;
4813 (*builder) (ref);
4814 if (ua)
4815 md.auto_align = saved_auto_align;
4816 obj_elf_previous (0);
4817 md.keep_pending_output = 0;
4818 }
4819
4820 static void
4821 dot_xdata (int size)
4822 {
4823 cross_section (size, cons, 0);
4824 }
4825
4826 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4827
4828 static void
4829 stmt_float_cons (int kind)
4830 {
4831 size_t alignment;
4832
4833 switch (kind)
4834 {
4835 case 'd':
4836 alignment = 3;
4837 break;
4838
4839 case 'x':
4840 case 'X':
4841 alignment = 4;
4842 break;
4843
4844 case 'f':
4845 default:
4846 alignment = 2;
4847 break;
4848 }
4849 do_align (alignment, NULL, 0, 0);
4850 float_cons (kind);
4851 }
4852
4853 static void
4854 stmt_cons_ua (int size)
4855 {
4856 int saved_auto_align = md.auto_align;
4857
4858 md.auto_align = 0;
4859 cons (size);
4860 md.auto_align = saved_auto_align;
4861 }
4862
4863 static void
4864 dot_xfloat_cons (int kind)
4865 {
4866 cross_section (kind, stmt_float_cons, 0);
4867 }
4868
4869 static void
4870 dot_xstringer (int zero)
4871 {
4872 cross_section (zero, stringer, 0);
4873 }
4874
4875 static void
4876 dot_xdata_ua (int size)
4877 {
4878 cross_section (size, cons, 1);
4879 }
4880
4881 static void
4882 dot_xfloat_cons_ua (int kind)
4883 {
4884 cross_section (kind, float_cons, 1);
4885 }
4886
4887 /* .reg.val <regname>,value */
4888
4889 static void
4890 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4891 {
4892 expressionS reg;
4893
4894 expression_and_evaluate (&reg);
4895 if (reg.X_op != O_register)
4896 {
4897 as_bad (_("Register name expected"));
4898 ignore_rest_of_line ();
4899 }
4900 else if (*input_line_pointer++ != ',')
4901 {
4902 as_bad (_("Comma expected"));
4903 ignore_rest_of_line ();
4904 }
4905 else
4906 {
4907 valueT value = get_absolute_expression ();
4908 int regno = reg.X_add_number;
4909 if (regno <= REG_GR || regno > REG_GR + 127)
4910 as_warn (_("Register value annotation ignored"));
4911 else
4912 {
4913 gr_values[regno - REG_GR].known = 1;
4914 gr_values[regno - REG_GR].value = value;
4915 gr_values[regno - REG_GR].path = md.path;
4916 }
4917 }
4918 demand_empty_rest_of_line ();
4919 }
4920
4921 /*
4922 .serialize.data
4923 .serialize.instruction
4924 */
4925 static void
4926 dot_serialize (int type)
4927 {
4928 insn_group_break (0, 0, 0);
4929 if (type)
4930 instruction_serialization ();
4931 else
4932 data_serialization ();
4933 insn_group_break (0, 0, 0);
4934 demand_empty_rest_of_line ();
4935 }
4936
4937 /* select dv checking mode
4938 .auto
4939 .explicit
4940 .default
4941
4942 A stop is inserted when changing modes
4943 */
4944
4945 static void
4946 dot_dv_mode (int type)
4947 {
4948 if (md.manual_bundling)
4949 as_warn (_("Directive invalid within a bundle"));
4950
4951 if (type == 'E' || type == 'A')
4952 md.mode_explicitly_set = 0;
4953 else
4954 md.mode_explicitly_set = 1;
4955
4956 md.detect_dv = 1;
4957 switch (type)
4958 {
4959 case 'A':
4960 case 'a':
4961 if (md.explicit_mode)
4962 insn_group_break (1, 0, 0);
4963 md.explicit_mode = 0;
4964 break;
4965 case 'E':
4966 case 'e':
4967 if (!md.explicit_mode)
4968 insn_group_break (1, 0, 0);
4969 md.explicit_mode = 1;
4970 break;
4971 default:
4972 case 'd':
4973 if (md.explicit_mode != md.default_explicit_mode)
4974 insn_group_break (1, 0, 0);
4975 md.explicit_mode = md.default_explicit_mode;
4976 md.mode_explicitly_set = 0;
4977 break;
4978 }
4979 }
4980
4981 static void
4982 print_prmask (valueT mask)
4983 {
4984 int regno;
4985 const char *comma = "";
4986 for (regno = 0; regno < 64; regno++)
4987 {
4988 if (mask & ((valueT) 1 << regno))
4989 {
4990 fprintf (stderr, "%s p%d", comma, regno);
4991 comma = ",";
4992 }
4993 }
4994 }
4995
4996 /*
4997 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4998 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4999 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
5000 .pred.safe_across_calls p1 [, p2 [,...]]
5001 */
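/* In all of the above forms except .pred.rel.imply, a range of predicate
   registers may also be written as p<low>-p<high>, as in the common idiom
   .pred.safe_across_calls p1-p5, p16-p63. */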
5002
5003 static void
5004 dot_pred_rel (int type)
5005 {
5006 valueT mask = 0;
5007 int count = 0;
5008 int p1 = -1, p2 = -1;
5009
5010 if (type == 0)
5011 {
5012 if (*input_line_pointer == '"')
5013 {
5014 int len;
5015 char *form = demand_copy_C_string (&len);
5016
5017 if (strcmp (form, "mutex") == 0)
5018 type = 'm';
5019 else if (strcmp (form, "clear") == 0)
5020 type = 'c';
5021 else if (strcmp (form, "imply") == 0)
5022 type = 'i';
5023 obstack_free (&notes, form);
5024 }
5025 else if (*input_line_pointer == '@')
5026 {
5027 char *form;
5028 char c;
5029
5030 ++input_line_pointer;
5031 c = get_symbol_name (&form);
5032
5033 if (strcmp (form, "mutex") == 0)
5034 type = 'm';
5035 else if (strcmp (form, "clear") == 0)
5036 type = 'c';
5037 else if (strcmp (form, "imply") == 0)
5038 type = 'i';
5039 (void) restore_line_pointer (c);
5040 }
5041 else
5042 {
5043 as_bad (_("Missing predicate relation type"));
5044 ignore_rest_of_line ();
5045 return;
5046 }
5047 if (type == 0)
5048 {
5049 as_bad (_("Unrecognized predicate relation type"));
5050 ignore_rest_of_line ();
5051 return;
5052 }
5053 if (*input_line_pointer == ',')
5054 ++input_line_pointer;
5055 SKIP_WHITESPACE ();
5056 }
5057
5058 while (1)
5059 {
5060 valueT bits = 1;
5061 int sep, regno;
5062 expressionS pr, *pr1, *pr2;
5063
5064 sep = parse_operand_and_eval (&pr, ',');
5065 if (pr.X_op == O_register
5066 && pr.X_add_number >= REG_P
5067 && pr.X_add_number <= REG_P + 63)
5068 {
5069 regno = pr.X_add_number - REG_P;
5070 bits <<= regno;
5071 count++;
5072 if (p1 == -1)
5073 p1 = regno;
5074 else if (p2 == -1)
5075 p2 = regno;
5076 }
5077 else if (type != 'i'
5078 && pr.X_op == O_subtract
5079 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5080 && pr1->X_op == O_register
5081 && pr1->X_add_number >= REG_P
5082 && pr1->X_add_number <= REG_P + 63
5083 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5084 && pr2->X_op == O_register
5085 && pr2->X_add_number >= REG_P
5086 && pr2->X_add_number <= REG_P + 63)
5087 {
5088 /* It's a range. */
5089 int stop;
5090
5091 regno = pr1->X_add_number - REG_P;
5092 stop = pr2->X_add_number - REG_P;
5093 if (regno >= stop)
5094 {
5095 as_bad (_("Bad register range"));
5096 ignore_rest_of_line ();
5097 return;
5098 }
5099 bits = ((bits << stop) << 1) - (bits << regno);
5100 count += stop - regno + 1;
5101 }
5102 else
5103 {
5104 as_bad (_("Predicate register expected"));
5105 ignore_rest_of_line ();
5106 return;
5107 }
5108 if (mask & bits)
5109 as_warn (_("Duplicate predicate register ignored"));
5110 mask |= bits;
5111 if (sep != ',')
5112 break;
5113 }
5114
5115 switch (type)
5116 {
5117 case 'c':
5118 if (count == 0)
5119 mask = ~(valueT) 0;
5120 clear_qp_mutex (mask);
5121 clear_qp_implies (mask, (valueT) 0);
5122 break;
5123 case 'i':
5124 if (count != 2 || p1 == -1 || p2 == -1)
5125 as_bad (_("Predicate source and target required"));
5126 else if (p1 == 0 || p2 == 0)
5127 as_bad (_("Use of p0 is not valid in this context"));
5128 else
5129 add_qp_imply (p1, p2);
5130 break;
5131 case 'm':
5132 if (count < 2)
5133 {
5134 as_bad (_("At least two PR arguments expected"));
5135 break;
5136 }
5137 else if (mask & 1)
5138 {
5139 as_bad (_("Use of p0 is not valid in this context"));
5140 break;
5141 }
5142 add_qp_mutex (mask);
5143 break;
5144 case 's':
5145 /* Note that we don't override any existing relations. */
5146 if (count == 0)
5147 {
5148 as_bad (_("At least one PR argument expected"));
5149 break;
5150 }
5151 if (md.debug_dv)
5152 {
5153 fprintf (stderr, "Safe across calls: ");
5154 print_prmask (mask);
5155 fprintf (stderr, "\n");
5156 }
5157 qp_safe_across_calls = mask;
5158 break;
5159 }
5160 demand_empty_rest_of_line ();
5161 }
5162
5163 /* .entry label [, label [, ...]]
5164 Hint to DV code that the given labels are to be considered entry points.
5165 Otherwise, only global labels are considered entry points. */
5166
5167 static void
5168 dot_entry (int dummy ATTRIBUTE_UNUSED)
5169 {
5170 const char *err;
5171 char *name;
5172 int c;
5173 symbolS *symbolP;
5174
5175 do
5176 {
5177 c = get_symbol_name (&name);
5178 symbolP = symbol_find_or_make (name);
5179
5180 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5181 if (err)
5182 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5183 name, err);
5184
5185 *input_line_pointer = c;
5186 SKIP_WHITESPACE_AFTER_NAME ();
5187 c = *input_line_pointer;
5188 if (c == ',')
5189 {
5190 input_line_pointer++;
5191 SKIP_WHITESPACE ();
5192 if (*input_line_pointer == '\n')
5193 c = '\n';
5194 }
5195 }
5196 while (c == ',');
5197
5198 demand_empty_rest_of_line ();
5199 }
5200
5201 /* .mem.offset offset, base
5202 "base" is used to distinguish between offsets from a different base. */
5203
5204 static void
5205 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5206 {
5207 md.mem_offset.hint = 1;
5208 md.mem_offset.offset = get_absolute_expression ();
5209 if (*input_line_pointer != ',')
5210 {
5211 as_bad (_("Comma expected"));
5212 ignore_rest_of_line ();
5213 return;
5214 }
5215 ++input_line_pointer;
5216 md.mem_offset.base = get_absolute_expression ();
5217 demand_empty_rest_of_line ();
5218 }
5219
5220 /* ia64-specific pseudo-ops: */
5221 const pseudo_typeS md_pseudo_table[] =
5222 {
5223 { "radix", dot_radix, 0 },
5224 { "lcomm", s_lcomm_bytes, 1 },
5225 { "loc", dot_loc, 0 },
5226 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5227 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5228 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5229 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5230 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5231 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5232 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5233 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5234 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5235 { "proc", dot_proc, 0 },
5236 { "body", dot_body, 0 },
5237 { "prologue", dot_prologue, 0 },
5238 { "endp", dot_endp, 0 },
5239
5240 { "fframe", dot_fframe, 0 },
5241 { "vframe", dot_vframe, 0 },
5242 { "vframesp", dot_vframesp, 0 },
5243 { "vframepsp", dot_vframesp, 1 },
5244 { "save", dot_save, 0 },
5245 { "restore", dot_restore, 0 },
5246 { "restorereg", dot_restorereg, 0 },
5247 { "restorereg.p", dot_restorereg, 1 },
5248 { "handlerdata", dot_handlerdata, 0 },
5249 { "unwentry", dot_unwentry, 0 },
5250 { "altrp", dot_altrp, 0 },
5251 { "savesp", dot_savemem, 0 },
5252 { "savepsp", dot_savemem, 1 },
5253 { "save.g", dot_saveg, 0 },
5254 { "save.f", dot_savef, 0 },
5255 { "save.b", dot_saveb, 0 },
5256 { "save.gf", dot_savegf, 0 },
5257 { "spill", dot_spill, 0 },
5258 { "spillreg", dot_spillreg, 0 },
5259 { "spillsp", dot_spillmem, 0 },
5260 { "spillpsp", dot_spillmem, 1 },
5261 { "spillreg.p", dot_spillreg, 1 },
5262 { "spillsp.p", dot_spillmem, ~0 },
5263 { "spillpsp.p", dot_spillmem, ~1 },
5264 { "label_state", dot_label_state, 0 },
5265 { "copy_state", dot_copy_state, 0 },
5266 { "unwabi", dot_unwabi, 0 },
5267 { "personality", dot_personality, 0 },
5268 { "mii", dot_template, 0x0 },
5269 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5270 { "mlx", dot_template, 0x2 },
5271 { "mmi", dot_template, 0x4 },
5272 { "mfi", dot_template, 0x6 },
5273 { "mmf", dot_template, 0x7 },
5274 { "mib", dot_template, 0x8 },
5275 { "mbb", dot_template, 0x9 },
5276 { "bbb", dot_template, 0xb },
5277 { "mmb", dot_template, 0xc },
5278 { "mfb", dot_template, 0xe },
5279 { "align", dot_align, 0 },
5280 { "regstk", dot_regstk, 0 },
5281 { "rotr", dot_rot, DYNREG_GR },
5282 { "rotf", dot_rot, DYNREG_FR },
5283 { "rotp", dot_rot, DYNREG_PR },
5284 { "lsb", dot_byteorder, 0 },
5285 { "msb", dot_byteorder, 1 },
5286 { "psr", dot_psr, 0 },
5287 { "alias", dot_alias, 0 },
5288 { "secalias", dot_alias, 1 },
5289 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5290
5291 { "xdata1", dot_xdata, 1 },
5292 { "xdata2", dot_xdata, 2 },
5293 { "xdata4", dot_xdata, 4 },
5294 { "xdata8", dot_xdata, 8 },
5295 { "xdata16", dot_xdata, 16 },
5296 { "xreal4", dot_xfloat_cons, 'f' },
5297 { "xreal8", dot_xfloat_cons, 'd' },
5298 { "xreal10", dot_xfloat_cons, 'x' },
5299 { "xreal16", dot_xfloat_cons, 'X' },
5300 { "xstring", dot_xstringer, 8 + 0 },
5301 { "xstringz", dot_xstringer, 8 + 1 },
5302
5303 /* unaligned versions: */
5304 { "xdata2.ua", dot_xdata_ua, 2 },
5305 { "xdata4.ua", dot_xdata_ua, 4 },
5306 { "xdata8.ua", dot_xdata_ua, 8 },
5307 { "xdata16.ua", dot_xdata_ua, 16 },
5308 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5309 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5310 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5311 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5312
5313 /* annotations/DV checking support */
5314 { "entry", dot_entry, 0 },
5315 { "mem.offset", dot_mem_offset, 0 },
5316 { "pred.rel", dot_pred_rel, 0 },
5317 { "pred.rel.clear", dot_pred_rel, 'c' },
5318 { "pred.rel.imply", dot_pred_rel, 'i' },
5319 { "pred.rel.mutex", dot_pred_rel, 'm' },
5320 { "pred.safe_across_calls", dot_pred_rel, 's' },
5321 { "reg.val", dot_reg_val, 0 },
5322 { "serialize.data", dot_serialize, 0 },
5323 { "serialize.instruction", dot_serialize, 1 },
5324 { "auto", dot_dv_mode, 'a' },
5325 { "explicit", dot_dv_mode, 'e' },
5326 { "default", dot_dv_mode, 'd' },
5327
5328 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5329 IA-64 aligns data allocation pseudo-ops by default, so we have to
5330 tell it that these ones are supposed to be unaligned. Long term,
5331 should rewrite so that only IA-64 specific data allocation pseudo-ops
5332 are aligned by default. */
5333 {"2byte", stmt_cons_ua, 2},
5334 {"4byte", stmt_cons_ua, 4},
5335 {"8byte", stmt_cons_ua, 8},
5336
5337 #ifdef TE_VMS
5338 {"vms_common", obj_elf_vms_common, 0},
5339 #endif
5340
5341 { NULL, 0, 0 }
5342 };
5343
5344 static const struct pseudo_opcode
5345 {
5346 const char *name;
5347 void (*handler) (int);
5348 int arg;
5349 }
5350 pseudo_opcode[] =
5351 {
5352 /* these are more like pseudo-ops, but don't start with a dot */
5353 { "data1", cons, 1 },
5354 { "data2", cons, 2 },
5355 { "data4", cons, 4 },
5356 { "data8", cons, 8 },
5357 { "data16", cons, 16 },
5358 { "real4", stmt_float_cons, 'f' },
5359 { "real8", stmt_float_cons, 'd' },
5360 { "real10", stmt_float_cons, 'x' },
5361 { "real16", stmt_float_cons, 'X' },
5362 { "string", stringer, 8 + 0 },
5363 { "stringz", stringer, 8 + 1 },
5364
5365 /* unaligned versions: */
5366 { "data2.ua", stmt_cons_ua, 2 },
5367 { "data4.ua", stmt_cons_ua, 4 },
5368 { "data8.ua", stmt_cons_ua, 8 },
5369 { "data16.ua", stmt_cons_ua, 16 },
5370 { "real4.ua", float_cons, 'f' },
5371 { "real8.ua", float_cons, 'd' },
5372 { "real10.ua", float_cons, 'x' },
5373 { "real16.ua", float_cons, 'X' },
5374 };
5375
5376 /* Declare a register by creating a symbol for it and entering it in
5377 the symbol table. */
5378
5379 static symbolS *
5380 declare_register (const char *name, unsigned int regnum)
5381 {
5382 const char *err;
5383 symbolS *sym;
5384
5385 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5386
5387 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5388 if (err)
5389 as_fatal ("Inserting \"%s\" into register table failed: %s",
5390 name, err);
5391
5392 return sym;
5393 }
5394
5395 static void
5396 declare_register_set (const char *prefix,
5397 unsigned int num_regs,
5398 unsigned int base_regnum)
5399 {
5400 char name[8];
5401 unsigned int i;
5402
5403 for (i = 0; i < num_regs; ++i)
5404 {
5405 snprintf (name, sizeof (name), "%s%u", prefix, i);
5406 declare_register (name, base_regnum + i);
5407 }
5408 }
5409
5410 static unsigned int
5411 operand_width (enum ia64_opnd opnd)
5412 {
5413 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5414 unsigned int bits = 0;
5415 int i;
5416
5417 bits = 0;
5418 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5419 bits += odesc->field[i].bits;
5420
5421 return bits;
5422 }
5423
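/* Check whether expression E is acceptable as operand RES_INDEX of opcode
   IDESC.  Returns OPERAND_MATCH on success, OPERAND_OUT_OF_RANGE if the
   operand is of the right kind but its value does not fit, and
   OPERAND_MISMATCH otherwise.  For relocatable operands a fixup may be
   appended to CURR_SLOT as a side effect. */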
5424 static enum operand_match_result
5425 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5426 {
5427 enum ia64_opnd opnd = idesc->operands[res_index];
5428 int bits, relocatable = 0;
5429 struct insn_fix *fix;
5430 bfd_signed_vma val;
5431
5432 switch (opnd)
5433 {
5434 /* constants: */
5435
5436 case IA64_OPND_AR_CCV:
5437 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5438 return OPERAND_MATCH;
5439 break;
5440
5441 case IA64_OPND_AR_CSD:
5442 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5443 return OPERAND_MATCH;
5444 break;
5445
5446 case IA64_OPND_AR_PFS:
5447 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5448 return OPERAND_MATCH;
5449 break;
5450
5451 case IA64_OPND_GR0:
5452 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5453 return OPERAND_MATCH;
5454 break;
5455
5456 case IA64_OPND_IP:
5457 if (e->X_op == O_register && e->X_add_number == REG_IP)
5458 return OPERAND_MATCH;
5459 break;
5460
5461 case IA64_OPND_PR:
5462 if (e->X_op == O_register && e->X_add_number == REG_PR)
5463 return OPERAND_MATCH;
5464 break;
5465
5466 case IA64_OPND_PR_ROT:
5467 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5468 return OPERAND_MATCH;
5469 break;
5470
5471 case IA64_OPND_PSR:
5472 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5473 return OPERAND_MATCH;
5474 break;
5475
5476 case IA64_OPND_PSR_L:
5477 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5478 return OPERAND_MATCH;
5479 break;
5480
5481 case IA64_OPND_PSR_UM:
5482 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5483 return OPERAND_MATCH;
5484 break;
5485
5486 case IA64_OPND_C1:
5487 if (e->X_op == O_constant)
5488 {
5489 if (e->X_add_number == 1)
5490 return OPERAND_MATCH;
5491 else
5492 return OPERAND_OUT_OF_RANGE;
5493 }
5494 break;
5495
5496 case IA64_OPND_C8:
5497 if (e->X_op == O_constant)
5498 {
5499 if (e->X_add_number == 8)
5500 return OPERAND_MATCH;
5501 else
5502 return OPERAND_OUT_OF_RANGE;
5503 }
5504 break;
5505
5506 case IA64_OPND_C16:
5507 if (e->X_op == O_constant)
5508 {
5509 if (e->X_add_number == 16)
5510 return OPERAND_MATCH;
5511 else
5512 return OPERAND_OUT_OF_RANGE;
5513 }
5514 break;
5515
5516 /* register operands: */
5517
5518 case IA64_OPND_AR3:
5519 if (e->X_op == O_register && e->X_add_number >= REG_AR
5520 && e->X_add_number < REG_AR + 128)
5521 return OPERAND_MATCH;
5522 break;
5523
5524 case IA64_OPND_B1:
5525 case IA64_OPND_B2:
5526 if (e->X_op == O_register && e->X_add_number >= REG_BR
5527 && e->X_add_number < REG_BR + 8)
5528 return OPERAND_MATCH;
5529 break;
5530
5531 case IA64_OPND_CR3:
5532 if (e->X_op == O_register && e->X_add_number >= REG_CR
5533 && e->X_add_number < REG_CR + 128)
5534 return OPERAND_MATCH;
5535 break;
5536
5537 case IA64_OPND_DAHR3:
5538 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5539 && e->X_add_number < REG_DAHR + 8)
5540 return OPERAND_MATCH;
5541 break;
5542
5543 case IA64_OPND_F1:
5544 case IA64_OPND_F2:
5545 case IA64_OPND_F3:
5546 case IA64_OPND_F4:
5547 if (e->X_op == O_register && e->X_add_number >= REG_FR
5548 && e->X_add_number < REG_FR + 128)
5549 return OPERAND_MATCH;
5550 break;
5551
5552 case IA64_OPND_P1:
5553 case IA64_OPND_P2:
5554 if (e->X_op == O_register && e->X_add_number >= REG_P
5555 && e->X_add_number < REG_P + 64)
5556 return OPERAND_MATCH;
5557 break;
5558
5559 case IA64_OPND_R1:
5560 case IA64_OPND_R2:
5561 case IA64_OPND_R3:
5562 if (e->X_op == O_register && e->X_add_number >= REG_GR
5563 && e->X_add_number < REG_GR + 128)
5564 return OPERAND_MATCH;
5565 break;
5566
5567 case IA64_OPND_R3_2:
5568 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5569 {
5570 if (e->X_add_number < REG_GR + 4)
5571 return OPERAND_MATCH;
5572 else if (e->X_add_number < REG_GR + 128)
5573 return OPERAND_OUT_OF_RANGE;
5574 }
5575 break;
5576
5577 /* indirect operands: */
5578 case IA64_OPND_CPUID_R3:
5579 case IA64_OPND_DBR_R3:
5580 case IA64_OPND_DTR_R3:
5581 case IA64_OPND_ITR_R3:
5582 case IA64_OPND_IBR_R3:
5583 case IA64_OPND_MSR_R3:
5584 case IA64_OPND_PKR_R3:
5585 case IA64_OPND_PMC_R3:
5586 case IA64_OPND_PMD_R3:
5587 case IA64_OPND_DAHR_R3:
5588 case IA64_OPND_RR_R3:
5589 if (e->X_op == O_index && e->X_op_symbol
5590 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5591 == opnd - IA64_OPND_CPUID_R3))
5592 return OPERAND_MATCH;
5593 break;
5594
5595 case IA64_OPND_MR3:
5596 if (e->X_op == O_index && !e->X_op_symbol)
5597 return OPERAND_MATCH;
5598 break;
5599
5600 /* immediate operands: */
5601 case IA64_OPND_CNT2a:
5602 case IA64_OPND_LEN4:
5603 case IA64_OPND_LEN6:
5604 bits = operand_width (idesc->operands[res_index]);
5605 if (e->X_op == O_constant)
5606 {
5607 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5608 return OPERAND_MATCH;
5609 else
5610 return OPERAND_OUT_OF_RANGE;
5611 }
5612 break;
5613
5614 case IA64_OPND_CNT2b:
5615 if (e->X_op == O_constant)
5616 {
5617 if ((bfd_vma) (e->X_add_number - 1) < 3)
5618 return OPERAND_MATCH;
5619 else
5620 return OPERAND_OUT_OF_RANGE;
5621 }
5622 break;
5623
5624 case IA64_OPND_CNT2c:
5625 val = e->X_add_number;
5626 if (e->X_op == O_constant)
5627 {
5628 if ((val == 0 || val == 7 || val == 15 || val == 16))
5629 return OPERAND_MATCH;
5630 else
5631 return OPERAND_OUT_OF_RANGE;
5632 }
5633 break;
5634
5635 case IA64_OPND_SOR:
5636 /* SOR must be an integer multiple of 8 */
5637 if (e->X_op == O_constant && e->X_add_number & 0x7)
5638 return OPERAND_OUT_OF_RANGE;
5639 /* Fall through. */
5640 case IA64_OPND_SOF:
5641 case IA64_OPND_SOL:
5642 if (e->X_op == O_constant)
5643 {
5644 if ((bfd_vma) e->X_add_number <= 96)
5645 return OPERAND_MATCH;
5646 else
5647 return OPERAND_OUT_OF_RANGE;
5648 }
5649 break;
5650
5651 case IA64_OPND_IMMU62:
5652 if (e->X_op == O_constant)
5653 {
5654 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5655 return OPERAND_MATCH;
5656 else
5657 return OPERAND_OUT_OF_RANGE;
5658 }
5659 else
5660 {
5661 /* FIXME -- need 62-bit relocation type */
5662 as_bad (_("62-bit relocation not yet implemented"));
5663 }
5664 break;
5665
5666 case IA64_OPND_IMMU64:
5667 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5668 || e->X_op == O_subtract)
5669 {
5670 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5671 fix->code = BFD_RELOC_IA64_IMM64;
5672 if (e->X_op != O_subtract)
5673 {
5674 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5675 if (e->X_op == O_pseudo_fixup)
5676 e->X_op = O_symbol;
5677 }
5678
5679 fix->opnd = idesc->operands[res_index];
5680 fix->expr = *e;
5681 fix->is_pcrel = 0;
5682 ++CURR_SLOT.num_fixups;
5683 return OPERAND_MATCH;
5684 }
5685 else if (e->X_op == O_constant)
5686 return OPERAND_MATCH;
5687 break;
5688
5689 case IA64_OPND_IMMU5b:
5690 if (e->X_op == O_constant)
5691 {
5692 val = e->X_add_number;
5693 if (val >= 32 && val <= 63)
5694 return OPERAND_MATCH;
5695 else
5696 return OPERAND_OUT_OF_RANGE;
5697 }
5698 break;
5699
5700 case IA64_OPND_CCNT5:
5701 case IA64_OPND_CNT5:
5702 case IA64_OPND_CNT6:
5703 case IA64_OPND_CPOS6a:
5704 case IA64_OPND_CPOS6b:
5705 case IA64_OPND_CPOS6c:
5706 case IA64_OPND_IMMU2:
5707 case IA64_OPND_IMMU7a:
5708 case IA64_OPND_IMMU7b:
5709 case IA64_OPND_IMMU16:
5710 case IA64_OPND_IMMU19:
5711 case IA64_OPND_IMMU21:
5712 case IA64_OPND_IMMU24:
5713 case IA64_OPND_MBTYPE4:
5714 case IA64_OPND_MHTYPE8:
5715 case IA64_OPND_POS6:
5716 bits = operand_width (idesc->operands[res_index]);
5717 if (e->X_op == O_constant)
5718 {
5719 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5720 return OPERAND_MATCH;
5721 else
5722 return OPERAND_OUT_OF_RANGE;
5723 }
5724 break;
5725
5726 case IA64_OPND_IMMU9:
5727 bits = operand_width (idesc->operands[res_index]);
5728 if (e->X_op == O_constant)
5729 {
5730 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5731 {
5732 int lobits = e->X_add_number & 0x3;
5733 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5734 e->X_add_number |= (bfd_vma) 0x3;
5735 return OPERAND_MATCH;
5736 }
5737 else
5738 return OPERAND_OUT_OF_RANGE;
5739 }
5740 break;
5741
5742 case IA64_OPND_IMM44:
5743 /* The least significant 16 bits must be zero. */
5744 if ((e->X_add_number & 0xffff) != 0)
5745 /* XXX technically, this is wrong: we should not be issuing warning
5746 messages until we're sure this instruction pattern is going to
5747 be used! */
5748 as_warn (_("lower 16 bits of mask ignored"));
5749
5750 if (e->X_op == O_constant)
5751 {
5752 if (((e->X_add_number >= 0
5753 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5754 || (e->X_add_number < 0
5755 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5756 {
5757 /* sign-extend */
5758 if (e->X_add_number >= 0
5759 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5760 {
5761 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5762 }
5763 return OPERAND_MATCH;
5764 }
5765 else
5766 return OPERAND_OUT_OF_RANGE;
5767 }
5768 break;
5769
5770 case IA64_OPND_IMM17:
5771 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5772 if (e->X_op == O_constant)
5773 {
5774 if (((e->X_add_number >= 0
5775 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5776 || (e->X_add_number < 0
5777 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5778 {
5779 /* sign-extend */
5780 if (e->X_add_number >= 0
5781 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5782 {
5783 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5784 }
5785 return OPERAND_MATCH;
5786 }
5787 else
5788 return OPERAND_OUT_OF_RANGE;
5789 }
5790 break;
5791
5792 case IA64_OPND_IMM14:
5793 case IA64_OPND_IMM22:
5794 relocatable = 1;
5795 /* Fall through. */
5796 case IA64_OPND_IMM1:
5797 case IA64_OPND_IMM8:
5798 case IA64_OPND_IMM8U4:
5799 case IA64_OPND_IMM8M1:
5800 case IA64_OPND_IMM8M1U4:
5801 case IA64_OPND_IMM8M1U8:
5802 case IA64_OPND_IMM9a:
5803 case IA64_OPND_IMM9b:
5804 bits = operand_width (idesc->operands[res_index]);
5805 if (relocatable && (e->X_op == O_symbol
5806 || e->X_op == O_subtract
5807 || e->X_op == O_pseudo_fixup))
5808 {
5809 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5810
5811 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5812 fix->code = BFD_RELOC_IA64_IMM14;
5813 else
5814 fix->code = BFD_RELOC_IA64_IMM22;
5815
5816 if (e->X_op != O_subtract)
5817 {
5818 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5819 if (e->X_op == O_pseudo_fixup)
5820 e->X_op = O_symbol;
5821 }
5822
5823 fix->opnd = idesc->operands[res_index];
5824 fix->expr = *e;
5825 fix->is_pcrel = 0;
5826 ++CURR_SLOT.num_fixups;
5827 return OPERAND_MATCH;
5828 }
5829 else if (e->X_op != O_constant
5830 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5831 return OPERAND_MISMATCH;
5832
5833 if (opnd == IA64_OPND_IMM8M1U4)
5834 {
5835 /* Zero is not valid for unsigned compares that take an adjusted
5836 constant immediate range. */
5837 if (e->X_add_number == 0)
5838 return OPERAND_OUT_OF_RANGE;
5839
5840 /* Sign-extend 32-bit unsigned numbers, so that the following range
5841 checks will work. */
5842 val = e->X_add_number;
5843 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5844 && ((val & ((bfd_vma) 1 << 31)) != 0))
5845 val = ((val << 32) >> 32);
5846
5847 /* Check for 0x100000000. This is valid because
5848 0x100000000-1 is the same as ((uint32_t) -1). */
5849 if (val == ((bfd_signed_vma) 1 << 32))
5850 return OPERAND_MATCH;
5851
5852 val = val - 1;
5853 }
5854 else if (opnd == IA64_OPND_IMM8M1U8)
5855 {
5856 /* Zero is not valid for unsigned compares that take an adjusted
5857 constant immediate range. */
5858 if (e->X_add_number == 0)
5859 return OPERAND_OUT_OF_RANGE;
5860
5861 /* Check for 0x10000000000000000. */
5862 if (e->X_op == O_big)
5863 {
5864 if (generic_bignum[0] == 0
5865 && generic_bignum[1] == 0
5866 && generic_bignum[2] == 0
5867 && generic_bignum[3] == 0
5868 && generic_bignum[4] == 1)
5869 return OPERAND_MATCH;
5870 else
5871 return OPERAND_OUT_OF_RANGE;
5872 }
5873 else
5874 val = e->X_add_number - 1;
5875 }
5876 else if (opnd == IA64_OPND_IMM8M1)
5877 val = e->X_add_number - 1;
5878 else if (opnd == IA64_OPND_IMM8U4)
5879 {
5880 /* Sign-extend 32-bit unsigned numbers, so that the following range
5881 checks will work. */
5882 val = e->X_add_number;
5883 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5884 && ((val & ((bfd_vma) 1 << 31)) != 0))
5885 val = ((val << 32) >> 32);
5886 }
5887 else
5888 val = e->X_add_number;
5889
5890 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5891 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5892 return OPERAND_MATCH;
5893 else
5894 return OPERAND_OUT_OF_RANGE;
5895
5896 case IA64_OPND_INC3:
5897 /* +/- 1, 4, 8, 16 */
5898 val = e->X_add_number;
5899 if (val < 0)
5900 val = -val;
5901 if (e->X_op == O_constant)
5902 {
5903 if ((val == 1 || val == 4 || val == 8 || val == 16))
5904 return OPERAND_MATCH;
5905 else
5906 return OPERAND_OUT_OF_RANGE;
5907 }
5908 break;
5909
5910 case IA64_OPND_TGT25:
5911 case IA64_OPND_TGT25b:
5912 case IA64_OPND_TGT25c:
5913 case IA64_OPND_TGT64:
5914 if (e->X_op == O_symbol)
5915 {
5916 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5917 if (opnd == IA64_OPND_TGT25)
5918 fix->code = BFD_RELOC_IA64_PCREL21F;
5919 else if (opnd == IA64_OPND_TGT25b)
5920 fix->code = BFD_RELOC_IA64_PCREL21M;
5921 else if (opnd == IA64_OPND_TGT25c)
5922 fix->code = BFD_RELOC_IA64_PCREL21B;
5923 else if (opnd == IA64_OPND_TGT64)
5924 fix->code = BFD_RELOC_IA64_PCREL60B;
5925 else
5926 abort ();
5927
5928 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5929 fix->opnd = idesc->operands[res_index];
5930 fix->expr = *e;
5931 fix->is_pcrel = 1;
5932 ++CURR_SLOT.num_fixups;
5933 return OPERAND_MATCH;
5934 }
5935 /* Fall through. */
5936 case IA64_OPND_TAG13:
5937 case IA64_OPND_TAG13b:
5938 switch (e->X_op)
5939 {
5940 case O_constant:
5941 return OPERAND_MATCH;
5942
5943 case O_symbol:
5944 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5945 /* There are no external relocs for TAG13/TAG13b fields, so we
5946 create a dummy reloc. This will not live past md_apply_fix. */
5947 fix->code = BFD_RELOC_UNUSED;
5948 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5949 fix->opnd = idesc->operands[res_index];
5950 fix->expr = *e;
5951 fix->is_pcrel = 1;
5952 ++CURR_SLOT.num_fixups;
5953 return OPERAND_MATCH;
5954
5955 default:
5956 break;
5957 }
5958 break;
5959
5960 case IA64_OPND_LDXMOV:
5961 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5962 fix->code = BFD_RELOC_IA64_LDXMOV;
5963 fix->opnd = idesc->operands[res_index];
5964 fix->expr = *e;
5965 fix->is_pcrel = 0;
5966 ++CURR_SLOT.num_fixups;
5967 return OPERAND_MATCH;
5968
5969 case IA64_OPND_STRD5b:
5970 if (e->X_op == O_constant)
5971 {
5972 /* 5-bit signed scaled by 64 */
5973 if ((e->X_add_number <= ( 0xf << 6 ))
5974 && (e->X_add_number >= -( 0x10 << 6 )))
5975 {
5976
5977 /* Must be a multiple of 64 */
5978 if ((e->X_add_number & 0x3f) != 0)
5979 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5980
5981 e->X_add_number &= ~ 0x3f;
5982 return OPERAND_MATCH;
5983 }
5984 else
5985 return OPERAND_OUT_OF_RANGE;
5986 }
5987 break;
5988 case IA64_OPND_CNT6a:
5989 if (e->X_op == O_constant)
5990 {
5991 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5992 if ((e->X_add_number <= 64)
5993 && (e->X_add_number > 0) )
5994 {
5995 return OPERAND_MATCH;
5996 }
5997 else
5998 return OPERAND_OUT_OF_RANGE;
5999 }
6000 break;
6001
6002 default:
6003 break;
6004 }
6005 return OPERAND_MISMATCH;
6006 }
6007
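/* Parse a single operand expression into E.  MORE, if nonzero, is an
   additional separator accepted besides ','; the separator character seen
   is returned (and is consumed when it is one of the accepted ones). */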
6008 static int
6009 parse_operand (expressionS *e, int more)
6010 {
6011 int sep = '\0';
6012
6013 memset (e, 0, sizeof (*e));
6014 e->X_op = O_absent;
6015 SKIP_WHITESPACE ();
6016 expression (e);
6017 sep = *input_line_pointer;
6018 if (more && (sep == ',' || sep == more))
6019 ++input_line_pointer;
6020 return sep;
6021 }
6022
6023 static int
6024 parse_operand_and_eval (expressionS *e, int more)
6025 {
6026 int sep = parse_operand (e, more);
6027 resolve_expression (e);
6028 return sep;
6029 }
6030
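/* Like parse_operand_and_eval, except that operand kinds which may need a
   relocation (immediates like IMM14/IMM22/IMMU64, branch targets and tags)
   are left unresolved so that their symbols survive until fixup time. */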
6031 static int
6032 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6033 {
6034 int sep = parse_operand (e, more);
6035 switch (op)
6036 {
6037 case IA64_OPND_IMM14:
6038 case IA64_OPND_IMM22:
6039 case IA64_OPND_IMMU64:
6040 case IA64_OPND_TGT25:
6041 case IA64_OPND_TGT25b:
6042 case IA64_OPND_TGT25c:
6043 case IA64_OPND_TGT64:
6044 case IA64_OPND_TAG13:
6045 case IA64_OPND_TAG13b:
6046 case IA64_OPND_LDXMOV:
6047 break;
6048 default:
6049 resolve_expression (e);
6050 break;
6051 }
6052 return sep;
6053 }
6054
6055 /* Returns the next entry in the opcode table that matches the one in
6056 IDESC, and frees the entry in IDESC. If no matching entry is
6057 found, NULL is returned instead. */
6058
6059 static struct ia64_opcode *
6060 get_next_opcode (struct ia64_opcode *idesc)
6061 {
6062 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6063 ia64_free_opcode (idesc);
6064 return next;
6065 }
6066
6067 /* Parse the operands for the opcode and find the opcode variant that
6068 matches the specified operands, or NULL if no match is possible. */
6069
6070 static struct ia64_opcode *
6071 parse_operands (struct ia64_opcode *idesc)
6072 {
6073 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6074 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6075 int reg1, reg2;
6076 char reg_class;
6077 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6078 enum operand_match_result result;
6079 char mnemonic[129];
6080 char *first_arg = 0, *end, *saved_input_pointer;
6081 unsigned int sof;
6082
6083 gas_assert (strlen (idesc->name) <= 128);
6084
6085 strcpy (mnemonic, idesc->name);
6086 if (idesc->operands[2] == IA64_OPND_SOF
6087 || idesc->operands[1] == IA64_OPND_SOF)
6088 {
6089 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6090 can't parse the first operand until we have parsed the
6091 remaining operands of the "alloc" instruction. */
6092 SKIP_WHITESPACE ();
6093 first_arg = input_line_pointer;
6094 end = strchr (input_line_pointer, '=');
6095 if (!end)
6096 {
6097 as_bad (_("Expected separator `='"));
6098 return 0;
6099 }
6100 input_line_pointer = end + 1;
6101 ++i;
6102 ++num_outputs;
6103 }
6104
6105 for (; ; ++i)
6106 {
6107 if (i < NELEMS (CURR_SLOT.opnd))
6108 {
6109 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6110 idesc->operands[i]);
6111 if (CURR_SLOT.opnd[i].X_op == O_absent)
6112 break;
6113 }
6114 else
6115 {
6116 expressionS dummy;
6117
6118 sep = parse_operand (&dummy, '=');
6119 if (dummy.X_op == O_absent)
6120 break;
6121 }
6122
6123 ++num_operands;
6124
6125 if (sep != '=' && sep != ',')
6126 break;
6127
6128 if (sep == '=')
6129 {
6130 if (num_outputs > 0)
6131 as_bad (_("Duplicate equal sign (=) in instruction"));
6132 else
6133 num_outputs = i + 1;
6134 }
6135 }
6136 if (sep != '\0')
6137 {
6138 as_bad (_("Illegal operand separator `%c'"), sep);
6139 return 0;
6140 }
6141
6142 if (idesc->operands[2] == IA64_OPND_SOF
6143 || idesc->operands[1] == IA64_OPND_SOF)
6144 {
6145 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6146 Note, however, that due to that mapping, operand numbers in error
6147 messages for any of the constant operands will not be correct. */
6148 know (strcmp (idesc->name, "alloc") == 0);
6149 /* The first operand hasn't been parsed/initialized yet (but
6150 num_operands intentionally doesn't account for that). */
6151 i = num_operands > 4 ? 2 : 1;
6152 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6153 ? CURR_SLOT.opnd[n].X_add_number \
6154 : 0)
6155 sof = set_regstack (FORCE_CONST(i),
6156 FORCE_CONST(i + 1),
6157 FORCE_CONST(i + 2),
6158 FORCE_CONST(i + 3));
6159 #undef FORCE_CONST
6160
6161 /* Now we can parse the first arg: */
6162 saved_input_pointer = input_line_pointer;
6163 input_line_pointer = first_arg;
6164 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6165 idesc->operands[0]);
6166 if (sep != '=')
6167 --num_outputs; /* force error */
6168 input_line_pointer = saved_input_pointer;
6169
6170 CURR_SLOT.opnd[i].X_add_number = sof;
6171 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6172 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6173 CURR_SLOT.opnd[i + 1].X_add_number
6174 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6175 else
6176 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6177 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6178 }
6179
6180 highest_unmatched_operand = -4;
6181 curr_out_of_range_pos = -1;
6182 error_pos = 0;
6183 for (; idesc; idesc = get_next_opcode (idesc))
6184 {
6185 if (num_outputs != idesc->num_outputs)
6186 continue; /* mismatch in # of outputs */
6187 if (highest_unmatched_operand < 0)
6188 highest_unmatched_operand |= 1;
6189 if (num_operands > NELEMS (idesc->operands)
6190 || (num_operands < NELEMS (idesc->operands)
6191 && idesc->operands[num_operands])
6192 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6193 continue; /* mismatch in number of arguments */
6194 if (highest_unmatched_operand < 0)
6195 highest_unmatched_operand |= 2;
6196
6197 CURR_SLOT.num_fixups = 0;
6198
6199 /* Try to match all operands. If we see an out-of-range operand,
6200 then continue trying to match the rest of the operands, since if
6201 the rest match, then this idesc will give the best error message. */
6202
6203 out_of_range_pos = -1;
6204 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6205 {
6206 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6207 if (result != OPERAND_MATCH)
6208 {
6209 if (result != OPERAND_OUT_OF_RANGE)
6210 break;
6211 if (out_of_range_pos < 0)
6212 /* remember position of the first out-of-range operand: */
6213 out_of_range_pos = i;
6214 }
6215 }
6216
6217 /* If we did not match all operands, or if at least one operand was
6218 out-of-range, then this idesc does not match. Keep track of which
6219 idesc matched the most operands before failing. If we have two
6220 idescs that failed at the same position, and one had an out-of-range
6221 operand, then prefer the out-of-range operand. Thus if we have
6222 "add r0=0x1000000,r1" we get an error saying the constant is out
6223 of range instead of an error saying that the constant should have been
6224 a register. */
6225
6226 if (i != num_operands || out_of_range_pos >= 0)
6227 {
6228 if (i > highest_unmatched_operand
6229 || (i == highest_unmatched_operand
6230 && out_of_range_pos > curr_out_of_range_pos))
6231 {
6232 highest_unmatched_operand = i;
6233 if (out_of_range_pos >= 0)
6234 {
6235 expected_operand = idesc->operands[out_of_range_pos];
6236 error_pos = out_of_range_pos;
6237 }
6238 else
6239 {
6240 expected_operand = idesc->operands[i];
6241 error_pos = i;
6242 }
6243 curr_out_of_range_pos = out_of_range_pos;
6244 }
6245 continue;
6246 }
6247
6248 break;
6249 }
6250 if (!idesc)
6251 {
6252 if (expected_operand)
6253 as_bad (_("Operand %u of `%s' should be %s"),
6254 error_pos + 1, mnemonic,
6255 elf64_ia64_operands[expected_operand].desc);
6256 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6257 as_bad (_("Wrong number of output operands"));
6258 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6259 as_bad (_("Wrong number of input operands"));
6260 else
6261 as_bad (_("Operand mismatch"));
6262 return 0;
6263 }
6264
6265 /* Check that the instruction doesn't use
6266 - r0, f0, or f1 as output operands
6267 - the same predicate twice as output operands
6268 - r0 as address of a base update load or store
6269 - the same GR as output and address of a base update load
6270 - two even- or two odd-numbered FRs as output operands of a floating
6271 point parallel load.
6272 At most two (conflicting) output (or output-like) operands can exist,
6273 (floating point parallel loads have three outputs, but the base register,
6274 if updated, cannot conflict with the actual outputs). */
6275 reg2 = reg1 = -1;
6276 for (i = 0; i < num_operands; ++i)
6277 {
6278 int regno = 0;
6279
6280 reg_class = 0;
6281 switch (idesc->operands[i])
6282 {
6283 case IA64_OPND_R1:
6284 case IA64_OPND_R2:
6285 case IA64_OPND_R3:
6286 if (i < num_outputs)
6287 {
6288 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6289 reg_class = 'r';
6290 else if (reg1 < 0)
6291 reg1 = CURR_SLOT.opnd[i].X_add_number;
6292 else if (reg2 < 0)
6293 reg2 = CURR_SLOT.opnd[i].X_add_number;
6294 }
6295 break;
6296 case IA64_OPND_P1:
6297 case IA64_OPND_P2:
6298 if (i < num_outputs)
6299 {
6300 if (reg1 < 0)
6301 reg1 = CURR_SLOT.opnd[i].X_add_number;
6302 else if (reg2 < 0)
6303 reg2 = CURR_SLOT.opnd[i].X_add_number;
6304 }
6305 break;
6306 case IA64_OPND_F1:
6307 case IA64_OPND_F2:
6308 case IA64_OPND_F3:
6309 case IA64_OPND_F4:
6310 if (i < num_outputs)
6311 {
6312 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6313 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6314 {
6315 reg_class = 'f';
6316 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6317 }
6318 else if (reg1 < 0)
6319 reg1 = CURR_SLOT.opnd[i].X_add_number;
6320 else if (reg2 < 0)
6321 reg2 = CURR_SLOT.opnd[i].X_add_number;
6322 }
6323 break;
6324 case IA64_OPND_MR3:
6325 if (idesc->flags & IA64_OPCODE_POSTINC)
6326 {
6327 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6328 reg_class = 'm';
6329 else if (reg1 < 0)
6330 reg1 = CURR_SLOT.opnd[i].X_add_number;
6331 else if (reg2 < 0)
6332 reg2 = CURR_SLOT.opnd[i].X_add_number;
6333 }
6334 break;
6335 default:
6336 break;
6337 }
6338 switch (reg_class)
6339 {
6340 case 0:
6341 break;
6342 default:
6343 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6344 break;
6345 case 'm':
6346 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6347 break;
6348 }
6349 }
6350 if (reg1 == reg2)
6351 {
6352 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6353 {
6354 reg1 -= REG_GR;
6355 reg_class = 'r';
6356 }
6357 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6358 {
6359 reg1 -= REG_P;
6360 reg_class = 'p';
6361 }
6362 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6363 {
6364 reg1 -= REG_FR;
6365 reg_class = 'f';
6366 }
6367 else
6368 reg_class = 0;
6369 if (reg_class)
6370 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6371 }
6372 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6373 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6374 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6375 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6376 && ! ((reg1 ^ reg2) & 1))
6377 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6378 reg1 - REG_FR, reg2 - REG_FR);
6379 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6380 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6381 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6382 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6383 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6384 reg1 - REG_FR, reg2 - REG_FR);
6385 return idesc;
6386 }
6387
6388 static void
6389 build_insn (struct slot *slot, bfd_vma *insnp)
6390 {
6391 const struct ia64_operand *odesc, *o2desc;
6392 struct ia64_opcode *idesc = slot->idesc;
6393 bfd_vma insn;
6394 bfd_signed_vma val;
6395 const char *err;
6396 int i;
6397
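/* The qualifying predicate occupies the low six bits of every IA-64
   instruction, so it can simply be OR'd into the opcode here.  */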
6398 insn = idesc->opcode | slot->qp_regno;
6399
6400 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6401 {
6402 if (slot->opnd[i].X_op == O_register
6403 || slot->opnd[i].X_op == O_constant
6404 || slot->opnd[i].X_op == O_index)
6405 val = slot->opnd[i].X_add_number;
6406 else if (slot->opnd[i].X_op == O_big)
6407 {
6408 /* This must be the value 0x10000000000000000. */
6409 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6410 val = 0;
6411 }
6412 else
6413 val = 0;
6414
6415 switch (idesc->operands[i])
6416 {
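/* The long-immediate and long-branch forms below span two slots: 41 bits
   of the value are emitted into the adjacent L slot via *insnp++, and the
   remaining bits are scattered into the immediate fields of the X-unit
   instruction itself.  */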
6417 case IA64_OPND_IMMU64:
6418 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6419 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6420 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6421 | (((val >> 63) & 0x1) << 36));
6422 continue;
6423
6424 case IA64_OPND_IMMU62:
6425 val &= 0x3fffffffffffffffULL;
6426 if (val != slot->opnd[i].X_add_number)
6427 as_warn (_("Value truncated to 62 bits"));
6428 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6429 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6430 continue;
6431
6432 case IA64_OPND_TGT64:
6433 val >>= 4;
6434 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6435 insn |= ((((val >> 59) & 0x1) << 36)
6436 | (((val >> 0) & 0xfffff) << 13));
6437 continue;
6438
6439 case IA64_OPND_AR3:
6440 val -= REG_AR;
6441 break;
6442
6443 case IA64_OPND_B1:
6444 case IA64_OPND_B2:
6445 val -= REG_BR;
6446 break;
6447
6448 case IA64_OPND_CR3:
6449 val -= REG_CR;
6450 break;
6451
6452 case IA64_OPND_DAHR3:
6453 val -= REG_DAHR;
6454 break;
6455
6456 case IA64_OPND_F1:
6457 case IA64_OPND_F2:
6458 case IA64_OPND_F3:
6459 case IA64_OPND_F4:
6460 val -= REG_FR;
6461 break;
6462
6463 case IA64_OPND_P1:
6464 case IA64_OPND_P2:
6465 val -= REG_P;
6466 break;
6467
6468 case IA64_OPND_R1:
6469 case IA64_OPND_R2:
6470 case IA64_OPND_R3:
6471 case IA64_OPND_R3_2:
6472 case IA64_OPND_CPUID_R3:
6473 case IA64_OPND_DBR_R3:
6474 case IA64_OPND_DTR_R3:
6475 case IA64_OPND_ITR_R3:
6476 case IA64_OPND_IBR_R3:
6477 case IA64_OPND_MR3:
6478 case IA64_OPND_MSR_R3:
6479 case IA64_OPND_PKR_R3:
6480 case IA64_OPND_PMC_R3:
6481 case IA64_OPND_PMD_R3:
6482 case IA64_OPND_DAHR_R3:
6483 case IA64_OPND_RR_R3:
6484 val -= REG_GR;
6485 break;
6486
6487 default:
6488 break;
6489 }
6490
6491 odesc = elf64_ia64_operands + idesc->operands[i];
6492 err = (*odesc->insert) (odesc, val, &insn);
6493 if (err)
6494 as_bad_where (slot->src_file, slot->src_line,
6495 _("Bad operand value: %s"), err);
6496 if (idesc->flags & IA64_OPCODE_PSEUDO)
6497 {
6498 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6499 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6500 {
6501 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6502 (*o2desc->insert) (o2desc, val, &insn);
6503 }
6504 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6505 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6506 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6507 {
6508 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6509 (*o2desc->insert) (o2desc, 64 - val, &insn);
6510 }
6511 }
6512 }
6513 *insnp = insn;
6514 }
6515
6516 static void
6517 emit_one_bundle (void)
6518 {
6519 int manual_bundling_off = 0, manual_bundling = 0;
6520 enum ia64_unit required_unit, insn_unit = 0;
6521 enum ia64_insn_type type[3], insn_type;
6522 unsigned int template_val, orig_template;
6523 bfd_vma insn[3] = { -1, -1, -1 };
6524 struct ia64_opcode *idesc;
6525 int end_of_insn_group = 0, user_template = -1;
6526 int n, i, j, first, curr, last_slot;
6527 bfd_vma t0 = 0, t1 = 0;
6528 struct label_fix *lfix;
6529 bfd_boolean mark_label;
6530 struct insn_fix *ifix;
6531 char mnemonic[16];
6532 fixS *fix;
6533 char *f;
6534 int addr_mod;
6535
6536 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6537 know (first >= 0 && first < NUM_SLOTS);
6538 n = MIN (3, md.num_slots_in_use);
6539
6540 /* Determine template: use user_template if specified, best match
6541 otherwise: */
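/* For example, a bundle of three A-type instructions can issue on either
   M or I units, so the table below will typically map it to an MII or MMI
   template.  */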
6542
6543 if (md.slot[first].user_template >= 0)
6544 user_template = template_val = md.slot[first].user_template;
6545 else
6546 {
6547 /* Auto select appropriate template. */
6548 memset (type, 0, sizeof (type));
6549 curr = first;
6550 for (i = 0; i < n; ++i)
6551 {
6552 if (md.slot[curr].label_fixups && i != 0)
6553 break;
6554 type[i] = md.slot[curr].idesc->type;
6555 curr = (curr + 1) % NUM_SLOTS;
6556 }
6557 template_val = best_template[type[0]][type[1]][type[2]];
6558 }
6559
6560 /* initialize instructions with appropriate nops: */
6561 for (i = 0; i < 3; ++i)
6562 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6563
6564 f = frag_more (16);
6565
6566 /* Check whether this bundle starts at an offset that is a multiple of 16
6567 bytes from the start of the frag. */
6568 addr_mod = frag_now_fix () & 15;
6569 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6570 as_bad (_("instruction address is not a multiple of 16"));
6571 frag_now->insn_addr = addr_mod;
6572 frag_now->has_code = 1;
6573
6574 /* now fill in slots with as many insns as possible: */
6575 curr = first;
6576 idesc = md.slot[curr].idesc;
6577 end_of_insn_group = 0;
6578 last_slot = -1;
6579 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6580 {
6581 /* If we have unwind records, we may need to update some now. */
6582 unw_rec_list *ptr = md.slot[curr].unwind_record;
6583 unw_rec_list *end_ptr = NULL;
6584
6585 if (ptr)
6586 {
6587 /* Find the last prologue/body record in the list for the current
6588 insn, and set the slot number for all records up to that point.
6589 This needs to be done now, because prologue/body records refer to
6590 the current point, not the point after the instruction has been
6591 issued. This matters because there may have been nops emitted
6592 meanwhile. Any non-prologue non-body record followed by a
6593 prologue/body record must also refer to the current point. */
6594 unw_rec_list *last_ptr;
6595
6596 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6597 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6598 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6599 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6600 || ptr->r.type == body)
6601 last_ptr = ptr;
6602 if (last_ptr)
6603 {
6604 /* Make last_ptr point one after the last prologue/body
6605 record. */
6606 last_ptr = last_ptr->next;
6607 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6608 ptr = ptr->next)
6609 {
6610 ptr->slot_number = (unsigned long) f + i;
6611 ptr->slot_frag = frag_now;
6612 }
6613 /* Remove the initialized records, so that we won't accidentally
6614 update them again if we insert a nop and continue. */
6615 md.slot[curr].unwind_record = last_ptr;
6616 }
6617 }
6618
6619 manual_bundling_off = md.slot[curr].manual_bundling_off;
6620 if (md.slot[curr].manual_bundling_on)
6621 {
6622 if (curr == first)
6623 manual_bundling = 1;
6624 else
6625 break; /* Need to start a new bundle. */
6626 }
6627
6628 /* If this instruction specifies a template, then it must be the first
6629 instruction of a bundle. */
6630 if (curr != first && md.slot[curr].user_template >= 0)
6631 break;
6632
6633 if (idesc->flags & IA64_OPCODE_SLOT2)
6634 {
6635 if (manual_bundling && !manual_bundling_off)
6636 {
6637 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6638 _("`%s' must be last in bundle"), idesc->name);
6639 if (i < 2)
6640 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6641 }
6642 i = 2;
6643 }
6644 if (idesc->flags & IA64_OPCODE_LAST)
6645 {
6646 int required_slot;
6647 unsigned int required_template;
6648
6649 /* If we need a stop bit after an M slot, our only choice is
6650 template 5 (M;;MI). If we need a stop bit after a B
6651 slot, our only choice is to place it at the end of the
6652 bundle, because the only available templates are MIB,
6653 MBB, BBB, MMB, and MFB. We don't handle anything other
6654 than M and B slots because these are the only kind of
6655 instructions that can have the IA64_OPCODE_LAST bit set. */
6656 required_template = template_val;
6657 switch (idesc->type)
6658 {
6659 case IA64_TYPE_M:
6660 required_slot = 0;
6661 required_template = 5;
6662 break;
6663
6664 case IA64_TYPE_B:
6665 required_slot = 2;
6666 break;
6667
6668 default:
6669 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6670 _("Internal error: don't know how to force %s to end of instruction group"),
6671 idesc->name);
6672 required_slot = i;
6673 break;
6674 }
6675 if (manual_bundling
6676 && (i > required_slot
6677 || (required_slot == 2 && !manual_bundling_off)
6678 || (user_template >= 0
6679 /* Changing from MMI to M;MI is OK. */
6680 && (template_val ^ required_template) > 1)))
6681 {
6682 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6683 _("`%s' must be last in instruction group"),
6684 idesc->name);
6685 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6686 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6687 }
6688 if (required_slot < i)
6689 /* Can't fit this instruction. */
6690 break;
6691
6692 i = required_slot;
6693 if (required_template != template_val)
6694 {
6695 /* If we switch the template, we need to reset the NOPs
6696 after slot i. The slot-types of the instructions ahead
6697 of i never change, so we don't need to worry about
6698 changing NOPs in front of this slot. */
6699 for (j = i; j < 3; ++j)
6700 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6701
6702 /* We just picked a template that includes the stop bit in the
6703 middle, so we don't need another one emitted later. */
6704 md.slot[curr].end_of_insn_group = 0;
6705 }
6706 template_val = required_template;
6707 }
6708 if (curr != first && md.slot[curr].label_fixups)
6709 {
6710 if (manual_bundling)
6711 {
6712 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6713 _("Label must be first in a bundle"));
6714 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6715 }
6716 /* This insn must go into the first slot of a bundle. */
6717 break;
6718 }
6719
6720 if (end_of_insn_group && md.num_slots_in_use >= 1)
6721 {
6722 /* We need an instruction group boundary in the middle of a
6723 bundle. See if we can switch to another template with
6724 an appropriate boundary. */
6725
6726 orig_template = template_val;
6727 if (i == 1 && (user_template == 4
6728 || (user_template < 0
6729 && (ia64_templ_desc[template_val].exec_unit[0]
6730 == IA64_UNIT_M))))
6731 {
6732 template_val = 5;
6733 end_of_insn_group = 0;
6734 }
6735 else if (i == 2 && (user_template == 0
6736 || (user_template < 0
6737 && (ia64_templ_desc[template_val].exec_unit[1]
6738 == IA64_UNIT_I)))
6739 /* This test makes sure we don't switch the template if
6740 the next instruction is one that needs to be first in
6741 an instruction group. Since all those instructions are
6742 in the M group, there is no way such an instruction can
6743 fit in this bundle even if we switch the template. The
6744 reason we have to check for this is that otherwise we
6745 may end up generating "MI;;I M.." which has the deadly
6746 effect that the second M instruction is no longer the
6747 first in the group! --davidm 99/12/16 */
6748 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6749 {
6750 template_val = 1;
6751 end_of_insn_group = 0;
6752 }
6753 else if (i == 1
6754 && user_template == 0
6755 && !(idesc->flags & IA64_OPCODE_FIRST))
6756 /* Use the next slot. */
6757 continue;
6758 else if (curr != first)
6759 /* can't fit this insn */
6760 break;
6761
6762 if (template_val != orig_template)
6763 /* if we switch the template, we need to reset the NOPs
6764 after slot i. The slot-types of the instructions ahead
6765 of i never change, so we don't need to worry about
6766 changing NOPs in front of this slot. */
6767 for (j = i; j < 3; ++j)
6768 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6769 }
6770 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6771
6772 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6773 if (idesc->type == IA64_TYPE_DYN)
6774 {
6775 enum ia64_opnd opnd1, opnd2;
6776
6777 if ((strcmp (idesc->name, "nop") == 0)
6778 || (strcmp (idesc->name, "break") == 0))
6779 insn_unit = required_unit;
6780 else if (strcmp (idesc->name, "hint") == 0)
6781 {
6782 insn_unit = required_unit;
6783 if (required_unit == IA64_UNIT_B)
6784 {
6785 switch (md.hint_b)
6786 {
6787 case hint_b_ok:
6788 break;
6789 case hint_b_warning:
6790 as_warn (_("hint in B unit may be treated as nop"));
6791 break;
6792 case hint_b_error:
6793 /* When manual bundling is off and there is no
6794 user template, we choose a different unit so
6795 that hint won't go into the current slot. We
6796 will fill the current bundle with nops and
6797 try to put hint into the next bundle. */
6798 if (!manual_bundling && user_template < 0)
6799 insn_unit = IA64_UNIT_I;
6800 else
6801 as_bad (_("hint in B unit can't be used"));
6802 break;
6803 }
6804 }
6805 }
6806 else if (strcmp (idesc->name, "chk.s") == 0
6807 || strcmp (idesc->name, "mov") == 0)
6808 {
6809 insn_unit = IA64_UNIT_M;
6810 if (required_unit == IA64_UNIT_I
6811 || (required_unit == IA64_UNIT_F && template_val == 6))
6812 insn_unit = IA64_UNIT_I;
6813 }
6814 else
6815 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6816
6817 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6818 idesc->name, "?imbfxx"[insn_unit]);
6819 opnd1 = idesc->operands[0];
6820 opnd2 = idesc->operands[1];
6821 ia64_free_opcode (idesc);
6822 idesc = ia64_find_opcode (mnemonic);
6823 /* moves to/from ARs have collisions */
6824 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6825 {
6826 while (idesc != NULL
6827 && (idesc->operands[0] != opnd1
6828 || idesc->operands[1] != opnd2))
6829 idesc = get_next_opcode (idesc);
6830 }
6831 md.slot[curr].idesc = idesc;
6832 }
6833 else
6834 {
6835 insn_type = idesc->type;
6836 insn_unit = IA64_UNIT_NIL;
6837 switch (insn_type)
6838 {
6839 case IA64_TYPE_A:
6840 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6841 insn_unit = required_unit;
6842 break;
6843 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6844 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6845 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6846 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6847 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6848 default: break;
6849 }
6850 }
6851
6852 if (insn_unit != required_unit)
6853 continue; /* Try next slot. */
6854
6855 /* Now is a good time to fix up the labels for this insn. */
6856 mark_label = FALSE;
6857 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6858 {
6859 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6860 symbol_set_frag (lfix->sym, frag_now);
6861 mark_label |= lfix->dw2_mark_labels;
6862 }
6863 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6864 {
6865 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6866 symbol_set_frag (lfix->sym, frag_now);
6867 }
6868
6869 if (debug_type == DEBUG_DWARF2
6870 || md.slot[curr].loc_directive_seen
6871 || mark_label)
6872 {
6873 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6874
6875 md.slot[curr].loc_directive_seen = 0;
6876 if (mark_label)
6877 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6878
6879 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6880 }
6881
6882 build_insn (md.slot + curr, insn + i);
6883
6884 ptr = md.slot[curr].unwind_record;
6885 if (ptr)
6886 {
6887 /* Set slot numbers for all remaining unwind records belonging to the
6888 current insn. There can not be any prologue/body unwind records
6889 here. */
6890 for (; ptr != end_ptr; ptr = ptr->next)
6891 {
6892 ptr->slot_number = (unsigned long) f + i;
6893 ptr->slot_frag = frag_now;
6894 }
6895 md.slot[curr].unwind_record = NULL;
6896 }
6897
6898 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6899 {
6900 ifix = md.slot[curr].fixup + j;
6901 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6902 &ifix->expr, ifix->is_pcrel, ifix->code);
6903 fix->tc_fix_data.opnd = ifix->opnd;
6904 fix->fx_file = md.slot[curr].src_file;
6905 fix->fx_line = md.slot[curr].src_line;
6906 }
6907
6908 end_of_insn_group = md.slot[curr].end_of_insn_group;
6909
6910 /* This adjustment to "i" must occur after the fix, otherwise the fix
6911 is assigned to the wrong slot, and the VMS linker complains. */
6912 if (required_unit == IA64_UNIT_L)
6913 {
6914 know (i == 1);
6915 /* skip one slot for long/X-unit instructions */
6916 ++i;
6917 }
6918 --md.num_slots_in_use;
6919 last_slot = i;
6920
6921 /* clear slot: */
6922 ia64_free_opcode (md.slot[curr].idesc);
6923 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6924 md.slot[curr].user_template = -1;
6925
6926 if (manual_bundling_off)
6927 {
6928 manual_bundling = 0;
6929 break;
6930 }
6931 curr = (curr + 1) % NUM_SLOTS;
6932 idesc = md.slot[curr].idesc;
6933 }
6934
6935 /* A user template was specified, but the first following instruction did
6936 not fit. This can happen with or without manual bundling. */
6937 if (md.num_slots_in_use > 0 && last_slot < 0)
6938 {
6939 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6940 _("`%s' does not fit into %s template"),
6941 idesc->name, ia64_templ_desc[template_val].name);
6942 /* Drop first insn so we don't livelock. */
6943 --md.num_slots_in_use;
6944 know (curr == first);
6945 ia64_free_opcode (md.slot[curr].idesc);
6946 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6947 md.slot[curr].user_template = -1;
6948 }
6949 else if (manual_bundling > 0)
6950 {
6951 if (md.num_slots_in_use > 0)
6952 {
6953 if (last_slot >= 2)
6954 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6955 _("`%s' does not fit into bundle"), idesc->name);
6956 else
6957 {
6958 const char *where;
6959
6960 if (template_val == 2)
6961 where = "X slot";
6962 else if (last_slot == 0)
6963 where = "slots 2 or 3";
6964 else
6965 where = "slot 3";
6966 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6967 _("`%s' can't go in %s of %s template"),
6968 idesc->name, where, ia64_templ_desc[template_val].name);
6969 }
6970 }
6971 else
6972 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6973 _("Missing '}' at end of file"));
6974 }
6975
6976 know (md.num_slots_in_use < NUM_SLOTS);
6977
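/* Pack the 128-bit bundle: bits 4:0 hold the template field (its low bit
   marks a stop at the end of the bundle), and the three 41-bit instruction
   slots occupy bits 45:5, 86:46, and 127:87.  t0 and t1 are the low and
   high 64 bits, written little-endian below.  */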
6978 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6979 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6980
6981 number_to_chars_littleendian (f + 0, t0, 8);
6982 number_to_chars_littleendian (f + 8, t1, 8);
6983 }
6984
6985 int
6986 md_parse_option (int c, const char *arg)
6987 {
6988
6989 switch (c)
6990 {
6991 /* Switches from the Intel assembler. */
6992 case 'm':
6993 if (strcmp (arg, "ilp64") == 0
6994 || strcmp (arg, "lp64") == 0
6995 || strcmp (arg, "p64") == 0)
6996 {
6997 md.flags |= EF_IA_64_ABI64;
6998 }
6999 else if (strcmp (arg, "ilp32") == 0)
7000 {
7001 md.flags &= ~EF_IA_64_ABI64;
7002 }
7003 else if (strcmp (arg, "le") == 0)
7004 {
7005 md.flags &= ~EF_IA_64_BE;
7006 default_big_endian = 0;
7007 }
7008 else if (strcmp (arg, "be") == 0)
7009 {
7010 md.flags |= EF_IA_64_BE;
7011 default_big_endian = 1;
7012 }
7013 else if (strncmp (arg, "unwind-check=", 13) == 0)
7014 {
7015 arg += 13;
7016 if (strcmp (arg, "warning") == 0)
7017 md.unwind_check = unwind_check_warning;
7018 else if (strcmp (arg, "error") == 0)
7019 md.unwind_check = unwind_check_error;
7020 else
7021 return 0;
7022 }
7023 else if (strncmp (arg, "hint.b=", 7) == 0)
7024 {
7025 arg += 7;
7026 if (strcmp (arg, "ok") == 0)
7027 md.hint_b = hint_b_ok;
7028 else if (strcmp (arg, "warning") == 0)
7029 md.hint_b = hint_b_warning;
7030 else if (strcmp (arg, "error") == 0)
7031 md.hint_b = hint_b_error;
7032 else
7033 return 0;
7034 }
7035 else if (strncmp (arg, "tune=", 5) == 0)
7036 {
7037 arg += 5;
7038 if (strcmp (arg, "itanium1") == 0)
7039 md.tune = itanium1;
7040 else if (strcmp (arg, "itanium2") == 0)
7041 md.tune = itanium2;
7042 else
7043 return 0;
7044 }
7045 else
7046 return 0;
7047 break;
7048
7049 case 'N':
7050 if (strcmp (arg, "so") == 0)
7051 {
7052 /* Suppress signon message. */
7053 }
7054 else if (strcmp (arg, "pi") == 0)
7055 {
7056 /* Reject privileged instructions. FIXME */
7057 }
7058 else if (strcmp (arg, "us") == 0)
7059 {
7060 /* Allow union of signed and unsigned range. FIXME */
7061 }
7062 else if (strcmp (arg, "close_fcalls") == 0)
7063 {
7064 /* Do not resolve global function calls. */
7065 }
7066 else
7067 return 0;
7068 break;
7069
7070 case 'C':
7071 /* temp[="prefix"] Insert temporary labels into the object file
7072 symbol table prefixed by "prefix".
7073 Default prefix is ":temp:".
7074 */
7075 break;
7076
7077 case 'a':
7078 /* indirect=<tgt> Assume unannotated indirect branches behavior
7079 according to <tgt> --
7080 exit: branch out from the current context (default)
7081 labels: all labels in context may be branch targets
7082 */
7083 if (strncmp (arg, "indirect=", 9) != 0)
7084 return 0;
7085 break;
7086
7087 case 'x':
7088 /* -X conflicts with an ignored option, use -x instead */
7089 md.detect_dv = 1;
7090 if (!arg || strcmp (arg, "explicit") == 0)
7091 {
7092 /* set default mode to explicit */
7093 md.default_explicit_mode = 1;
7094 break;
7095 }
7096 else if (strcmp (arg, "auto") == 0)
7097 {
7098 md.default_explicit_mode = 0;
7099 }
7100 else if (strcmp (arg, "none") == 0)
7101 {
7102 md.detect_dv = 0;
7103 }
7104 else if (strcmp (arg, "debug") == 0)
7105 {
7106 md.debug_dv = 1;
7107 }
7108 else if (strcmp (arg, "debugx") == 0)
7109 {
7110 md.default_explicit_mode = 1;
7111 md.debug_dv = 1;
7112 }
7113 else if (strcmp (arg, "debugn") == 0)
7114 {
7115 md.debug_dv = 1;
7116 md.detect_dv = 0;
7117 }
7118 else
7119 {
7120 as_bad (_("Unrecognized option '-x%s'"), arg);
7121 }
7122 break;
7123
7124 case 'S':
7125 /* nops Print nops statistics. */
7126 break;
7127
7128 /* GNU specific switches for gcc. */
7129 case OPTION_MCONSTANT_GP:
7130 md.flags |= EF_IA_64_CONS_GP;
7131 break;
7132
7133 case OPTION_MAUTO_PIC:
7134 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7135 break;
7136
7137 default:
7138 return 0;
7139 }
7140
7141 return 1;
7142 }
7143
7144 void
7145 md_show_usage (FILE *stream)
7146 {
7147 fputs (_("\
7148 IA-64 options:\n\
7149 --mconstant-gp mark output file as using the constant-GP model\n\
7150 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7151 --mauto-pic mark output file as using the constant-GP model\n\
7152 without function descriptors (sets ELF header flag\n\
7153 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7154 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7155 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7156 -mtune=[itanium1|itanium2]\n\
7157 tune for a specific CPU (default -mtune=itanium2)\n\
7158 -munwind-check=[warning|error]\n\
7159 unwind directive check (default -munwind-check=warning)\n\
7160 -mhint.b=[ok|warning|error]\n\
7161 hint.b check (default -mhint.b=error)\n\
7162 -x | -xexplicit turn on dependency violation checking\n"), stream);
7163 /* Note for translators: "automagically" can be translated as "automatically" here. */
7164 fputs (_("\
7165 -xauto automagically remove dependency violations (default)\n\
7166 -xnone turn off dependency violation checking\n\
7167 -xdebug debug dependency violation checker\n\
7168 -xdebugn debug dependency violation checker but turn off\n\
7169 dependency violation checking\n\
7170 -xdebugx debug dependency violation checker and turn on\n\
7171 dependency violation checking\n"),
7172 stream);
7173 }
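/* Typical invocations, for illustration only: "as -mlp64 -mle foo.s"
   selects the default data model and byte order explicitly, while
   "as -xexplicit -munwind-check=error foo.s" enables strict dependency
   violation and unwind directive checking.  */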
7174
7175 void
7176 ia64_after_parse_args (void)
7177 {
7178 if (debug_type == DEBUG_STABS)
7179 as_fatal (_("--gstabs is not supported for ia64"));
7180 }
7181
7182 /* Return true if TYPE fits in TEMPL at SLOT. */
7183
7184 static int
7185 match (int templ, int type, int slot)
7186 {
7187 enum ia64_unit unit;
7188 int result;
7189
7190 unit = ia64_templ_desc[templ].exec_unit[slot];
7191 switch (type)
7192 {
7193 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7194 case IA64_TYPE_A:
7195 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7196 break;
7197 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7198 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7199 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7200 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7201 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7202 default: result = 0; break;
7203 }
7204 return result;
7205 }
7206
7207 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7208 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7209 type M or I would fit in TEMPL at SLOT. */
7210
7211 static inline int
7212 extra_goodness (int templ, int slot)
7213 {
7214 switch (md.tune)
7215 {
7216 case itanium1:
7217 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7218 return 2;
7219 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7220 return 1;
7221 else
7222 return 0;
7223 break;
7224 case itanium2:
7225 if (match (templ, IA64_TYPE_M, slot)
7226 || match (templ, IA64_TYPE_I, slot))
7227 /* Favor M- and I-unit NOPs; we definitely want to avoid
7228 F-unit and B-unit NOPs, which may cause split-issue or
7229 less-than-optimal branch prediction. */
7230 return 2;
7231 else
7232 return 0;
7233 break;
7234 default:
7235 abort ();
7236 return 0;
7237 }
7238 }
7239
7240 /* This function is called once, at assembler startup time. It sets
7241 up all the tables, etc. that the MD part of the assembler will need
7242 that can be determined before arguments are parsed. */
7243 void
7244 md_begin (void)
7245 {
7246 int i, j, k, t, goodness, best, ok;
7247 const char *err;
7248 char name[8];
7249
7250 md.auto_align = 1;
7251 md.explicit_mode = md.default_explicit_mode;
7252
7253 bfd_set_section_alignment (stdoutput, text_section, 4);
7254
7255 /* Make sure function pointers get initialized. */
7256 target_big_endian = -1;
7257 dot_byteorder (default_big_endian);
7258
7259 alias_hash = hash_new ();
7260 alias_name_hash = hash_new ();
7261 secalias_hash = hash_new ();
7262 secalias_name_hash = hash_new ();
7263
7264 pseudo_func[FUNC_DTP_MODULE].u.sym =
7265 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7266 &zero_address_frag);
7267
7268 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7269 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7270 &zero_address_frag);
7271
7272 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7273 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7274 &zero_address_frag);
7275
7276 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7277 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7278 &zero_address_frag);
7279
7280 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7281 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7282 &zero_address_frag);
7283
7284 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7285 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7286 &zero_address_frag);
7287
7288 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7289 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7290 &zero_address_frag);
7291
7292 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7293 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7294 &zero_address_frag);
7295
7296 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7297 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7298 &zero_address_frag);
7299
7300 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7301 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7302 &zero_address_frag);
7303
7304 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7305 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7306 &zero_address_frag);
7307
7308 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7309 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7310 &zero_address_frag);
7311
7312 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7313 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7314 &zero_address_frag);
7315
7316 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7317 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7318 &zero_address_frag);
7319
7320 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7321 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7322 &zero_address_frag);
7323
7324 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7325 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7326 &zero_address_frag);
7327
7328 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7329 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7330 &zero_address_frag);
7331
7332 #ifdef TE_VMS
7333 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7334 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7335 &zero_address_frag);
7336 #endif
7337
7338 if (md.tune != itanium1)
7339 {
7340 /* Convert MFI NOP bundles into MMI NOP bundles. */
7341 le_nop[0] = 0x8;
7342 le_nop_stop[0] = 0x9;
7343 }
7344
7345 /* Compute the table of best templates. We compute goodness as a
7346 base 4 value, in which each match counts for 3. Match-failures
7347 result in NOPs and we use extra_goodness() to pick the execution
7348 units that are best suited for issuing the NOP. */
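/* For example, a template whose slots 0 and 1 match but whose slot 2 must
   be filled with a NOP scores 3 + 3 + extra_goodness (t, 2), so ties
   between otherwise comparable templates are broken by which unit ends up
   issuing the NOP.  */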
7349 for (i = 0; i < IA64_NUM_TYPES; ++i)
7350 for (j = 0; j < IA64_NUM_TYPES; ++j)
7351 for (k = 0; k < IA64_NUM_TYPES; ++k)
7352 {
7353 best = 0;
7354 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7355 {
7356 goodness = 0;
7357 if (match (t, i, 0))
7358 {
7359 if (match (t, j, 1))
7360 {
7361 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7362 goodness = 3 + 3 + 3;
7363 else
7364 goodness = 3 + 3 + extra_goodness (t, 2);
7365 }
7366 else if (match (t, j, 2))
7367 goodness = 3 + 3 + extra_goodness (t, 1);
7368 else
7369 {
7370 goodness = 3;
7371 goodness += extra_goodness (t, 1);
7372 goodness += extra_goodness (t, 2);
7373 }
7374 }
7375 else if (match (t, i, 1))
7376 {
7377 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7378 goodness = 3 + 3;
7379 else
7380 goodness = 3 + extra_goodness (t, 2);
7381 }
7382 else if (match (t, i, 2))
7383 goodness = 3 + extra_goodness (t, 1);
7384
7385 if (goodness > best)
7386 {
7387 best = goodness;
7388 best_template[i][j][k] = t;
7389 }
7390 }
7391 }
7392
7393 #ifdef DEBUG_TEMPLATES
7394 /* For debugging changes to the best_template calculations. We don't care
7395 about combinations with invalid instructions, so start the loops at 1. */
7396 for (i = 0; i < IA64_NUM_TYPES; ++i)
7397 for (j = 0; j < IA64_NUM_TYPES; ++j)
7398 for (k = 0; k < IA64_NUM_TYPES; ++k)
7399 {
7400 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7401 'x', 'd' };
7402 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7403 type_letter[k],
7404 ia64_templ_desc[best_template[i][j][k]].name);
7405 }
7406 #endif
7407
7408 for (i = 0; i < NUM_SLOTS; ++i)
7409 md.slot[i].user_template = -1;
7410
7411 md.pseudo_hash = hash_new ();
7412 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7413 {
7414 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7415 (void *) (pseudo_opcode + i));
7416 if (err)
7417 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7418 pseudo_opcode[i].name, err);
7419 }
7420
7421 md.reg_hash = hash_new ();
7422 md.dynreg_hash = hash_new ();
7423 md.const_hash = hash_new ();
7424 md.entry_hash = hash_new ();
7425
7426 /* general registers: */
7427 declare_register_set ("r", 128, REG_GR);
7428 declare_register ("gp", REG_GR + 1);
7429 declare_register ("sp", REG_GR + 12);
7430 declare_register ("tp", REG_GR + 13);
7431 declare_register_set ("ret", 4, REG_GR + 8);
7432
7433 /* floating point registers: */
7434 declare_register_set ("f", 128, REG_FR);
7435 declare_register_set ("farg", 8, REG_FR + 8);
7436 declare_register_set ("fret", 8, REG_FR + 8);
7437
7438 /* branch registers: */
7439 declare_register_set ("b", 8, REG_BR);
7440 declare_register ("rp", REG_BR + 0);
7441
7442 /* predicate registers: */
7443 declare_register_set ("p", 64, REG_P);
7444 declare_register ("pr", REG_PR);
7445 declare_register ("pr.rot", REG_PR_ROT);
7446
7447 /* application registers: */
7448 declare_register_set ("ar", 128, REG_AR);
7449 for (i = 0; i < NELEMS (ar); ++i)
7450 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7451
7452 /* control registers: */
7453 declare_register_set ("cr", 128, REG_CR);
7454 for (i = 0; i < NELEMS (cr); ++i)
7455 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7456
7457 /* dahr registers: */
7458 declare_register_set ("dahr", 8, REG_DAHR);
7459
7460 declare_register ("ip", REG_IP);
7461 declare_register ("cfm", REG_CFM);
7462 declare_register ("psr", REG_PSR);
7463 declare_register ("psr.l", REG_PSR_L);
7464 declare_register ("psr.um", REG_PSR_UM);
7465
7466 for (i = 0; i < NELEMS (indirect_reg); ++i)
7467 {
7468 unsigned int regnum = indirect_reg[i].regnum;
7469
7470 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7471 }
7472
7473 /* pseudo-registers used to specify unwind info: */
7474 declare_register ("psp", REG_PSP);
7475
7476 for (i = 0; i < NELEMS (const_bits); ++i)
7477 {
7478 err = hash_insert (md.const_hash, const_bits[i].name,
7479 (void *) (const_bits + i));
7480 if (err)
7481 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7482 const_bits[i].name, err);
7483 }
7484
7485 /* Set the architecture and machine depending on defaults and command line
7486 options. */
7487 if (md.flags & EF_IA_64_ABI64)
7488 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7489 else
7490 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7491
7492 if (! ok)
7493 as_warn (_("Could not set architecture and machine"));
7494
7495 /* Set the pointer size and pointer shift size depending on md.flags */
7496
7497 if (md.flags & EF_IA_64_ABI64)
7498 {
7499 md.pointer_size = 8; /* pointers are 8 bytes */
7500 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7501 }
7502 else
7503 {
7504 md.pointer_size = 4; /* pointers are 4 bytes */
7505 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7506 }
7507
7508 md.mem_offset.hint = 0;
7509 md.path = 0;
7510 md.maxpaths = 0;
7511 md.entry_labels = NULL;
7512 }
7513
7514 /* Set the default options in md. Cannot do this in md_begin because
7515 that is called after md_parse_option which is where we set the
7516 options in md based on command line options. */
7517
7518 void
7519 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7520 {
7521 md.flags = MD_FLAGS_DEFAULT;
7522 #ifndef TE_VMS
7523 /* Don't turn on dependency checking for VMS; it doesn't work there. */
7524 md.detect_dv = 1;
7525 #endif
7526 /* FIXME: We should change it to unwind_check_error someday. */
7527 md.unwind_check = unwind_check_warning;
7528 md.hint_b = hint_b_error;
7529 md.tune = itanium2;
7530 }
7531
7532 /* Return a string for the target object file format. */
7533
7534 const char *
7535 ia64_target_format (void)
7536 {
7537 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7538 {
7539 if (md.flags & EF_IA_64_BE)
7540 {
7541 if (md.flags & EF_IA_64_ABI64)
7542 #if defined(TE_AIX50)
7543 return "elf64-ia64-aix-big";
7544 #elif defined(TE_HPUX)
7545 return "elf64-ia64-hpux-big";
7546 #else
7547 return "elf64-ia64-big";
7548 #endif
7549 else
7550 #if defined(TE_AIX50)
7551 return "elf32-ia64-aix-big";
7552 #elif defined(TE_HPUX)
7553 return "elf32-ia64-hpux-big";
7554 #else
7555 return "elf32-ia64-big";
7556 #endif
7557 }
7558 else
7559 {
7560 if (md.flags & EF_IA_64_ABI64)
7561 #if defined (TE_AIX50)
7562 return "elf64-ia64-aix-little";
7563 #elif defined (TE_VMS)
7564 {
7565 md.flags |= EF_IA_64_ARCHVER_1;
7566 return "elf64-ia64-vms";
7567 }
7568 #else
7569 return "elf64-ia64-little";
7570 #endif
7571 else
7572 #ifdef TE_AIX50
7573 return "elf32-ia64-aix-little";
7574 #else
7575 return "elf32-ia64-little";
7576 #endif
7577 }
7578 }
7579 else
7580 return "unknown-format";
7581 }
7582
7583 void
7584 ia64_end_of_source (void)
7585 {
7586 /* terminate insn group upon reaching end of file: */
7587 insn_group_break (1, 0, 0);
7588
7589 /* emits slots we haven't written yet: */
7590 ia64_flush_insns ();
7591
7592 bfd_set_private_flags (stdoutput, md.flags);
7593
7594 md.mem_offset.hint = 0;
7595 }
7596
7597 void
7598 ia64_start_line (void)
7599 {
7600 static int first;
7601
7602 if (!first) {
7603 /* Make sure we don't reference input_line_pointer[-1] when that's
7604 not valid. */
7605 first = 1;
7606 return;
7607 }
7608
7609 if (md.qp.X_op == O_register)
7610 as_bad (_("qualifying predicate not followed by instruction"));
7611 md.qp.X_op = O_absent;
7612
7613 if (ignore_input ())
7614 return;
7615
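/* A ";;" in the source marks an explicit stop (instruction group break),
   while "{" and "}" bracket a manually bundled group, e.g. a sequence such
   as "{ .mii ... }".  */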
7616 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7617 {
7618 if (md.detect_dv && !md.explicit_mode)
7619 {
7620 static int warned;
7621
7622 if (!warned)
7623 {
7624 warned = 1;
7625 as_warn (_("Explicit stops are ignored in auto mode"));
7626 }
7627 }
7628 else
7629 insn_group_break (1, 0, 0);
7630 }
7631 else if (input_line_pointer[-1] == '{')
7632 {
7633 if (md.manual_bundling)
7634 as_warn (_("Found '{' when manual bundling is already turned on"));
7635 else
7636 CURR_SLOT.manual_bundling_on = 1;
7637 md.manual_bundling = 1;
7638
7639 /* Bundling is only acceptable in explicit mode
7640 or when in default automatic mode. */
7641 if (md.detect_dv && !md.explicit_mode)
7642 {
7643 if (!md.mode_explicitly_set
7644 && !md.default_explicit_mode)
7645 dot_dv_mode ('E');
7646 else
7647 as_warn (_("Found '{' after explicit switch to automatic mode"));
7648 }
7649 }
7650 else if (input_line_pointer[-1] == '}')
7651 {
7652 if (!md.manual_bundling)
7653 as_warn (_("Found '}' when manual bundling is off"));
7654 else
7655 PREV_SLOT.manual_bundling_off = 1;
7656 md.manual_bundling = 0;
7657
7658 /* switch back to automatic mode, if applicable */
7659 if (md.detect_dv
7660 && md.explicit_mode
7661 && !md.mode_explicitly_set
7662 && !md.default_explicit_mode)
7663 dot_dv_mode ('A');
7664 }
7665 }
7666
7667 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7668 labels. */
7669 static int defining_tag = 0;
7670
7671 int
7672 ia64_unrecognized_line (int ch)
7673 {
7674 switch (ch)
7675 {
7676 case '(':
7677 expression_and_evaluate (&md.qp);
7678 if (*input_line_pointer++ != ')')
7679 {
7680 as_bad (_("Expected ')'"));
7681 return 0;
7682 }
7683 if (md.qp.X_op != O_register)
7684 {
7685 as_bad (_("Qualifying predicate expected"));
7686 return 0;
7687 }
7688 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7689 {
7690 as_bad (_("Predicate register expected"));
7691 return 0;
7692 }
7693 return 1;
7694
7695 case '[':
7696 {
7697 char *s;
7698 char c;
7699 symbolS *tag;
7700 int temp;
7701
7702 if (md.qp.X_op == O_register)
7703 {
7704 as_bad (_("Tag must come before qualifying predicate."));
7705 return 0;
7706 }
7707
7708 /* This implements just enough of read_a_source_file in read.c to
7709 recognize labels. */
7710 if (is_name_beginner (*input_line_pointer))
7711 {
7712 c = get_symbol_name (&s);
7713 }
7714 else if (LOCAL_LABELS_FB
7715 && ISDIGIT (*input_line_pointer))
7716 {
7717 temp = 0;
7718 while (ISDIGIT (*input_line_pointer))
7719 temp = (temp * 10) + *input_line_pointer++ - '0';
7720 fb_label_instance_inc (temp);
7721 s = fb_label_name (temp, 0);
7722 c = *input_line_pointer;
7723 }
7724 else
7725 {
7726 s = NULL;
7727 c = '\0';
7728 }
7729 if (c != ':')
7730 {
7731 /* Put ':' back for error messages' sake. */
7732 *input_line_pointer++ = ':';
7733 as_bad (_("Expected ':'"));
7734 return 0;
7735 }
7736
7737 defining_tag = 1;
7738 tag = colon (s);
7739 defining_tag = 0;
7740 /* Put ':' back for error messages' sake. */
7741 *input_line_pointer++ = ':';
7742 if (*input_line_pointer++ != ']')
7743 {
7744 as_bad (_("Expected ']'"));
7745 return 0;
7746 }
7747 if (! tag)
7748 {
7749 as_bad (_("Tag name expected"));
7750 return 0;
7751 }
7752 return 1;
7753 }
7754
7755 default:
7756 break;
7757 }
7758
7759 /* Not a valid line. */
7760 return 0;
7761 }
7762
7763 void
7764 ia64_frob_label (struct symbol *sym)
7765 {
7766 struct label_fix *fix;
7767
7768 /* Tags need special handling since they are not bundle breaks like
7769 labels. */
7770 if (defining_tag)
7771 {
7772 fix = XOBNEW (&notes, struct label_fix);
7773 fix->sym = sym;
7774 fix->next = CURR_SLOT.tag_fixups;
7775 fix->dw2_mark_labels = FALSE;
7776 CURR_SLOT.tag_fixups = fix;
7777
7778 return;
7779 }
7780
7781 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7782 {
7783 md.last_text_seg = now_seg;
7784 fix = XOBNEW (&notes, struct label_fix);
7785 fix->sym = sym;
7786 fix->next = CURR_SLOT.label_fixups;
7787 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7788 CURR_SLOT.label_fixups = fix;
7789
7790 /* Keep track of how many code entry points we've seen. */
7791 if (md.path == md.maxpaths)
7792 {
7793 md.maxpaths += 20;
7794 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7795 md.maxpaths);
7796 }
7797 md.entry_labels[md.path++] = S_GET_NAME (sym);
7798 }
7799 }
7800
7801 #ifdef TE_HPUX
7802 /* The HP-UX linker will give unresolved symbol errors for symbols
7803 that are declared but unused. This routine removes declared,
7804 unused symbols from an object. */
7805 int
7806 ia64_frob_symbol (struct symbol *sym)
7807 {
7808 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7809 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7810 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7811 && ! S_IS_EXTERNAL (sym)))
7812 return 1;
7813 return 0;
7814 }
7815 #endif
7816
7817 void
7818 ia64_flush_pending_output (void)
7819 {
7820 if (!md.keep_pending_output
7821 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7822 {
7823 /* ??? This causes many unnecessary stop bits to be emitted.
7824 Unfortunately, it isn't clear if it is safe to remove this. */
7825 insn_group_break (1, 0, 0);
7826 ia64_flush_insns ();
7827 }
7828 }
7829
7830 /* Do ia64-specific expression optimization. All that's done here is
7831 to transform index expressions that are either due to the indexing
7832 of rotating registers or due to the indexing of indirect register
7833 sets. */
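/* For example, "foo[2]" for a set declared with ".rotr foo[4]" folds to
   the base register plus 2, while "dbr[r3]" becomes an O_index expression
   over the dbr indirect register file.  */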
7834 int
7835 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7836 {
7837 if (op != O_index)
7838 return 0;
7839 resolve_expression (l);
7840 if (l->X_op == O_register)
7841 {
7842 unsigned num_regs = l->X_add_number >> 16;
7843
7844 resolve_expression (r);
7845 if (num_regs)
7846 {
7847 /* Left side is a .rotX-allocated register. */
7848 if (r->X_op != O_constant)
7849 {
7850 as_bad (_("Rotating register index must be a non-negative constant"));
7851 r->X_add_number = 0;
7852 }
7853 else if ((valueT) r->X_add_number >= num_regs)
7854 {
7855 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7856 r->X_add_number = 0;
7857 }
7858 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7859 return 1;
7860 }
7861 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7862 {
7863 if (r->X_op != O_register
7864 || r->X_add_number < REG_GR
7865 || r->X_add_number > REG_GR + 127)
7866 {
7867 as_bad (_("Indirect register index must be a general register"));
7868 r->X_add_number = REG_GR;
7869 }
7870 l->X_op = O_index;
7871 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7872 l->X_add_number = r->X_add_number;
7873 return 1;
7874 }
7875 }
7876 as_bad (_("Index can only be applied to rotating or indirect registers"));
7877 /* Fall back to a register whose use has as few side effects as
7878 possible, to minimize subsequent error messages. */
7879 l->X_op = O_register;
7880 l->X_add_number = REG_GR + 3;
7881 return 1;
7882 }
7883
7884 int
7885 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7886 {
7887 struct const_desc *cdesc;
7888 struct dynreg *dr = 0;
7889 unsigned int idx;
7890 struct symbol *sym;
7891 char *end;
7892
7893 if (*name == '@')
7894 {
7895 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7896
7897 /* Find what relocation pseudo-function we're dealing with. */
7898 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7899 if (pseudo_func[idx].name
7900 && pseudo_func[idx].name[0] == name[1]
7901 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7902 {
7903 pseudo_type = pseudo_func[idx].type;
7904 break;
7905 }
7906 switch (pseudo_type)
7907 {
7908 case PSEUDO_FUNC_RELOC:
7909 end = input_line_pointer;
7910 if (*nextcharP != '(')
7911 {
7912 as_bad (_("Expected '('"));
7913 break;
7914 }
7915 /* Skip '('. */
7916 ++input_line_pointer;
7917 expression (e);
7918 if (*input_line_pointer != ')')
7919 {
7920 as_bad (_("Missing ')'"));
7921 goto done;
7922 }
7923 /* Skip ')'. */
7924 ++input_line_pointer;
7925 #ifdef TE_VMS
7926 if (idx == FUNC_SLOTCOUNT_RELOC)
7927 {
7928 /* @slotcount can accept any expression. Canonicalize. */
7929 e->X_add_symbol = make_expr_symbol (e);
7930 e->X_op = O_symbol;
7931 e->X_add_number = 0;
7932 }
7933 #endif
7934 if (e->X_op != O_symbol)
7935 {
7936 if (e->X_op != O_pseudo_fixup)
7937 {
7938 as_bad (_("Not a symbolic expression"));
7939 goto done;
7940 }
7941 if (idx != FUNC_LT_RELATIVE)
7942 {
7943 as_bad (_("Illegal combination of relocation functions"));
7944 goto done;
7945 }
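/* A nested pseudo-function such as "@ltoff(@fptr(sym))" reaches this
   point; fold the pair into the single combined relocation function.  */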
7946 switch (S_GET_VALUE (e->X_op_symbol))
7947 {
7948 case FUNC_FPTR_RELATIVE:
7949 idx = FUNC_LT_FPTR_RELATIVE; break;
7950 case FUNC_DTP_MODULE:
7951 idx = FUNC_LT_DTP_MODULE; break;
7952 case FUNC_DTP_RELATIVE:
7953 idx = FUNC_LT_DTP_RELATIVE; break;
7954 case FUNC_TP_RELATIVE:
7955 idx = FUNC_LT_TP_RELATIVE; break;
7956 default:
7957 as_bad (_("Illegal combination of relocation functions"));
7958 goto done;
7959 }
7960 }
7961 /* Make sure gas doesn't get rid of local symbols that are used
7962 in relocs. */
7963 e->X_op = O_pseudo_fixup;
7964 e->X_op_symbol = pseudo_func[idx].u.sym;
7965 done:
7966 *nextcharP = *input_line_pointer;
7967 break;
7968
7969 case PSEUDO_FUNC_CONST:
7970 e->X_op = O_constant;
7971 e->X_add_number = pseudo_func[idx].u.ival;
7972 break;
7973
7974 case PSEUDO_FUNC_REG:
7975 e->X_op = O_register;
7976 e->X_add_number = pseudo_func[idx].u.ival;
7977 break;
7978
7979 default:
7980 return 0;
7981 }
7982 return 1;
7983 }
7984
7985 /* first see if NAME is a known register name: */
7986 sym = hash_find (md.reg_hash, name);
7987 if (sym)
7988 {
7989 e->X_op = O_register;
7990 e->X_add_number = S_GET_VALUE (sym);
7991 return 1;
7992 }
7993
7994 cdesc = hash_find (md.const_hash, name);
7995 if (cdesc)
7996 {
7997 e->X_op = O_constant;
7998 e->X_add_number = cdesc->value;
7999 return 1;
8000 }
8001
8002 /* check for inN, locN, or outN: */
8003 idx = 0;
8004 switch (name[0])
8005 {
8006 case 'i':
8007 if (name[1] == 'n' && ISDIGIT (name[2]))
8008 {
8009 dr = &md.in;
8010 idx = 2;
8011 }
8012 break;
8013
8014 case 'l':
8015 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8016 {
8017 dr = &md.loc;
8018 idx = 3;
8019 }
8020 break;
8021
8022 case 'o':
8023 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8024 {
8025 dr = &md.out;
8026 idx = 3;
8027 }
8028 break;
8029
8030 default:
8031 break;
8032 }
8033
8034 /* Ignore register numbers with leading zeroes, except zero itself. */
8035 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8036 {
8037 unsigned long regnum;
8038
8039 /* The name is inN, locN, or outN; parse the register number. */
8040 regnum = strtoul (name + idx, &end, 10);
8041 if (end > name + idx && *end == '\0' && regnum < 96)
8042 {
8043 if (regnum >= dr->num_regs)
8044 {
8045 if (!dr->num_regs)
8046 as_bad (_("No current frame"));
8047 else
8048 as_bad (_("Register number out of range 0..%u"),
8049 dr->num_regs - 1);
8050 regnum = 0;
8051 }
8052 e->X_op = O_register;
8053 e->X_add_number = dr->base + regnum;
8054 return 1;
8055 }
8056 }
8057
8058 end = xstrdup (name);
8059 name = ia64_canonicalize_symbol_name (end);
8060 if ((dr = hash_find (md.dynreg_hash, name)))
8061 {
8062 /* We've got ourselves the name of a rotating register set.
8063 Store the base register number in the low 16 bits of
8064 X_add_number and the size of the register set in the top 16
8065 bits. */
8066 e->X_op = O_register;
8067 e->X_add_number = dr->base | (dr->num_regs << 16);
8068 free (end);
8069 return 1;
8070 }
8071 free (end);
8072 return 0;
8073 }
8074
8075 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
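/* For example, "foo#" becomes "foo"; more than one trailing '#' also
   triggers the "Redundant `#' suffix operators" warning below.  */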
8076
8077 char *
8078 ia64_canonicalize_symbol_name (char *name)
8079 {
8080 size_t len = strlen (name), full = len;
8081
8082 while (len > 0 && name[len - 1] == '#')
8083 --len;
8084 if (len <= 0)
8085 {
8086 if (full > 0)
8087 as_bad (_("Standalone `#' is illegal"));
8088 }
8089 else if (len < full - 1)
8090 as_warn (_("Redundant `#' suffix operators"));
8091 name[len] = '\0';
8092 return name;
8093 }
8094
8095 /* Return true if idesc is a conditional branch instruction. This excludes
8096 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8097 because they always read/write resources regardless of the value of the
8098 qualifying predicate. br.ia must always use p0, and hence is always
8099 taken. Thus this function returns true for branches which can fall
8100 through, and which use no resources if they do fall through. */
8101
8102 static int
8103 is_conditional_branch (struct ia64_opcode *idesc)
8104 {
8105 /* br is a conditional branch. Everything that starts with br. except
8106 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8107 Everything that starts with brl is a conditional branch. */
8108 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8109 && (idesc->name[2] == '\0'
8110 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8111 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8112 || idesc->name[2] == 'l'
8113 /* br.cond, br.call, br.clr */
8114 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8115 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8116 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8117 }
8118
8119 /* Return whether the given opcode is a taken branch. If there's any doubt,
8120 returns zero. */
8121
8122 static int
8123 is_taken_branch (struct ia64_opcode *idesc)
8124 {
8125 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8126 || strncmp (idesc->name, "br.ia", 5) == 0);
8127 }
8128
8129 /* Return whether the given opcode is an interruption or rfi. If there's any
8130 doubt, returns zero. */
8131
8132 static int
8133 is_interruption_or_rfi (struct ia64_opcode *idesc)
8134 {
8135 if (strcmp (idesc->name, "rfi") == 0)
8136 return 1;
8137 return 0;
8138 }
8139
8140 /* Returns the index of the given dependency in the opcode's list of chks, or
8141 -1 if there is no dependency. */
8142
8143 static int
8144 depends_on (int depind, struct ia64_opcode *idesc)
8145 {
8146 int i;
8147 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8148 for (i = 0; i < dep->nchks; i++)
8149 {
8150 if (depind == DEP (dep->chks[i]))
8151 return i;
8152 }
8153 return -1;
8154 }
8155
8156 /* Determine a set of specific resources used for a particular resource
8157 class. Returns the number of specific resources identified.  For those
8158 cases which are not determinable statically, the resource returned is
8159 marked nonspecific.
8160
8161 Meanings of value in 'NOTE':
8162 1) only read/write when the register number is explicitly encoded in the
8163 insn.
8164 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8165 accesses CFM when qualifying predicate is in the rotating region.
8166 3) general register value is used to specify an indirect register; not
8167 determinable statically.
8168 4) only read the given resource when bits 7:0 of the indirect index
8169 register value do not match the register number of the resource; not
8170 determinable statically.
8171 5) all rules are implementation specific.
8172 6) only when both the index specified by the reader and the index specified
8173 by the writer have the same value in bits 63:61; not determinable
8174 statically.
8175 7) only access the specified resource when the corresponding mask bit is
8176 set
8177 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8178 only read when these insns reference FR2-31
8179 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8180 written when these insns write FR32-127
8181 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8182 instruction
8183 11) The target predicates are written independently of PR[qp], but source
8184 registers are only read if PR[qp] is true. Since the state of PR[qp]
8185 cannot statically be determined, all source registers are marked used.
8186 12) This insn only reads the specified predicate register when that
8187 register is the PR[qp].
8188 13) This reference to ld-c only applies to the GR whose value is loaded
8189 with data returned from memory, not the post-incremented address register.
8190 14) The RSE resource includes the implementation-specific RSE internal
8191 state resources. At least one (and possibly more) of these resources are
8192 read by each instruction listed in IC:rse-readers. At least one (and
8193 possibly more) of these resources are written by each insn listed in
8194 IC:rse-writers.
8195 15+16) Represents reserved instructions, which the assembler does not
8196 generate.
8197 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8198 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8199
8200 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8201 this code; there are no dependency violations based on memory access.
8202 */
8203
8204 #define MAX_SPECS 256
8205 #define DV_CHK 1
8206 #define DV_REG 0
8207
8208 static int
8209 specify_resource (const struct ia64_dependency *dep,
8210 struct ia64_opcode *idesc,
8211 /* is this a DV chk or a DV reg? */
8212 int type,
8213 /* returned specific resources */
8214 struct rsrc specs[MAX_SPECS],
8215 /* resource note for this insn's usage */
8216 int note,
8217 /* which execution path to examine */
8218 int path)
8219 {
8220 int count = 0;
8221 int i;
8222 int rsrc_write = 0;
8223 struct rsrc tmpl;
8224
8225 if (dep->mode == IA64_DV_WAW
8226 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8227 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8228 rsrc_write = 1;
8229
8230 /* template for any resources we identify */
8231 tmpl.dependency = dep;
8232 tmpl.note = note;
8233 tmpl.insn_srlz = tmpl.data_srlz = 0;
8234 tmpl.qp_regno = CURR_SLOT.qp_regno;
8235 tmpl.link_to_qp_branch = 1;
8236 tmpl.mem_offset.hint = 0;
8237 tmpl.mem_offset.offset = 0;
8238 tmpl.mem_offset.base = 0;
8239 tmpl.specific = 1;
8240 tmpl.index = -1;
8241 tmpl.cmp_type = CMP_NONE;
8242 tmpl.depind = 0;
8243 tmpl.file = NULL;
8244 tmpl.line = 0;
8245 tmpl.path = 0;
8246
8247 #define UNHANDLED \
8248 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8249 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8250 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8251
8252 /* we don't need to track these */
8253 if (dep->semantics == IA64_DVS_NONE)
8254 return 0;
8255
8256 switch (dep->specifier)
8257 {
8258 case IA64_RS_AR_K:
8259 if (note == 1)
8260 {
8261 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8262 {
8263 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8264 if (regno >= 0 && regno <= 7)
8265 {
8266 specs[count] = tmpl;
8267 specs[count++].index = regno;
8268 }
8269 }
8270 }
8271 else if (note == 0)
8272 {
8273 for (i = 0; i < 8; i++)
8274 {
8275 specs[count] = tmpl;
8276 specs[count++].index = i;
8277 }
8278 }
8279 else
8280 {
8281 UNHANDLED;
8282 }
8283 break;
8284
8285 case IA64_RS_AR_UNAT:
8286 /* This is a mov-from-AR or mov-to-AR instruction. */
8287 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8288 {
8289 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8290 if (regno == AR_UNAT)
8291 {
8292 specs[count++] = tmpl;
8293 }
8294 }
8295 else
8296 {
8297 /* This is a spill/fill, or other instruction that modifies the
8298 unat register. */
8299
8300 /* Unless we can determine the specific bits used, mark the whole
8301 thing; bits 8:3 of the memory address indicate the bit used in
8302 UNAT. The .mem.offset hint may be used to eliminate a small
8303 subset of conflicts. */
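/* For example, a spill or fill preceded by ".mem.offset 24, 0" supplies
   hint offset 24 and base 0, so the index set below is
   (24 >> 3) & 0x3f == 3, approximating UNAT bit 3.  */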
8304 specs[count] = tmpl;
8305 if (md.mem_offset.hint)
8306 {
8307 if (md.debug_dv)
8308 fprintf (stderr, " Using hint for spill/fill\n");
8309 /* The index isn't actually used, just set it to something
8310 approximating the bit index. */
8311 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8312 specs[count].mem_offset.hint = 1;
8313 specs[count].mem_offset.offset = md.mem_offset.offset;
8314 specs[count++].mem_offset.base = md.mem_offset.base;
8315 }
8316 else
8317 {
8318 specs[count++].specific = 0;
8319 }
8320 }
8321 break;
8322
8323 case IA64_RS_AR:
8324 if (note == 1)
8325 {
8326 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8327 {
8328 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8329 if ((regno >= 8 && regno <= 15)
8330 || (regno >= 20 && regno <= 23)
8331 || (regno >= 31 && regno <= 39)
8332 || (regno >= 41 && regno <= 47)
8333 || (regno >= 67 && regno <= 111))
8334 {
8335 specs[count] = tmpl;
8336 specs[count++].index = regno;
8337 }
8338 }
8339 }
8340 else
8341 {
8342 UNHANDLED;
8343 }
8344 break;
8345
8346 case IA64_RS_ARb:
8347 if (note == 1)
8348 {
8349 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8350 {
8351 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8352 if ((regno >= 48 && regno <= 63)
8353 || (regno >= 112 && regno <= 127))
8354 {
8355 specs[count] = tmpl;
8356 specs[count++].index = regno;
8357 }
8358 }
8359 }
8360 else if (note == 0)
8361 {
8362 for (i = 48; i < 64; i++)
8363 {
8364 specs[count] = tmpl;
8365 specs[count++].index = i;
8366 }
8367 for (i = 112; i < 128; i++)
8368 {
8369 specs[count] = tmpl;
8370 specs[count++].index = i;
8371 }
8372 }
8373 else
8374 {
8375 UNHANDLED;
8376 }
8377 break;
8378
8379 case IA64_RS_BR:
8380 if (note != 1)
8381 {
8382 UNHANDLED;
8383 }
8384 else
8385 {
8386 if (rsrc_write)
8387 {
8388 for (i = 0; i < idesc->num_outputs; i++)
8389 if (idesc->operands[i] == IA64_OPND_B1
8390 || idesc->operands[i] == IA64_OPND_B2)
8391 {
8392 specs[count] = tmpl;
8393 specs[count++].index =
8394 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8395 }
8396 }
8397 else
8398 {
8399 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8400 if (idesc->operands[i] == IA64_OPND_B1
8401 || idesc->operands[i] == IA64_OPND_B2)
8402 {
8403 specs[count] = tmpl;
8404 specs[count++].index =
8405 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8406 }
8407 }
8408 }
8409 break;
8410
8411 case IA64_RS_CPUID: /* four or more registers */
8412 if (note == 3)
8413 {
8414 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8415 {
8416 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8417 if (regno >= 0 && regno < NELEMS (gr_values)
8418 && KNOWN (regno))
8419 {
8420 specs[count] = tmpl;
8421 specs[count++].index = gr_values[regno].value & 0xFF;
8422 }
8423 else
8424 {
8425 specs[count] = tmpl;
8426 specs[count++].specific = 0;
8427 }
8428 }
8429 }
8430 else
8431 {
8432 UNHANDLED;
8433 }
8434 break;
8435
8436 case IA64_RS_DBR: /* four or more registers */
8437 if (note == 3)
8438 {
8439 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8440 {
8441 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8442 if (regno >= 0 && regno < NELEMS (gr_values)
8443 && KNOWN (regno))
8444 {
8445 specs[count] = tmpl;
8446 specs[count++].index = gr_values[regno].value & 0xFF;
8447 }
8448 else
8449 {
8450 specs[count] = tmpl;
8451 specs[count++].specific = 0;
8452 }
8453 }
8454 }
8455 else if (note == 0 && !rsrc_write)
8456 {
8457 specs[count] = tmpl;
8458 specs[count++].specific = 0;
8459 }
8460 else
8461 {
8462 UNHANDLED;
8463 }
8464 break;
8465
8466 case IA64_RS_IBR: /* four or more registers */
8467 if (note == 3)
8468 {
8469 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8470 {
8471 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8472 if (regno >= 0 && regno < NELEMS (gr_values)
8473 && KNOWN (regno))
8474 {
8475 specs[count] = tmpl;
8476 specs[count++].index = gr_values[regno].value & 0xFF;
8477 }
8478 else
8479 {
8480 specs[count] = tmpl;
8481 specs[count++].specific = 0;
8482 }
8483 }
8484 }
8485 else
8486 {
8487 UNHANDLED;
8488 }
8489 break;
8490
8491 case IA64_RS_MSR:
8492 if (note == 5)
8493 {
8494 /* These are implementation specific. Force all references to
8495 conflict with all other references. */
8496 specs[count] = tmpl;
8497 specs[count++].specific = 0;
8498 }
8499 else
8500 {
8501 UNHANDLED;
8502 }
8503 break;
8504
8505 case IA64_RS_PKR: /* 16 or more registers */
8506 if (note == 3 || note == 4)
8507 {
8508 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8509 {
8510 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8511 if (regno >= 0 && regno < NELEMS (gr_values)
8512 && KNOWN (regno))
8513 {
8514 if (note == 3)
8515 {
8516 specs[count] = tmpl;
8517 specs[count++].index = gr_values[regno].value & 0xFF;
8518 }
8519 else
8520 for (i = 0; i < NELEMS (gr_values); i++)
8521 {
8522 /* Uses all registers *except* the one in R3. */
8523 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8524 {
8525 specs[count] = tmpl;
8526 specs[count++].index = i;
8527 }
8528 }
8529 }
8530 else
8531 {
8532 specs[count] = tmpl;
8533 specs[count++].specific = 0;
8534 }
8535 }
8536 }
8537 else if (note == 0)
8538 {
8539 /* probe et al. */
8540 specs[count] = tmpl;
8541 specs[count++].specific = 0;
8542 }
8543 break;
8544
8545 case IA64_RS_PMC: /* four or more registers */
8546 if (note == 3)
8547 {
8548 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8549 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8550
8551 {
8552 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8553 ? 1 : !rsrc_write);
8554 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8555 if (regno >= 0 && regno < NELEMS (gr_values)
8556 && KNOWN (regno))
8557 {
8558 specs[count] = tmpl;
8559 specs[count++].index = gr_values[regno].value & 0xFF;
8560 }
8561 else
8562 {
8563 specs[count] = tmpl;
8564 specs[count++].specific = 0;
8565 }
8566 }
8567 }
8568 else
8569 {
8570 UNHANDLED;
8571 }
8572 break;
8573
8574 case IA64_RS_PMD: /* four or more registers */
8575 if (note == 3)
8576 {
8577 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8578 {
8579 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8580 if (regno >= 0 && regno < NELEMS (gr_values)
8581 && KNOWN (regno))
8582 {
8583 specs[count] = tmpl;
8584 specs[count++].index = gr_values[regno].value & 0xFF;
8585 }
8586 else
8587 {
8588 specs[count] = tmpl;
8589 specs[count++].specific = 0;
8590 }
8591 }
8592 }
8593 else
8594 {
8595 UNHANDLED;
8596 }
8597 break;
8598
8599 case IA64_RS_RR: /* eight registers */
8600 if (note == 6)
8601 {
8602 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8603 {
8604 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8605 if (regno >= 0 && regno < NELEMS (gr_values)
8606 && KNOWN (regno))
8607 {
8608 specs[count] = tmpl;
8609 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8610 }
8611 else
8612 {
8613 specs[count] = tmpl;
8614 specs[count++].specific = 0;
8615 }
8616 }
8617 }
8618 else if (note == 0 && !rsrc_write)
8619 {
8620 specs[count] = tmpl;
8621 specs[count++].specific = 0;
8622 }
8623 else
8624 {
8625 UNHANDLED;
8626 }
8627 break;
8628
8629 case IA64_RS_CR_IRR:
8630 if (note == 0)
8631 {
8632 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8633 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8634 if (rsrc_write
8635 && idesc->operands[1] == IA64_OPND_CR3
8636 && regno == CR_IVR)
8637 {
8638 for (i = 0; i < 4; i++)
8639 {
8640 specs[count] = tmpl;
8641 specs[count++].index = CR_IRR0 + i;
8642 }
8643 }
8644 }
8645 else if (note == 1)
8646 {
8647 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8648 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8649 && regno >= CR_IRR0
8650 && regno <= CR_IRR3)
8651 {
8652 specs[count] = tmpl;
8653 specs[count++].index = regno;
8654 }
8655 }
8656 else
8657 {
8658 UNHANDLED;
8659 }
8660 break;
8661
8662 case IA64_RS_CR_IIB:
8663 if (note != 0)
8664 {
8665 UNHANDLED;
8666 }
8667 else
8668 {
8669 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8670 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8671 && (regno == CR_IIB0 || regno == CR_IIB1))
8672 {
8673 specs[count] = tmpl;
8674 specs[count++].index = regno;
8675 }
8676 }
8677 break;
8678
8679 case IA64_RS_CR_LRR:
8680 if (note != 1)
8681 {
8682 UNHANDLED;
8683 }
8684 else
8685 {
8686 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8687 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8688 && (regno == CR_LRR0 || regno == CR_LRR1))
8689 {
8690 specs[count] = tmpl;
8691 specs[count++].index = regno;
8692 }
8693 }
8694 break;
8695
8696 case IA64_RS_CR:
8697 if (note == 1)
8698 {
8699 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8700 {
8701 specs[count] = tmpl;
8702 specs[count++].index =
8703 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8704 }
8705 }
8706 else
8707 {
8708 UNHANDLED;
8709 }
8710 break;
8711
8712 case IA64_RS_DAHR:
8713 if (note == 0)
8714 {
8715 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8716 {
8717 specs[count] = tmpl;
8718 specs[count++].index =
8719 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8720 }
8721 }
8722 else
8723 {
8724 UNHANDLED;
8725 }
8726 break;
8727
8728 case IA64_RS_FR:
8729 case IA64_RS_FRb:
8730 if (note != 1)
8731 {
8732 UNHANDLED;
8733 }
8734 else if (rsrc_write)
8735 {
8736 if (dep->specifier == IA64_RS_FRb
8737 && idesc->operands[0] == IA64_OPND_F1)
8738 {
8739 specs[count] = tmpl;
8740 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8741 }
8742 }
8743 else
8744 {
8745 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8746 {
8747 if (idesc->operands[i] == IA64_OPND_F2
8748 || idesc->operands[i] == IA64_OPND_F3
8749 || idesc->operands[i] == IA64_OPND_F4)
8750 {
8751 specs[count] = tmpl;
8752 specs[count++].index =
8753 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8754 }
8755 }
8756 }
8757 break;
8758
8759 case IA64_RS_GR:
8760 if (note == 13)
8761 {
8762 /* This reference applies only to the GR whose value is loaded with
8763 data returned from memory. */
8764 specs[count] = tmpl;
8765 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8766 }
8767 else if (note == 1)
8768 {
8769 if (rsrc_write)
8770 {
8771 for (i = 0; i < idesc->num_outputs; i++)
8772 if (idesc->operands[i] == IA64_OPND_R1
8773 || idesc->operands[i] == IA64_OPND_R2
8774 || idesc->operands[i] == IA64_OPND_R3)
8775 {
8776 specs[count] = tmpl;
8777 specs[count++].index =
8778 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8779 }
8780 if (idesc->flags & IA64_OPCODE_POSTINC)
8781 for (i = 0; i < NELEMS (idesc->operands); i++)
8782 if (idesc->operands[i] == IA64_OPND_MR3)
8783 {
8784 specs[count] = tmpl;
8785 specs[count++].index =
8786 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8787 }
8788 }
8789 else
8790 {
8791 /* Look for anything that reads a GR. */
8792 for (i = 0; i < NELEMS (idesc->operands); i++)
8793 {
8794 if (idesc->operands[i] == IA64_OPND_MR3
8795 || idesc->operands[i] == IA64_OPND_CPUID_R3
8796 || idesc->operands[i] == IA64_OPND_DBR_R3
8797 || idesc->operands[i] == IA64_OPND_IBR_R3
8798 || idesc->operands[i] == IA64_OPND_MSR_R3
8799 || idesc->operands[i] == IA64_OPND_PKR_R3
8800 || idesc->operands[i] == IA64_OPND_PMC_R3
8801 || idesc->operands[i] == IA64_OPND_PMD_R3
8802 || idesc->operands[i] == IA64_OPND_DAHR_R3
8803 || idesc->operands[i] == IA64_OPND_RR_R3
8804 || ((i >= idesc->num_outputs)
8805 && (idesc->operands[i] == IA64_OPND_R1
8806 || idesc->operands[i] == IA64_OPND_R2
8807 || idesc->operands[i] == IA64_OPND_R3
8808 /* addl source register. */
8809 || idesc->operands[i] == IA64_OPND_R3_2)))
8810 {
8811 specs[count] = tmpl;
8812 specs[count++].index =
8813 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8814 }
8815 }
8816 }
8817 }
8818 else
8819 {
8820 UNHANDLED;
8821 }
8822 break;
8823
8824 /* This is the same as IA64_RS_PRr, except that the register range is
8825 from 1 - 15, and there are no rotating register reads/writes here. */
8826 case IA64_RS_PR:
8827 if (note == 0)
8828 {
8829 for (i = 1; i < 16; i++)
8830 {
8831 specs[count] = tmpl;
8832 specs[count++].index = i;
8833 }
8834 }
8835 else if (note == 7)
8836 {
8837 valueT mask = 0;
8838 /* Mark only those registers indicated by the mask. */
8839 if (rsrc_write)
8840 {
8841 mask = CURR_SLOT.opnd[2].X_add_number;
8842 for (i = 1; i < 16; i++)
8843 if (mask & ((valueT) 1 << i))
8844 {
8845 specs[count] = tmpl;
8846 specs[count++].index = i;
8847 }
8848 }
8849 else
8850 {
8851 UNHANDLED;
8852 }
8853 }
8854 else if (note == 11) /* note 11 implies note 1 as well */
8855 {
8856 if (rsrc_write)
8857 {
8858 for (i = 0; i < idesc->num_outputs; i++)
8859 {
8860 if (idesc->operands[i] == IA64_OPND_P1
8861 || idesc->operands[i] == IA64_OPND_P2)
8862 {
8863 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8864 if (regno >= 1 && regno < 16)
8865 {
8866 specs[count] = tmpl;
8867 specs[count++].index = regno;
8868 }
8869 }
8870 }
8871 }
8872 else
8873 {
8874 UNHANDLED;
8875 }
8876 }
8877 else if (note == 12)
8878 {
8879 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8880 {
8881 specs[count] = tmpl;
8882 specs[count++].index = CURR_SLOT.qp_regno;
8883 }
8884 }
8885 else if (note == 1)
8886 {
8887 if (rsrc_write)
8888 {
8889 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8890 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8891 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8892 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8893
8894 if ((idesc->operands[0] == IA64_OPND_P1
8895 || idesc->operands[0] == IA64_OPND_P2)
8896 && p1 >= 1 && p1 < 16)
8897 {
8898 specs[count] = tmpl;
8899 specs[count].cmp_type =
8900 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8901 specs[count++].index = p1;
8902 }
8903 if ((idesc->operands[1] == IA64_OPND_P1
8904 || idesc->operands[1] == IA64_OPND_P2)
8905 && p2 >= 1 && p2 < 16)
8906 {
8907 specs[count] = tmpl;
8908 specs[count].cmp_type =
8909 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8910 specs[count++].index = p2;
8911 }
8912 }
8913 else
8914 {
8915 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8916 {
8917 specs[count] = tmpl;
8918 specs[count++].index = CURR_SLOT.qp_regno;
8919 }
8920 if (idesc->operands[1] == IA64_OPND_PR)
8921 {
8922 for (i = 1; i < 16; i++)
8923 {
8924 specs[count] = tmpl;
8925 specs[count++].index = i;
8926 }
8927 }
8928 }
8929 }
8930 else
8931 {
8932 UNHANDLED;
8933 }
8934 break;
8935
8936 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8937 simplified cases of this. */
8938 case IA64_RS_PRr:
8939 if (note == 0)
8940 {
8941 for (i = 16; i < 63; i++)
8942 {
8943 specs[count] = tmpl;
8944 specs[count++].index = i;
8945 }
8946 }
8947 else if (note == 7)
8948 {
8949 valueT mask = 0;
8950 /* Mark only those registers indicated by the mask. */
8951 if (rsrc_write
8952 && idesc->operands[0] == IA64_OPND_PR)
8953 {
8954 mask = CURR_SLOT.opnd[2].X_add_number;
8955 if (mask & ((valueT) 1 << 16))
8956 for (i = 16; i < 63; i++)
8957 {
8958 specs[count] = tmpl;
8959 specs[count++].index = i;
8960 }
8961 }
8962 else if (rsrc_write
8963 && idesc->operands[0] == IA64_OPND_PR_ROT)
8964 {
8965 for (i = 16; i < 63; i++)
8966 {
8967 specs[count] = tmpl;
8968 specs[count++].index = i;
8969 }
8970 }
8971 else
8972 {
8973 UNHANDLED;
8974 }
8975 }
8976 else if (note == 11) /* note 11 implies note 1 as well */
8977 {
8978 if (rsrc_write)
8979 {
8980 for (i = 0; i < idesc->num_outputs; i++)
8981 {
8982 if (idesc->operands[i] == IA64_OPND_P1
8983 || idesc->operands[i] == IA64_OPND_P2)
8984 {
8985 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8986 if (regno >= 16 && regno < 63)
8987 {
8988 specs[count] = tmpl;
8989 specs[count++].index = regno;
8990 }
8991 }
8992 }
8993 }
8994 else
8995 {
8996 UNHANDLED;
8997 }
8998 }
8999 else if (note == 12)
9000 {
9001 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9002 {
9003 specs[count] = tmpl;
9004 specs[count++].index = CURR_SLOT.qp_regno;
9005 }
9006 }
9007 else if (note == 1)
9008 {
9009 if (rsrc_write)
9010 {
9011 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9012 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9013 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9014 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9015
9016 if ((idesc->operands[0] == IA64_OPND_P1
9017 || idesc->operands[0] == IA64_OPND_P2)
9018 && p1 >= 16 && p1 < 63)
9019 {
9020 specs[count] = tmpl;
9021 specs[count].cmp_type =
9022 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9023 specs[count++].index = p1;
9024 }
9025 if ((idesc->operands[1] == IA64_OPND_P1
9026 || idesc->operands[1] == IA64_OPND_P2)
9027 && p2 >= 16 && p2 < 63)
9028 {
9029 specs[count] = tmpl;
9030 specs[count].cmp_type =
9031 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9032 specs[count++].index = p2;
9033 }
9034 }
9035 else
9036 {
9037 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9038 {
9039 specs[count] = tmpl;
9040 specs[count++].index = CURR_SLOT.qp_regno;
9041 }
9042 if (idesc->operands[1] == IA64_OPND_PR)
9043 {
9044 for (i = 16; i < 63; i++)
9045 {
9046 specs[count] = tmpl;
9047 specs[count++].index = i;
9048 }
9049 }
9050 }
9051 }
9052 else
9053 {
9054 UNHANDLED;
9055 }
9056 break;
9057
9058 case IA64_RS_PSR:
9059 /* Verify that the instruction is using the PSR bit indicated in
9060 dep->regindex. */
9061 if (note == 0)
9062 {
9063 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9064 {
9065 if (dep->regindex < 6)
9066 {
9067 specs[count++] = tmpl;
9068 }
9069 }
9070 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9071 {
9072 if (dep->regindex < 32
9073 || dep->regindex == 35
9074 || dep->regindex == 36
9075 || (!rsrc_write && dep->regindex == PSR_CPL))
9076 {
9077 specs[count++] = tmpl;
9078 }
9079 }
9080 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9081 {
9082 if (dep->regindex < 32
9083 || dep->regindex == 35
9084 || dep->regindex == 36
9085 || (rsrc_write && dep->regindex == PSR_CPL))
9086 {
9087 specs[count++] = tmpl;
9088 }
9089 }
9090 else
9091 {
9092 /* Several PSR bits have very specific dependencies. */
9093 switch (dep->regindex)
9094 {
9095 default:
9096 specs[count++] = tmpl;
9097 break;
9098 case PSR_IC:
9099 if (rsrc_write)
9100 {
9101 specs[count++] = tmpl;
9102 }
9103 else
9104 {
9105 /* Only certain CR accesses use PSR.ic */
9106 if (idesc->operands[0] == IA64_OPND_CR3
9107 || idesc->operands[1] == IA64_OPND_CR3)
9108 {
9109 int reg_index =
9110 ((idesc->operands[0] == IA64_OPND_CR3)
9111 ? 0 : 1);
9112 int regno =
9113 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9114
9115 switch (regno)
9116 {
9117 default:
9118 break;
9119 case CR_ITIR:
9120 case CR_IFS:
9121 case CR_IIM:
9122 case CR_IIP:
9123 case CR_IPSR:
9124 case CR_ISR:
9125 case CR_IFA:
9126 case CR_IHA:
9127 case CR_IIB0:
9128 case CR_IIB1:
9129 case CR_IIPA:
9130 specs[count++] = tmpl;
9131 break;
9132 }
9133 }
9134 }
9135 break;
9136 case PSR_CPL:
9137 if (rsrc_write)
9138 {
9139 specs[count++] = tmpl;
9140 }
9141 else
9142 {
9143 /* Only some AR accesses use cpl */
9144 if (idesc->operands[0] == IA64_OPND_AR3
9145 || idesc->operands[1] == IA64_OPND_AR3)
9146 {
9147 int reg_index =
9148 ((idesc->operands[0] == IA64_OPND_AR3)
9149 ? 0 : 1);
9150 int regno =
9151 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9152
9153 if (regno == AR_ITC
9154 || regno == AR_RUC
9155 || (reg_index == 0
9156 && (regno == AR_RSC
9157 || (regno >= AR_K0
9158 && regno <= AR_K7))))
9159 {
9160 specs[count++] = tmpl;
9161 }
9162 }
9163 else
9164 {
9165 specs[count++] = tmpl;
9166 }
9167 break;
9168 }
9169 }
9170 }
9171 }
9172 else if (note == 7)
9173 {
9174 valueT mask = 0;
9175 if (idesc->operands[0] == IA64_OPND_IMMU24)
9176 {
9177 mask = CURR_SLOT.opnd[0].X_add_number;
9178 }
9179 else
9180 {
9181 UNHANDLED;
9182 }
9183 if (mask & ((valueT) 1 << dep->regindex))
9184 {
9185 specs[count++] = tmpl;
9186 }
9187 }
9188 else if (note == 8)
9189 {
9190 int min = dep->regindex == PSR_DFL ? 2 : 32;
9191 int max = dep->regindex == PSR_DFL ? 31 : 127;
9192 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9193 for (i = 0; i < NELEMS (idesc->operands); i++)
9194 {
9195 if (idesc->operands[i] == IA64_OPND_F1
9196 || idesc->operands[i] == IA64_OPND_F2
9197 || idesc->operands[i] == IA64_OPND_F3
9198 || idesc->operands[i] == IA64_OPND_F4)
9199 {
9200 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9201 if (reg >= min && reg <= max)
9202 {
9203 specs[count++] = tmpl;
9204 }
9205 }
9206 }
9207 }
9208 else if (note == 9)
9209 {
9210 int min = dep->regindex == PSR_MFL ? 2 : 32;
9211 int max = dep->regindex == PSR_MFL ? 31 : 127;
9212 /* mfh is read on writes to FR32-127; mfl is read on writes to
9213 FR2-31 */
9214 for (i = 0; i < idesc->num_outputs; i++)
9215 {
9216 if (idesc->operands[i] == IA64_OPND_F1)
9217 {
9218 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9219 if (reg >= min && reg <= max)
9220 {
9221 specs[count++] = tmpl;
9222 }
9223 }
9224 }
9225 }
9226 else if (note == 10)
9227 {
9228 for (i = 0; i < NELEMS (idesc->operands); i++)
9229 {
9230 if (idesc->operands[i] == IA64_OPND_R1
9231 || idesc->operands[i] == IA64_OPND_R2
9232 || idesc->operands[i] == IA64_OPND_R3)
9233 {
9234 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9235 if (regno >= 16 && regno <= 31)
9236 {
9237 specs[count++] = tmpl;
9238 }
9239 }
9240 }
9241 }
9242 else
9243 {
9244 UNHANDLED;
9245 }
9246 break;
9247
9248 case IA64_RS_AR_FPSR:
9249 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9250 {
9251 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9252 if (regno == AR_FPSR)
9253 {
9254 specs[count++] = tmpl;
9255 }
9256 }
9257 else
9258 {
9259 specs[count++] = tmpl;
9260 }
9261 break;
9262
9263 case IA64_RS_ARX:
9264 /* Handle all AR[REG] resources */
9265 if (note == 0 || note == 1)
9266 {
9267 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9268 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9269 && regno == dep->regindex)
9270 {
9271 specs[count++] = tmpl;
9272 }
9273 /* other AR[REG] resources may be affected by AR accesses */
9274 else if (idesc->operands[0] == IA64_OPND_AR3)
9275 {
9276 /* AR[] writes */
9277 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9278 switch (dep->regindex)
9279 {
9280 default:
9281 break;
9282 case AR_BSP:
9283 case AR_RNAT:
9284 if (regno == AR_BSPSTORE)
9285 {
9286 specs[count++] = tmpl;
9287 }
9288 /* Fall through. */
9289 case AR_RSC:
9290 if (!rsrc_write &&
9291 (regno == AR_BSPSTORE
9292 || regno == AR_RNAT))
9293 {
9294 specs[count++] = tmpl;
9295 }
9296 break;
9297 }
9298 }
9299 else if (idesc->operands[1] == IA64_OPND_AR3)
9300 {
9301 /* AR[] reads */
9302 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9303 switch (dep->regindex)
9304 {
9305 default:
9306 break;
9307 case AR_RSC:
9308 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9309 {
9310 specs[count++] = tmpl;
9311 }
9312 break;
9313 }
9314 }
9315 else
9316 {
9317 specs[count++] = tmpl;
9318 }
9319 }
9320 else
9321 {
9322 UNHANDLED;
9323 }
9324 break;
9325
9326 case IA64_RS_CRX:
9327 /* Handle all CR[REG] resources.
9328 ??? FIXME: Rule 17 isn't really handled correctly. */
9329 if (note == 0 || note == 1 || note == 17)
9330 {
9331 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9332 {
9333 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9334 if (regno == dep->regindex)
9335 {
9336 specs[count++] = tmpl;
9337 }
9338 else if (!rsrc_write)
9339 {
9340 /* Reads from CR[IVR] affect other resources. */
9341 if (regno == CR_IVR)
9342 {
9343 if ((dep->regindex >= CR_IRR0
9344 && dep->regindex <= CR_IRR3)
9345 || dep->regindex == CR_TPR)
9346 {
9347 specs[count++] = tmpl;
9348 }
9349 }
9350 }
9351 }
9352 else
9353 {
9354 specs[count++] = tmpl;
9355 }
9356 }
9357 else
9358 {
9359 UNHANDLED;
9360 }
9361 break;
9362
9363 case IA64_RS_INSERVICE:
9364 /* look for write of EOI (67) or read of IVR (65) */
9365 if ((idesc->operands[0] == IA64_OPND_CR3
9366 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9367 || (idesc->operands[1] == IA64_OPND_CR3
9368 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9369 {
9370 specs[count++] = tmpl;
9371 }
9372 break;
9373
9374 case IA64_RS_GR0:
9375 if (note == 1)
9376 {
9377 specs[count++] = tmpl;
9378 }
9379 else
9380 {
9381 UNHANDLED;
9382 }
9383 break;
9384
9385 case IA64_RS_CFM:
9386 if (note != 2)
9387 {
9388 specs[count++] = tmpl;
9389 }
9390 else
9391 {
9392 /* Check if any of the registers accessed are in the rotating region.
9393 mov to/from pr accesses CFM only when qp_regno is in the rotating
9394 region */
9395 for (i = 0; i < NELEMS (idesc->operands); i++)
9396 {
9397 if (idesc->operands[i] == IA64_OPND_R1
9398 || idesc->operands[i] == IA64_OPND_R2
9399 || idesc->operands[i] == IA64_OPND_R3)
9400 {
9401 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9402 /* Assumes that md.rot.num_regs is always valid */
9403 if (md.rot.num_regs > 0
9404 && num > 31
9405 && num < 31 + md.rot.num_regs)
9406 {
9407 specs[count] = tmpl;
9408 specs[count++].specific = 0;
9409 }
9410 }
9411 else if (idesc->operands[i] == IA64_OPND_F1
9412 || idesc->operands[i] == IA64_OPND_F2
9413 || idesc->operands[i] == IA64_OPND_F3
9414 || idesc->operands[i] == IA64_OPND_F4)
9415 {
9416 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9417 if (num > 31)
9418 {
9419 specs[count] = tmpl;
9420 specs[count++].specific = 0;
9421 }
9422 }
9423 else if (idesc->operands[i] == IA64_OPND_P1
9424 || idesc->operands[i] == IA64_OPND_P2)
9425 {
9426 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9427 if (num > 15)
9428 {
9429 specs[count] = tmpl;
9430 specs[count++].specific = 0;
9431 }
9432 }
9433 }
9434 if (CURR_SLOT.qp_regno > 15)
9435 {
9436 specs[count] = tmpl;
9437 specs[count++].specific = 0;
9438 }
9439 }
9440 break;
9441
9442 /* This is the same as IA64_RS_PRr, except simplified to account for
9443 the fact that there is only one register. */
9444 case IA64_RS_PR63:
9445 if (note == 0)
9446 {
9447 specs[count++] = tmpl;
9448 }
9449 else if (note == 7)
9450 {
9451 valueT mask = 0;
9452 if (idesc->operands[2] == IA64_OPND_IMM17)
9453 mask = CURR_SLOT.opnd[2].X_add_number;
9454 if (mask & ((valueT) 1 << 63))
9455 specs[count++] = tmpl;
9456 }
9457 else if (note == 11)
9458 {
9459 if ((idesc->operands[0] == IA64_OPND_P1
9460 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9461 || (idesc->operands[1] == IA64_OPND_P2
9462 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9463 {
9464 specs[count++] = tmpl;
9465 }
9466 }
9467 else if (note == 12)
9468 {
9469 if (CURR_SLOT.qp_regno == 63)
9470 {
9471 specs[count++] = tmpl;
9472 }
9473 }
9474 else if (note == 1)
9475 {
9476 if (rsrc_write)
9477 {
9478 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9479 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9480 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9481 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9482
9483 if (p1 == 63
9484 && (idesc->operands[0] == IA64_OPND_P1
9485 || idesc->operands[0] == IA64_OPND_P2))
9486 {
9487 specs[count] = tmpl;
9488 specs[count++].cmp_type =
9489 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9490 }
9491 if (p2 == 63
9492 && (idesc->operands[1] == IA64_OPND_P1
9493 || idesc->operands[1] == IA64_OPND_P2))
9494 {
9495 specs[count] = tmpl;
9496 specs[count++].cmp_type =
9497 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9498 }
9499 }
9500 else
9501 {
9502 if (CURR_SLOT.qp_regno == 63)
9503 {
9504 specs[count++] = tmpl;
9505 }
9506 }
9507 }
9508 else
9509 {
9510 UNHANDLED;
9511 }
9512 break;
9513
9514 case IA64_RS_RSE:
9515 /* FIXME we can identify some individual RSE written resources, but RSE
9516 read resources have not yet been completely identified, so for now
9517 treat RSE as a single resource */
9518 if (strncmp (idesc->name, "mov", 3) == 0)
9519 {
9520 if (rsrc_write)
9521 {
9522 if (idesc->operands[0] == IA64_OPND_AR3
9523 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9524 {
9525 specs[count++] = tmpl;
9526 }
9527 }
9528 else
9529 {
9530 if (idesc->operands[0] == IA64_OPND_AR3)
9531 {
9532 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9533 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9534 {
9535 specs[count++] = tmpl;
9536 }
9537 }
9538 else if (idesc->operands[1] == IA64_OPND_AR3)
9539 {
9540 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9541 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9542 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9543 {
9544 specs[count++] = tmpl;
9545 }
9546 }
9547 }
9548 }
9549 else
9550 {
9551 specs[count++] = tmpl;
9552 }
9553 break;
9554
9555 case IA64_RS_ANY:
9556 /* FIXME -- do any of these need to be non-specific? */
9557 specs[count++] = tmpl;
9558 break;
9559
9560 default:
9561 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9562 break;
9563 }
9564
9565 return count;
9566 }
9567
9568 /* Clear branch flags on marked resources. This breaks the link between the
9569 QP of the marking instruction and a subsequent branch on the same QP. */
9570
9571 static void
9572 clear_qp_branch_flag (valueT mask)
9573 {
9574 int i;
9575 for (i = 0; i < regdepslen; i++)
9576 {
9577 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9578 if ((bit & mask) != 0)
9579 {
9580 regdeps[i].link_to_qp_branch = 0;
9581 }
9582 }
9583 }
9584
9585 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9586 any mutexes which contain one of the PRs and create new ones when
9587 needed. */
9588
9589 static int
9590 update_qp_mutex (valueT mask)
9591 {
9592 int i;
9593 int add = 0;
9594
9595 i = 0;
9596 while (i < qp_mutexeslen)
9597 {
9598 if ((qp_mutexes[i].prmask & mask) != 0)
9599 {
9600 /* If it destroys and creates the same mutex, do nothing. */
9601 if (qp_mutexes[i].prmask == mask
9602 && qp_mutexes[i].path == md.path)
9603 {
9604 i++;
9605 add = -1;
9606 }
9607 else
9608 {
9609 int keep = 0;
9610
9611 if (md.debug_dv)
9612 {
9613 fprintf (stderr, " Clearing mutex relation");
9614 print_prmask (qp_mutexes[i].prmask);
9615 fprintf (stderr, "\n");
9616 }
9617
9618 /* Deal with an old mutex containing three or more PRs only if
9619 the new mutex is on the same execution path as the old one.
9620
9621 FIXME: Support for mutexes with three or more PRs is incomplete;
9622 dot_pred_rel () may be a better place to fix it. */
9623 if (qp_mutexes[i].path == md.path)
9624 {
9625 /* If it is a proper subset of the mutex, create a
9626 new mutex. */
9627 if (add == 0
9628 && (qp_mutexes[i].prmask & mask) == mask)
9629 add = 1;
9630
9631 qp_mutexes[i].prmask &= ~mask;
9632 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9633 {
9634 /* Keep the modified mutex if there is more than one
9635 PR left. */
9636 keep = 1;
9637 i++;
9638 }
9639 }
9640
9641 if (keep == 0)
9642 /* Remove the mutex. */
9643 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9644 }
9645 }
9646 else
9647 ++i;
9648 }
9649
9650 if (add == 1)
9651 add_qp_mutex (mask);
9652
9653 return add;
9654 }
9655
9656 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9657
9658 Any change to a PR clears the mutex relations which include that PR. */
9659
9660 static void
9661 clear_qp_mutex (valueT mask)
9662 {
9663 int i;
9664
9665 i = 0;
9666 while (i < qp_mutexeslen)
9667 {
9668 if ((qp_mutexes[i].prmask & mask) != 0)
9669 {
9670 if (md.debug_dv)
9671 {
9672 fprintf (stderr, " Clearing mutex relation");
9673 print_prmask (qp_mutexes[i].prmask);
9674 fprintf (stderr, "\n");
9675 }
9676 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9677 }
9678 else
9679 ++i;
9680 }
9681 }
9682
9683 /* Clear implies relations which contain PRs in the given masks.
9684 P1_MASK indicates the source of the implies relation, while P2_MASK
9685 indicates the implied PR. */
9686
9687 static void
9688 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9689 {
9690 int i;
9691
9692 i = 0;
9693 while (i < qp_implieslen)
9694 {
9695 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9696 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9697 {
9698 if (md.debug_dv)
9699 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9700 qp_implies[i].p1, qp_implies[i].p2);
9701 qp_implies[i] = qp_implies[--qp_implieslen];
9702 }
9703 else
9704 ++i;
9705 }
9706 }
9707
9708 /* Record in the list of implied relations that PR P1 implies PR P2. */
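/* For example (a hypothetical instruction stream), after

	(p5) cmp.eq.unc p6, p7 = r8, r9

   note_register_values records that p6 implies p5 and that p7 implies p5,
   since either target can only become 1 when p5 is true.  */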
9709
9710 static void
9711 add_qp_imply (int p1, int p2)
9712 {
9713 valueT mask;
9714 valueT bit;
9715 int i;
9716
9717 /* p0 is not meaningful here. */
9718 if (p1 == 0 || p2 == 0)
9719 abort ();
9720
9721 if (p1 == p2)
9722 return;
9723
9724 /* If it exists already, ignore it. */
9725 for (i = 0; i < qp_implieslen; i++)
9726 {
9727 if (qp_implies[i].p1 == p1
9728 && qp_implies[i].p2 == p2
9729 && qp_implies[i].path == md.path
9730 && !qp_implies[i].p2_branched)
9731 return;
9732 }
9733
9734 if (qp_implieslen == qp_impliestotlen)
9735 {
9736 qp_impliestotlen += 20;
9737 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9738 }
9739 if (md.debug_dv)
9740 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9741 qp_implies[qp_implieslen].p1 = p1;
9742 qp_implies[qp_implieslen].p2 = p2;
9743 qp_implies[qp_implieslen].path = md.path;
9744 qp_implies[qp_implieslen++].p2_branched = 0;
9745
9746 /* Add in the implied transitive relations; for everything that p2 implies,
9747 make p1 imply that, too; for everything that implies p1, make it imply p2
9748 as well. */
9749 for (i = 0; i < qp_implieslen; i++)
9750 {
9751 if (qp_implies[i].p1 == p2)
9752 add_qp_imply (p1, qp_implies[i].p2);
9753 if (qp_implies[i].p2 == p1)
9754 add_qp_imply (qp_implies[i].p1, p2);
9755 }
9756 /* Add in mutex relations implied by this implies relation; for each mutex
9757 relation containing p2, duplicate it and replace p2 with p1. */
9758 bit = (valueT) 1 << p1;
9759 mask = (valueT) 1 << p2;
9760 for (i = 0; i < qp_mutexeslen; i++)
9761 {
9762 if (qp_mutexes[i].prmask & mask)
9763 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9764 }
9765 }
9766
9767 /* Add the PRs specified in the mask to the mutex list; this means that only
9768 one of the PRs can be true at any time. PR0 should never be included in
9769 the mask. */
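/* For example, an unpredicated compare such as

	cmp.eq p6, p7 = r8, r9

   leaves exactly one of p6/p7 set, so note_register_values registers the
   mutex {p6, p7} through this function.  */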
9770
9771 static void
9772 add_qp_mutex (valueT mask)
9773 {
9774 if (mask & 0x1)
9775 abort ();
9776
9777 if (qp_mutexeslen == qp_mutexestotlen)
9778 {
9779 qp_mutexestotlen += 20;
9780 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9781 }
9782 if (md.debug_dv)
9783 {
9784 fprintf (stderr, " Registering mutex on");
9785 print_prmask (mask);
9786 fprintf (stderr, "\n");
9787 }
9788 qp_mutexes[qp_mutexeslen].path = md.path;
9789 qp_mutexes[qp_mutexeslen++].prmask = mask;
9790 }
9791
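/* Return non-zero if NAME is longer than SUFFIX and ends with SUFFIX.  */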
9792 static int
9793 has_suffix_p (const char *name, const char *suffix)
9794 {
9795 size_t namelen = strlen (name);
9796 size_t sufflen = strlen (suffix);
9797
9798 if (namelen <= sufflen)
9799 return 0;
9800 return strcmp (name + namelen - sufflen, suffix) == 0;
9801 }
9802
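/* Forget all tracked GR values; entry 0 (r0) is left untouched.  */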
9803 static void
9804 clear_register_values (void)
9805 {
9806 int i;
9807 if (md.debug_dv)
9808 fprintf (stderr, " Clearing register values\n");
9809 for (i = 1; i < NELEMS (gr_values); i++)
9810 gr_values[i].known = 0;
9811 }
9812
9813 /* Keep track of register values/changes which affect DV tracking.
9814
9815 Optimization note: we should add a flag to classes of insns that we otherwise
9816 have to identify by examining a group of strings. */
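/* For example, after

	mov r4 = 7

   the value of r4 is recorded below, so that a later indirect access such as
   "mov pmc[r4] = r2" can be resolved to a specific PMC register instead of
   conservatively matching all of them.  */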
9817
9818 static void
9819 note_register_values (struct ia64_opcode *idesc)
9820 {
9821 valueT qp_changemask = 0;
9822 int i;
9823
9824 /* Invalidate values for registers being written to. */
9825 for (i = 0; i < idesc->num_outputs; i++)
9826 {
9827 if (idesc->operands[i] == IA64_OPND_R1
9828 || idesc->operands[i] == IA64_OPND_R2
9829 || idesc->operands[i] == IA64_OPND_R3)
9830 {
9831 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9832 if (regno > 0 && regno < NELEMS (gr_values))
9833 gr_values[regno].known = 0;
9834 }
9835 else if (idesc->operands[i] == IA64_OPND_R3_2)
9836 {
9837 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9838 if (regno > 0 && regno < 4)
9839 gr_values[regno].known = 0;
9840 }
9841 else if (idesc->operands[i] == IA64_OPND_P1
9842 || idesc->operands[i] == IA64_OPND_P2)
9843 {
9844 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9845 qp_changemask |= (valueT) 1 << regno;
9846 }
9847 else if (idesc->operands[i] == IA64_OPND_PR)
9848 {
9849 if (CURR_SLOT.opnd[2].X_add_number & (valueT) 0x10000)
9850 qp_changemask = ~(valueT) 0x1FFFF | CURR_SLOT.opnd[2].X_add_number;
9851 else
9852 qp_changemask = CURR_SLOT.opnd[2].X_add_number;
9853 break;
9854 }
9855 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9856 {
9857 if (CURR_SLOT.opnd[1].X_add_number & ((valueT) 1 << 43))
9858 qp_changemask = -((valueT) 1 << 44) | CURR_SLOT.opnd[1].X_add_number;
9859 else
9860 qp_changemask = CURR_SLOT.opnd[1].X_add_number;
9861 qp_changemask &= ~(valueT) 0xFFFF;
9862 break;
9863 }
9864 }
9865
9866 /* Always clear qp branch flags on any PR change. */
9867 /* FIXME there may be exceptions for certain compares. */
9868 clear_qp_branch_flag (qp_changemask);
9869
9870 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9871 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9872 {
9873 qp_changemask |= ~(valueT) 0xFFFF;
9874 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9875 {
9876 for (i = 32; i < 32 + md.rot.num_regs; i++)
9877 gr_values[i].known = 0;
9878 }
9879 clear_qp_mutex (qp_changemask);
9880 clear_qp_implies (qp_changemask, qp_changemask);
9881 }
9882 /* After a call, all register values are undefined, except those marked
9883 as "safe". */
9884 else if (strncmp (idesc->name, "br.call", 7) == 0
9885 || strncmp (idesc->name, "brl.call", 8) == 0)
9886 {
9887 /* FIXME keep GR values which are marked as "safe_across_calls" */
9888 clear_register_values ();
9889 clear_qp_mutex (~qp_safe_across_calls);
9890 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9891 clear_qp_branch_flag (~qp_safe_across_calls);
9892 }
9893 else if (is_interruption_or_rfi (idesc)
9894 || is_taken_branch (idesc))
9895 {
9896 clear_register_values ();
9897 clear_qp_mutex (~(valueT) 0);
9898 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9899 }
9900 /* Look for mutex and implies relations. */
9901 else if ((idesc->operands[0] == IA64_OPND_P1
9902 || idesc->operands[0] == IA64_OPND_P2)
9903 && (idesc->operands[1] == IA64_OPND_P1
9904 || idesc->operands[1] == IA64_OPND_P2))
9905 {
9906 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9907 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9908 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9909 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9910
9911 /* If both PRs are PR0, we can't really do anything. */
9912 if (p1 == 0 && p2 == 0)
9913 {
9914 if (md.debug_dv)
9915 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9916 }
9917 /* In general, clear mutexes and implies which include P1 or P2,
9918 with the following exceptions. */
9919 else if (has_suffix_p (idesc->name, ".or.andcm")
9920 || has_suffix_p (idesc->name, ".and.orcm"))
9921 {
9922 clear_qp_implies (p2mask, p1mask);
9923 }
9924 else if (has_suffix_p (idesc->name, ".andcm")
9925 || has_suffix_p (idesc->name, ".and"))
9926 {
9927 clear_qp_implies (0, p1mask | p2mask);
9928 }
9929 else if (has_suffix_p (idesc->name, ".orcm")
9930 || has_suffix_p (idesc->name, ".or"))
9931 {
9932 clear_qp_mutex (p1mask | p2mask);
9933 clear_qp_implies (p1mask | p2mask, 0);
9934 }
9935 else
9936 {
9937 int added = 0;
9938
9939 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9940
9941 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9942 if (p1 == 0 || p2 == 0)
9943 clear_qp_mutex (p1mask | p2mask);
9944 else
9945 added = update_qp_mutex (p1mask | p2mask);
9946
9947 if (CURR_SLOT.qp_regno == 0
9948 || has_suffix_p (idesc->name, ".unc"))
9949 {
9950 if (added == 0 && p1 && p2)
9951 add_qp_mutex (p1mask | p2mask);
9952 if (CURR_SLOT.qp_regno != 0)
9953 {
9954 if (p1)
9955 add_qp_imply (p1, CURR_SLOT.qp_regno);
9956 if (p2)
9957 add_qp_imply (p2, CURR_SLOT.qp_regno);
9958 }
9959 }
9960 }
9961 }
9962 /* Look for mov imm insns into GRs. */
9963 else if (idesc->operands[0] == IA64_OPND_R1
9964 && (idesc->operands[1] == IA64_OPND_IMM22
9965 || idesc->operands[1] == IA64_OPND_IMMU64)
9966 && CURR_SLOT.opnd[1].X_op == O_constant
9967 && (strcmp (idesc->name, "mov") == 0
9968 || strcmp (idesc->name, "movl") == 0))
9969 {
9970 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9971 if (regno > 0 && regno < NELEMS (gr_values))
9972 {
9973 gr_values[regno].known = 1;
9974 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9975 gr_values[regno].path = md.path;
9976 if (md.debug_dv)
9977 {
9978 fprintf (stderr, " Know gr%d = ", regno);
9979 fprintf_vma (stderr, gr_values[regno].value);
9980 fputs ("\n", stderr);
9981 }
9982 }
9983 }
9984 /* Look for dep.z imm insns. */
9985 else if (idesc->operands[0] == IA64_OPND_R1
9986 && idesc->operands[1] == IA64_OPND_IMM8
9987 && strcmp (idesc->name, "dep.z") == 0)
9988 {
9989 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9990 if (regno > 0 && regno < NELEMS (gr_values))
9991 {
9992 valueT value = CURR_SLOT.opnd[1].X_add_number;
9993
9994 if (CURR_SLOT.opnd[3].X_add_number < 64)
9995 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9996 value <<= CURR_SLOT.opnd[2].X_add_number;
9997 gr_values[regno].known = 1;
9998 gr_values[regno].value = value;
9999 gr_values[regno].path = md.path;
10000 if (md.debug_dv)
10001 {
10002 fprintf (stderr, " Know gr%d = ", regno);
10003 fprintf_vma (stderr, gr_values[regno].value);
10004 fputs ("\n", stderr);
10005 }
10006 }
10007 }
10008 else
10009 {
10010 clear_qp_mutex (qp_changemask);
10011 clear_qp_implies (qp_changemask, qp_changemask);
10012 }
10013 }
10014
10015 /* Return whether the given predicate registers are currently mutex. */
10016
10017 static int
10018 qp_mutex (int p1, int p2, int path)
10019 {
10020 int i;
10021 valueT mask;
10022
10023 if (p1 != p2)
10024 {
10025 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10026 for (i = 0; i < qp_mutexeslen; i++)
10027 {
10028 if (qp_mutexes[i].path >= path
10029 && (qp_mutexes[i].prmask & mask) == mask)
10030 return 1;
10031 }
10032 }
10033 return 0;
10034 }
10035
10036 /* Return whether the given resource is in the given insn's list of chks.
10037 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10038 conflict. */
10039
10040 static int
10041 resources_match (struct rsrc *rs,
10042 struct ia64_opcode *idesc,
10043 int note,
10044 int qp_regno,
10045 int path)
10046 {
10047 struct rsrc specs[MAX_SPECS];
10048 int count;
10049
10050 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10051 we don't need to check. One exception is note 11, which indicates that
10052 target predicates are written regardless of PR[qp]. */
10053 if (qp_mutex (rs->qp_regno, qp_regno, path)
10054 && note != 11)
10055 return 0;
10056
10057 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10058 while (count-- > 0)
10059 {
10060 /* UNAT checking is a bit more specific than for other resources. */
10061 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10062 && specs[count].mem_offset.hint
10063 && rs->mem_offset.hint)
10064 {
10065 if (rs->mem_offset.base == specs[count].mem_offset.base)
10066 {
10067 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10068 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10069 return 1;
10070 else
10071 continue;
10072 }
10073 }
10074
10075 /* Skip apparent PR write conflicts where both writes are an AND or both
10076 writes are an OR. */
10077 if (rs->dependency->specifier == IA64_RS_PR
10078 || rs->dependency->specifier == IA64_RS_PRr
10079 || rs->dependency->specifier == IA64_RS_PR63)
10080 {
10081 if (specs[count].cmp_type != CMP_NONE
10082 && specs[count].cmp_type == rs->cmp_type)
10083 {
10084 if (md.debug_dv)
10085 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10086 dv_mode[rs->dependency->mode],
10087 rs->dependency->specifier != IA64_RS_PR63 ?
10088 specs[count].index : 63);
10089 continue;
10090 }
10091 if (md.debug_dv)
10092 fprintf (stderr,
10093 " %s on parallel compare conflict %s vs %s on PR%d\n",
10094 dv_mode[rs->dependency->mode],
10095 dv_cmp_type[rs->cmp_type],
10096 dv_cmp_type[specs[count].cmp_type],
10097 rs->dependency->specifier != IA64_RS_PR63 ?
10098 specs[count].index : 63);
10099
10100 }
10101
10102 /* If either resource is not specific, conservatively assume
10103 a conflict. */
10104 if (!specs[count].specific || !rs->specific)
10105 return 2;
10106 else if (specs[count].index == rs->index)
10107 return 1;
10108 }
10109
10110 return 0;
10111 }
10112
10113 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10114 insert a stop to create the break. Update all resource dependencies
10115 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10116 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10117 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10118 instruction. */
10119
10120 static void
10121 insn_group_break (int insert_stop, int qp_regno, int save_current)
10122 {
10123 int i;
10124
10125 if (insert_stop && md.num_slots_in_use > 0)
10126 PREV_SLOT.end_of_insn_group = 1;
10127
10128 if (md.debug_dv)
10129 {
10130 fprintf (stderr, " Insn group break%s",
10131 (insert_stop ? " (w/stop)" : ""));
10132 if (qp_regno != 0)
10133 fprintf (stderr, " effective for QP=%d", qp_regno);
10134 fprintf (stderr, "\n");
10135 }
10136
10137 i = 0;
10138 while (i < regdepslen)
10139 {
10140 const struct ia64_dependency *dep = regdeps[i].dependency;
10141
10142 if (qp_regno != 0
10143 && regdeps[i].qp_regno != qp_regno)
10144 {
10145 ++i;
10146 continue;
10147 }
10148
10149 if (save_current
10150 && CURR_SLOT.src_file == regdeps[i].file
10151 && CURR_SLOT.src_line == regdeps[i].line)
10152 {
10153 ++i;
10154 continue;
10155 }
10156
10157 /* clear dependencies which are automatically cleared by a stop, or
10158 those that have reached the appropriate state of insn serialization */
10159 if (dep->semantics == IA64_DVS_IMPLIED
10160 || dep->semantics == IA64_DVS_IMPLIEDF
10161 || regdeps[i].insn_srlz == STATE_SRLZ)
10162 {
10163 print_dependency ("Removing", i);
10164 regdeps[i] = regdeps[--regdepslen];
10165 }
10166 else
10167 {
10168 if (dep->semantics == IA64_DVS_DATA
10169 || dep->semantics == IA64_DVS_INSTR
10170 || dep->semantics == IA64_DVS_SPECIFIC)
10171 {
10172 if (regdeps[i].insn_srlz == STATE_NONE)
10173 regdeps[i].insn_srlz = STATE_STOP;
10174 if (regdeps[i].data_srlz == STATE_NONE)
10175 regdeps[i].data_srlz = STATE_STOP;
10176 }
10177 ++i;
10178 }
10179 }
10180 }
10181
10182 /* Add the given resource usage spec to the list of active dependencies. */
10183
10184 static void
10185 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10186 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10187 struct rsrc *spec,
10188 int depind,
10189 int path)
10190 {
10191 if (regdepslen == regdepstotlen)
10192 {
10193 regdepstotlen += 20;
10194 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10195 }
10196
10197 regdeps[regdepslen] = *spec;
10198 regdeps[regdepslen].depind = depind;
10199 regdeps[regdepslen].path = path;
10200 regdeps[regdepslen].file = CURR_SLOT.src_file;
10201 regdeps[regdepslen].line = CURR_SLOT.src_line;
10202
10203 print_dependency ("Adding", regdepslen);
10204
10205 ++regdepslen;
10206 }
10207
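/* If DV debugging is enabled, print the dependency at index DEPIND in the
   regdeps list, prefixed with ACTION ("Adding", "Removing", ...).  */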
10208 static void
10209 print_dependency (const char *action, int depind)
10210 {
10211 if (md.debug_dv)
10212 {
10213 fprintf (stderr, " %s %s '%s'",
10214 action, dv_mode[(regdeps[depind].dependency)->mode],
10215 (regdeps[depind].dependency)->name);
10216 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10217 fprintf (stderr, " (%d)", regdeps[depind].index);
10218 if (regdeps[depind].mem_offset.hint)
10219 {
10220 fputs (" ", stderr);
10221 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10222 fputs ("+", stderr);
10223 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10224 }
10225 fprintf (stderr, "\n");
10226 }
10227 }
10228
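/* An instruction serialization (srlz.i) has been seen; any dependency whose
   instruction serialization state has reached STATE_STOP advances to
   STATE_SRLZ and will be removed at the next instruction group break.  */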
10229 static void
10230 instruction_serialization (void)
10231 {
10232 int i;
10233 if (md.debug_dv)
10234 fprintf (stderr, " Instruction serialization\n");
10235 for (i = 0; i < regdepslen; i++)
10236 if (regdeps[i].insn_srlz == STATE_STOP)
10237 regdeps[i].insn_srlz = STATE_SRLZ;
10238 }
10239
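/* A data serialization (srlz.d) has been seen; remove any dependency that
   has reached STATE_STOP for data serialization, as well as any dependency
   with "other" semantics.  */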
10240 static void
10241 data_serialization (void)
10242 {
10243 int i = 0;
10244 if (md.debug_dv)
10245 fprintf (stderr, " Data serialization\n");
10246 while (i < regdepslen)
10247 {
10248 if (regdeps[i].data_srlz == STATE_STOP
10249 /* Note: as of 991210, all "other" dependencies are cleared by a
10250 data serialization. This might change with new tables */
10251 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10252 {
10253 print_dependency ("Removing", i);
10254 regdeps[i] = regdeps[--regdepslen];
10255 }
10256 else
10257 ++i;
10258 }
10259 }
10260
10261 /* Insert stops and serializations as needed to avoid DVs. */
10262
10263 static void
10264 remove_marked_resource (struct rsrc *rs)
10265 {
10266 switch (rs->dependency->semantics)
10267 {
10268 case IA64_DVS_SPECIFIC:
10269 if (md.debug_dv)
10270 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10271 /* Fall through. */
10272 case IA64_DVS_INSTR:
10273 if (md.debug_dv)
10274 fprintf (stderr, "Inserting instr serialization\n");
10275 if (rs->insn_srlz < STATE_STOP)
10276 insn_group_break (1, 0, 0);
10277 if (rs->insn_srlz < STATE_SRLZ)
10278 {
10279 struct slot oldslot = CURR_SLOT;
10280 /* Manually jam a srlz.i insn into the stream */
10281 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10282 CURR_SLOT.user_template = -1;
10283 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10284 instruction_serialization ();
10285 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10286 if (++md.num_slots_in_use >= NUM_SLOTS)
10287 emit_one_bundle ();
10288 CURR_SLOT = oldslot;
10289 }
10290 insn_group_break (1, 0, 0);
10291 break;
10292 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10293 "other" types of DV are eliminated
10294 by a data serialization */
10295 case IA64_DVS_DATA:
10296 if (md.debug_dv)
10297 fprintf (stderr, "Inserting data serialization\n");
10298 if (rs->data_srlz < STATE_STOP)
10299 insn_group_break (1, 0, 0);
10300 {
10301 struct slot oldslot = CURR_SLOT;
10302 /* Manually jam a srlz.d insn into the stream */
10303 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10304 CURR_SLOT.user_template = -1;
10305 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10306 data_serialization ();
10307 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10308 if (++md.num_slots_in_use >= NUM_SLOTS)
10309 emit_one_bundle ();
10310 CURR_SLOT = oldslot;
10311 }
10312 break;
10313 case IA64_DVS_IMPLIED:
10314 case IA64_DVS_IMPLIEDF:
10315 if (md.debug_dv)
10316 fprintf (stderr, "Inserting stop\n");
10317 insn_group_break (1, 0, 0);
10318 break;
10319 default:
10320 break;
10321 }
10322 }
10323
10324 /* Check the resources used by the given opcode against the current dependency
10325 list.
10326
10327 The check is run once for each execution path encountered. In this case,
10328 a unique execution path is the sequence of instructions following a code
10329 entry point, e.g. the following has three execution paths, one starting
10330 at L0, one at L1, and one at L2.
10331
10332 L0: nop
10333 L1: add
10334 L2: add
10335 br.ret
10336 */
10337
10338 static void
10339 check_dependencies (struct ia64_opcode *idesc)
10340 {
10341 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10342 int path;
10343 int i;
10344
10345 /* Note that the number of marked resources may change within the
10346 loop if in auto mode. */
10347 i = 0;
10348 while (i < regdepslen)
10349 {
10350 struct rsrc *rs = &regdeps[i];
10351 const struct ia64_dependency *dep = rs->dependency;
10352 int chkind;
10353 int note;
10354 int start_over = 0;
10355
10356 if (dep->semantics == IA64_DVS_NONE
10357 || (chkind = depends_on (rs->depind, idesc)) == -1)
10358 {
10359 ++i;
10360 continue;
10361 }
10362
10363 note = NOTE (opdeps->chks[chkind]);
10364
10365 /* Check this resource against each execution path seen thus far. */
10366 for (path = 0; path <= md.path; path++)
10367 {
10368 int matchtype;
10369
10370 /* If the dependency wasn't on the path being checked, ignore it. */
10371 if (rs->path < path)
10372 continue;
10373
10374 /* If the QP for this insn implies a QP which has branched, don't
10375 bother checking. Ed. NOTE: I don't think this check is terribly
10376 useful; what's the point of generating code which will only be
10377 reached if its QP is zero?
10378 This code was specifically inserted to handle the following code,
10379 based on notes from Intel's DV checking code, where p1 implies p2.
10380
10381 mov r4 = 2
10382 (p2) br.cond L
10383 (p1) mov r4 = 7
10384 */
10385 if (CURR_SLOT.qp_regno != 0)
10386 {
10387 int skip = 0;
10388 int implies;
10389 for (implies = 0; implies < qp_implieslen; implies++)
10390 {
10391 if (qp_implies[implies].path >= path
10392 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10393 && qp_implies[implies].p2_branched)
10394 {
10395 skip = 1;
10396 break;
10397 }
10398 }
10399 if (skip)
10400 continue;
10401 }
10402
10403 if ((matchtype = resources_match (rs, idesc, note,
10404 CURR_SLOT.qp_regno, path)) != 0)
10405 {
10406 char msg[1024];
10407 char pathmsg[256] = "";
10408 char indexmsg[256] = "";
10409 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10410
10411 if (path != 0)
10412 snprintf (pathmsg, sizeof (pathmsg),
10413 " when entry is at label '%s'",
10414 md.entry_labels[path - 1]);
10415 if (matchtype == 1 && rs->index >= 0)
10416 snprintf (indexmsg, sizeof (indexmsg),
10417 ", specific resource number is %d",
10418 rs->index);
10419 snprintf (msg, sizeof (msg),
10420 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10421 idesc->name,
10422 (certain ? "violates" : "may violate"),
10423 dv_mode[dep->mode], dep->name,
10424 dv_sem[dep->semantics],
10425 pathmsg, indexmsg);
10426
10427 if (md.explicit_mode)
10428 {
10429 as_warn ("%s", msg);
10430 if (path < md.path)
10431 as_warn (_("Only the first path encountering the conflict is reported"));
10432 as_warn_where (rs->file, rs->line,
10433 _("This is the location of the conflicting usage"));
10434 /* Don't bother checking other paths, to avoid duplicating
10435 the same warning */
10436 break;
10437 }
10438 else
10439 {
10440 if (md.debug_dv)
10441 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10442
10443 remove_marked_resource (rs);
10444
10445 /* since the set of dependencies has changed, start over */
10446 /* FIXME -- since we're removing dvs as we go, we
10447 probably don't really need to start over... */
10448 start_over = 1;
10449 break;
10450 }
10451 }
10452 }
10453 if (start_over)
10454 i = 0;
10455 else
10456 ++i;
10457 }
10458 }
10459
10460 /* Register new dependencies based on the given opcode. */
10461
10462 static void
10463 mark_resources (struct ia64_opcode *idesc)
10464 {
10465 int i;
10466 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10467 int add_only_qp_reads = 0;
10468
10469 /* A conditional branch only uses its resources if it is taken; if it is
10470 taken, we stop following that path, so if it is not taken we register
10471 only QP reads. The other branch types effectively *always* write
10472 their resources. */
10473 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10474 {
10475 add_only_qp_reads = 1;
10476 }
10477
10478 if (md.debug_dv)
10479 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10480
10481 for (i = 0; i < opdeps->nregs; i++)
10482 {
10483 const struct ia64_dependency *dep;
10484 struct rsrc specs[MAX_SPECS];
10485 int note;
10486 int path;
10487 int count;
10488
10489 dep = ia64_find_dependency (opdeps->regs[i]);
10490 note = NOTE (opdeps->regs[i]);
10491
10492 if (add_only_qp_reads
10493 && !(dep->mode == IA64_DV_WAR
10494 && (dep->specifier == IA64_RS_PR
10495 || dep->specifier == IA64_RS_PRr
10496 || dep->specifier == IA64_RS_PR63)))
10497 continue;
10498
10499 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10500
10501 while (count-- > 0)
10502 {
10503 mark_resource (idesc, dep, &specs[count],
10504 DEP (opdeps->regs[i]), md.path);
10505 }
10506
10507 /* The execution path may affect register values, which may in turn
10508 affect which indirect-access resources are accessed. */
10509 switch (dep->specifier)
10510 {
10511 default:
10512 break;
10513 case IA64_RS_CPUID:
10514 case IA64_RS_DBR:
10515 case IA64_RS_IBR:
10516 case IA64_RS_MSR:
10517 case IA64_RS_PKR:
10518 case IA64_RS_PMC:
10519 case IA64_RS_PMD:
10520 case IA64_RS_RR:
10521 for (path = 0; path < md.path; path++)
10522 {
10523 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10524 while (count-- > 0)
10525 mark_resource (idesc, dep, &specs[count],
10526 DEP (opdeps->regs[i]), path);
10527 }
10528 break;
10529 }
10530 }
10531 }
10532
10533 /* Remove dependencies when they no longer apply. */
10534
10535 static void
10536 update_dependencies (struct ia64_opcode *idesc)
10537 {
10538 int i;
10539
10540 if (strcmp (idesc->name, "srlz.i") == 0)
10541 {
10542 instruction_serialization ();
10543 }
10544 else if (strcmp (idesc->name, "srlz.d") == 0)
10545 {
10546 data_serialization ();
10547 }
10548 else if (is_interruption_or_rfi (idesc)
10549 || is_taken_branch (idesc))
10550 {
10551 /* Although technically the taken branch doesn't clear dependencies
10552 which require a srlz.[id], we don't follow the branch; the next
10553 instruction is assumed to start with a clean slate. */
10554 regdepslen = 0;
10555 md.path = 0;
10556 }
10557 else if (is_conditional_branch (idesc)
10558 && CURR_SLOT.qp_regno != 0)
10559 {
10560 int is_call = strstr (idesc->name, ".call") != NULL;
10561
10562 for (i = 0; i < qp_implieslen; i++)
10563 {
10564 /* If the conditional branch's predicate is implied by the predicate
10565 in an existing dependency, remove that dependency. */
10566 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10567 {
10568 int depind = 0;
10569 /* Note that this implied predicate takes a branch so that if
10570 a later insn generates a DV but its predicate implies this
10571 one, we can avoid the false DV warning. */
10572 qp_implies[i].p2_branched = 1;
10573 while (depind < regdepslen)
10574 {
10575 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10576 {
10577 print_dependency ("Removing", depind);
10578 regdeps[depind] = regdeps[--regdepslen];
10579 }
10580 else
10581 ++depind;
10582 }
10583 }
10584 }
10585 /* Any marked resources which have this same predicate should be
10586 cleared, provided that the QP hasn't been modified between the
10587 marking instruction and the branch. */
10588 if (is_call)
10589 {
10590 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10591 }
10592 else
10593 {
10594 i = 0;
10595 while (i < regdepslen)
10596 {
10597 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10598 && regdeps[i].link_to_qp_branch
10599 && (regdeps[i].file != CURR_SLOT.src_file
10600 || regdeps[i].line != CURR_SLOT.src_line))
10601 {
10602 /* Treat like a taken branch */
10603 print_dependency ("Removing", i);
10604 regdeps[i] = regdeps[--regdepslen];
10605 }
10606 else
10607 ++i;
10608 }
10609 }
10610 }
10611 }
10612
10613 /* Examine the current instruction for dependency violations. */
10614
10615 static int
10616 check_dv (struct ia64_opcode *idesc)
10617 {
10618 if (md.debug_dv)
10619 {
10620 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10621 idesc->name, CURR_SLOT.src_line,
10622 idesc->dependencies->nchks,
10623 idesc->dependencies->nregs);
10624 }
10625
10626 /* Look through the list of currently marked resources; if the current
10627 instruction has the dependency in its chks list which uses that resource,
10628 check against the specific resources used. */
10629 check_dependencies (idesc);
10630
10631 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10632 then add them to the list of marked resources. */
10633 mark_resources (idesc);
10634
10635 /* There are several types of dependency semantics, and each has its own
10636 requirements for being cleared
10637
10638 Instruction serialization (insns separated by interruption, rfi, or
10639 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10640
10641 Data serialization (instruction serialization, or writer + srlz.d +
10642 reader, where writer and srlz.d are in separate groups) clears
10643 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10644 always be the case).
10645
10646 Instruction group break (groups separated by stop, taken branch,
10647 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10648 */
10649 update_dependencies (idesc);
10650
10651 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10652 warning. Keep track of as many as possible that are useful. */
10653 note_register_values (idesc);
10654
10655 /* We don't need or want this anymore. */
10656 md.mem_offset.hint = 0;
10657
10658 return 0;
10659 }
10660
10661 /* Translate one line of assembly. Pseudo-ops and labels do not show
10662 up here. */
10663 void
10664 md_assemble (char *str)
10665 {
10666 char *saved_input_line_pointer, *temp;
10667 const char *mnemonic;
10668 const struct pseudo_opcode *pdesc;
10669 struct ia64_opcode *idesc;
10670 unsigned char qp_regno;
10671 unsigned int flags;
10672 int ch;
10673
10674 saved_input_line_pointer = input_line_pointer;
10675 input_line_pointer = str;
10676
10677 /* extract the opcode (mnemonic): */
10678
10679 ch = get_symbol_name (&temp);
10680 mnemonic = temp;
10681 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10682 if (pdesc)
10683 {
10684 (void) restore_line_pointer (ch);
10685 (*pdesc->handler) (pdesc->arg);
10686 goto done;
10687 }
10688
10689 /* Find the instruction descriptor matching the arguments. */
10690
10691 idesc = ia64_find_opcode (mnemonic);
10692 (void) restore_line_pointer (ch);
10693 if (!idesc)
10694 {
10695 as_bad (_("Unknown opcode `%s'"), mnemonic);
10696 goto done;
10697 }
10698
10699 idesc = parse_operands (idesc);
10700 if (!idesc)
10701 goto done;
10702
10703 /* Handle the dynamic ops we can handle now: */
10704 if (idesc->type == IA64_TYPE_DYN)
10705 {
10706 if (strcmp (idesc->name, "add") == 0)
10707 {
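/* Note on the choice: "addl" encodes a 22-bit immediate but its register
   operand is restricted to r0-r3, whereas "adds" accepts any GR but only
   a 14-bit immediate.  */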
10708 if (CURR_SLOT.opnd[2].X_op == O_register
10709 && CURR_SLOT.opnd[2].X_add_number < 4)
10710 mnemonic = "addl";
10711 else
10712 mnemonic = "adds";
10713 ia64_free_opcode (idesc);
10714 idesc = ia64_find_opcode (mnemonic);
10715 }
10716 else if (strcmp (idesc->name, "mov") == 0)
10717 {
10718 enum ia64_opnd opnd1, opnd2;
10719 int rop;
10720
10721 opnd1 = idesc->operands[0];
10722 opnd2 = idesc->operands[1];
10723 if (opnd1 == IA64_OPND_AR3)
10724 rop = 0;
10725 else if (opnd2 == IA64_OPND_AR3)
10726 rop = 1;
10727 else
10728 abort ();
10729 if (CURR_SLOT.opnd[rop].X_op == O_register)
10730 {
10731 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10732 mnemonic = "mov.i";
10733 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10734 mnemonic = "mov.m";
10735 else
10736 rop = -1;
10737 }
10738 else
10739 abort ();
10740 if (rop >= 0)
10741 {
10742 ia64_free_opcode (idesc);
10743 idesc = ia64_find_opcode (mnemonic);
10744 while (idesc != NULL
10745 && (idesc->operands[0] != opnd1
10746 || idesc->operands[1] != opnd2))
10747 idesc = get_next_opcode (idesc);
10748 }
10749 }
10750 }
10751 else if (strcmp (idesc->name, "mov.i") == 0
10752 || strcmp (idesc->name, "mov.m") == 0)
10753 {
10754 enum ia64_opnd opnd1, opnd2;
10755 int rop;
10756
10757 opnd1 = idesc->operands[0];
10758 opnd2 = idesc->operands[1];
10759 if (opnd1 == IA64_OPND_AR3)
10760 rop = 0;
10761 else if (opnd2 == IA64_OPND_AR3)
10762 rop = 1;
10763 else
10764 abort ();
10765 if (CURR_SLOT.opnd[rop].X_op == O_register)
10766 {
10767 char unit = 'a';
10768 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10769 unit = 'i';
10770 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10771 unit = 'm';
10772 if (unit != 'a' && unit != idesc->name [4])
10773 as_bad (_("AR %d can only be accessed by %c-unit"),
10774 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10775 TOUPPER (unit));
10776 }
10777 }
10778 else if (strcmp (idesc->name, "hint.b") == 0)
10779 {
10780 switch (md.hint_b)
10781 {
10782 case hint_b_ok:
10783 break;
10784 case hint_b_warning:
10785 as_warn (_("hint.b may be treated as nop"));
10786 break;
10787 case hint_b_error:
10788 as_bad (_("hint.b shouldn't be used"));
10789 break;
10790 }
10791 }
10792
10793 qp_regno = 0;
10794 if (md.qp.X_op == O_register)
10795 {
10796 qp_regno = md.qp.X_add_number - REG_P;
10797 md.qp.X_op = O_absent;
10798 }
10799
10800 flags = idesc->flags;
10801
10802 if ((flags & IA64_OPCODE_FIRST) != 0)
10803 {
10804 /* The alignment frag has to end with a stop bit only if the
10805 next instruction after the alignment directive has to be
10806 the first instruction in an instruction group. */
10807 if (align_frag)
10808 {
10809 while (align_frag->fr_type != rs_align_code)
10810 {
10811 align_frag = align_frag->fr_next;
10812 if (!align_frag)
10813 break;
10814 }
10815 /* align_frag can be NULL if there are directives in
10816 between. */
10817 if (align_frag && align_frag->fr_next == frag_now)
10818 align_frag->tc_frag_data = 1;
10819 }
10820
10821 insn_group_break (1, 0, 0);
10822 }
10823 align_frag = NULL;
10824
10825 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10826 {
10827 as_bad (_("`%s' cannot be predicated"), idesc->name);
10828 goto done;
10829 }
10830
10831 /* Build the instruction. */
10832 CURR_SLOT.qp_regno = qp_regno;
10833 CURR_SLOT.idesc = idesc;
10834 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10835 dwarf2_where (&CURR_SLOT.debug_line);
10836 dwarf2_consume_line_info ();
10837
10838 /* Add unwind entries, if there are any. */
10839 if (unwind.current_entry)
10840 {
10841 CURR_SLOT.unwind_record = unwind.current_entry;
10842 unwind.current_entry = NULL;
10843 }
10844 if (unwind.pending_saves)
10845 {
10846 if (unwind.pending_saves->next)
10847 {
10848 /* Attach the next pending save to the next slot so that its
10849 slot number will get set correctly. */
10850 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10851 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10852 }
10853 else
10854 unwind.pending_saves = NULL;
10855 }
10856 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10857 unwind.insn = 1;
10858
10859 /* Check for dependency violations. */
10860 if (md.detect_dv)
10861 check_dv (idesc);
10862
10863 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10864 if (++md.num_slots_in_use >= NUM_SLOTS)
10865 emit_one_bundle ();
10866
10867 if ((flags & IA64_OPCODE_LAST) != 0)
10868 insn_group_break (1, 0, 0);
10869
10870 md.last_text_seg = now_seg;
10871
10872 done:
10873 input_line_pointer = saved_input_line_pointer;
10874 }
10875
10876 /* Called when symbol NAME cannot be found in the symbol table.
10877 Should be used for dynamic valued symbols only. */
10878
10879 symbolS *
10880 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10881 {
10882 return 0;
10883 }
10884
10885 /* Called for any expression that can not be recognized. When the
10886 function is called, `input_line_pointer' will point to the start of
10887 the expression. */
10888
10889 void
10890 md_operand (expressionS *e)
10891 {
10892 switch (*input_line_pointer)
10893 {
10894 case '[':
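/* "[rN]" is the index syntax for the indirect register files
   (cpuid[], dbr[], pmc[], ...); record the GR as an O_index operand.  */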
10895 ++input_line_pointer;
10896 expression_and_evaluate (e);
10897 if (*input_line_pointer != ']')
10898 {
10899 as_bad (_("Closing bracket missing"));
10900 goto err;
10901 }
10902 else
10903 {
10904 if (e->X_op != O_register
10905 || e->X_add_number < REG_GR
10906 || e->X_add_number > REG_GR + 127)
10907 {
10908 as_bad (_("Index must be a general register"));
10909 e->X_add_number = REG_GR;
10910 }
10911
10912 ++input_line_pointer;
10913 e->X_op = O_index;
10914 }
10915 break;
10916
10917 default:
10918 break;
10919 }
10920 return;
10921
10922 err:
10923 ignore_rest_of_line ();
10924 }
10925
10926 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10927 a section symbol plus some offset. For relocs involving @fptr()
10928 directives, we don't want such adjustments since we need to have the
10929 original symbol's name in the reloc. */
10930 int
10931 ia64_fix_adjustable (fixS *fix)
10932 {
10933 /* Prevent all adjustments to global symbols */
10934 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10935 return 0;
10936
10937 switch (fix->fx_r_type)
10938 {
10939 case BFD_RELOC_IA64_FPTR64I:
10940 case BFD_RELOC_IA64_FPTR32MSB:
10941 case BFD_RELOC_IA64_FPTR32LSB:
10942 case BFD_RELOC_IA64_FPTR64MSB:
10943 case BFD_RELOC_IA64_FPTR64LSB:
10944 case BFD_RELOC_IA64_LTOFF_FPTR22:
10945 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10946 return 0;
10947 default:
10948 break;
10949 }
10950
10951 return 1;
10952 }
10953
10954 int
10955 ia64_force_relocation (fixS *fix)
10956 {
10957 switch (fix->fx_r_type)
10958 {
10959 case BFD_RELOC_IA64_FPTR64I:
10960 case BFD_RELOC_IA64_FPTR32MSB:
10961 case BFD_RELOC_IA64_FPTR32LSB:
10962 case BFD_RELOC_IA64_FPTR64MSB:
10963 case BFD_RELOC_IA64_FPTR64LSB:
10964
10965 case BFD_RELOC_IA64_LTOFF22:
10966 case BFD_RELOC_IA64_LTOFF64I:
10967 case BFD_RELOC_IA64_LTOFF_FPTR22:
10968 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10969 case BFD_RELOC_IA64_PLTOFF22:
10970 case BFD_RELOC_IA64_PLTOFF64I:
10971 case BFD_RELOC_IA64_PLTOFF64MSB:
10972 case BFD_RELOC_IA64_PLTOFF64LSB:
10973
10974 case BFD_RELOC_IA64_LTOFF22X:
10975 case BFD_RELOC_IA64_LDXMOV:
10976 return 1;
10977
10978 default:
10979 break;
10980 }
10981
10982 return generic_force_reloc (fix);
10983 }
10984
10985 /* Decide from what point a pc-relative relocation is relative to,
10986 relative to the pc-relative fixup. Er, relatively speaking. */
10987 long
10988 ia64_pcrel_from_section (fixS *fix, segT sec)
10989 {
10990 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10991
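/* In code sections, IA-64 pc-relative relocations are taken from the
   address of the containing 16-byte bundle, so mask off the slot and
   offset bits.  */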
10992 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10993 off &= ~0xfUL;
10994
10995 return off;
10996 }
10997
10998
10999 /* Used to emit section-relative relocs for the dwarf2 debug data. */
11000 void
11001 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11002 {
11003 expressionS exp;
11004
11005 exp.X_op = O_pseudo_fixup;
11006 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11007 exp.X_add_number = 0;
11008 exp.X_add_symbol = symbol;
11009 emit_expr (&exp, size);
11010 }
11011
11012 /* This is called whenever some data item (not an instruction) needs a
11013 fixup. We pick the right reloc code depending on the byte order
11014 currently in effect. */
11015 void
11016 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11017 bfd_reloc_code_real_type code)
11018 {
11019 fixS *fix;
11020
11021 switch (nbytes)
11022 {
11023 /* There are no relocs for 8 and 16 bit quantities, but we allow
11024 them here since they will work fine as long as the expression
11025 is fully defined at the end of the pass over the source file. */
11026 case 1: code = BFD_RELOC_8; break;
11027 case 2: code = BFD_RELOC_16; break;
11028 case 4:
11029 if (target_big_endian)
11030 code = BFD_RELOC_IA64_DIR32MSB;
11031 else
11032 code = BFD_RELOC_IA64_DIR32LSB;
11033 break;
11034
11035 case 8:
11036 /* In 32-bit mode, data8 could mean function descriptors too. */
11037 if (exp->X_op == O_pseudo_fixup
11038 && exp->X_op_symbol
11039 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11040 && !(md.flags & EF_IA_64_ABI64))
11041 {
11042 if (target_big_endian)
11043 code = BFD_RELOC_IA64_IPLTMSB;
11044 else
11045 code = BFD_RELOC_IA64_IPLTLSB;
11046 exp->X_op = O_symbol;
11047 break;
11048 }
11049 else
11050 {
11051 if (target_big_endian)
11052 code = BFD_RELOC_IA64_DIR64MSB;
11053 else
11054 code = BFD_RELOC_IA64_DIR64LSB;
11055 break;
11056 }
11057
11058 case 16:
11059 if (exp->X_op == O_pseudo_fixup
11060 && exp->X_op_symbol
11061 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11062 {
11063 if (target_big_endian)
11064 code = BFD_RELOC_IA64_IPLTMSB;
11065 else
11066 code = BFD_RELOC_IA64_IPLTLSB;
11067 exp->X_op = O_symbol;
11068 break;
11069 }
11070 /* FALLTHRU */
11071
11072 default:
11073 as_bad (_("Unsupported fixup size %d"), nbytes);
11074 ignore_rest_of_line ();
11075 return;
11076 }
11077
11078 if (exp->X_op == O_pseudo_fixup)
11079 {
11080 exp->X_op = O_symbol;
11081 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11082 /* ??? If code unchanged, unsupported. */
11083 }
11084
11085 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11086 /* We need to store the byte order in effect in case we're going
11087 to fix an 8 or 16 bit relocation (for which there are no real
11088 relocs available). See md_apply_fix(). */
11089 fix->tc_fix_data.bigendian = target_big_endian;
11090 }
11091
11092 /* Return the actual relocation we wish to associate with the pseudo
11093 reloc described by SYM and R_TYPE. SYM should be one of the
11094 symbols in the pseudo_func array, or NULL. */
11095
11096 static bfd_reloc_code_real_type
11097 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11098 {
11099 bfd_reloc_code_real_type newr = 0;
11100 const char *type = NULL, *suffix = "";
11101
11102 if (sym == NULL)
11103 {
11104 return r_type;
11105 }
11106
11107 switch (S_GET_VALUE (sym))
11108 {
11109 case FUNC_FPTR_RELATIVE:
11110 switch (r_type)
11111 {
11112 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11113 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11114 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11115 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11116 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11117 default: type = "FPTR"; break;
11118 }
11119 break;
11120
11121 case FUNC_GP_RELATIVE:
11122 switch (r_type)
11123 {
11124 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11125 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11126 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11127 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11128 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11129 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11130 default: type = "GPREL"; break;
11131 }
11132 break;
11133
11134 case FUNC_LT_RELATIVE:
11135 switch (r_type)
11136 {
11137 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11138 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11139 default: type = "LTOFF"; break;
11140 }
11141 break;
11142
11143 case FUNC_LT_RELATIVE_X:
11144 switch (r_type)
11145 {
11146 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11147 default: type = "LTOFF"; suffix = "X"; break;
11148 }
11149 break;
11150
11151 case FUNC_PC_RELATIVE:
11152 switch (r_type)
11153 {
11154 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11155 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11156 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11157 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11158 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11159 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11160 default: type = "PCREL"; break;
11161 }
11162 break;
11163
11164 case FUNC_PLT_RELATIVE:
11165 switch (r_type)
11166 {
11167 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11168 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11169 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11170 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11171 default: type = "PLTOFF"; break;
11172 }
11173 break;
11174
11175 case FUNC_SEC_RELATIVE:
11176 switch (r_type)
11177 {
11178 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11179 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11180 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11181 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11182 default: type = "SECREL"; break;
11183 }
11184 break;
11185
11186 case FUNC_SEG_RELATIVE:
11187 switch (r_type)
11188 {
11189 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11190 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11191 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11192 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11193 default: type = "SEGREL"; break;
11194 }
11195 break;
11196
11197 case FUNC_LTV_RELATIVE:
11198 switch (r_type)
11199 {
11200 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11201 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11202 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11203 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11204 default: type = "LTV"; break;
11205 }
11206 break;
11207
11208 case FUNC_LT_FPTR_RELATIVE:
11209 switch (r_type)
11210 {
11211 case BFD_RELOC_IA64_IMM22:
11212 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11213 case BFD_RELOC_IA64_IMM64:
11214 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11215 case BFD_RELOC_IA64_DIR32MSB:
11216 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11217 case BFD_RELOC_IA64_DIR32LSB:
11218 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11219 case BFD_RELOC_IA64_DIR64MSB:
11220 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11221 case BFD_RELOC_IA64_DIR64LSB:
11222 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11223 default:
11224 type = "LTOFF_FPTR"; break;
11225 }
11226 break;
11227
11228 case FUNC_TP_RELATIVE:
11229 switch (r_type)
11230 {
11231 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11232 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11233 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11234 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11235 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11236 default: type = "TPREL"; break;
11237 }
11238 break;
11239
11240 case FUNC_LT_TP_RELATIVE:
11241 switch (r_type)
11242 {
11243 case BFD_RELOC_IA64_IMM22:
11244 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11245 default:
11246 type = "LTOFF_TPREL"; break;
11247 }
11248 break;
11249
11250 case FUNC_DTP_MODULE:
11251 switch (r_type)
11252 {
11253 case BFD_RELOC_IA64_DIR64MSB:
11254 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11255 case BFD_RELOC_IA64_DIR64LSB:
11256 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11257 default:
11258 type = "DTPMOD"; break;
11259 }
11260 break;
11261
11262 case FUNC_LT_DTP_MODULE:
11263 switch (r_type)
11264 {
11265 case BFD_RELOC_IA64_IMM22:
11266 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11267 default:
11268 type = "LTOFF_DTPMOD"; break;
11269 }
11270 break;
11271
11272 case FUNC_DTP_RELATIVE:
11273 switch (r_type)
11274 {
11275 case BFD_RELOC_IA64_DIR32MSB:
11276 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11277 case BFD_RELOC_IA64_DIR32LSB:
11278 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11279 case BFD_RELOC_IA64_DIR64MSB:
11280 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11281 case BFD_RELOC_IA64_DIR64LSB:
11282 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11283 case BFD_RELOC_IA64_IMM14:
11284 newr = BFD_RELOC_IA64_DTPREL14; break;
11285 case BFD_RELOC_IA64_IMM22:
11286 newr = BFD_RELOC_IA64_DTPREL22; break;
11287 case BFD_RELOC_IA64_IMM64:
11288 newr = BFD_RELOC_IA64_DTPREL64I; break;
11289 default:
11290 type = "DTPREL"; break;
11291 }
11292 break;
11293
11294 case FUNC_LT_DTP_RELATIVE:
11295 switch (r_type)
11296 {
11297 case BFD_RELOC_IA64_IMM22:
11298 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11299 default:
11300 type = "LTOFF_DTPREL"; break;
11301 }
11302 break;
11303
11304 case FUNC_IPLT_RELOC:
11305 switch (r_type)
11306 {
11307 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11308 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11309 default: type = "IPLT"; break;
11310 }
11311 break;
11312
11313 #ifdef TE_VMS
11314 case FUNC_SLOTCOUNT_RELOC:
11315 return DUMMY_RELOC_IA64_SLOTCOUNT;
11316 #endif
11317
11318 default:
11319 abort ();
11320 }
11321
11322 if (newr)
11323 return newr;
11324 else
11325 {
11326 int width;
11327
11328 if (!type)
11329 abort ();
11330 switch (r_type)
11331 {
11332 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11333 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11334 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11335 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11336 case BFD_RELOC_UNUSED: width = 13; break;
11337 case BFD_RELOC_IA64_IMM14: width = 14; break;
11338 case BFD_RELOC_IA64_IMM22: width = 22; break;
11339 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11340 default: abort ();
11341 }
11342
11343 /* This should be an error, but since previously there wasn't any
11344 diagnostic here, don't make it fail because of this for now. */
11345 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11346 return r_type;
11347 }
11348 }
11349
11350 /* Here is where we generate the appropriate reloc for pseudo relocation
11351 functions. */
11352 void
11353 ia64_validate_fix (fixS *fix)
11354 {
11355 switch (fix->fx_r_type)
11356 {
11357 case BFD_RELOC_IA64_FPTR64I:
11358 case BFD_RELOC_IA64_FPTR32MSB:
11359 case BFD_RELOC_IA64_FPTR64LSB:
11360 case BFD_RELOC_IA64_LTOFF_FPTR22:
11361 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11362 if (fix->fx_offset != 0)
11363 as_bad_where (fix->fx_file, fix->fx_line,
11364 _("No addend allowed in @fptr() relocation"));
11365 break;
11366 default:
11367 break;
11368 }
11369 }
11370
11371 static void
11372 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11373 {
11374 bfd_vma insn[3], t0, t1, control_bits;
11375 const char *err;
11376 char *fixpos;
11377 long slot;
11378
11379 slot = fix->fx_where & 0x3;
11380 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11381
11382 /* Bundles are always in little-endian byte order */
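/* A 128-bit bundle holds a 5-bit template plus three 41-bit slots:
   template in bits 0-4, slot 0 in bits 5-45, slot 1 in bits 46-86
   (straddling the two 64-bit halves, hence the splicing below) and
   slot 2 in bits 87-127.  */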
11383 t0 = bfd_getl64 (fixpos);
11384 t1 = bfd_getl64 (fixpos + 8);
11385 control_bits = t0 & 0x1f;
11386 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11387 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11388 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11389
11390 err = NULL;
11391 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11392 {
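/* This is the 64-bit immediate of the X-unit long move ("movl"):
   bits 22..62 of the value fill slot 1, while the low 22 bits and the
   sign bit are scattered into the fields of the X-unit slot.  */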
11393 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11394 insn[2] |= (((value & 0x7f) << 13)
11395 | (((value >> 7) & 0x1ff) << 27)
11396 | (((value >> 16) & 0x1f) << 22)
11397 | (((value >> 21) & 0x1) << 21)
11398 | (((value >> 63) & 0x1) << 36));
11399 }
11400 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11401 {
11402 if (value & ~0x3fffffffffffffffULL)
11403 err = _("integer operand out of range");
11404 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11405 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11406 }
11407 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11408 {
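/* A 60-bit IP-relative target, as used by the long branch forms:
   drop the 4 slot bits, then place the middle 39 bits in slot 1 and
   the low 20 bits plus the top (sign) bit in the X-unit slot.  */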
11409 value >>= 4;
11410 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11411 insn[2] |= ((((value >> 59) & 0x1) << 36)
11412 | (((value >> 0) & 0xfffff) << 13));
11413 }
11414 else
11415 err = (*odesc->insert) (odesc, value, insn + slot);
11416
11417 if (err)
11418 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11419
11420 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11421 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11422 number_to_chars_littleendian (fixpos + 0, t0, 8);
11423 number_to_chars_littleendian (fixpos + 8, t1, 8);
11424 }
11425
11426 /* Attempt to simplify or even eliminate a fixup. The return value is
11427 ignored; perhaps it was once meaningful, but now it is historical.
11428 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11429
11430 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11431 (if possible). */
11432
11433 void
11434 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11435 {
11436 char *fixpos;
11437 valueT value = *valP;
11438
11439 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11440
11441 if (fix->fx_pcrel)
11442 {
11443 switch (fix->fx_r_type)
11444 {
11445 case BFD_RELOC_IA64_PCREL21B: break;
11446 case BFD_RELOC_IA64_PCREL21BI: break;
11447 case BFD_RELOC_IA64_PCREL21F: break;
11448 case BFD_RELOC_IA64_PCREL21M: break;
11449 case BFD_RELOC_IA64_PCREL60B: break;
11450 case BFD_RELOC_IA64_PCREL22: break;
11451 case BFD_RELOC_IA64_PCREL64I: break;
11452 case BFD_RELOC_IA64_PCREL32MSB: break;
11453 case BFD_RELOC_IA64_PCREL32LSB: break;
11454 case BFD_RELOC_IA64_PCREL64MSB: break;
11455 case BFD_RELOC_IA64_PCREL64LSB: break;
11456 default:
11457 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11458 fix->fx_r_type);
11459 break;
11460 }
11461 }
11462 if (fix->fx_addsy)
11463 {
11464 switch ((unsigned) fix->fx_r_type)
11465 {
11466 case BFD_RELOC_UNUSED:
11467 /* This must be a TAG13 or TAG13b operand. There are no external
11468 relocs defined for them, so we must give an error. */
11469 as_bad_where (fix->fx_file, fix->fx_line,
11470 _("%s must have a constant value"),
11471 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11472 fix->fx_done = 1;
11473 return;
11474
11475 case BFD_RELOC_IA64_TPREL14:
11476 case BFD_RELOC_IA64_TPREL22:
11477 case BFD_RELOC_IA64_TPREL64I:
11478 case BFD_RELOC_IA64_LTOFF_TPREL22:
11479 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11480 case BFD_RELOC_IA64_DTPREL14:
11481 case BFD_RELOC_IA64_DTPREL22:
11482 case BFD_RELOC_IA64_DTPREL64I:
11483 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11484 S_SET_THREAD_LOCAL (fix->fx_addsy);
11485 break;
11486
11487 #ifdef TE_VMS
11488 case DUMMY_RELOC_IA64_SLOTCOUNT:
11489 as_bad_where (fix->fx_file, fix->fx_line,
11490 _("cannot resolve @slotcount parameter"));
11491 fix->fx_done = 1;
11492 return;
11493 #endif
11494
11495 default:
11496 break;
11497 }
11498 }
11499 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11500 {
11501 #ifdef TE_VMS
11502 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11503 {
11504 /* For @slotcount, convert a difference of addresses to a difference
11505 of slots. */
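/* The addresses being subtracted evidently carry the slot number (0, 1
   or 2) in their low bits, so the low nibble of the difference is either
   a slot delta (0-2) or a borrow (0x0f = -1, 0x0e = -2 modulo 16); each
   16-byte bundle contributes three slots.  */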
11506 valueT v;
11507
11508 v = (value >> 4) * 3;
11509 switch (value & 0x0f)
11510 {
11511 case 0:
11512 case 1:
11513 case 2:
11514 v += value & 0x0f;
11515 break;
11516 case 0x0f:
11517 v += 2;
11518 break;
11519 case 0x0e:
11520 v += 1;
11521 break;
11522 default:
11523 as_bad (_("invalid @slotcount value"));
11524 }
11525 value = v;
11526 }
11527 #endif
11528
11529 if (fix->tc_fix_data.bigendian)
11530 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11531 else
11532 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11533 fix->fx_done = 1;
11534 }
11535 else
11536 {
11537 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11538 fix->fx_done = 1;
11539 }
11540 }
11541
11542 /* Generate the BFD reloc to be stuck in the object file from the
11543 fixup used internally in the assembler. */
11544
11545 arelent *
11546 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11547 {
11548 arelent *reloc;
11549
11550 reloc = XNEW (arelent);
11551 reloc->sym_ptr_ptr = XNEW (asymbol *);
11552 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11553 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11554 reloc->addend = fixp->fx_offset;
11555 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11556
11557 if (!reloc->howto)
11558 {
11559 as_bad_where (fixp->fx_file, fixp->fx_line,
11560 _("Cannot represent %s relocation in object file"),
11561 bfd_get_reloc_code_name (fixp->fx_r_type));
11562 free (reloc);
11563 return NULL;
11564 }
11565 return reloc;
11566 }
11567
11568 /* Turn a string in input_line_pointer into a floating point constant
11569 of type TYPE, and store the appropriate bytes in *LIT. The number
11570 of bytes emitted is stored in *SIZE. An error message is
11571 returned, or NULL on OK. */
11572
11573 #define MAX_LITTLENUMS 5
11574
11575 const char *
11576 md_atof (int type, char *lit, int *size)
11577 {
11578 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11579 char *t;
11580 int prec;
11581
11582 switch (type)
11583 {
11584 /* IEEE floats */
11585 case 'f':
11586 case 'F':
11587 case 's':
11588 case 'S':
11589 prec = 2;
11590 break;
11591
11592 case 'd':
11593 case 'D':
11594 case 'r':
11595 case 'R':
11596 prec = 4;
11597 break;
11598
11599 case 'x':
11600 case 'X':
11601 case 'p':
11602 case 'P':
11603 prec = 5;
11604 break;
11605
11606 default:
11607 *size = 0;
11608 return _("Unrecognized or unsupported floating point constant");
11609 }
11610 t = atof_ieee (input_line_pointer, type, words);
11611 if (t)
11612 input_line_pointer = t;
11613
11614 (*ia64_float_to_chars) (lit, words, prec);
11615
11616 if (type == 'X')
11617 {
11618 /* It is a 10-byte floating point value with 6 bytes of padding. */
11619 memset (&lit [10], 0, 6);
11620 *size = 8 * sizeof (LITTLENUM_TYPE);
11621 }
11622 else
11623 *size = prec * sizeof (LITTLENUM_TYPE);
11624
11625 return NULL;
11626 }
11627
11628 /* Handle ia64 specific semantics of the align directive. */
11629
11630 void
11631 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11632 const char *fill ATTRIBUTE_UNUSED,
11633 int len ATTRIBUTE_UNUSED,
11634 int max ATTRIBUTE_UNUSED)
11635 {
11636 if (subseg_text_p (now_seg))
11637 ia64_flush_insns ();
11638 }
11639
11640 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11641 of an rs_align_code fragment. */
11642
11643 void
11644 ia64_handle_align (fragS *fragp)
11645 {
11646 int bytes;
11647 char *p;
11648 const unsigned char *nop_type;
11649
11650 if (fragp->fr_type != rs_align_code)
11651 return;
11652
11653 /* Check if this frag has to end with a stop bit. */
11654 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11655
11656 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11657 p = fragp->fr_literal + fragp->fr_fix;
11658
11659 /* If no padding is needed, check whether we need a stop bit. */
11660 if (!bytes && fragp->tc_frag_data)
11661 {
11662 if (fragp->fr_fix < 16)
11663 #if 1
11664 /* FIXME: It won't work with
11665 .align 16
11666 alloc r32=ar.pfs,1,2,4,0
11667 */
11668 ;
11669 #else
11670 as_bad_where (fragp->fr_file, fragp->fr_line,
11671 _("Can't add stop bit to mark end of instruction group"));
11672 #endif
11673 else
11674 /* Bundles are always in little-endian byte order. Make sure
11675 the previous bundle has the stop bit. */
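/* (The low bit of the template field selects the "stop at end of
   bundle" variant of each template, so ORing in 1 is enough.)  */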
11676 *(p - 16) |= 1;
11677 }
11678
11679 /* Make sure we are on a 16-byte boundary, in case someone has been
11680 putting data into a text section. */
11681 if (bytes & 15)
11682 {
11683 int fix = bytes & 15;
11684 memset (p, 0, fix);
11685 p += fix;
11686 bytes -= fix;
11687 fragp->fr_fix += fix;
11688 }
11689
11690 /* Instruction bundles are always little-endian. */
11691 memcpy (p, nop_type, 16);
11692 fragp->fr_var = 16;
11693 }
11694
11695 static void
11696 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11697 int prec)
11698 {
11699 while (prec--)
11700 {
11701 number_to_chars_bigendian (lit, (long) (*words++),
11702 sizeof (LITTLENUM_TYPE));
11703 lit += sizeof (LITTLENUM_TYPE);
11704 }
11705 }
11706
11707 static void
11708 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11709 int prec)
11710 {
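/* atof_ieee delivers the most significant LITTLENUM first, so emit
   the words in reverse order on a little-endian target.  */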
11711 while (prec--)
11712 {
11713 number_to_chars_littleendian (lit, (long) (words[prec]),
11714 sizeof (LITTLENUM_TYPE));
11715 lit += sizeof (LITTLENUM_TYPE);
11716 }
11717 }
11718
11719 void
11720 ia64_elf_section_change_hook (void)
11721 {
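/* An IA-64 unwind section needs a link back to the text section it
   describes; if the .section directive didn't specify one, default it
   to the main text section.  */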
11722 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11723 && elf_linked_to_section (now_seg) == NULL)
11724 elf_linked_to_section (now_seg) = text_section;
11725 dot_byteorder (-1);
11726 }
11727
11728 /* Check if a label should be made global. */
11729 void
11730 ia64_check_label (symbolS *label)
11731 {
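/* A second colon at this point means the label was written with the
   "name::" double-colon syntax, which requests a global label.  */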
11732 if (*input_line_pointer == ':')
11733 {
11734 S_SET_EXTERNAL (label);
11735 input_line_pointer++;
11736 }
11737 }
11738
11739 /* Used to remember where .alias and .secalias directives are seen. We
11740 will rename symbol and section names when we are about to output
11741 the relocatable file. */
11742 struct alias
11743 {
11744 const char *file; /* The file where the directive is seen. */
11745 unsigned int line; /* The line number the directive is at. */
11746 const char *name; /* The original name of the symbol. */
11747 };
11748
11749 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11750 .secalias. Otherwise, it is .alias. */
11751 static void
11752 dot_alias (int section)
11753 {
11754 char *name, *alias;
11755 char delim;
11756 char *end_name;
11757 int len;
11758 const char *error_string;
11759 struct alias *h;
11760 const char *a;
11761 struct hash_control *ahash, *nhash;
11762 const char *kind;
11763
11764 delim = get_symbol_name (&name);
11765 end_name = input_line_pointer;
11766 *end_name = delim;
11767
11768 if (name == end_name)
11769 {
11770 as_bad (_("expected symbol name"));
11771 ignore_rest_of_line ();
11772 return;
11773 }
11774
11775 SKIP_WHITESPACE_AFTER_NAME ();
11776
11777 if (*input_line_pointer != ',')
11778 {
11779 *end_name = 0;
11780 as_bad (_("expected comma after \"%s\""), name);
11781 *end_name = delim;
11782 ignore_rest_of_line ();
11783 return;
11784 }
11785
11786 input_line_pointer++;
11787 *end_name = 0;
11788 ia64_canonicalize_symbol_name (name);
11789
11790 /* We call demand_copy_C_string to check if the alias string is valid.
11791 There should be a closing `"' and no `\0' in the string. */
11792 alias = demand_copy_C_string (&len);
11793 if (alias == NULL)
11794 {
11795 ignore_rest_of_line ();
11796 return;
11797 }
11798
11799 /* Make a copy of name string. */
11800 len = strlen (name) + 1;
11801 obstack_grow (&notes, name, len);
11802 name = obstack_finish (&notes);
11803
11804 if (section)
11805 {
11806 kind = "section";
11807 ahash = secalias_hash;
11808 nhash = secalias_name_hash;
11809 }
11810 else
11811 {
11812 kind = "symbol";
11813 ahash = alias_hash;
11814 nhash = alias_name_hash;
11815 }
11816
11817 /* Check if alias has been used before. */
11818 h = (struct alias *) hash_find (ahash, alias);
11819 if (h)
11820 {
11821 if (strcmp (h->name, name))
11822 as_bad (_("`%s' is already the alias of %s `%s'"),
11823 alias, kind, h->name);
11824 goto out;
11825 }
11826
11827 /* Check if name already has an alias. */
11828 a = (const char *) hash_find (nhash, name);
11829 if (a)
11830 {
11831 if (strcmp (a, alias))
11832 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11833 goto out;
11834 }
11835
11836 h = XNEW (struct alias);
11837 h->file = as_where (&h->line);
11838 h->name = name;
11839
11840 error_string = hash_jam (ahash, alias, (void *) h);
11841 if (error_string)
11842 {
11843 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11844 alias, kind, error_string);
11845 goto out;
11846 }
11847
11848 error_string = hash_jam (nhash, name, (void *) alias);
11849 if (error_string)
11850 {
11851 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11852 alias, kind, error_string);
11853 out:
11854 obstack_free (&notes, name);
11855 obstack_free (&notes, alias);
11856 }
11857
11858 demand_empty_rest_of_line ();
11859 }
11860
11861 /* Rename the original symbol name to its alias. */
11862 static void
11863 do_alias (const char *alias, void *value)
11864 {
11865 struct alias *h = (struct alias *) value;
11866 symbolS *sym = symbol_find (h->name);
11867
11868 if (sym == NULL)
11869 {
11870 #ifdef TE_VMS
11871 /* VMS uses .alias extensively to alias CRTL functions to the same
11872 names with a decc$ prefix. Sometimes the function gets optimized
11873 away and a warning results, which should be suppressed. */
11874 if (strncmp (alias, "decc$", 5) != 0)
11875 #endif
11876 as_warn_where (h->file, h->line,
11877 _("symbol `%s' aliased to `%s' is not used"),
11878 h->name, alias);
11879 }
11880 else
11881 S_SET_NAME (sym, (char *) alias);
11882 }
11883
11884 /* Called from write_object_file. */
11885 void
11886 ia64_adjust_symtab (void)
11887 {
11888 hash_traverse (alias_hash, do_alias);
11889 }
11890
11891 /* Rename the original section name to its alias. */
11892 static void
11893 do_secalias (const char *alias, void *value)
11894 {
11895 struct alias *h = (struct alias *) value;
11896 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11897
11898 if (sec == NULL)
11899 as_warn_where (h->file, h->line,
11900 _("section `%s' aliased to `%s' is not used"),
11901 h->name, alias);
11902 else
11903 sec->name = alias;
11904 }
11905
11906 /* Called from write_object_file. */
11907 void
11908 ia64_frob_file (void)
11909 {
11910 hash_traverse (secalias_hash, do_secalias);
11911 }
11912
11913 #ifdef TE_VMS
11914 #define NT_VMS_MHD 1
11915 #define NT_VMS_LNM 2
11916
11917 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11918 .note section. */
11919
11920 /* Manufacture a VMS-like time string. */
11921 static void
11922 get_vms_time (char *Now)
11923 {
11924 char *pnt;
11925 time_t timeb;
11926
11927 time (&timeb);
11928 pnt = ctime (&timeb);
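/* ctime's fixed format is "Www Mmm dd hh:mm:ss yyyy\n"; NUL-terminate
   the pieces we need and reassemble them below as "dd-Mmm-yyyy hh:mm".  */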
11929 pnt[3] = 0;
11930 pnt[7] = 0;
11931 pnt[10] = 0;
11932 pnt[16] = 0;
11933 pnt[24] = 0;
11934 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11935 }
11936
11937 void
11938 ia64_vms_note (void)
11939 {
11940 char *p;
11941 asection *seg = now_seg;
11942 subsegT subseg = now_subseg;
11943 asection *secp = NULL;
11944 char *bname;
11945 char buf [256];
11946 symbolS *sym;
11947
11948 /* Create the .note section. */
11949
11950 secp = subseg_new (".note", 0);
11951 bfd_set_section_flags (stdoutput,
11952 secp,
11953 SEC_HAS_CONTENTS | SEC_READONLY);
11954
11955 /* Module header note (MHD). */
11956 bname = xstrdup (lbasename (out_file_name));
11957 if ((p = strrchr (bname, '.')))
11958 *p = '\0';
11959
11960 /* VMS note header is 24 bytes long. */
11961 p = frag_more (8 + 8 + 8);
11962 number_to_chars_littleendian (p + 0, 8, 8);
11963 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11964 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11965
11966 p = frag_more (8);
11967 strcpy (p, "IPF/VMS");
11968
11969 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11970 get_vms_time (p);
11971 strcpy (p + 17, "24-FEB-2005 15:00");
11972 p += 17 + 17;
11973 strcpy (p, bname);
11974 p += strlen (bname) + 1;
11975 free (bname);
11976 strcpy (p, "V1.0");
11977
11978 frag_align (3, 0, 0);
11979
11980 /* Language processor name note. */
11981 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11982 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11983
11984 p = frag_more (8 + 8 + 8);
11985 number_to_chars_littleendian (p + 0, 8, 8);
11986 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11987 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11988
11989 p = frag_more (8);
11990 strcpy (p, "IPF/VMS");
11991
11992 p = frag_more (strlen (buf) + 1);
11993 strcpy (p, buf);
11994
11995 frag_align (3, 0, 0);
11996
11997 secp = subseg_new (".vms_display_name_info", 0);
11998 bfd_set_section_flags (stdoutput,
11999 secp,
12000 SEC_HAS_CONTENTS | SEC_READONLY);
12001
12002 /* The name of this symbol should be passed on the command line and
12003 should vary according to the source language. */
12004 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
12005 absolute_section, 0, &zero_address_frag);
12006 symbol_table_insert (sym);
12007 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
12008
12009 p = frag_more (4);
12010 /* Format 3 of VMS demangler Spec. */
12011 number_to_chars_littleendian (p, 3, 4);
12012
12013 p = frag_more (4);
12014 /* Place holder for symbol table index of above symbol. */
12015 number_to_chars_littleendian (p, -1, 4);
12016
12017 frag_align (3, 0, 0);
12018
12019 /* We probably can't restore the current segment, for there likely
12020 isn't one yet... */
12021 if (seg && subseg)
12022 subseg_set (seg, subseg);
12023 }
12024
12025 #endif /* TE_VMS */