1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
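/* For example, the unconditional branch "b .+16" encodes as 0x48000010.
   With the big-endian bit numbering used above (bit 0 is the most
   significant bit, which is why the shift count is "32 - from - len"),
   PPC_OP6 (0x48000010) is 18, the primary opcode of I-form branches,
   and PPC_LI (0x48000010) is 16, the 24-bit displacement sign-extended
   and scaled to a byte offset.  ppc_relocate_instruction below relies
   on exactly this decoding.  */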
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 bool supports_z_point_type (char z_type) override;
58
59
60 void low_collect_ptrace_register (regcache *regcache, int regno,
61 char *buf) override;
62
63 void low_supply_ptrace_register (regcache *regcache, int regno,
64 const char *buf) override;
65
66 bool supports_tracepoints () override;
67
68 bool supports_fast_tracepoints () override;
69
70 int install_fast_tracepoint_jump_pad
71 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
72 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
73 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
74 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
75 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
76 char *err) override;
77
78 int get_min_fast_tracepoint_insn_len () override;
79
80 struct emit_ops *emit_ops () override;
81
82 int get_ipa_tdesc_idx () override;
83
84 protected:
85
86 void low_arch_setup () override;
87
88 bool low_cannot_fetch_register (int regno) override;
89
90 bool low_cannot_store_register (int regno) override;
91
92 bool low_supports_breakpoints () override;
93
94 CORE_ADDR low_get_pc (regcache *regcache) override;
95
96 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
97
98 bool low_breakpoint_at (CORE_ADDR pc) override;
99
100 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
101 int size, raw_breakpoint *bp) override;
102
103 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
104 int size, raw_breakpoint *bp) override;
105
106 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
107 };
108
109 /* The singleton target ops object. */
110
111 static ppc_target the_ppc_target;
112
113 /* Holds the AT_HWCAP auxv entry. */
114
115 static unsigned long ppc_hwcap;
116
117 /* Holds the AT_HWCAP2 auxv entry. */
118
119 static unsigned long ppc_hwcap2;
120
121
122 #define ppc_num_regs 73
123
124 #ifdef __powerpc64__
125 /* We use a constant for FPSCR instead of PT_FPSCR, because
126 many shipped PPC64 kernels had the wrong value in ptrace.h. */
127 static int ppc_regmap[] =
128 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
129 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
130 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
131 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
132 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
133 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
134 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
135 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
136 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
137 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
138 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
139 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
140 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
141 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
142 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
143 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
144 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
145 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
146 PT_ORIG_R3 * 8, PT_TRAP * 8 };
147 #else
148 /* Currently, don't check/send MQ. */
149 static int ppc_regmap[] =
150 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
151 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
152 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
153 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
154 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
155 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
156 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
157 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
158 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
159 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
160 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
161 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
162 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
163 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
164 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
165 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
166 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
167 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
168 PT_ORIG_R3 * 4, PT_TRAP * 4
169 };
170
171 static int ppc_regmap_e500[] =
172 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
173 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
174 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
175 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
176 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
177 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
178 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
179 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
180 -1, -1, -1, -1,
181 -1, -1, -1, -1,
182 -1, -1, -1, -1,
183 -1, -1, -1, -1,
184 -1, -1, -1, -1,
185 -1, -1, -1, -1,
186 -1, -1, -1, -1,
187 -1, -1, -1, -1,
188 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
189 PT_CTR * 4, PT_XER * 4, -1,
190 PT_ORIG_R3 * 4, PT_TRAP * 4
191 };
192 #endif
193
194 /* Check whether the kernel provides a register set with number
195 REGSET_ID of size REGSETSIZE for process/thread TID. */
196
197 static int
198 ppc_check_regset (int tid, int regset_id, int regsetsize)
199 {
200 void *buf = alloca (regsetsize);
201 struct iovec iov;
202
203 iov.iov_base = buf;
204 iov.iov_len = regsetsize;
205
206 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
207 || errno == ENODATA)
208 return 1;
209 return 0;
210 }
211
212 bool
213 ppc_target::low_cannot_store_register (int regno)
214 {
215 const struct target_desc *tdesc = current_process ()->tdesc;
216
217 #ifndef __powerpc64__
218 /* Some kernels do not allow us to store fpscr. */
219 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
220 && regno == find_regno (tdesc, "fpscr"))
221 return true;
222 #endif
223
224 /* Some kernels do not allow us to store orig_r3 or trap. */
225 if (regno == find_regno (tdesc, "orig_r3")
226 || regno == find_regno (tdesc, "trap"))
227 return true;
228
229 return false;
230 }
231
232 bool
233 ppc_target::low_cannot_fetch_register (int regno)
234 {
235 return false;
236 }
237
238 void
239 ppc_target::low_collect_ptrace_register (regcache *regcache, int regno,
240 char *buf)
241 {
242 memset (buf, 0, sizeof (long));
243
244 if (__BYTE_ORDER == __LITTLE_ENDIAN)
245 {
246 /* Little-endian values always sit at the left end of the buffer. */
247 collect_register (regcache, regno, buf);
248 }
249 else if (__BYTE_ORDER == __BIG_ENDIAN)
250 {
251 /* Big-endian values sit at the right end of the buffer. For
252 registers whose size is smaller than sizeof (long), we must apply
253 padding to access them correctly. */
254 int size = register_size (regcache->tdesc, regno);
255
256 if (size < sizeof (long))
257 collect_register (regcache, regno, buf + sizeof (long) - size);
258 else
259 collect_register (regcache, regno, buf);
260 }
261 else
262 perror_with_name ("Unexpected byte order");
263 }
264
265 void
266 ppc_target::low_supply_ptrace_register (regcache *regcache, int regno,
267 const char *buf)
268 {
269 if (__BYTE_ORDER == __LITTLE_ENDIAN)
270 {
271 /* Little-endian values always sit at the left end of the buffer. */
272 supply_register (regcache, regno, buf);
273 }
274 else if (__BYTE_ORDER == __BIG_ENDIAN)
275 {
276 /* Big-endian values sit at the right end of the buffer. For
277 registers whose size is smaller than sizeof (long), we must apply
278 padding to access them correctly. */
279 int size = register_size (regcache->tdesc, regno);
280
281 if (size < sizeof (long))
282 supply_register (regcache, regno, buf + sizeof (long) - size);
283 else
284 supply_register (regcache, regno, buf);
285 }
286 else
287 perror_with_name ("Unexpected byte order");
288 }
289
290 bool
291 ppc_target::low_supports_breakpoints ()
292 {
293 return true;
294 }
295
296 CORE_ADDR
297 ppc_target::low_get_pc (regcache *regcache)
298 {
299 if (register_size (regcache->tdesc, 0) == 4)
300 {
301 unsigned int pc;
302 collect_register_by_name (regcache, "pc", &pc);
303 return (CORE_ADDR) pc;
304 }
305 else
306 {
307 unsigned long pc;
308 collect_register_by_name (regcache, "pc", &pc);
309 return (CORE_ADDR) pc;
310 }
311 }
312
313 void
314 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
315 {
316 if (register_size (regcache->tdesc, 0) == 4)
317 {
318 unsigned int newpc = pc;
319 supply_register_by_name (regcache, "pc", &newpc);
320 }
321 else
322 {
323 unsigned long newpc = pc;
324 supply_register_by_name (regcache, "pc", &newpc);
325 }
326 }
327
328 #ifndef __powerpc64__
329 static int ppc_regmap_adjusted;
330 #endif
331
332
333 /* Correct in either endianness.
334 This instruction is "twge r2, r2", which GDB uses as a software
335 breakpoint. */
336 static const unsigned int ppc_breakpoint = 0x7d821008;
337 #define ppc_breakpoint_len 4
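/* Decoded, 0x7d821008 is primary opcode 31, extended opcode 4 (tw),
   TO = 0b01100 (trap if greater than or equal), RA = RB = 2.  Since
   r2 >= r2 always holds, the instruction traps unconditionally;
   "twge r2, r2" is simply the conventional spelling of it.  */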
338
339 /* Implementation of target ops method "sw_breakpoint_from_kind". */
340
341 const gdb_byte *
342 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
343 {
344 *size = ppc_breakpoint_len;
345 return (const gdb_byte *) &ppc_breakpoint;
346 }
347
348 bool
349 ppc_target::low_breakpoint_at (CORE_ADDR where)
350 {
351 unsigned int insn;
352
353 read_memory (where, (unsigned char *) &insn, 4);
354 if (insn == ppc_breakpoint)
355 return true;
356 /* If necessary, recognize more trap instructions here. GDB only uses
357 the one. */
358
359 return false;
360 }
361
362 /* Implement supports_z_point_type target-ops.
363 Returns true if type Z_TYPE breakpoint is supported.
364
365 Software breakpoints are handled on the server side, so tracepoints
366 and breakpoints can be inserted at the same location. */
367
368 bool
369 ppc_target::supports_z_point_type (char z_type)
370 {
371 switch (z_type)
372 {
373 case Z_PACKET_SW_BP:
374 return true;
375 case Z_PACKET_HW_BP:
376 case Z_PACKET_WRITE_WP:
377 case Z_PACKET_ACCESS_WP:
378 default:
379 return false;
380 }
381 }
382
383 /* Implement the low_insert_point linux target op.
384 Returns 0 on success, -1 on failure and 1 on unsupported. */
385
386 int
387 ppc_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
388 int size, raw_breakpoint *bp)
389 {
390 switch (type)
391 {
392 case raw_bkpt_type_sw:
393 return insert_memory_breakpoint (bp);
394
395 case raw_bkpt_type_hw:
396 case raw_bkpt_type_write_wp:
397 case raw_bkpt_type_access_wp:
398 default:
399 /* Unsupported. */
400 return 1;
401 }
402 }
403
404 /* Implement the low_remove_point linux target op.
405 Returns 0 on success, -1 on failure and 1 on unsupported. */
406
407 int
408 ppc_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
409 int size, raw_breakpoint *bp)
410 {
411 switch (type)
412 {
413 case raw_bkpt_type_sw:
414 return remove_memory_breakpoint (bp);
415
416 case raw_bkpt_type_hw:
417 case raw_bkpt_type_write_wp:
418 case raw_bkpt_type_access_wp:
419 default:
420 /* Unsupported. */
421 return 1;
422 }
423 }
424
425 /* Provide only a fill function for the general register set. ps_lgetregs
426 will use this for NPTL support. */
427
428 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
429 {
430 int i;
431
432 ppc_target *my_ppc_target = (ppc_target *) the_linux_target;
433
434 for (i = 0; i < 32; i++)
435 my_ppc_target->low_collect_ptrace_register (regcache, i,
436 (char *) buf + ppc_regmap[i]);
437
438 for (i = 64; i < 70; i++)
439 my_ppc_target->low_collect_ptrace_register (regcache, i,
440 (char *) buf + ppc_regmap[i]);
441
442 for (i = 71; i < 73; i++)
443 my_ppc_target->low_collect_ptrace_register (regcache, i,
444 (char *) buf + ppc_regmap[i]);
445 }
446
447 /* Program Priority Register regset fill function. */
448
449 static void
450 ppc_fill_pprregset (struct regcache *regcache, void *buf)
451 {
452 char *ppr = (char *) buf;
453
454 collect_register_by_name (regcache, "ppr", ppr);
455 }
456
457 /* Program Priority Register regset store function. */
458
459 static void
460 ppc_store_pprregset (struct regcache *regcache, const void *buf)
461 {
462 const char *ppr = (const char *) buf;
463
464 supply_register_by_name (regcache, "ppr", ppr);
465 }
466
467 /* Data Stream Control Register regset fill function. */
468
469 static void
470 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
471 {
472 char *dscr = (char *) buf;
473
474 collect_register_by_name (regcache, "dscr", dscr);
475 }
476
477 /* Data Stream Control Register regset store function. */
478
479 static void
480 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
481 {
482 const char *dscr = (const char *) buf;
483
484 supply_register_by_name (regcache, "dscr", dscr);
485 }
486
487 /* Target Address Register regset fill function. */
488
489 static void
490 ppc_fill_tarregset (struct regcache *regcache, void *buf)
491 {
492 char *tar = (char *) buf;
493
494 collect_register_by_name (regcache, "tar", tar);
495 }
496
497 /* Target Address Register regset store function. */
498
499 static void
500 ppc_store_tarregset (struct regcache *regcache, const void *buf)
501 {
502 const char *tar = (const char *) buf;
503
504 supply_register_by_name (regcache, "tar", tar);
505 }
506
507 /* Event-Based Branching regset store function. Unless the inferior
508 has a perf event open, reading or writing this regset with ptrace
509 can fail with ENODATA. For reading, the registers
510 will correctly show as unavailable. For writing, gdbserver
511 currently only caches any register writes from P and G packets and
512 the stub always tries to write all the regsets when resuming the
513 inferior, which would result in frequent warnings. For this
514 reason, we don't define a fill function. This also means that the
515 client-side regcache will be dirty if the user tries to write to
516 the EBB registers. G packets that the client sends to write to
517 unrelated registers will also include data for EBB registers, even
518 if they are unavailable. */
519
520 static void
521 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
522 {
523 const char *regset = (const char *) buf;
524
525 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
526 .dat file the order is BESCR, EBBHR, EBBRR. */
527 supply_register_by_name (regcache, "ebbrr", &regset[0]);
528 supply_register_by_name (regcache, "ebbhr", &regset[8]);
529 supply_register_by_name (regcache, "bescr", &regset[16]);
530 }
531
532 /* Performance Monitoring Unit regset fill function. */
533
534 static void
535 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
536 {
537 char *regset = (char *) buf;
538
539 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
540 In the .dat file the order is MMCR0, MMCR2, SIAR, SDAR, SIER. */
541 collect_register_by_name (regcache, "siar", &regset[0]);
542 collect_register_by_name (regcache, "sdar", &regset[8]);
543 collect_register_by_name (regcache, "sier", &regset[16]);
544 collect_register_by_name (regcache, "mmcr2", &regset[24]);
545 collect_register_by_name (regcache, "mmcr0", &regset[32]);
546 }
547
548 /* Performance Monitoring Unit regset store function. */
549
550 static void
551 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
552 {
553 const char *regset = (const char *) buf;
554
555 supply_register_by_name (regcache, "siar", &regset[0]);
556 supply_register_by_name (regcache, "sdar", &regset[8]);
557 supply_register_by_name (regcache, "sier", &regset[16]);
558 supply_register_by_name (regcache, "mmcr2", &regset[24]);
559 supply_register_by_name (regcache, "mmcr0", &regset[32]);
560 }
561
562 /* Hardware Transactional Memory special-purpose register regset fill
563 function. */
564
565 static void
566 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
567 {
568 int i, base;
569 char *regset = (char *) buf;
570
571 base = find_regno (regcache->tdesc, "tfhar");
572 for (i = 0; i < 3; i++)
573 collect_register (regcache, base + i, &regset[i * 8]);
574 }
575
576 /* Hardware Transactional Memory special-purpose register regset store
577 function. */
578
579 static void
580 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
581 {
582 int i, base;
583 const char *regset = (const char *) buf;
584
585 base = find_regno (regcache->tdesc, "tfhar");
586 for (i = 0; i < 3; i++)
587 supply_register (regcache, base + i, &regset[i * 8]);
588 }
589
590 /* For the same reasons as the EBB regset, none of the HTM
591 checkpointed regsets have a fill function. These registers are
592 only available if the inferior is in a transaction. */
593
594 /* Hardware Transactional Memory checkpointed general-purpose regset
595 store function. */
596
597 static void
598 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
599 {
600 int i, base, size, endian_offset;
601 const char *regset = (const char *) buf;
602
603 base = find_regno (regcache->tdesc, "cr0");
604 size = register_size (regcache->tdesc, base);
605
606 gdb_assert (size == 4 || size == 8);
607
608 for (i = 0; i < 32; i++)
609 supply_register (regcache, base + i, &regset[i * size]);
610
611 endian_offset = 0;
612
613 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
614 endian_offset = 4;
615
616 supply_register_by_name (regcache, "ccr",
617 &regset[PT_CCR * size + endian_offset]);
618
619 supply_register_by_name (regcache, "cxer",
620 &regset[PT_XER * size + endian_offset]);
621
622 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
623 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
624 }
625
626 /* Hardware Transactional Memory checkpointed floating-point regset
627 store function. */
628
629 static void
630 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
631 {
632 int i, base;
633 const char *regset = (const char *) buf;
634
635 base = find_regno (regcache->tdesc, "cf0");
636
637 for (i = 0; i < 32; i++)
638 supply_register (regcache, base + i, &regset[i * 8]);
639
640 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
641 }
642
643 /* Hardware Transactional Memory checkpointed vector regset store
644 function. */
645
646 static void
647 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
648 {
649 int i, base;
650 const char *regset = (const char *) buf;
651 int vscr_offset = 0;
652
653 base = find_regno (regcache->tdesc, "cvr0");
654
655 for (i = 0; i < 32; i++)
656 supply_register (regcache, base + i, &regset[i * 16]);
657
658 if (__BYTE_ORDER == __BIG_ENDIAN)
659 vscr_offset = 12;
660
661 supply_register_by_name (regcache, "cvscr",
662 &regset[32 * 16 + vscr_offset]);
663
664 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
665 }
666
667 /* Hardware Transactional Memory checkpointed vector-scalar regset
668 store function. */
669
670 static void
671 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
672 {
673 int i, base;
674 const char *regset = (const char *) buf;
675
676 base = find_regno (regcache->tdesc, "cvs0h");
677 for (i = 0; i < 32; i++)
678 supply_register (regcache, base + i, &regset[i * 8]);
679 }
680
681 /* Hardware Transactional Memory checkpointed Program Priority
682 Register regset store function. */
683
684 static void
685 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
686 {
687 const char *cppr = (const char *) buf;
688
689 supply_register_by_name (regcache, "cppr", cppr);
690 }
691
692 /* Hardware Transactional Memory checkpointed Data Stream Control
693 Register regset store function. */
694
695 static void
696 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
697 {
698 const char *cdscr = (const char *) buf;
699
700 supply_register_by_name (regcache, "cdscr", cdscr);
701 }
702
703 /* Hardware Transactional Memory checkpointed Target Address Register
704 regset store function. */
705
706 static void
707 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
708 {
709 const char *ctar = (const char *) buf;
710
711 supply_register_by_name (regcache, "ctar", ctar);
712 }
713
714 static void
715 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
716 {
717 int i, base;
718 char *regset = (char *) buf;
719
720 base = find_regno (regcache->tdesc, "vs0h");
721 for (i = 0; i < 32; i++)
722 collect_register (regcache, base + i, &regset[i * 8]);
723 }
724
725 static void
726 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
727 {
728 int i, base;
729 const char *regset = (const char *) buf;
730
731 base = find_regno (regcache->tdesc, "vs0h");
732 for (i = 0; i < 32; i++)
733 supply_register (regcache, base + i, &regset[i * 8]);
734 }
735
736 static void
737 ppc_fill_vrregset (struct regcache *regcache, void *buf)
738 {
739 int i, base;
740 char *regset = (char *) buf;
741 int vscr_offset = 0;
742
743 base = find_regno (regcache->tdesc, "vr0");
744 for (i = 0; i < 32; i++)
745 collect_register (regcache, base + i, &regset[i * 16]);
746
747 if (__BYTE_ORDER == __BIG_ENDIAN)
748 vscr_offset = 12;
749
750 collect_register_by_name (regcache, "vscr",
751 &regset[32 * 16 + vscr_offset]);
752
753 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
754 }
755
756 static void
757 ppc_store_vrregset (struct regcache *regcache, const void *buf)
758 {
759 int i, base;
760 const char *regset = (const char *) buf;
761 int vscr_offset = 0;
762
763 base = find_regno (regcache->tdesc, "vr0");
764 for (i = 0; i < 32; i++)
765 supply_register (regcache, base + i, &regset[i * 16]);
766
767 if (__BYTE_ORDER == __BIG_ENDIAN)
768 vscr_offset = 12;
769
770 supply_register_by_name (regcache, "vscr",
771 &regset[32 * 16 + vscr_offset]);
772 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
773 }
774
775 struct gdb_evrregset_t
776 {
777 unsigned long evr[32];
778 unsigned long long acc;
779 unsigned long spefscr;
780 };
781
782 static void
783 ppc_fill_evrregset (struct regcache *regcache, void *buf)
784 {
785 int i, ev0;
786 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
787
788 ev0 = find_regno (regcache->tdesc, "ev0h");
789 for (i = 0; i < 32; i++)
790 collect_register (regcache, ev0 + i, &regset->evr[i]);
791
792 collect_register_by_name (regcache, "acc", &regset->acc);
793 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
794 }
795
796 static void
797 ppc_store_evrregset (struct regcache *regcache, const void *buf)
798 {
799 int i, ev0;
800 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
801
802 ev0 = find_regno (regcache->tdesc, "ev0h");
803 for (i = 0; i < 32; i++)
804 supply_register (regcache, ev0 + i, &regset->evr[i]);
805
806 supply_register_by_name (regcache, "acc", &regset->acc);
807 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
808 }
809
810 static struct regset_info ppc_regsets[] = {
811 /* List the extra register sets before GENERAL_REGS. That way we will
812 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
813 general registers. Some kernels support these, but not the newer
814 PPC_PTRACE_GETREGS. */
815 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
816 NULL, ppc_store_tm_ctarregset },
817 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
818 NULL, ppc_store_tm_cdscrregset },
819 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
820 NULL, ppc_store_tm_cpprregset },
821 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
822 NULL, ppc_store_tm_cvsxregset },
823 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
824 NULL, ppc_store_tm_cvrregset },
825 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
826 NULL, ppc_store_tm_cfprregset },
827 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
828 NULL, ppc_store_tm_cgprregset },
829 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
830 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
831 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
832 NULL, ppc_store_ebbregset },
833 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
834 ppc_fill_pmuregset, ppc_store_pmuregset },
835 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
836 ppc_fill_tarregset, ppc_store_tarregset },
837 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
838 ppc_fill_pprregset, ppc_store_pprregset },
839 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
840 ppc_fill_dscrregset, ppc_store_dscrregset },
841 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
842 ppc_fill_vsxregset, ppc_store_vsxregset },
843 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
844 ppc_fill_vrregset, ppc_store_vrregset },
845 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
846 ppc_fill_evrregset, ppc_store_evrregset },
847 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
848 NULL_REGSET
849 };
850
851 static struct usrregs_info ppc_usrregs_info =
852 {
853 ppc_num_regs,
854 ppc_regmap,
855 };
856
857 static struct regsets_info ppc_regsets_info =
858 {
859 ppc_regsets, /* regsets */
860 0, /* num_regsets */
861 NULL, /* disabled_regsets */
862 };
863
864 static struct regs_info myregs_info =
865 {
866 NULL, /* regset_bitmap */
867 &ppc_usrregs_info,
868 &ppc_regsets_info
869 };
870
871 const regs_info *
872 ppc_target::get_regs_info ()
873 {
874 return &myregs_info;
875 }
876
877 void
878 ppc_target::low_arch_setup ()
879 {
880 const struct target_desc *tdesc;
881 struct regset_info *regset;
882 struct ppc_linux_features features = ppc_linux_no_features;
883
884 int tid = lwpid_of (current_thread);
885
886 features.wordsize = ppc_linux_target_wordsize (tid);
887
888 if (features.wordsize == 4)
889 tdesc = tdesc_powerpc_32l;
890 else
891 tdesc = tdesc_powerpc_64l;
892
893 current_process ()->tdesc = tdesc;
894
895 /* The value of current_process ()->tdesc needs to be set for this
896 call. */
897 ppc_hwcap = linux_get_hwcap (features.wordsize);
898 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
899
900 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
901
902 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
903 features.vsx = true;
904
905 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
906 features.altivec = true;
907
908 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
909 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
910 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
911 {
912 features.ppr_dscr = true;
913 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
914 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
915 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
916 && ppc_check_regset (tid, NT_PPC_TAR,
917 PPC_LINUX_SIZEOF_TARREGSET)
918 && ppc_check_regset (tid, NT_PPC_EBB,
919 PPC_LINUX_SIZEOF_EBBREGSET)
920 && ppc_check_regset (tid, NT_PPC_PMU,
921 PPC_LINUX_SIZEOF_PMUREGSET))
922 {
923 features.isa207 = true;
924 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
925 && ppc_check_regset (tid, NT_PPC_TM_SPR,
926 PPC_LINUX_SIZEOF_TM_SPRREGSET))
927 features.htm = true;
928 }
929 }
930
931 tdesc = ppc_linux_match_description (features);
932
933 /* On 32-bit machines, check for SPE registers.
934 Set the low target's regmap field as appropriate. */
935 #ifndef __powerpc64__
936 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
937 tdesc = tdesc_powerpc_e500l;
938
939 if (!ppc_regmap_adjusted)
940 {
941 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
942 ppc_usrregs_info.regmap = ppc_regmap_e500;
943
944 /* If the FPSCR is 64-bit wide, we need to fetch the whole
945 64-bit slot and not just its second word. The PT_FPSCR
946 supplied in a 32-bit GDB compilation doesn't reflect
947 this. */
948 if (register_size (tdesc, 70) == 8)
949 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
950
951 ppc_regmap_adjusted = 1;
952 }
953 #endif
954
955 current_process ()->tdesc = tdesc;
956
957 for (regset = ppc_regsets; regset->size >= 0; regset++)
958 switch (regset->get_request)
959 {
960 case PTRACE_GETVRREGS:
961 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
962 break;
963 case PTRACE_GETVSXREGS:
964 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
965 break;
966 case PTRACE_GETEVRREGS:
967 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
968 regset->size = 32 * 4 + 8 + 4;
969 else
970 regset->size = 0;
971 break;
972 case PTRACE_GETREGSET:
973 switch (regset->nt_type)
974 {
975 case NT_PPC_PPR:
976 regset->size = (features.ppr_dscr ?
977 PPC_LINUX_SIZEOF_PPRREGSET : 0);
978 break;
979 case NT_PPC_DSCR:
980 regset->size = (features.ppr_dscr ?
981 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
982 break;
983 case NT_PPC_TAR:
984 regset->size = (features.isa207 ?
985 PPC_LINUX_SIZEOF_TARREGSET : 0);
986 break;
987 case NT_PPC_EBB:
988 regset->size = (features.isa207 ?
989 PPC_LINUX_SIZEOF_EBBREGSET : 0);
990 break;
991 case NT_PPC_PMU:
992 regset->size = (features.isa207 ?
993 PPC_LINUX_SIZEOF_PMUREGSET : 0);
994 break;
995 case NT_PPC_TM_SPR:
996 regset->size = (features.htm ?
997 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
998 break;
999 case NT_PPC_TM_CGPR:
1000 if (features.wordsize == 4)
1001 regset->size = (features.htm ?
1002 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
1003 else
1004 regset->size = (features.htm ?
1005 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
1006 break;
1007 case NT_PPC_TM_CFPR:
1008 regset->size = (features.htm ?
1009 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1010 break;
1011 case NT_PPC_TM_CVMX:
1012 regset->size = (features.htm ?
1013 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1014 break;
1015 case NT_PPC_TM_CVSX:
1016 regset->size = (features.htm ?
1017 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1018 break;
1019 case NT_PPC_TM_CPPR:
1020 regset->size = (features.htm ?
1021 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1022 break;
1023 case NT_PPC_TM_CDSCR:
1024 regset->size = (features.htm ?
1025 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1026 break;
1027 case NT_PPC_TM_CTAR:
1028 regset->size = (features.htm ?
1029 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1030 break;
1031 default:
1032 break;
1033 }
1034 break;
1035 default:
1036 break;
1037 }
1038 }
1039
1040 /* Implementation of target ops method "supports_tracepoints". */
1041
1042 bool
1043 ppc_target::supports_tracepoints ()
1044 {
1045 return true;
1046 }
1047
1048 /* Get the thread area address. This is used to recognize which
1049 thread is which when tracing with the in-process agent library. We
1050 don't read anything from the address, and treat it as opaque; it's
1051 the address itself that we assume is unique per-thread. */
1052
1053 int
1054 ppc_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
1055 {
1056 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1057 struct thread_info *thr = get_lwp_thread (lwp);
1058 struct regcache *regcache = get_thread_regcache (thr, 1);
1059 ULONGEST tp = 0;
1060
1061 #ifdef __powerpc64__
1062 if (register_size (regcache->tdesc, 0) == 8)
1063 collect_register_by_name (regcache, "r13", &tp);
1064 else
1065 #endif
1066 collect_register_by_name (regcache, "r2", &tp);
1067
1068 *addr = tp;
1069
1070 return 0;
1071 }
1072
1073 #ifdef __powerpc64__
1074
1075 /* Older glibc doesn't provide this. */
1076
1077 #ifndef EF_PPC64_ABI
1078 #define EF_PPC64_ABI 3
1079 #endif
1080
1081 /* Returns 1 if the inferior is using the ELFv2 ABI. Undefined for 32-bit
1082 inferiors. */
1083
1084 static int
1085 is_elfv2_inferior (void)
1086 {
1087 /* To be used as a fallback if we're unable to determine the right result;
1088 assume the inferior uses the same ABI as gdbserver. */
1089 #if _CALL_ELF == 2
1090 const int def_res = 1;
1091 #else
1092 const int def_res = 0;
1093 #endif
1094 CORE_ADDR phdr;
1095 Elf64_Ehdr ehdr;
1096
1097 const struct target_desc *tdesc = current_process ()->tdesc;
1098 int wordsize = register_size (tdesc, 0);
1099
1100 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1101 return def_res;
1102
1103 /* Assume ELF header is at the beginning of the page where program headers
1104 are located. If it doesn't look like one, bail. */
1105
1106 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1107 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1108 return def_res;
1109
1110 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1111 }
1112
1113 #endif
1114
1115 /* Generate a ds-form instruction in BUF and return the number of 32-bit instructions written.
1116
1117 0 6 11 16 30 32
1118 | OPCD | RST | RA | DS |XO| */
1119
1120 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1121 static int
1122 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1123 {
1124 uint32_t insn;
1125
1126 gdb_assert ((opcd & ~0x3f) == 0);
1127 gdb_assert ((rst & ~0x1f) == 0);
1128 gdb_assert ((ra & ~0x1f) == 0);
1129 gdb_assert ((xo & ~0x3) == 0);
1130
1131 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1132 *buf = (opcd << 26) | insn;
1133 return 1;
1134 }
1135
1136 /* The following are frequently used ds-form instructions. */
1137
1138 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1139 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1140 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1141 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
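/* As a concrete example, GEN_STD (p, 31, 1, 16) emits the single word
   0xfbe10010, i.e. "std r31, 16(r1)".  The DS displacement must be a
   multiple of 4, since its low two bits are reused for the XO field;
   gen_ds_form masks them off with "& 0xfffc".  */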
1142
1143 /* Generate a d-form instruction in BUF.
1144
1145 0 6 11 16 32
1146 | OPCD | RST | RA | D | */
1147
1148 static int
1149 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1150 {
1151 uint32_t insn;
1152
1153 gdb_assert ((opcd & ~0x3f) == 0);
1154 gdb_assert ((rst & ~0x1f) == 0);
1155 gdb_assert ((ra & ~0x1f) == 0);
1156
1157 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1158 *buf = (opcd << 26) | insn;
1159 return 1;
1160 }
1161
1162 /* The following are frequently used d-form instructions. */
1163
1164 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1165 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1166 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1167 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1168 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1169 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1170 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1171 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1172 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
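/* For example, GEN_LI (p, 3, 0) emits 0x38600000 ("li r3, 0") and
   GEN_STW (p, 0, 1, 8) emits 0x90010008 ("stw r0, 8(r1)").  The D/SI
   field is a signed 16-bit immediate, so gen_d_form keeps only its
   low 16 bits.  */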
1173
1174 /* Generate an xfx-form instruction in BUF and return the number of 32-bit
1175 instructions written.
1176
1177 0 6 11 21 31 32
1178 | OPCD | RST | RI | XO |/| */
1179
1180 static int
1181 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1182 {
1183 uint32_t insn;
1184 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1185
1186 gdb_assert ((opcd & ~0x3f) == 0);
1187 gdb_assert ((rst & ~0x1f) == 0);
1188 gdb_assert ((xo & ~0x3ff) == 0);
1189
1190 insn = (rst << 21) | (n << 11) | (xo << 1);
1191 *buf = (opcd << 26) | insn;
1192 return 1;
1193 }
1194
1195 /* The following are frequently used xfx-form instructions. */
1196
1197 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1198 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1199 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1200 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1201 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1202 E & 0xf, 598)
1203 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
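/* The SPR number is encoded with its two 5-bit halves swapped, which is
   what the "n" computation above does.  For example, GEN_MFSPR (p, 5, 8)
   emits 0x7ca802a6 ("mflr r5", SPR 8 being LR) and GEN_MTSPR (p, 12, 9)
   emits 0x7d8903a6 ("mtctr r12", SPR 9 being CTR).  */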
1204
1205
1206 /* Generate an x-form instruction in BUF and return the number of 32-bit instructions written.
1207
1208 0 6 11 16 21 31 32
1209 | OPCD | RST | RA | RB | XO |RC| */
1210
1211 static int
1212 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1213 {
1214 uint32_t insn;
1215
1216 gdb_assert ((opcd & ~0x3f) == 0);
1217 gdb_assert ((rst & ~0x1f) == 0);
1218 gdb_assert ((ra & ~0x1f) == 0);
1219 gdb_assert ((rb & ~0x1f) == 0);
1220 gdb_assert ((xo & ~0x3ff) == 0);
1221 gdb_assert ((rc & ~1) == 0);
1222
1223 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1224 *buf = (opcd << 26) | insn;
1225 return 1;
1226 }
1227
1228 /* The following are frequently used x-form instructions. */
1229
1230 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1231 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1232 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1233 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1234 /* Assume bf = cr7. */
1235 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
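/* For example, GEN_MR (p, 3, 4) emits 0x7c832378 ("mr r3, r4", encoded
   as "or r3, r4, r4").  GEN_STWCX emits "stwcx." with the record (Rc)
   bit set, so the result of the store-conditional is reflected in CR0.  */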
1236
1237
1238 /* Generate an md-form instruction in BUF and return the number of 32-bit instructions written.
1239
1240 0 6 11 16 21 27 30 31 32
1241 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1242
1243 static int
1244 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1245 int xo, int rc)
1246 {
1247 uint32_t insn;
1248 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1249 unsigned int sh0_4 = sh & 0x1f;
1250 unsigned int sh5 = (sh >> 5) & 1;
1251
1252 gdb_assert ((opcd & ~0x3f) == 0);
1253 gdb_assert ((rs & ~0x1f) == 0);
1254 gdb_assert ((ra & ~0x1f) == 0);
1255 gdb_assert ((sh & ~0x3f) == 0);
1256 gdb_assert ((mb & ~0x3f) == 0);
1257 gdb_assert ((xo & ~0x7) == 0);
1258 gdb_assert ((rc & ~0x1) == 0);
1259
1260 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1261 | (sh5 << 1) | (xo << 2) | (rc & 1);
1262 *buf = (opcd << 26) | insn;
1263 return 1;
1264 }
1265
1266 /* The following are frequently used md-form instructions. */
1267
1268 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1269 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1270 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1271 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
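/* For example, GEN_RLDICL (p, 3, 3, 0, 32) emits 0x78630020, the
   canonical "clrldi r3, r3, 32" that gen_limm below uses to clear the
   upper 32 bits of a register.  The 6-bit SH and MB fields are split
   across the instruction, which is what the "n", "sh0_4" and "sh5"
   computations above handle.  */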
1272
1273 /* Generate an i-form instruction in BUF and return the number of 32-bit instructions written.
1274
1275 0 6 30 31 32
1276 | OPCD | LI |AA|LK| */
1277
1278 static int
1279 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1280 {
1281 uint32_t insn;
1282
1283 gdb_assert ((opcd & ~0x3f) == 0);
1284
1285 insn = (li & 0x3fffffc) | ((aa & 1) << 1) | (lk & 1);
1286 *buf = (opcd << 26) | insn;
1287 return 1;
1288 }
1289
1290 /* The following are frequently used i-form instructions. */
1291
1292 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1293 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
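/* For example, GEN_B (p, 8) emits 0x48000008 ("b .+8") and GEN_BL (p, 8)
   emits 0x48000009 ("bl .+8").  LI is a byte displacement; its low two
   bits are dropped by the "& 0x3fffffc" mask, so it must be a multiple
   of 4.  */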
1294
1295 /* Generate a b-form instruction in BUF and return the number of 32-bit instructions written.
1296
1297 0 6 11 16 30 31 32
1298 | OPCD | BO | BI | BD |AA|LK| */
1299
1300 static int
1301 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1302 int aa, int lk)
1303 {
1304 uint32_t insn;
1305
1306 gdb_assert ((opcd & ~0x3f) == 0);
1307 gdb_assert ((bo & ~0x1f) == 0);
1308 gdb_assert ((bi & ~0x1f) == 0);
1309
1310 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | ((aa & 1) << 1) | (lk & 1);
1311 *buf = (opcd << 26) | insn;
1312 return 1;
1313 }
1314
1315 /* The following are frequently used b-form instructions. */
1316 /* Assume bi = cr7. */
1317 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
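/* For example, GEN_BNE (p, 8) emits 0x409e0008, i.e. "bne cr7, .+8"
   (BO = 4 means "branch if the condition bit is false", and BI = 30 is
   the EQ bit of cr7).  This pairs with GEN_CMPW above, which always
   compares into cr7.  */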
1318
1319 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1320 respectively. They are primarily used to save/restore GPRs in the jump pad,
1321 not for bytecode compilation. */
1322
1323 #ifdef __powerpc64__
1324 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1325 GEN_LD (buf, rt, ra, si) : \
1326 GEN_LWZ (buf, rt, ra, si))
1327 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1328 GEN_STD (buf, rt, ra, si) : \
1329 GEN_STW (buf, rt, ra, si))
1330 #else
1331 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1332 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1333 #endif
1334
1335 /* Generate a sequence of instructions to load IMM into register REG.
1336 Write the instructions in BUF and return the number of 32-bit instructions written. */
1337
1338 static int
1339 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1340 {
1341 uint32_t *p = buf;
1342
1343 if ((imm + 32768) < 65536)
1344 {
1345 /* li reg, imm[15:0] */
1346 p += GEN_LI (p, reg, imm);
1347 }
1348 else if ((imm >> 32) == 0)
1349 {
1350 /* lis reg, imm[31:16]
1351 ori reg, reg, imm[15:0]
1352 rldicl reg, reg, 0, 32 */
1353 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1354 if ((imm & 0xffff) != 0)
1355 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1356 /* Clear upper 32-bit if sign-bit is set. */
1357 if (imm & (1u << 31) && is_64)
1358 p += GEN_RLDICL (p, reg, reg, 0, 32);
1359 }
1360 else
1361 {
1362 gdb_assert (is_64);
1363 /* lis reg, <imm[63:48]>
1364 ori reg, reg, <imm[48:32]>
1365 rldicr reg, reg, 32, 31
1366 oris reg, reg, <imm[31:16]>
1367 ori reg, reg, <imm[15:0]> */
1368 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1369 if (((imm >> 32) & 0xffff) != 0)
1370 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1371 p += GEN_RLDICR (p, reg, reg, 32, 31);
1372 if (((imm >> 16) & 0xffff) != 0)
1373 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1374 if ((imm & 0xffff) != 0)
1375 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1376 }
1377
1378 return p - buf;
1379 }
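/* For example, gen_limm (p, 3, 0xdeadbeef, 1) produces

     lis    r3, 0xdead
     ori    r3, r3, 0xbeef
     rldicl r3, r3, 0, 32

   The final rldicl is needed because "lis" sign-extends on 64-bit
   targets, so an immediate with bit 31 set would otherwise end up with
   all-ones in the upper 32 bits.  */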
1380
1381 /* Generate a sequence for an atomic exchange at location LOCK.
1382 This code sequence clobbers r6, r7, r8. LOCK is the location for
1383 the atomic exchange, OLD_VALUE is the expected old value stored at
1384 the location, and R_NEW is a register holding the new value. */
1385
1386 static int
1387 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1388 int is_64)
1389 {
1390 const int r_lock = 6;
1391 const int r_old = 7;
1392 const int r_tmp = 8;
1393 uint32_t *p = buf;
1394
1395 /*
1396 1: lwarx TMP, 0, LOCK
1397 cmpwi TMP, OLD
1398 bne 1b
1399 stwcx. NEW, 0, LOCK
1400 bne 1b */
1401
1402 p += gen_limm (p, r_lock, lock, is_64);
1403 p += gen_limm (p, r_old, old_value, is_64);
1404
1405 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1406 p += GEN_CMPW (p, r_tmp, r_old);
1407 p += GEN_BNE (p, -8);
1408 p += GEN_STWCX (p, r_new, 0, r_lock);
1409 p += GEN_BNE (p, -16);
1410
1411 return p - buf;
1412 }
1413
1414 /* Generate a sequence of instructions for calling a function
1415 at address FN. Return the number of 32-bit instructions written to BUF. */
1416
1417 static int
1418 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1419 {
1420 uint32_t *p = buf;
1421
1422 /* The callee must be entered via r12 so that it can calculate its TOC address. */
1423 p += gen_limm (p, 12, fn, is_64);
1424 if (is_opd)
1425 {
1426 p += GEN_LOAD (p, 11, 12, 16, is_64);
1427 p += GEN_LOAD (p, 2, 12, 8, is_64);
1428 p += GEN_LOAD (p, 12, 12, 0, is_64);
1429 }
1430 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1431 *p++ = 0x4e800421; /* bctrl */
1432
1433 return p - buf;
1434 }
1435
1436 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1437 of instruction. This function is used to adjust pc-relative instructions
1438 when copying. */
1439
1440 static void
1441 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1442 {
1443 uint32_t insn, op6;
1444 long rel, newrel;
1445
1446 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1447 op6 = PPC_OP6 (insn);
1448
1449 if (op6 == 18 && (insn & 2) == 0)
1450 {
1451 /* branch && AA = 0 */
1452 rel = PPC_LI (insn);
1453 newrel = (oldloc - *to) + rel;
1454
1455 /* Out of range. Cannot relocate instruction. */
1456 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1457 return;
1458
1459 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1460 }
1461 else if (op6 == 16 && (insn & 2) == 0)
1462 {
1463 /* conditional branch && AA = 0 */
1464
1465 /* If the new relocation is too big for even a 26-bit unconditional
1466 branch, there is nothing we can do. Just abort.
1467
1468 Otherwise, if it fits in a 16-bit conditional branch, just
1469 copy the instruction and relocate the address.
1470
1471 If it's too big for a 16-bit conditional branch, try to invert the
1472 condition and jump with a 26-bit branch. For example,
1473
1474 beq .Lgoto
1475 INSN1
1476
1477 =>
1478
1479 bne 1f (+8)
1480 b .Lgoto
1481 1:INSN1
1482
1483 After this transform, we actually jump from *TO+4 instead of *TO,
1484 so check the relocation again, because it will be one instruction
1485 farther than before if *TO is after OLDLOC.
1486
1487
1488 A BDNZT (or similar) is transformed from
1489
1490 bdnzt eq, .Lgoto
1491 INSN1
1492
1493 =>
1494
1495 bdz 1f (+12)
1496 bf eq, 1f (+8)
1497 b .Lgoto
1498 1:INSN1
1499
1500 See also "BO field encodings". */
1501
1502 rel = PPC_BD (insn);
1503 newrel = (oldloc - *to) + rel;
1504
1505 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1506 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1507 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1508 {
1509 newrel -= 4;
1510
1511 /* Out of range. Cannot relocate instruction. */
1512 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1513 return;
1514
1515 if ((PPC_BO (insn) & 0x14) == 0x4)
1516 insn ^= (1 << 24);
1517 else if ((PPC_BO (insn) & 0x14) == 0x10)
1518 insn ^= (1 << 22);
1519
1520 /* Jump over the unconditional branch. */
1521 insn = (insn & ~0xfffc) | 0x8;
1522 target_write_memory (*to, (unsigned char *) &insn, 4);
1523 *to += 4;
1524
1525 /* Build an unconditional branch and copy the LK bit. */
1526 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1527 target_write_memory (*to, (unsigned char *) &insn, 4);
1528 *to += 4;
1529
1530 return;
1531 }
1532 else if ((PPC_BO (insn) & 0x14) == 0)
1533 {
1534 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1535 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1536
1537 newrel -= 8;
1538
1539 /* Out of range. Cannot relocate instruction. */
1540 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1541 return;
1542
1543 /* Copy BI field. */
1544 bf_insn |= (insn & 0x1f0000);
1545
1546 /* Invert condition. */
1547 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1548 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1549
1550 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1551 *to += 4;
1552 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1553 *to += 4;
1554
1555 /* Build an unconditional branch and copy the LK bit. */
1556 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1557 target_write_memory (*to, (unsigned char *) &insn, 4);
1558 *to += 4;
1559
1560 return;
1561 }
1562 else /* (BO & 0x14) == 0x14, branch always. */
1563 {
1564 /* Out of range. Cannot relocate instruction. */
1565 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1566 return;
1567
1568 /* Build an unconditional branch and copy the LK bit. */
1569 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1570 target_write_memory (*to, (unsigned char *) &insn, 4);
1571 *to += 4;
1572
1573 return;
1574 }
1575 }
1576
1577 target_write_memory (*to, (unsigned char *) &insn, 4);
1578 *to += 4;
1579 }
1580
1581 bool
1582 ppc_target::supports_fast_tracepoints ()
1583 {
1584 return true;
1585 }
1586
1587 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1588 See target.h for details. */
1589
1590 int
1591 ppc_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1592 CORE_ADDR tpaddr,
1593 CORE_ADDR collector,
1594 CORE_ADDR lockaddr,
1595 ULONGEST orig_size,
1596 CORE_ADDR *jump_entry,
1597 CORE_ADDR *trampoline,
1598 ULONGEST *trampoline_size,
1599 unsigned char *jjump_pad_insn,
1600 ULONGEST *jjump_pad_insn_size,
1601 CORE_ADDR *adjusted_insn_addr,
1602 CORE_ADDR *adjusted_insn_addr_end,
1603 char *err)
1604 {
1605 uint32_t buf[256];
1606 uint32_t *p = buf;
1607 int j, offset;
1608 CORE_ADDR buildaddr = *jump_entry;
1609 const CORE_ADDR entryaddr = *jump_entry;
1610 int rsz, min_frame, frame_size, tp_reg;
1611 #ifdef __powerpc64__
1612 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1613 int is_64 = register_size (regcache->tdesc, 0) == 8;
1614 int is_opd = is_64 && !is_elfv2_inferior ();
1615 #else
1616 int is_64 = 0, is_opd = 0;
1617 #endif
1618
1619 #ifdef __powerpc64__
1620 if (is_64)
1621 {
1622 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1623 rsz = 8;
1624 min_frame = 112;
1625 frame_size = (40 * rsz) + min_frame;
1626 tp_reg = 13;
1627 }
1628 else
1629 {
1630 #endif
1631 rsz = 4;
1632 min_frame = 16;
1633 frame_size = (40 * rsz) + min_frame;
1634 tp_reg = 2;
1635 #ifdef __powerpc64__
1636 }
1637 #endif
1638
1639 /* Stack frame layout for this jump pad,
1640
1641 High thread_area (r13/r2) |
1642 tpoint - collecting_t obj
1643 PC/<tpaddr> | +36
1644 CTR | +35
1645 LR | +34
1646 XER | +33
1647 CR | +32
1648 R31 |
1649 R29 |
1650 ... |
1651 R1 | +1
1652 R0 - collected registers
1653 ... |
1654 ... |
1655 Low Back-chain -
1656
1657
1658 The code flow of this jump pad,
1659
1660 1. Adjust SP
1661 2. Save GPR and SPR
1662 3. Prepare argument
1663 4. Call gdb_collector
1664 5. Restore GPR and SPR
1665 6. Restore SP
1666 7. Build a jump back to the program
1667 8. Copy/relocate the original instruction
1668 9. Build a jump to replace the original instruction. */
1669
1670 /* Adjust stack pointer. */
1671 if (is_64)
1672 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1673 else
1674 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1675
1676 /* Store GPRs. Save R1 later, because it had just been modified, but
1677 we want the original value. */
1678 for (j = 2; j < 32; j++)
1679 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1680 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1681 /* Set r0 to the original value of r1 before adjusting stack frame,
1682 and then save it. */
1683 p += GEN_ADDI (p, 0, 1, frame_size);
1684 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1685
1686 /* Save CR, XER, LR, and CTR. */
1687 p += GEN_MFCR (p, 3); /* mfcr r3 */
1688 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1689 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1690 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1691 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1692 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1693 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1694 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1695
1696 /* Save PC<tpaddr> */
1697 p += gen_limm (p, 3, tpaddr, is_64);
1698 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1699
1700
1701 /* Setup arguments to collector. */
1702 /* Set r4 to collected registers. */
1703 p += GEN_ADDI (p, 4, 1, min_frame);
1704 /* Set r3 to TPOINT. */
1705 p += gen_limm (p, 3, tpoint, is_64);
1706
1707 /* Prepare collecting_t object for lock. */
1708 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1709 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1710 /* Set R5 to collecting object. */
1711 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1712
1713 p += GEN_LWSYNC (p);
1714 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1715 p += GEN_LWSYNC (p);
1716
1717 /* Call to collector. */
1718 p += gen_call (p, collector, is_64, is_opd);
1719
1720 /* Simply write 0 to release the lock. */
1721 p += gen_limm (p, 3, lockaddr, is_64);
1722 p += gen_limm (p, 4, 0, is_64);
1723 p += GEN_LWSYNC (p);
1724 p += GEN_STORE (p, 4, 3, 0, is_64);
1725
1726 /* Restore stack and registers. */
1727 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1728 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1729 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1730 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1731 p += GEN_MTCR (p, 3); /* mtcr r3 */
1732 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1733 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1734 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1735
1736 /* Restore GPRs. */
1737 for (j = 2; j < 32; j++)
1738 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1739 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1740 /* Restore SP. */
1741 p += GEN_ADDI (p, 1, 1, frame_size);
1742
1743 /* Flush instructions to inferior memory. */
1744 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1745
1746 /* Now, insert the original instruction to execute in the jump pad. */
1747 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1748 *adjusted_insn_addr_end = *adjusted_insn_addr;
1749 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1750
1751 /* Verify the relocation size. It should be 4 for a normal copy,
1752 or 8 or 12 for some conditional branches. */
1753 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1754 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1755 {
1756 sprintf (err, "E.Unexpected instruction length = %d "
1757 "when relocating instruction.",
1758 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1759 return 1;
1760 }
1761
1762 buildaddr = *adjusted_insn_addr_end;
1763 p = buf;
1764 /* Finally, write a jump back to the program. */
1765 offset = (tpaddr + 4) - buildaddr;
1766 if (offset >= (1 << 25) || offset < -(1 << 25))
1767 {
1768 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1769 "(offset 0x%x > 26-bit).", offset);
1770 return 1;
1771 }
1772 /* b <tpaddr+4> */
1773 p += GEN_B (p, offset);
1774 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1775 *jump_entry = buildaddr + (p - buf) * 4;
1776
1777 /* The jump pad is now built. Wire in a jump to our jump pad. This
1778 is always done last (by our caller actually), so that we can
1779 install fast tracepoints with threads running. This relies on
1780 the agent's atomic write support. */
1781 offset = entryaddr - tpaddr;
1782 if (offset >= (1 << 25) || offset < -(1 << 25))
1783 {
1784 sprintf (err, "E.Jump pad too far from tracepoint "
1785 "(offset 0x%x > 26-bit).", offset);
1786 return 1;
1787 }
1788 /* b <jentry> */
1789 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1790 *jjump_pad_insn_size = 4;
1791
1792 return 0;
1793 }
1794
1795 /* Returns the minimum instruction length for installing a tracepoint. */
1796
1797 int
1798 ppc_target::get_min_fast_tracepoint_insn_len ()
1799 {
1800 return 4;
1801 }
1802
1803 /* Emits a given buffer into the target at current_insn_ptr. Length
1804 is in units of 32-bit words. */
1805
1806 static void
1807 emit_insns (uint32_t *buf, int n)
1808 {
1809 n = n * sizeof (uint32_t);
1810 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1811 current_insn_ptr += n;
1812 }
1813
1814 #define __EMIT_ASM(NAME, INSNS) \
1815 do \
1816 { \
1817 extern uint32_t start_bcax_ ## NAME []; \
1818 extern uint32_t end_bcax_ ## NAME []; \
1819 emit_insns (start_bcax_ ## NAME, \
1820 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1821 __asm__ (".section .text.__ppcbcax\n\t" \
1822 "start_bcax_" #NAME ":\n\t" \
1823 INSNS "\n\t" \
1824 "end_bcax_" #NAME ":\n\t" \
1825 ".previous\n\t"); \
1826 } while (0)
1827
1828 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1829 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1830
1831 /*
1832
1833 Bytecode execution stack frame - 32-bit
1834
1835 | LR save area (SP + 4)
1836 SP' -> +- Back chain (SP + 0)
1837 | Save r31 for access saved arguments
1838 | Save r30 for bytecode stack pointer
1839 | Save r4 for incoming argument *value
1840 | Save r3 for incoming argument regs
1841 r30 -> +- Bytecode execution stack
1842 |
1843 | 64 bytes (8 doublewords) initially.
1844 | Expand stack as needed.
1845 |
1846 +-
1847 | Some padding for minimum stack frame and 16-byte alignment.
1848 | 16 bytes.
1849 SP +- Back-chain (SP')
1850
1851 initial frame size
1852 = 16 + (4 * 4) + 64
1853 = 96
1854
1855 r30 is the stack pointer for the bytecode machine.
1856 It should point to the next empty slot, so we can use LWZU for pop.
1857 r3 caches the high part of the TOP value.
1858 On entry it held the first argument, the pointer to regs.
1859 r4 caches the low part of the TOP value.
1860 On entry it held the second argument, the pointer to the result.
1861 We must set *result = TOP before returning from this function.
1862
1863 Note:
1864 * To restore stack at epilogue
1865 => sp = r31
1866 * To check stack is big enough for bytecode execution.
1867 => r30 - 8 > SP + 8
1868 * To return execution result.
1869 => 0(r4) = TOP
1870
1871 */
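
/* A minimal sketch of the resulting 32-bit layout, using the offsets the
   prologue below actually emits (relative to the new SP after
   "stwu 1, -96(1)"):

     92(1)  saved r31          -4(r31)
     88(1)  saved r30          -8(r31)
     84(1)  saved r4 (*value)  -12(r31)
     80(1)  saved r3 (regs)    -16(r31)
     64(1)  initial bytecode stack top (r30)
      0(1)  back chain -> original r1 (= r31)  */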
1872
1873 /* Regardless of endianness, register 3 always holds the high part and
1874 register 4 the low part. These defines are used when the register pair
1875 is stored/loaded. Likewise, to simplify code, there is a similar define for 5:6. */
1876
1877 #if __BYTE_ORDER == __LITTLE_ENDIAN
1878 #define TOP_FIRST "4"
1879 #define TOP_SECOND "3"
1880 #define TMP_FIRST "6"
1881 #define TMP_SECOND "5"
1882 #else
1883 #define TOP_FIRST "3"
1884 #define TOP_SECOND "4"
1885 #define TMP_FIRST "5"
1886 #define TMP_SECOND "6"
1887 #endif
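
/* Illustrative example (assumed values, not build code): a push of the
   64-bit TOP stores two 32-bit words at 0(30) and 4(30).  In memory the
   low-order word comes first on little-endian and last on big-endian, so
   with TOP = 0x0000000100000002 (r3 = 1, r4 = 2):

     little-endian:  stw 4, 0(30); stw 3, 4(30)   -- TOP_FIRST is "4"
     big-endian:     stw 3, 0(30); stw 4, 4(30)   -- TOP_FIRST is "3"

   Either way a later 64-bit load of that doubleword sees the same value.  */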
1888
1889 /* Emit prologue in inferior memory. See above comments. */
1890
1891 static void
1892 ppc_emit_prologue (void)
1893 {
1894 EMIT_ASM (/* Save return address. */
1895 "mflr 0 \n"
1896 "stw 0, 4(1) \n"
1897 /* Adjust SP. 96 is the initial frame size. */
1898 "stwu 1, -96(1) \n"
1899 /* Save r31, r30 and the incoming arguments. */
1900 "stw 31, 96-4(1) \n"
1901 "stw 30, 96-8(1) \n"
1902 "stw 4, 96-12(1) \n"
1903 "stw 3, 96-16(1) \n"
1904 /* Point r31 to the original r1 for accessing arguments. */
1905 "addi 31, 1, 96 \n"
1906 /* Set r30 to point to the stack top. */
1907 "addi 30, 1, 64 \n"
1908 /* Initialize r3/TOP to 0. */
1909 "li 3, 0 \n"
1910 "li 4, 0 \n");
1911 }
1912
1913 /* Emit epilogue in inferior memory. See above comments. */
1914
1915 static void
1916 ppc_emit_epilogue (void)
1917 {
1918 EMIT_ASM (/* *result = TOP */
1919 "lwz 5, -12(31) \n"
1920 "stw " TOP_FIRST ", 0(5) \n"
1921 "stw " TOP_SECOND ", 4(5) \n"
1922 /* Restore registers. */
1923 "lwz 31, -4(31) \n"
1924 "lwz 30, -8(31) \n"
1925 /* Restore SP. */
1926 "lwz 1, 0(1) \n"
1927 /* Restore LR. */
1928 "lwz 0, 4(1) \n"
1929 /* Return 0 for no-error. */
1930 "li 3, 0 \n"
1931 "mtlr 0 \n"
1932 "blr \n");
1933 }
1934
1935 /* TOP = stack[--sp] + TOP */
1936
1937 static void
1938 ppc_emit_add (void)
1939 {
1940 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1941 "lwz " TMP_SECOND ", 4(30)\n"
1942 "addc 4, 6, 4 \n"
1943 "adde 3, 5, 3 \n");
1944 }
1945
1946 /* TOP = stack[--sp] - TOP */
1947
1948 static void
1949 ppc_emit_sub (void)
1950 {
1951 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1952 "lwz " TMP_SECOND ", 4(30) \n"
1953 "subfc 4, 4, 6 \n"
1954 "subfe 3, 3, 5 \n");
1955 }
1956
1957 /* TOP = stack[--sp] * TOP */
1958
1959 static void
1960 ppc_emit_mul (void)
1961 {
1962 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1963 "lwz " TMP_SECOND ", 4(30) \n"
1964 "mulhwu 7, 6, 4 \n"
1965 "mullw 3, 6, 3 \n"
1966 "mullw 5, 4, 5 \n"
1967 "mullw 4, 6, 4 \n"
1968 "add 3, 5, 3 \n"
1969 "add 3, 7, 3 \n");
1970 }
1971
1972 /* TOP = stack[--sp] << TOP */
1973
1974 static void
1975 ppc_emit_lsh (void)
1976 {
1977 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1978 "lwz " TMP_SECOND ", 4(30) \n"
1979 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1980 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1981 "slw 5, 5, 4\n" /* Shift high part left */
1982 "slw 4, 6, 4\n" /* Shift low part left */
1983 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1984 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1985 "or 3, 5, 3\n"
1986 "or 3, 7, 3\n"); /* Assemble high part */
1987 }
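
/* A pseudo-C sketch of the 64-bit "<<" composed from 32-bit shifts above
   (illustrative only; each 32-bit shift follows the PowerPC slw/srw rule
   that amounts outside 0..31 yield zero, which plain C does not promise):

     lo' = lo << sh;                        // 0 when sh >= 32
     hi' = (hi << sh)                       // 0 when sh >= 32
           | (lo >> (32 - sh))              // low bits into high, sh < 32
           | (lo << (sh - 32));             // low bits into high, sh >= 32

   That zero-on-overshoot behaviour is why no branches are needed here,
   unlike the arithmetic right shift below.  */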
1988
1989 /* TOP = stack[--sp] >> TOP
1990 (Arithmetic shift right) */
1991
1992 static void
1993 ppc_emit_rsh_signed (void)
1994 {
1995 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1996 "lwz " TMP_SECOND ", 4(30) \n"
1997 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1998 "sraw 3, 5, 4\n" /* Shift high part right */
1999 "cmpwi 7, 1\n"
2000 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
2001 "sraw 4, 5, 7\n" /* Shift high to low */
2002 "b 2f\n"
2003 "1:\n"
2004 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
2005 "srw 4, 6, 4\n" /* Shift low part right */
2006 "slw 5, 5, 7\n" /* Shift high to low */
2007 "or 4, 4, 5\n" /* Assemble low part */
2008 "2:\n");
2009 }
2010
2011 /* TOP = stack[--sp] >> TOP
2012 (Logical shift right) */
2013
2014 static void
2015 ppc_emit_rsh_unsigned (void)
2016 {
2017 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2018 "lwz " TMP_SECOND ", 4(30) \n"
2019 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2020 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2021 "srw 6, 6, 4\n" /* Shift low part right */
2022 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2023 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2024 "or 6, 6, 3\n"
2025 "srw 3, 5, 4\n" /* Shift high part right */
2026 "or 4, 6, 7\n"); /* Assemble low part */
2027 }
2028
2029 /* Emit code for sign-extension specified by ARG. */
2030
2031 static void
2032 ppc_emit_ext (int arg)
2033 {
2034 switch (arg)
2035 {
2036 case 8:
2037 EMIT_ASM ("extsb 4, 4\n"
2038 "srawi 3, 4, 31");
2039 break;
2040 case 16:
2041 EMIT_ASM ("extsh 4, 4\n"
2042 "srawi 3, 4, 31");
2043 break;
2044 case 32:
2045 EMIT_ASM ("srawi 3, 4, 31");
2046 break;
2047 default:
2048 emit_error = 1;
2049 }
2050 }
2051
2052 /* Emit code for zero-extension specified by ARG. */
2053
2054 static void
2055 ppc_emit_zero_ext (int arg)
2056 {
2057 switch (arg)
2058 {
2059 case 8:
2060 EMIT_ASM ("clrlwi 4,4,24\n"
2061 "li 3, 0\n");
2062 break;
2063 case 16:
2064 EMIT_ASM ("clrlwi 4,4,16\n"
2065 "li 3, 0\n");
2066 break;
2067 case 32:
2068 EMIT_ASM ("li 3, 0");
2069 break;
2070 default:
2071 emit_error = 1;
2072 }
2073 }
2074
2075 /* TOP = !TOP
2076 i.e., TOP = (TOP == 0) ? 1 : 0; */
2077
2078 static void
2079 ppc_emit_log_not (void)
2080 {
2081 EMIT_ASM ("or 4, 3, 4 \n"
2082 "cntlzw 4, 4 \n"
2083 "srwi 4, 4, 5 \n"
2084 "li 3, 0 \n");
2085 }
2086
2087 /* TOP = stack[--sp] & TOP */
2088
2089 static void
2090 ppc_emit_bit_and (void)
2091 {
2092 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2093 "lwz " TMP_SECOND ", 4(30) \n"
2094 "and 4, 6, 4 \n"
2095 "and 3, 5, 3 \n");
2096 }
2097
2098 /* TOP = stack[--sp] | TOP */
2099
2100 static void
2101 ppc_emit_bit_or (void)
2102 {
2103 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2104 "lwz " TMP_SECOND ", 4(30) \n"
2105 "or 4, 6, 4 \n"
2106 "or 3, 5, 3 \n");
2107 }
2108
2109 /* TOP = stack[--sp] ^ TOP */
2110
2111 static void
2112 ppc_emit_bit_xor (void)
2113 {
2114 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2115 "lwz " TMP_SECOND ", 4(30) \n"
2116 "xor 4, 6, 4 \n"
2117 "xor 3, 5, 3 \n");
2118 }
2119
2120 /* TOP = ~TOP
2121 i.e., TOP = ~(TOP | TOP) */
2122
2123 static void
2124 ppc_emit_bit_not (void)
2125 {
2126 EMIT_ASM ("nor 3, 3, 3 \n"
2127 "nor 4, 4, 4 \n");
2128 }
2129
2130 /* TOP = stack[--sp] == TOP */
2131
2132 static void
2133 ppc_emit_equal (void)
2134 {
2135 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2136 "lwz " TMP_SECOND ", 4(30) \n"
2137 "xor 4, 6, 4 \n"
2138 "xor 3, 5, 3 \n"
2139 "or 4, 3, 4 \n"
2140 "cntlzw 4, 4 \n"
2141 "srwi 4, 4, 5 \n"
2142 "li 3, 0 \n");
2143 }
2144
2145 /* TOP = stack[--sp] < TOP
2146 (Signed comparison) */
2147
2148 static void
2149 ppc_emit_less_signed (void)
2150 {
2151 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2152 "lwz " TMP_SECOND ", 4(30) \n"
2153 "cmplw 6, 6, 4 \n"
2154 "cmpw 7, 5, 3 \n"
2155 /* CR6 bit 0 = low less and high equal */
2156 "crand 6*4+0, 6*4+0, 7*4+2\n"
2157 /* CR7 bit 0 = (low less and high equal) or high less */
2158 "cror 7*4+0, 7*4+0, 6*4+0\n"
2159 "mfcr 4 \n"
2160 "rlwinm 4, 4, 29, 31, 31 \n"
2161 "li 3, 0 \n");
2162 }
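
/* Aside on the crand/cror operand arithmetic used above (illustrative
   only): condition-register bit operands are absolute bit numbers, and
   CR field n occupies bits 4*n .. 4*n+3 in LT, GT, EQ, SO order.  So
   "6*4+0" is CR6.LT, "7*4+2" is CR7.EQ, and the sequence computes

     CR6.LT = (low_a <u low_b) && (high_a == high_b)
     CR7.LT = (high_a < high_b) || CR6.LT

   which is the 64-bit signed "<" built on a 32-bit machine; the final
   mfcr/rlwinm pair extracts CR7.LT (CR bit 28) into bit 0 of r4.  */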
2163
2164 /* TOP = stack[--sp] < TOP
2165 (Unsigned comparison) */
2166
2167 static void
2168 ppc_emit_less_unsigned (void)
2169 {
2170 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2171 "lwz " TMP_SECOND ", 4(30) \n"
2172 "cmplw 6, 6, 4 \n"
2173 "cmplw 7, 5, 3 \n"
2174 /* CR6 bit 0 = low less and high equal */
2175 "crand 6*4+0, 6*4+0, 7*4+2\n"
2176 /* CR7 bit 0 = (low less and high equal) or high less */
2177 "cror 7*4+0, 7*4+0, 6*4+0\n"
2178 "mfcr 4 \n"
2179 "rlwinm 4, 4, 29, 31, 31 \n"
2180 "li 3, 0 \n");
2181 }
2182
2183 /* Dereference the memory address in TOP, reading SIZE bytes.
2184 Zero-extend the read value. */
2185
2186 static void
2187 ppc_emit_ref (int size)
2188 {
2189 switch (size)
2190 {
2191 case 1:
2192 EMIT_ASM ("lbz 4, 0(4)\n"
2193 "li 3, 0");
2194 break;
2195 case 2:
2196 EMIT_ASM ("lhz 4, 0(4)\n"
2197 "li 3, 0");
2198 break;
2199 case 4:
2200 EMIT_ASM ("lwz 4, 0(4)\n"
2201 "li 3, 0");
2202 break;
2203 case 8:
2204 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2205 EMIT_ASM ("lwz 3, 4(4)\n"
2206 "lwz 4, 0(4)");
2207 else
2208 EMIT_ASM ("lwz 3, 0(4)\n"
2209 "lwz 4, 4(4)");
2210 break;
2211 }
2212 }
2213
2214 /* TOP = NUM */
2215
2216 static void
2217 ppc_emit_const (LONGEST num)
2218 {
2219 uint32_t buf[10];
2220 uint32_t *p = buf;
2221
2222 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2223 p += gen_limm (p, 4, num & 0xffffffff, 0);
2224
2225 emit_insns (buf, p - buf);
2226 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2227 }
2228
2229 /* Set TOP to the value of register REG by calling the get_raw_reg function
2230 with two arguments, the collected buffer and the register number. */
2231
2232 static void
2233 ppc_emit_reg (int reg)
2234 {
2235 uint32_t buf[13];
2236 uint32_t *p = buf;
2237
2238 /* fctx->regs is passed in r3 and then saved in -16(31). */
2239 p += GEN_LWZ (p, 3, 31, -16);
2240 p += GEN_LI (p, 4, reg); /* li r4, reg */
2241 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2242
2243 emit_insns (buf, p - buf);
2244 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2245
2246 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2247 {
2248 EMIT_ASM ("mr 5, 4\n"
2249 "mr 4, 3\n"
2250 "mr 3, 5\n");
2251 }
2252 }
2253
2254 /* TOP = stack[--sp] */
2255
2256 static void
2257 ppc_emit_pop (void)
2258 {
2259 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2260 "lwz " TOP_SECOND ", 4(30) \n");
2261 }
2262
2263 /* stack[sp++] = TOP
2264
2265 Because we may use up the bytecode stack, expand it by 8 more
2266 doublewords if needed. */
2267
2268 static void
2269 ppc_emit_stack_flush (void)
2270 {
2271 /* Make sure the bytecode stack is big enough before the push.
2272 Otherwise, expand it by 64 more bytes. */
2273
2274 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2275 " stw " TOP_SECOND ", 4(30)\n"
2276 " addi 5, 30, -(8 + 8) \n"
2277 " cmpw 7, 5, 1 \n"
2278 " bgt 7, 1f \n"
2279 " stwu 31, -64(1) \n"
2280 "1:addi 30, 30, -8 \n");
2281 }
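
/* A worked reading of the check above (illustrative, not normative):
   after the two stw's the pushed TOP occupies 0(30)/4(30).  "addi 5, 30,
   -(8 + 8)" computes the bytecode stack top minus a 16-byte margin; if
   that is still above the real SP ("bgt 7, 1f") there is room and no
   expansion happens.  Otherwise "stwu 31, -64(1)" grows the frame by 64
   bytes, storing r31 (the entry SP) as the new back chain so that the
   epilogue's "lwz 1, 0(1)" still restores the original stack pointer in
   one load.  Either way r30 then moves down one 8-byte slot.  */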
2282
2283 /* Swap TOP and stack[sp-1] */
2284
2285 static void
2286 ppc_emit_swap (void)
2287 {
2288 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2289 "lwz " TMP_SECOND ", 12(30) \n"
2290 "stw " TOP_FIRST ", 8(30) \n"
2291 "stw " TOP_SECOND ", 12(30) \n"
2292 "mr 3, 5 \n"
2293 "mr 4, 6 \n");
2294 }
2295
2296 /* Discard N elements in the stack. Also used for ppc64. */
2297
2298 static void
2299 ppc_emit_stack_adjust (int n)
2300 {
2301 uint32_t buf[6];
2302 uint32_t *p = buf;
2303
2304 n = n << 3;
2305 if ((n >> 15) != 0)
2306 {
2307 emit_error = 1;
2308 return;
2309 }
2310
2311 p += GEN_ADDI (p, 30, 30, n);
2312
2313 emit_insns (buf, p - buf);
2314 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2315 }
2316
2317 /* Call function FN. */
2318
2319 static void
2320 ppc_emit_call (CORE_ADDR fn)
2321 {
2322 uint32_t buf[11];
2323 uint32_t *p = buf;
2324
2325 p += gen_call (p, fn, 0, 0);
2326
2327 emit_insns (buf, p - buf);
2328 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2329 }
2330
2331 /* FN's prototype is `LONGEST(*fn)(int)'.
2332 TOP = fn (arg1)
2333 */
2334
2335 static void
2336 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2337 {
2338 uint32_t buf[15];
2339 uint32_t *p = buf;
2340
2341 /* Setup argument. arg1 is a 16-bit value. */
2342 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2343 p += gen_call (p, fn, 0, 0);
2344
2345 emit_insns (buf, p - buf);
2346 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2347
2348 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2349 {
2350 EMIT_ASM ("mr 5, 4\n"
2351 "mr 4, 3\n"
2352 "mr 3, 5\n");
2353 }
2354 }
2355
2356 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2357 fn (arg1, TOP)
2358
2359 TOP should be preserved/restored before/after the call. */
2360
2361 static void
2362 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2363 {
2364 uint32_t buf[21];
2365 uint32_t *p = buf;
2366
2367 /* Save TOP. 0(30) is next-empty. */
2368 p += GEN_STW (p, 3, 30, 0);
2369 p += GEN_STW (p, 4, 30, 4);
2370
2371 /* Setup argument. arg1 is a 16-bit value. */
2372 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2373 {
2374 p += GEN_MR (p, 5, 4);
2375 p += GEN_MR (p, 6, 3);
2376 }
2377 else
2378 {
2379 p += GEN_MR (p, 5, 3);
2380 p += GEN_MR (p, 6, 4);
2381 }
2382 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2383 p += gen_call (p, fn, 0, 0);
2384
2385 /* Restore TOP */
2386 p += GEN_LWZ (p, 3, 30, 0);
2387 p += GEN_LWZ (p, 4, 30, 4);
2388
2389 emit_insns (buf, p - buf);
2390 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2391 }
2392
2393 /* Note in the following goto ops:
2394
2395 When emitting goto, the target address is later relocated by
2396 write_goto_address. OFFSET_P is the offset of the branch instruction
2397 in the code sequence, and SIZE_P is how to relocate the instruction,
2398 recognized by ppc_write_goto_address. In the current implementation,
2399 SIZE can be either 24 or 14, for a branch or a conditional-branch instruction.
2400 */
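
/* For example (illustrative): ppc_emit_if_goto below emits four
   instructions and its conditional branch is the fourth one, so it
   reports *OFFSET_P = 12 (bytes from the start of the sequence to the
   branch) and *SIZE_P = 14 (a conditional branch with a 14-bit
   displacement field).  The common bytecode compiler is then expected to
   call ppc_write_goto_address on that branch once the real target is
   known.  */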
2401
2402 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2403
2404 static void
2405 ppc_emit_if_goto (int *offset_p, int *size_p)
2406 {
2407 EMIT_ASM ("or. 3, 3, 4 \n"
2408 "lwzu " TOP_FIRST ", 8(30) \n"
2409 "lwz " TOP_SECOND ", 4(30) \n"
2410 "1:bne 0, 1b \n");
2411
2412 if (offset_p)
2413 *offset_p = 12;
2414 if (size_p)
2415 *size_p = 14;
2416 }
2417
2418 /* Unconditional goto. Also used for ppc64. */
2419
2420 static void
2421 ppc_emit_goto (int *offset_p, int *size_p)
2422 {
2423 EMIT_ASM ("1:b 1b");
2424
2425 if (offset_p)
2426 *offset_p = 0;
2427 if (size_p)
2428 *size_p = 24;
2429 }
2430
2431 /* Goto if stack[--sp] == TOP */
2432
2433 static void
2434 ppc_emit_eq_goto (int *offset_p, int *size_p)
2435 {
2436 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2437 "lwz " TMP_SECOND ", 4(30) \n"
2438 "xor 4, 6, 4 \n"
2439 "xor 3, 5, 3 \n"
2440 "or. 3, 3, 4 \n"
2441 "lwzu " TOP_FIRST ", 8(30) \n"
2442 "lwz " TOP_SECOND ", 4(30) \n"
2443 "1:beq 0, 1b \n");
2444
2445 if (offset_p)
2446 *offset_p = 28;
2447 if (size_p)
2448 *size_p = 14;
2449 }
2450
2451 /* Goto if stack[--sp] != TOP */
2452
2453 static void
2454 ppc_emit_ne_goto (int *offset_p, int *size_p)
2455 {
2456 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2457 "lwz " TMP_SECOND ", 4(30) \n"
2458 "xor 4, 6, 4 \n"
2459 "xor 3, 5, 3 \n"
2460 "or. 3, 3, 4 \n"
2461 "lwzu " TOP_FIRST ", 8(30) \n"
2462 "lwz " TOP_SECOND ", 4(30) \n"
2463 "1:bne 0, 1b \n");
2464
2465 if (offset_p)
2466 *offset_p = 28;
2467 if (size_p)
2468 *size_p = 14;
2469 }
2470
2471 /* Goto if stack[--sp] < TOP */
2472
2473 static void
2474 ppc_emit_lt_goto (int *offset_p, int *size_p)
2475 {
2476 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2477 "lwz " TMP_SECOND ", 4(30) \n"
2478 "cmplw 6, 6, 4 \n"
2479 "cmpw 7, 5, 3 \n"
2480 /* CR6 bit 0 = low less and high equal */
2481 "crand 6*4+0, 6*4+0, 7*4+2\n"
2482 /* CR7 bit 0 = (low less and high equal) or high less */
2483 "cror 7*4+0, 7*4+0, 6*4+0\n"
2484 "lwzu " TOP_FIRST ", 8(30) \n"
2485 "lwz " TOP_SECOND ", 4(30)\n"
2486 "1:blt 7, 1b \n");
2487
2488 if (offset_p)
2489 *offset_p = 32;
2490 if (size_p)
2491 *size_p = 14;
2492 }
2493
2494 /* Goto if stack[--sp] <= TOP */
2495
2496 static void
2497 ppc_emit_le_goto (int *offset_p, int *size_p)
2498 {
2499 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2500 "lwz " TMP_SECOND ", 4(30) \n"
2501 "cmplw 6, 6, 4 \n"
2502 "cmpw 7, 5, 3 \n"
2503 /* CR6 bit 0 = low less/equal and high equal */
2504 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2505 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2506 "cror 7*4+0, 7*4+0, 6*4+0\n"
2507 "lwzu " TOP_FIRST ", 8(30) \n"
2508 "lwz " TOP_SECOND ", 4(30)\n"
2509 "1:blt 7, 1b \n");
2510
2511 if (offset_p)
2512 *offset_p = 32;
2513 if (size_p)
2514 *size_p = 14;
2515 }
2516
2517 /* Goto if stack[--sp] > TOP */
2518
2519 static void
2520 ppc_emit_gt_goto (int *offset_p, int *size_p)
2521 {
2522 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2523 "lwz " TMP_SECOND ", 4(30) \n"
2524 "cmplw 6, 6, 4 \n"
2525 "cmpw 7, 5, 3 \n"
2526 /* CR6 bit 0 = low greater and high equal */
2527 "crand 6*4+0, 6*4+1, 7*4+2\n"
2528 /* CR7 bit 0 = (low greater and high equal) or high greater */
2529 "cror 7*4+0, 7*4+1, 6*4+0\n"
2530 "lwzu " TOP_FIRST ", 8(30) \n"
2531 "lwz " TOP_SECOND ", 4(30)\n"
2532 "1:blt 7, 1b \n");
2533
2534 if (offset_p)
2535 *offset_p = 32;
2536 if (size_p)
2537 *size_p = 14;
2538 }
2539
2540 /* Goto if stack[--sp] >= TOP */
2541
2542 static void
2543 ppc_emit_ge_goto (int *offset_p, int *size_p)
2544 {
2545 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2546 "lwz " TMP_SECOND ", 4(30) \n"
2547 "cmplw 6, 6, 4 \n"
2548 "cmpw 7, 5, 3 \n"
2549 /* CR6 bit 0 = low ge and high equal */
2550 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2551 /* CR7 bit 0 = (low ge and high equal) or high greater */
2552 "cror 7*4+0, 7*4+1, 6*4+0\n"
2553 "lwzu " TOP_FIRST ", 8(30)\n"
2554 "lwz " TOP_SECOND ", 4(30)\n"
2555 "1:blt 7, 1b \n");
2556
2557 if (offset_p)
2558 *offset_p = 32;
2559 if (size_p)
2560 *size_p = 14;
2561 }
2562
2563 /* Relocate a previously emitted branch instruction. FROM is the address
2564 of the branch instruction, TO is the goto target address, and SIZE
2565 is the value we set via *SIZE_P before. Currently, it is either
2566 24 or 14, for a branch or a conditional-branch instruction.
2567 Also used for ppc64. */
2568
2569 static void
2570 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2571 {
2572 long rel = to - from;
2573 uint32_t insn;
2574 int opcd;
2575
2576 read_inferior_memory (from, (unsigned char *) &insn, 4);
2577 opcd = (insn >> 26) & 0x3f;
2578
2579 switch (size)
2580 {
2581 case 14:
2582 if (opcd != 16
2583 || (rel >= (1 << 15) || rel < -(1 << 15)))
2584 emit_error = 1;
2585 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2586 break;
2587 case 24:
2588 if (opcd != 18
2589 || (rel >= (1 << 25) || rel < -(1 << 25)))
2590 emit_error = 1;
2591 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2592 break;
2593 default:
2594 emit_error = 1;
2595 }
2596
2597 if (!emit_error)
2598 target_write_memory (from, (unsigned char *) &insn, 4);
2599 }
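
/* Concrete example of the patching above (for illustration): assume a
   "bne" at 0x1000 must reach 0x1040, so rel = 0x40.  With SIZE == 14 the
   low 16 bits of the instruction hold BD:AA:LK, and
   (insn & ~0xfffc) | (0x40 & 0xfffc) rewrites only the 14-bit displacement
   while keeping the AA/LK bits.  With SIZE == 24 (an unconditional "b")
   the same idea applies to the 24-bit LI field via the 0x3fffffc mask.  */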
2600
2601 /* Table of emit ops for 32-bit. */
2602
2603 static struct emit_ops ppc_emit_ops_impl =
2604 {
2605 ppc_emit_prologue,
2606 ppc_emit_epilogue,
2607 ppc_emit_add,
2608 ppc_emit_sub,
2609 ppc_emit_mul,
2610 ppc_emit_lsh,
2611 ppc_emit_rsh_signed,
2612 ppc_emit_rsh_unsigned,
2613 ppc_emit_ext,
2614 ppc_emit_log_not,
2615 ppc_emit_bit_and,
2616 ppc_emit_bit_or,
2617 ppc_emit_bit_xor,
2618 ppc_emit_bit_not,
2619 ppc_emit_equal,
2620 ppc_emit_less_signed,
2621 ppc_emit_less_unsigned,
2622 ppc_emit_ref,
2623 ppc_emit_if_goto,
2624 ppc_emit_goto,
2625 ppc_write_goto_address,
2626 ppc_emit_const,
2627 ppc_emit_call,
2628 ppc_emit_reg,
2629 ppc_emit_pop,
2630 ppc_emit_stack_flush,
2631 ppc_emit_zero_ext,
2632 ppc_emit_swap,
2633 ppc_emit_stack_adjust,
2634 ppc_emit_int_call_1,
2635 ppc_emit_void_call_2,
2636 ppc_emit_eq_goto,
2637 ppc_emit_ne_goto,
2638 ppc_emit_lt_goto,
2639 ppc_emit_le_goto,
2640 ppc_emit_gt_goto,
2641 ppc_emit_ge_goto
2642 };
2643
2644 #ifdef __powerpc64__
2645
2646 /*
2647
2648 Bytecode execution stack frame - 64-bit
2649
2650 | LR save area (SP + 16)
2651 | CR save area (SP + 8)
2652 SP' -> +- Back chain (SP + 0)
2653 | Save r31 for access saved arguments
2654 | Save r30 for bytecode stack pointer
2655 | Save r4 for incoming argument *value
2656 | Save r3 for incoming argument regs
2657 r30 -> +- Bytecode execution stack
2658 |
2659 | 64 bytes (8 doublewords) initially.
2660 | Expand stack as needed.
2661 |
2662 +-
2663 | Some padding for minimum stack frame.
2664 | 112 for ELFv1.
2665 SP +- Back-chain (SP')
2666
2667 initial frame size
2668 = 112 + (4 * 8) + 64
2669 = 208
2670
2671 r30 is the stack pointer for the bytecode machine.
2672 It should point to the next empty slot, so we can use LDU for pop.
2673 r3 caches the TOP value.
2674 On entry it held the first argument, the pointer to regs.
2675 r4 held the second argument, the pointer to the result.
2676 We must set *result = TOP before returning from this function.
2677
2678 Note:
2679 * To restore stack at epilogue
2680 => sp = r31
2681 * To check stack is big enough for bytecode execution.
2682 => r30 - 8 > SP + 112
2683 * To return execution result.
2684 => 0(r4) = TOP
2685
2686 */
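
/* A minimal sketch of the resulting 64-bit layout, using the offsets the
   prologues below actually emit (relative to the new SP after
   "stdu 1, -208(1)"):

     200(1)  saved r31          -8(r31)
     192(1)  saved r30          -16(r31)
     184(1)  saved r4 (*value)  -24(r31)
     176(1)  saved r3 (regs)    -32(r31)
     168(1)  initial bytecode stack top (r30)
       0(1)  back chain -> entry r1 (= r31); LR is saved at 16(r31)
             in the caller's frame as usual.  */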
2687
2688 /* Emit prologue in inferior memory. See above comments. */
2689
2690 static void
2691 ppc64v1_emit_prologue (void)
2692 {
2693 /* On ELFv1, function pointers really point to a function descriptor,
2694 so emit one here. We don't care about the contents of words 1 and 2,
2695 so let them just overlap our code. */
2696 uint64_t opd = current_insn_ptr + 8;
2697 uint32_t buf[2];
2698
2699 /* Mind the strict aliasing rules. */
2700 memcpy (buf, &opd, sizeof buf);
2701 emit_insns (buf, 2);
2702 EMIT_ASM (/* Save return address. */
2703 "mflr 0 \n"
2704 "std 0, 16(1) \n"
2705 /* Save r31, r30 and the incoming arguments. */
2706 "std 31, -8(1) \n"
2707 "std 30, -16(1) \n"
2708 "std 4, -24(1) \n"
2709 "std 3, -32(1) \n"
2710 /* Point r31 to the current r1 for accessing arguments. */
2711 "mr 31, 1 \n"
2712 /* Adjust SP. 208 is the initial frame size. */
2713 "stdu 1, -208(1) \n"
2714 /* Set r30 to point to the stack top. */
2715 "addi 30, 1, 168 \n"
2716 /* Initialize r3/TOP to 0. */
2717 "li 3, 0 \n");
2718 }
2719
2720 /* Emit prologue in inferior memory. See above comments. */
2721
2722 static void
2723 ppc64v2_emit_prologue (void)
2724 {
2725 EMIT_ASM (/* Save return address. */
2726 "mflr 0 \n"
2727 "std 0, 16(1) \n"
2728 /* Save r31, r30 and the incoming arguments. */
2729 "std 31, -8(1) \n"
2730 "std 30, -16(1) \n"
2731 "std 4, -24(1) \n"
2732 "std 3, -32(1) \n"
2733 /* Point r31 to the current r1 for accessing arguments. */
2734 "mr 31, 1 \n"
2735 /* Adjust SP. 208 is the initial frame size. */
2736 "stdu 1, -208(1) \n"
2737 /* Set r30 to point to the stack top. */
2738 "addi 30, 1, 168 \n"
2739 /* Initialize r3/TOP to 0. */
2740 "li 3, 0 \n");
2741 }
2742
2743 /* Emit epilogue in inferior memory. See above comments. */
2744
2745 static void
2746 ppc64_emit_epilogue (void)
2747 {
2748 EMIT_ASM (/* Restore SP. */
2749 "ld 1, 0(1) \n"
2750 /* *result = TOP */
2751 "ld 4, -24(1) \n"
2752 "std 3, 0(4) \n"
2753 /* Restore registers. */
2754 "ld 31, -8(1) \n"
2755 "ld 30, -16(1) \n"
2756 /* Restore LR. */
2757 "ld 0, 16(1) \n"
2758 /* Return 0 for no-error. */
2759 "li 3, 0 \n"
2760 "mtlr 0 \n"
2761 "blr \n");
2762 }
2763
2764 /* TOP = stack[--sp] + TOP */
2765
2766 static void
2767 ppc64_emit_add (void)
2768 {
2769 EMIT_ASM ("ldu 4, 8(30) \n"
2770 "add 3, 4, 3 \n");
2771 }
2772
2773 /* TOP = stack[--sp] - TOP */
2774
2775 static void
2776 ppc64_emit_sub (void)
2777 {
2778 EMIT_ASM ("ldu 4, 8(30) \n"
2779 "sub 3, 4, 3 \n");
2780 }
2781
2782 /* TOP = stack[--sp] * TOP */
2783
2784 static void
2785 ppc64_emit_mul (void)
2786 {
2787 EMIT_ASM ("ldu 4, 8(30) \n"
2788 "mulld 3, 4, 3 \n");
2789 }
2790
2791 /* TOP = stack[--sp] << TOP */
2792
2793 static void
2794 ppc64_emit_lsh (void)
2795 {
2796 EMIT_ASM ("ldu 4, 8(30) \n"
2797 "sld 3, 4, 3 \n");
2798 }
2799
2800 /* TOP = stack[--sp] >> TOP
2801 (Arithmetic shift right) */
2802
2803 static void
2804 ppc64_emit_rsh_signed (void)
2805 {
2806 EMIT_ASM ("ldu 4, 8(30) \n"
2807 "srad 3, 4, 3 \n");
2808 }
2809
2810 /* TOP = stack[--sp] >> TOP
2811 (Logical shift right) */
2812
2813 static void
2814 ppc64_emit_rsh_unsigned (void)
2815 {
2816 EMIT_ASM ("ldu 4, 8(30) \n"
2817 "srd 3, 4, 3 \n");
2818 }
2819
2820 /* Emit code for sign-extension specified by ARG. */
2821
2822 static void
2823 ppc64_emit_ext (int arg)
2824 {
2825 switch (arg)
2826 {
2827 case 8:
2828 EMIT_ASM ("extsb 3, 3");
2829 break;
2830 case 16:
2831 EMIT_ASM ("extsh 3, 3");
2832 break;
2833 case 32:
2834 EMIT_ASM ("extsw 3, 3");
2835 break;
2836 default:
2837 emit_error = 1;
2838 }
2839 }
2840
2841 /* Emit code for zero-extension specified by ARG. */
2842
2843 static void
2844 ppc64_emit_zero_ext (int arg)
2845 {
2846 switch (arg)
2847 {
2848 case 8:
2849 EMIT_ASM ("rldicl 3,3,0,56");
2850 break;
2851 case 16:
2852 EMIT_ASM ("rldicl 3,3,0,48");
2853 break;
2854 case 32:
2855 EMIT_ASM ("rldicl 3,3,0,32");
2856 break;
2857 default:
2858 emit_error = 1;
2859 }
2860 }
2861
2862 /* TOP = !TOP
2863 i.e., TOP = (TOP == 0) ? 1 : 0; */
2864
2865 static void
2866 ppc64_emit_log_not (void)
2867 {
2868 EMIT_ASM ("cntlzd 3, 3 \n"
2869 "srdi 3, 3, 6 \n");
2870 }
2871
2872 /* TOP = stack[--sp] & TOP */
2873
2874 static void
2875 ppc64_emit_bit_and (void)
2876 {
2877 EMIT_ASM ("ldu 4, 8(30) \n"
2878 "and 3, 4, 3 \n");
2879 }
2880
2881 /* TOP = stack[--sp] | TOP */
2882
2883 static void
2884 ppc64_emit_bit_or (void)
2885 {
2886 EMIT_ASM ("ldu 4, 8(30) \n"
2887 "or 3, 4, 3 \n");
2888 }
2889
2890 /* TOP = stack[--sp] ^ TOP */
2891
2892 static void
2893 ppc64_emit_bit_xor (void)
2894 {
2895 EMIT_ASM ("ldu 4, 8(30) \n"
2896 "xor 3, 4, 3 \n");
2897 }
2898
2899 /* TOP = ~TOP
2900 i.e., TOP = ~(TOP | TOP) */
2901
2902 static void
2903 ppc64_emit_bit_not (void)
2904 {
2905 EMIT_ASM ("nor 3, 3, 3 \n");
2906 }
2907
2908 /* TOP = stack[--sp] == TOP */
2909
2910 static void
2911 ppc64_emit_equal (void)
2912 {
2913 EMIT_ASM ("ldu 4, 8(30) \n"
2914 "xor 3, 3, 4 \n"
2915 "cntlzd 3, 3 \n"
2916 "srdi 3, 3, 6 \n");
2917 }
2918
2919 /* TOP = stack[--sp] < TOP
2920 (Signed comparison) */
2921
2922 static void
2923 ppc64_emit_less_signed (void)
2924 {
2925 EMIT_ASM ("ldu 4, 8(30) \n"
2926 "cmpd 7, 4, 3 \n"
2927 "mfcr 3 \n"
2928 "rlwinm 3, 3, 29, 31, 31 \n");
2929 }
2930
2931 /* TOP = stack[--sp] < TOP
2932 (Unsigned comparison) */
2933
2934 static void
2935 ppc64_emit_less_unsigned (void)
2936 {
2937 EMIT_ASM ("ldu 4, 8(30) \n"
2938 "cmpld 7, 4, 3 \n"
2939 "mfcr 3 \n"
2940 "rlwinm 3, 3, 29, 31, 31 \n");
2941 }
2942
2943 /* Dereference the memory address in TOP, reading SIZE bytes.
2944 Zero-extend the read value. */
2945
2946 static void
2947 ppc64_emit_ref (int size)
2948 {
2949 switch (size)
2950 {
2951 case 1:
2952 EMIT_ASM ("lbz 3, 0(3)");
2953 break;
2954 case 2:
2955 EMIT_ASM ("lhz 3, 0(3)");
2956 break;
2957 case 4:
2958 EMIT_ASM ("lwz 3, 0(3)");
2959 break;
2960 case 8:
2961 EMIT_ASM ("ld 3, 0(3)");
2962 break;
2963 }
2964 }
2965
2966 /* TOP = NUM */
2967
2968 static void
2969 ppc64_emit_const (LONGEST num)
2970 {
2971 uint32_t buf[5];
2972 uint32_t *p = buf;
2973
2974 p += gen_limm (p, 3, num, 1);
2975
2976 emit_insns (buf, p - buf);
2977 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2978 }
2979
2980 /* Set TOP to the value of register REG by calling the get_raw_reg function
2981 with two arguments, the collected buffer and the register number. */
2982
2983 static void
2984 ppc64v1_emit_reg (int reg)
2985 {
2986 uint32_t buf[15];
2987 uint32_t *p = buf;
2988
2989 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2990 p += GEN_LD (p, 3, 31, -32);
2991 p += GEN_LI (p, 4, reg);
2992 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2993 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2994 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2995
2996 emit_insns (buf, p - buf);
2997 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2998 }
2999
3000 /* Likewise, for ELFv2. */
3001
3002 static void
3003 ppc64v2_emit_reg (int reg)
3004 {
3005 uint32_t buf[12];
3006 uint32_t *p = buf;
3007
3008 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
3009 p += GEN_LD (p, 3, 31, -32);
3010 p += GEN_LI (p, 4, reg);
3011 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3012 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
3013 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3014
3015 emit_insns (buf, p - buf);
3016 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3017 }
3018
3019 /* TOP = stack[--sp] */
3020
3021 static void
3022 ppc64_emit_pop (void)
3023 {
3024 EMIT_ASM ("ldu 3, 8(30)");
3025 }
3026
3027 /* stack[sp++] = TOP
3028
3029 Because we may use up the bytecode stack, expand it by 8 more
3030 doublewords if needed. */
3031
3032 static void
3033 ppc64_emit_stack_flush (void)
3034 {
3035 /* Make sure the bytecode stack is big enough before the push.
3036 Otherwise, expand it by 64 more bytes. */
3037
3038 EMIT_ASM (" std 3, 0(30) \n"
3039 " addi 4, 30, -(112 + 8) \n"
3040 " cmpd 7, 4, 1 \n"
3041 " bgt 7, 1f \n"
3042 " stdu 31, -64(1) \n"
3043 "1:addi 30, 30, -8 \n");
3044 }
3045
3046 /* Swap TOP and stack[sp-1] */
3047
3048 static void
3049 ppc64_emit_swap (void)
3050 {
3051 EMIT_ASM ("ld 4, 8(30) \n"
3052 "std 3, 8(30) \n"
3053 "mr 3, 4 \n");
3054 }
3055
3056 /* Call function FN - ELFv1. */
3057
3058 static void
3059 ppc64v1_emit_call (CORE_ADDR fn)
3060 {
3061 uint32_t buf[13];
3062 uint32_t *p = buf;
3063
3064 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3065 p += gen_call (p, fn, 1, 1);
3066 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3067
3068 emit_insns (buf, p - buf);
3069 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3070 }
3071
3072 /* Call function FN - ELFv2. */
3073
3074 static void
3075 ppc64v2_emit_call (CORE_ADDR fn)
3076 {
3077 uint32_t buf[10];
3078 uint32_t *p = buf;
3079
3080 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3081 p += gen_call (p, fn, 1, 0);
3082 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3083
3084 emit_insns (buf, p - buf);
3085 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3086 }
3087
3088 /* FN's prototype is `LONGEST(*fn)(int)'.
3089 TOP = fn (arg1)
3090 */
3091
3092 static void
3093 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3094 {
3095 uint32_t buf[13];
3096 uint32_t *p = buf;
3097
3098 /* Setup argument. arg1 is a 16-bit value. */
3099 p += gen_limm (p, 3, arg1, 1);
3100 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3101 p += gen_call (p, fn, 1, 1);
3102 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3103
3104 emit_insns (buf, p - buf);
3105 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3106 }
3107
3108 /* Likewise for ELFv2. */
3109
3110 static void
3111 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3112 {
3113 uint32_t buf[10];
3114 uint32_t *p = buf;
3115
3116 /* Setup argument. arg1 is a 16-bit value. */
3117 p += gen_limm (p, 3, arg1, 1);
3118 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3119 p += gen_call (p, fn, 1, 0);
3120 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3121
3122 emit_insns (buf, p - buf);
3123 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3124 }
3125
3126 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3127 fn (arg1, TOP)
3128
3129 TOP should be preserved/restored before/after the call. */
3130
3131 static void
3132 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3133 {
3134 uint32_t buf[17];
3135 uint32_t *p = buf;
3136
3137 /* Save TOP. 0(30) is next-empty. */
3138 p += GEN_STD (p, 3, 30, 0);
3139
3140 /* Setup argument. arg1 is a 16-bit value. */
3141 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3142 p += gen_limm (p, 3, arg1, 1);
3143 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3144 p += gen_call (p, fn, 1, 1);
3145 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3146
3147 /* Restore TOP */
3148 p += GEN_LD (p, 3, 30, 0);
3149
3150 emit_insns (buf, p - buf);
3151 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3152 }
3153
3154 /* Likewise for ELFv2. */
3155
3156 static void
3157 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3158 {
3159 uint32_t buf[14];
3160 uint32_t *p = buf;
3161
3162 /* Save TOP. 0(30) is next-empty. */
3163 p += GEN_STD (p, 3, 30, 0);
3164
3165 /* Setup argument. arg1 is a 16-bit value. */
3166 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3167 p += gen_limm (p, 3, arg1, 1);
3168 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3169 p += gen_call (p, fn, 1, 0);
3170 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3171
3172 /* Restore TOP */
3173 p += GEN_LD (p, 3, 30, 0);
3174
3175 emit_insns (buf, p - buf);
3176 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3177 }
3178
3179 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3180
3181 static void
3182 ppc64_emit_if_goto (int *offset_p, int *size_p)
3183 {
3184 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3185 "ldu 3, 8(30) \n"
3186 "1:bne 7, 1b \n");
3187
3188 if (offset_p)
3189 *offset_p = 8;
3190 if (size_p)
3191 *size_p = 14;
3192 }
3193
3194 /* Goto if stack[--sp] == TOP */
3195
3196 static void
3197 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3198 {
3199 EMIT_ASM ("ldu 4, 8(30) \n"
3200 "cmpd 7, 4, 3 \n"
3201 "ldu 3, 8(30) \n"
3202 "1:beq 7, 1b \n");
3203
3204 if (offset_p)
3205 *offset_p = 12;
3206 if (size_p)
3207 *size_p = 14;
3208 }
3209
3210 /* Goto if stack[--sp] != TOP */
3211
3212 static void
3213 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3214 {
3215 EMIT_ASM ("ldu 4, 8(30) \n"
3216 "cmpd 7, 4, 3 \n"
3217 "ldu 3, 8(30) \n"
3218 "1:bne 7, 1b \n");
3219
3220 if (offset_p)
3221 *offset_p = 12;
3222 if (size_p)
3223 *size_p = 14;
3224 }
3225
3226 /* Goto if stack[--sp] < TOP */
3227
3228 static void
3229 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3230 {
3231 EMIT_ASM ("ldu 4, 8(30) \n"
3232 "cmpd 7, 4, 3 \n"
3233 "ldu 3, 8(30) \n"
3234 "1:blt 7, 1b \n");
3235
3236 if (offset_p)
3237 *offset_p = 12;
3238 if (size_p)
3239 *size_p = 14;
3240 }
3241
3242 /* Goto if stack[--sp] <= TOP */
3243
3244 static void
3245 ppc64_emit_le_goto (int *offset_p, int *size_p)
3246 {
3247 EMIT_ASM ("ldu 4, 8(30) \n"
3248 "cmpd 7, 4, 3 \n"
3249 "ldu 3, 8(30) \n"
3250 "1:ble 7, 1b \n");
3251
3252 if (offset_p)
3253 *offset_p = 12;
3254 if (size_p)
3255 *size_p = 14;
3256 }
3257
3258 /* Goto if stack[--sp] > TOP */
3259
3260 static void
3261 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3262 {
3263 EMIT_ASM ("ldu 4, 8(30) \n"
3264 "cmpd 7, 4, 3 \n"
3265 "ldu 3, 8(30) \n"
3266 "1:bgt 7, 1b \n");
3267
3268 if (offset_p)
3269 *offset_p = 12;
3270 if (size_p)
3271 *size_p = 14;
3272 }
3273
3274 /* Goto if stack[--sp] >= TOP */
3275
3276 static void
3277 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3278 {
3279 EMIT_ASM ("ldu 4, 8(30) \n"
3280 "cmpd 7, 4, 3 \n"
3281 "ldu 3, 8(30) \n"
3282 "1:bge 7, 1b \n");
3283
3284 if (offset_p)
3285 *offset_p = 12;
3286 if (size_p)
3287 *size_p = 14;
3288 }
3289
3290 /* Table of emit ops for 64-bit ELFv1. */
3291
3292 static struct emit_ops ppc64v1_emit_ops_impl =
3293 {
3294 ppc64v1_emit_prologue,
3295 ppc64_emit_epilogue,
3296 ppc64_emit_add,
3297 ppc64_emit_sub,
3298 ppc64_emit_mul,
3299 ppc64_emit_lsh,
3300 ppc64_emit_rsh_signed,
3301 ppc64_emit_rsh_unsigned,
3302 ppc64_emit_ext,
3303 ppc64_emit_log_not,
3304 ppc64_emit_bit_and,
3305 ppc64_emit_bit_or,
3306 ppc64_emit_bit_xor,
3307 ppc64_emit_bit_not,
3308 ppc64_emit_equal,
3309 ppc64_emit_less_signed,
3310 ppc64_emit_less_unsigned,
3311 ppc64_emit_ref,
3312 ppc64_emit_if_goto,
3313 ppc_emit_goto,
3314 ppc_write_goto_address,
3315 ppc64_emit_const,
3316 ppc64v1_emit_call,
3317 ppc64v1_emit_reg,
3318 ppc64_emit_pop,
3319 ppc64_emit_stack_flush,
3320 ppc64_emit_zero_ext,
3321 ppc64_emit_swap,
3322 ppc_emit_stack_adjust,
3323 ppc64v1_emit_int_call_1,
3324 ppc64v1_emit_void_call_2,
3325 ppc64_emit_eq_goto,
3326 ppc64_emit_ne_goto,
3327 ppc64_emit_lt_goto,
3328 ppc64_emit_le_goto,
3329 ppc64_emit_gt_goto,
3330 ppc64_emit_ge_goto
3331 };
3332
3333 /* Table of emit ops for 64-bit ELFv2. */
3334
3335 static struct emit_ops ppc64v2_emit_ops_impl =
3336 {
3337 ppc64v2_emit_prologue,
3338 ppc64_emit_epilogue,
3339 ppc64_emit_add,
3340 ppc64_emit_sub,
3341 ppc64_emit_mul,
3342 ppc64_emit_lsh,
3343 ppc64_emit_rsh_signed,
3344 ppc64_emit_rsh_unsigned,
3345 ppc64_emit_ext,
3346 ppc64_emit_log_not,
3347 ppc64_emit_bit_and,
3348 ppc64_emit_bit_or,
3349 ppc64_emit_bit_xor,
3350 ppc64_emit_bit_not,
3351 ppc64_emit_equal,
3352 ppc64_emit_less_signed,
3353 ppc64_emit_less_unsigned,
3354 ppc64_emit_ref,
3355 ppc64_emit_if_goto,
3356 ppc_emit_goto,
3357 ppc_write_goto_address,
3358 ppc64_emit_const,
3359 ppc64v2_emit_call,
3360 ppc64v2_emit_reg,
3361 ppc64_emit_pop,
3362 ppc64_emit_stack_flush,
3363 ppc64_emit_zero_ext,
3364 ppc64_emit_swap,
3365 ppc_emit_stack_adjust,
3366 ppc64v2_emit_int_call_1,
3367 ppc64v2_emit_void_call_2,
3368 ppc64_emit_eq_goto,
3369 ppc64_emit_ne_goto,
3370 ppc64_emit_lt_goto,
3371 ppc64_emit_le_goto,
3372 ppc64_emit_gt_goto,
3373 ppc64_emit_ge_goto
3374 };
3375
3376 #endif
3377
3378 /* Implementation of target ops method "emit_ops". */
3379
3380 emit_ops *
3381 ppc_target::emit_ops ()
3382 {
3383 #ifdef __powerpc64__
3384 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3385
3386 if (register_size (regcache->tdesc, 0) == 8)
3387 {
3388 if (is_elfv2_inferior ())
3389 return &ppc64v2_emit_ops_impl;
3390 else
3391 return &ppc64v1_emit_ops_impl;
3392 }
3393 #endif
3394 return &ppc_emit_ops_impl;
3395 }
3396
3397 /* Implementation of target ops method "get_ipa_tdesc_idx". */
3398
3399 int
3400 ppc_target::get_ipa_tdesc_idx ()
3401 {
3402 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3403 const struct target_desc *tdesc = regcache->tdesc;
3404
3405 #ifdef __powerpc64__
3406 if (tdesc == tdesc_powerpc_64l)
3407 return PPC_TDESC_BASE;
3408 if (tdesc == tdesc_powerpc_altivec64l)
3409 return PPC_TDESC_ALTIVEC;
3410 if (tdesc == tdesc_powerpc_vsx64l)
3411 return PPC_TDESC_VSX;
3412 if (tdesc == tdesc_powerpc_isa205_64l)
3413 return PPC_TDESC_ISA205;
3414 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3415 return PPC_TDESC_ISA205_ALTIVEC;
3416 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3417 return PPC_TDESC_ISA205_VSX;
3418 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3419 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3420 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3421 return PPC_TDESC_ISA207_VSX;
3422 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3423 return PPC_TDESC_ISA207_HTM_VSX;
3424 #endif
3425
3426 if (tdesc == tdesc_powerpc_32l)
3427 return PPC_TDESC_BASE;
3428 if (tdesc == tdesc_powerpc_altivec32l)
3429 return PPC_TDESC_ALTIVEC;
3430 if (tdesc == tdesc_powerpc_vsx32l)
3431 return PPC_TDESC_VSX;
3432 if (tdesc == tdesc_powerpc_isa205_32l)
3433 return PPC_TDESC_ISA205;
3434 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3435 return PPC_TDESC_ISA205_ALTIVEC;
3436 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3437 return PPC_TDESC_ISA205_VSX;
3438 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3439 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3440 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3441 return PPC_TDESC_ISA207_VSX;
3442 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3443 return PPC_TDESC_ISA207_HTM_VSX;
3444 if (tdesc == tdesc_powerpc_e500l)
3445 return PPC_TDESC_E500;
3446
3447 return 0;
3448 }
3449
3450 /* The linux target ops object. */
3451
3452 linux_process_target *the_linux_target = &the_ppc_target;
3453
3454 void
3455 initialize_low_arch (void)
3456 {
3457 /* Initialize the Linux target descriptions. */
3458
3459 init_registers_powerpc_32l ();
3460 init_registers_powerpc_altivec32l ();
3461 init_registers_powerpc_vsx32l ();
3462 init_registers_powerpc_isa205_32l ();
3463 init_registers_powerpc_isa205_altivec32l ();
3464 init_registers_powerpc_isa205_vsx32l ();
3465 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3466 init_registers_powerpc_isa207_vsx32l ();
3467 init_registers_powerpc_isa207_htm_vsx32l ();
3468 init_registers_powerpc_e500l ();
3469 #if __powerpc64__
3470 init_registers_powerpc_64l ();
3471 init_registers_powerpc_altivec64l ();
3472 init_registers_powerpc_vsx64l ();
3473 init_registers_powerpc_isa205_64l ();
3474 init_registers_powerpc_isa205_altivec64l ();
3475 init_registers_powerpc_isa205_vsx64l ();
3476 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3477 init_registers_powerpc_isa207_vsx64l ();
3478 init_registers_powerpc_isa207_htm_vsx64l ();
3479 #endif
3480
3481 initialize_regsets_info (&ppc_regsets_info);
3482 }