1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
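/* As an example of how these field macros decode an instruction word,
   take 0x48000010, which is "b .+16" (opcode 18, AA = 0, LK = 0):

     PPC_OP6 (0x48000010)          = (insn >> 26) & 0x3f    = 18
     PPC_FIELD (0x48000010, 6, 24) = (insn >> 2) & 0xffffff = 4
     PPC_SEXT (4, 24)              = 4   (positive, so unchanged)
     PPC_LI (0x48000010)           = 4 << 2                 = 16

   i.e. the 16-byte branch displacement is recovered.  Field positions
   follow the Power ISA convention where bit 0 is the most significant
   bit of the 32-bit word.  */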
46
47 /* Holds the AT_HWCAP auxv entry. */
48
49 static unsigned long ppc_hwcap;
50
51 /* Holds the AT_HWCAP2 auxv entry. */
52
53 static unsigned long ppc_hwcap2;
54
55
56 #define ppc_num_regs 73
57
58 #ifdef __powerpc64__
59 /* We use a constant for FPSCR instead of PT_FPSCR, because
60 many shipped PPC64 kernels had the wrong value in ptrace.h. */
61 static int ppc_regmap[] =
62 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
63 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
64 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
65 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
66 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
67 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
68 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
69 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
70 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
71 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
72 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
73 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
74 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
75 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
76 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
77 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
78 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
79 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
80 PT_ORIG_R3 * 8, PT_TRAP * 8 };
81 #else
82 /* Currently, don't check/send MQ. */
83 static int ppc_regmap[] =
84 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
85 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
86 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
87 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
88 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
89 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
90 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
91 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
92 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
93 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
94 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
95 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
96 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
97 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
98 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
99 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
100 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
101 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
102 PT_ORIG_R3 * 4, PT_TRAP * 4
103 };
104
105 static int ppc_regmap_e500[] =
106 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
107 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
108 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
109 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
110 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
111 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
112 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
113 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
114 -1, -1, -1, -1,
115 -1, -1, -1, -1,
116 -1, -1, -1, -1,
117 -1, -1, -1, -1,
118 -1, -1, -1, -1,
119 -1, -1, -1, -1,
120 -1, -1, -1, -1,
121 -1, -1, -1, -1,
122 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
123 PT_CTR * 4, PT_XER * 4, -1,
124 PT_ORIG_R3 * 4, PT_TRAP * 4
125 };
126 #endif
127
128 /* Check whether the kernel provides a register set with number
129 REGSET_ID of size REGSETSIZE for process/thread TID. */
130
131 static int
132 ppc_check_regset (int tid, int regset_id, int regsetsize)
133 {
134 void *buf = alloca (regsetsize);
135 struct iovec iov;
136
137 iov.iov_base = buf;
138 iov.iov_len = regsetsize;
139
140 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
141 || errno == ENODATA)
142 return 1;
143 return 0;
144 }
145
146 static int
147 ppc_cannot_store_register (int regno)
148 {
149 const struct target_desc *tdesc = current_process ()->tdesc;
150
151 #ifndef __powerpc64__
152 /* Some kernels do not allow us to store fpscr. */
153 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
154 && regno == find_regno (tdesc, "fpscr"))
155 return 2;
156 #endif
157
158 /* Some kernels do not allow us to store orig_r3 or trap. */
159 if (regno == find_regno (tdesc, "orig_r3")
160 || regno == find_regno (tdesc, "trap"))
161 return 2;
162
163 return 0;
164 }
165
166 static int
167 ppc_cannot_fetch_register (int regno)
168 {
169 return 0;
170 }
171
172 static void
173 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
174 {
175 memset (buf, 0, sizeof (long));
176
177 if (__BYTE_ORDER == __LITTLE_ENDIAN)
178 {
179 /* Little-endian values always sit at the left end of the buffer. */
180 collect_register (regcache, regno, buf);
181 }
182 else if (__BYTE_ORDER == __BIG_ENDIAN)
183 {
184 /* Big-endian values sit at the right end of the buffer. In case of
185 registers whose sizes are smaller than sizeof (long), we must use
186 padding to access them correctly. */
187 int size = register_size (regcache->tdesc, regno);
188
189 if (size < sizeof (long))
190 collect_register (regcache, regno, buf + sizeof (long) - size);
191 else
192 collect_register (regcache, regno, buf);
193 }
194 else
195 perror_with_name ("Unexpected byte order");
196 }
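/* For example, on a 64-bit big-endian host a 4-byte register is
   collected into buf + 4, i.e. the low-order half of the 8-byte ptrace
   word, while the upper half stays zeroed by the initial memset.  */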
197
198 static void
199 ppc_supply_ptrace_register (struct regcache *regcache,
200 int regno, const char *buf)
201 {
202 if (__BYTE_ORDER == __LITTLE_ENDIAN)
203 {
204 /* Little-endian values always sit at the left end of the buffer. */
205 supply_register (regcache, regno, buf);
206 }
207 else if (__BYTE_ORDER == __BIG_ENDIAN)
208 {
209 /* Big-endian values sit at the right end of the buffer. In case of
210 registers whose sizes are smaller than sizeof (long), we must use
211 padding to access them correctly. */
212 int size = register_size (regcache->tdesc, regno);
213
214 if (size < sizeof (long))
215 supply_register (regcache, regno, buf + sizeof (long) - size);
216 else
217 supply_register (regcache, regno, buf);
218 }
219 else
220 perror_with_name ("Unexpected byte order");
221 }
222
223
224 #define INSTR_SC 0x44000002
225 #define NR_spu_run 0x0116
226
227 /* If the PPU thread is currently stopped on a spu_run system call,
228 return to FD and ADDR the file handle and NPC parameter address
229 used with the system call. Return non-zero if successful. */
230 static int
231 parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
232 {
233 CORE_ADDR curr_pc;
234 int curr_insn;
235 int curr_r0;
236
237 if (register_size (regcache->tdesc, 0) == 4)
238 {
239 unsigned int pc, r0, r3, r4;
240 collect_register_by_name (regcache, "pc", &pc);
241 collect_register_by_name (regcache, "r0", &r0);
242 collect_register_by_name (regcache, "orig_r3", &r3);
243 collect_register_by_name (regcache, "r4", &r4);
244 curr_pc = (CORE_ADDR) pc;
245 curr_r0 = (int) r0;
246 *fd = (int) r3;
247 *addr = (CORE_ADDR) r4;
248 }
249 else
250 {
251 unsigned long pc, r0, r3, r4;
252 collect_register_by_name (regcache, "pc", &pc);
253 collect_register_by_name (regcache, "r0", &r0);
254 collect_register_by_name (regcache, "orig_r3", &r3);
255 collect_register_by_name (regcache, "r4", &r4);
256 curr_pc = (CORE_ADDR) pc;
257 curr_r0 = (int) r0;
258 *fd = (int) r3;
259 *addr = (CORE_ADDR) r4;
260 }
261
262 /* Fetch instruction preceding current NIP. */
263 if ((*the_target->read_memory) (curr_pc - 4,
264 (unsigned char *) &curr_insn, 4) != 0)
265 return 0;
266 /* It should be a "sc" instruction. */
267 if (curr_insn != INSTR_SC)
268 return 0;
269 /* System call number should be NR_spu_run. */
270 if (curr_r0 != NR_spu_run)
271 return 0;
272
273 return 1;
274 }
275
276 static CORE_ADDR
277 ppc_get_pc (struct regcache *regcache)
278 {
279 CORE_ADDR addr;
280 int fd;
281
282 if (parse_spufs_run (regcache, &fd, &addr))
283 {
284 unsigned int pc;
285 (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
286 return ((CORE_ADDR)1 << 63)
287 | ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
288 }
289 else if (register_size (regcache->tdesc, 0) == 4)
290 {
291 unsigned int pc;
292 collect_register_by_name (regcache, "pc", &pc);
293 return (CORE_ADDR) pc;
294 }
295 else
296 {
297 unsigned long pc;
298 collect_register_by_name (regcache, "pc", &pc);
299 return (CORE_ADDR) pc;
300 }
301 }
302
303 static void
304 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
305 {
306 CORE_ADDR addr;
307 int fd;
308
309 if (parse_spufs_run (regcache, &fd, &addr))
310 {
311 unsigned int newpc = pc;
312 (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
313 }
314 else if (register_size (regcache->tdesc, 0) == 4)
315 {
316 unsigned int newpc = pc;
317 supply_register_by_name (regcache, "pc", &newpc);
318 }
319 else
320 {
321 unsigned long newpc = pc;
322 supply_register_by_name (regcache, "pc", &newpc);
323 }
324 }
325
326 #ifndef __powerpc64__
327 static int ppc_regmap_adjusted;
328 #endif
329
330
331 /* Correct in either endianness.
332 This instruction is "twge r2, r2", which GDB uses as a software
333 breakpoint. */
334 static const unsigned int ppc_breakpoint = 0x7d821008;
335 #define ppc_breakpoint_len 4
336
337 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
338
339 static const gdb_byte *
340 ppc_sw_breakpoint_from_kind (int kind, int *size)
341 {
342 *size = ppc_breakpoint_len;
343 return (const gdb_byte *) &ppc_breakpoint;
344 }
345
346 static int
347 ppc_breakpoint_at (CORE_ADDR where)
348 {
349 unsigned int insn;
350
351 if (where & ((CORE_ADDR)1 << 63))
352 {
353 char mem_annex[32];
354 sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
355 (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
356 NULL, where & 0xffffffff, 4);
357 if (insn == 0x3fff)
358 return 1;
359 }
360 else
361 {
362 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
363 if (insn == ppc_breakpoint)
364 return 1;
365 /* If necessary, recognize more trap instructions here. GDB only uses
366 the one. */
367 }
368
369 return 0;
370 }
371
372 /* Implement supports_z_point_type target-ops.
373 Returns true if type Z_TYPE breakpoint is supported.
374
375 Software breakpoints are handled on the server side, so tracepoints
376 and breakpoints can be inserted at the same location. */
377
378 static int
379 ppc_supports_z_point_type (char z_type)
380 {
381 switch (z_type)
382 {
383 case Z_PACKET_SW_BP:
384 return 1;
385 case Z_PACKET_HW_BP:
386 case Z_PACKET_WRITE_WP:
387 case Z_PACKET_ACCESS_WP:
388 default:
389 return 0;
390 }
391 }
392
393 /* Implement insert_point target-ops.
394 Returns 0 on success, -1 on failure and 1 on unsupported. */
395
396 static int
397 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
398 int size, struct raw_breakpoint *bp)
399 {
400 switch (type)
401 {
402 case raw_bkpt_type_sw:
403 return insert_memory_breakpoint (bp);
404
405 case raw_bkpt_type_hw:
406 case raw_bkpt_type_write_wp:
407 case raw_bkpt_type_access_wp:
408 default:
409 /* Unsupported. */
410 return 1;
411 }
412 }
413
414 /* Implement remove_point target-ops.
415 Returns 0 on success, -1 on failure and 1 on unsupported. */
416
417 static int
418 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
419 int size, struct raw_breakpoint *bp)
420 {
421 switch (type)
422 {
423 case raw_bkpt_type_sw:
424 return remove_memory_breakpoint (bp);
425
426 case raw_bkpt_type_hw:
427 case raw_bkpt_type_write_wp:
428 case raw_bkpt_type_access_wp:
429 default:
430 /* Unsupported. */
431 return 1;
432 }
433 }
434
435 /* Provide only a fill function for the general register set. ps_lgetregs
436 will use this for NPTL support. */
437
438 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
439 {
440 int i;
441
442 for (i = 0; i < 32; i++)
443 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
444
445 for (i = 64; i < 70; i++)
446 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
447
448 for (i = 71; i < 73; i++)
449 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
450 }
451
452 /* Program Priority Register regset fill function. */
453
454 static void
455 ppc_fill_pprregset (struct regcache *regcache, void *buf)
456 {
457 char *ppr = (char *) buf;
458
459 collect_register_by_name (regcache, "ppr", ppr);
460 }
461
462 /* Program Priority Register regset store function. */
463
464 static void
465 ppc_store_pprregset (struct regcache *regcache, const void *buf)
466 {
467 const char *ppr = (const char *) buf;
468
469 supply_register_by_name (regcache, "ppr", ppr);
470 }
471
472 /* Data Stream Control Register regset fill function. */
473
474 static void
475 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
476 {
477 char *dscr = (char *) buf;
478
479 collect_register_by_name (regcache, "dscr", dscr);
480 }
481
482 /* Data Stream Control Register regset store function. */
483
484 static void
485 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
486 {
487 const char *dscr = (const char *) buf;
488
489 supply_register_by_name (regcache, "dscr", dscr);
490 }
491
492 /* Target Address Register regset fill function. */
493
494 static void
495 ppc_fill_tarregset (struct regcache *regcache, void *buf)
496 {
497 char *tar = (char *) buf;
498
499 collect_register_by_name (regcache, "tar", tar);
500 }
501
502 /* Target Address Register regset store function. */
503
504 static void
505 ppc_store_tarregset (struct regcache *regcache, const void *buf)
506 {
507 const char *tar = (const char *) buf;
508
509 supply_register_by_name (regcache, "tar", tar);
510 }
511
512 /* Event-Based Branching regset store function. Unless the inferior
513 has a perf event open, ptrace can fail with ENODATA when reading or
514 writing the regset. For reading, the registers
515 will correctly show as unavailable. For writing, gdbserver
516 currently only caches any register writes from P and G packets and
517 the stub always tries to write all the regsets when resuming the
518 inferior, which would result in frequent warnings. For this
519 reason, we don't define a fill function. This also means that the
520 client-side regcache will be dirty if the user tries to write to
521 the EBB registers. G packets that the client sends to write to
522 unrelated registers will also include data for EBB registers, even
523 if they are unavailable. */
524
525 static void
526 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
527 {
528 const char *regset = (const char *) buf;
529
530 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
531 .dat file the order is BESCR, EBBHR, EBBRR. */
532 supply_register_by_name (regcache, "ebbrr", &regset[0]);
533 supply_register_by_name (regcache, "ebbhr", &regset[8]);
534 supply_register_by_name (regcache, "bescr", &regset[16]);
535 }
536
537 /* Performance Monitoring Unit regset fill function. */
538
539 static void
540 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
541 {
542 char *regset = (char *) buf;
543
544 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
545 In the .dat file the order is MMCR0, MMCR2, SIAR, SDAR, SIER. */
546 collect_register_by_name (regcache, "siar", &regset[0]);
547 collect_register_by_name (regcache, "sdar", &regset[8]);
548 collect_register_by_name (regcache, "sier", &regset[16]);
549 collect_register_by_name (regcache, "mmcr2", &regset[24]);
550 collect_register_by_name (regcache, "mmcr0", &regset[32]);
551 }
552
553 /* Performance Monitoring Unit regset store function. */
554
555 static void
556 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
557 {
558 const char *regset = (const char *) buf;
559
560 supply_register_by_name (regcache, "siar", &regset[0]);
561 supply_register_by_name (regcache, "sdar", &regset[8]);
562 supply_register_by_name (regcache, "sier", &regset[16]);
563 supply_register_by_name (regcache, "mmcr2", &regset[24]);
564 supply_register_by_name (regcache, "mmcr0", &regset[32]);
565 }
566
567 /* Hardware Transactional Memory special-purpose register regset fill
568 function. */
569
570 static void
571 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
572 {
573 int i, base;
574 char *regset = (char *) buf;
575
576 base = find_regno (regcache->tdesc, "tfhar");
577 for (i = 0; i < 3; i++)
578 collect_register (regcache, base + i, &regset[i * 8]);
579 }
580
581 /* Hardware Transactional Memory special-purpose register regset store
582 function. */
583
584 static void
585 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
586 {
587 int i, base;
588 const char *regset = (const char *) buf;
589
590 base = find_regno (regcache->tdesc, "tfhar");
591 for (i = 0; i < 3; i++)
592 supply_register (regcache, base + i, &regset[i * 8]);
593 }
594
595 /* For the same reasons as the EBB regset, none of the HTM
596 checkpointed regsets have a fill function. These registers are
597 only available if the inferior is in a transaction. */
598
599 /* Hardware Transactional Memory checkpointed general-purpose regset
600 store function. */
601
602 static void
603 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
604 {
605 int i, base, size, endian_offset;
606 const char *regset = (const char *) buf;
607
608 base = find_regno (regcache->tdesc, "cr0");
609 size = register_size (regcache->tdesc, base);
610
611 gdb_assert (size == 4 || size == 8);
612
613 for (i = 0; i < 32; i++)
614 supply_register (regcache, base + i, &regset[i * size]);
615
616 endian_offset = 0;
617
618 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
619 endian_offset = 4;
620
621 supply_register_by_name (regcache, "ccr",
622 &regset[PT_CCR * size + endian_offset]);
623
624 supply_register_by_name (regcache, "cxer",
625 &regset[PT_XER * size + endian_offset]);
626
627 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
628 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
629 }
630
631 /* Hardware Transactional Memory checkpointed floating-point regset
632 store function. */
633
634 static void
635 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
636 {
637 int i, base;
638 const char *regset = (const char *) buf;
639
640 base = find_regno (regcache->tdesc, "cf0");
641
642 for (i = 0; i < 32; i++)
643 supply_register (regcache, base + i, &regset[i * 8]);
644
645 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
646 }
647
648 /* Hardware Transactional Memory checkpointed vector regset store
649 function. */
650
651 static void
652 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
653 {
654 int i, base;
655 const char *regset = (const char *) buf;
656 int vscr_offset = 0;
657
658 base = find_regno (regcache->tdesc, "cvr0");
659
660 for (i = 0; i < 32; i++)
661 supply_register (regcache, base + i, &regset[i * 16]);
662
663 if (__BYTE_ORDER == __BIG_ENDIAN)
664 vscr_offset = 12;
665
666 supply_register_by_name (regcache, "cvscr",
667 &regset[32 * 16 + vscr_offset]);
668
669 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
670 }
671
672 /* Hardware Transactional Memory checkpointed vector-scalar regset
673 store function. */
674
675 static void
676 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
677 {
678 int i, base;
679 const char *regset = (const char *) buf;
680
681 base = find_regno (regcache->tdesc, "cvs0h");
682 for (i = 0; i < 32; i++)
683 supply_register (regcache, base + i, &regset[i * 8]);
684 }
685
686 /* Hardware Transactional Memory checkpointed Program Priority
687 Register regset store function. */
688
689 static void
690 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
691 {
692 const char *cppr = (const char *) buf;
693
694 supply_register_by_name (regcache, "cppr", cppr);
695 }
696
697 /* Hardware Transactional Memory checkpointed Data Stream Control
698 Register regset store function. */
699
700 static void
701 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
702 {
703 const char *cdscr = (const char *) buf;
704
705 supply_register_by_name (regcache, "cdscr", cdscr);
706 }
707
708 /* Hardware Transactional Memory checkpointed Target Address Register
709 regset store function. */
710
711 static void
712 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
713 {
714 const char *ctar = (const char *) buf;
715
716 supply_register_by_name (regcache, "ctar", ctar);
717 }
718
719 static void
720 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
721 {
722 int i, base;
723 char *regset = (char *) buf;
724
725 base = find_regno (regcache->tdesc, "vs0h");
726 for (i = 0; i < 32; i++)
727 collect_register (regcache, base + i, &regset[i * 8]);
728 }
729
730 static void
731 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
732 {
733 int i, base;
734 const char *regset = (const char *) buf;
735
736 base = find_regno (regcache->tdesc, "vs0h");
737 for (i = 0; i < 32; i++)
738 supply_register (regcache, base + i, &regset[i * 8]);
739 }
740
741 static void
742 ppc_fill_vrregset (struct regcache *regcache, void *buf)
743 {
744 int i, base;
745 char *regset = (char *) buf;
746 int vscr_offset = 0;
747
748 base = find_regno (regcache->tdesc, "vr0");
749 for (i = 0; i < 32; i++)
750 collect_register (regcache, base + i, &regset[i * 16]);
751
752 if (__BYTE_ORDER == __BIG_ENDIAN)
753 vscr_offset = 12;
754
755 collect_register_by_name (regcache, "vscr",
756 &regset[32 * 16 + vscr_offset]);
757
758 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
759 }
760
761 static void
762 ppc_store_vrregset (struct regcache *regcache, const void *buf)
763 {
764 int i, base;
765 const char *regset = (const char *) buf;
766 int vscr_offset = 0;
767
768 base = find_regno (regcache->tdesc, "vr0");
769 for (i = 0; i < 32; i++)
770 supply_register (regcache, base + i, &regset[i * 16]);
771
772 if (__BYTE_ORDER == __BIG_ENDIAN)
773 vscr_offset = 12;
774
775 supply_register_by_name (regcache, "vscr",
776 &regset[32 * 16 + vscr_offset]);
777 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
778 }
779
780 struct gdb_evrregset_t
781 {
782 unsigned long evr[32];
783 unsigned long long acc;
784 unsigned long spefscr;
785 };
786
787 static void
788 ppc_fill_evrregset (struct regcache *regcache, void *buf)
789 {
790 int i, ev0;
791 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
792
793 ev0 = find_regno (regcache->tdesc, "ev0h");
794 for (i = 0; i < 32; i++)
795 collect_register (regcache, ev0 + i, &regset->evr[i]);
796
797 collect_register_by_name (regcache, "acc", &regset->acc);
798 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
799 }
800
801 static void
802 ppc_store_evrregset (struct regcache *regcache, const void *buf)
803 {
804 int i, ev0;
805 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
806
807 ev0 = find_regno (regcache->tdesc, "ev0h");
808 for (i = 0; i < 32; i++)
809 supply_register (regcache, ev0 + i, &regset->evr[i]);
810
811 supply_register_by_name (regcache, "acc", &regset->acc);
812 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
813 }
814
815 /* Support for hardware single step. */
816
817 static int
818 ppc_supports_hardware_single_step (void)
819 {
820 return 1;
821 }
822
823 static struct regset_info ppc_regsets[] = {
824 /* List the extra register sets before GENERAL_REGS. That way we will
825 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
826 general registers. Some kernels support these, but not the newer
827 PPC_PTRACE_GETREGS. */
828 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
829 NULL, ppc_store_tm_ctarregset },
830 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
831 NULL, ppc_store_tm_cdscrregset },
832 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
833 NULL, ppc_store_tm_cpprregset },
834 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
835 NULL, ppc_store_tm_cvsxregset },
836 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
837 NULL, ppc_store_tm_cvrregset },
838 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
839 NULL, ppc_store_tm_cfprregset },
840 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
841 NULL, ppc_store_tm_cgprregset },
842 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
843 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
844 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
845 NULL, ppc_store_ebbregset },
846 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
847 ppc_fill_pmuregset, ppc_store_pmuregset },
848 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
849 ppc_fill_tarregset, ppc_store_tarregset },
850 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
851 ppc_fill_pprregset, ppc_store_pprregset },
852 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
853 ppc_fill_dscrregset, ppc_store_dscrregset },
854 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
855 ppc_fill_vsxregset, ppc_store_vsxregset },
856 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
857 ppc_fill_vrregset, ppc_store_vrregset },
858 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
859 ppc_fill_evrregset, ppc_store_evrregset },
860 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
861 NULL_REGSET
862 };
863
864 static struct usrregs_info ppc_usrregs_info =
865 {
866 ppc_num_regs,
867 ppc_regmap,
868 };
869
870 static struct regsets_info ppc_regsets_info =
871 {
872 ppc_regsets, /* regsets */
873 0, /* num_regsets */
874 NULL, /* disabled_regsets */
875 };
876
877 static struct regs_info regs_info =
878 {
879 NULL, /* regset_bitmap */
880 &ppc_usrregs_info,
881 &ppc_regsets_info
882 };
883
884 static const struct regs_info *
885 ppc_regs_info (void)
886 {
887 return &regs_info;
888 }
889
890 static void
891 ppc_arch_setup (void)
892 {
893 const struct target_desc *tdesc;
894 struct regset_info *regset;
895 struct ppc_linux_features features = ppc_linux_no_features;
896
897 int tid = lwpid_of (current_thread);
898
899 features.wordsize = ppc_linux_target_wordsize (tid);
900
901 if (features.wordsize == 4)
902 tdesc = tdesc_powerpc_32l;
903 else
904 tdesc = tdesc_powerpc_64l;
905
906 current_process ()->tdesc = tdesc;
907
908 /* The value of current_process ()->tdesc needs to be set for this
909 call. */
910 ppc_hwcap = linux_get_hwcap (features.wordsize);
911 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
912
913 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
914
915 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
916 features.vsx = true;
917
918 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
919 features.altivec = true;
920
921 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
922 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
923 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
924 {
925 features.ppr_dscr = true;
926 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
927 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
928 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
929 && ppc_check_regset (tid, NT_PPC_TAR,
930 PPC_LINUX_SIZEOF_TARREGSET)
931 && ppc_check_regset (tid, NT_PPC_EBB,
932 PPC_LINUX_SIZEOF_EBBREGSET)
933 && ppc_check_regset (tid, NT_PPC_PMU,
934 PPC_LINUX_SIZEOF_PMUREGSET))
935 {
936 features.isa207 = true;
937 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
938 && ppc_check_regset (tid, NT_PPC_TM_SPR,
939 PPC_LINUX_SIZEOF_TM_SPRREGSET))
940 features.htm = true;
941 }
942 }
943
944 if (ppc_hwcap & PPC_FEATURE_CELL)
945 features.cell = true;
946
947 tdesc = ppc_linux_match_description (features);
948
949 /* On 32-bit machines, check for SPE registers.
950 Set the low target's regmap field as appropriate. */
951 #ifndef __powerpc64__
952 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
953 tdesc = tdesc_powerpc_e500l;
954
955 if (!ppc_regmap_adjusted)
956 {
957 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
958 ppc_usrregs_info.regmap = ppc_regmap_e500;
959
960 /* If the FPSCR is 64-bit wide, we need to fetch the whole
961 64-bit slot and not just its second word. The PT_FPSCR
962 supplied in a 32-bit GDB compilation doesn't reflect
963 this. */
964 if (register_size (tdesc, 70) == 8)
965 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
966
967 ppc_regmap_adjusted = 1;
968 }
969 #endif
970
971 current_process ()->tdesc = tdesc;
972
973 for (regset = ppc_regsets; regset->size >= 0; regset++)
974 switch (regset->get_request)
975 {
976 case PTRACE_GETVRREGS:
977 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
978 break;
979 case PTRACE_GETVSXREGS:
980 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
981 break;
982 case PTRACE_GETEVRREGS:
983 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
984 regset->size = 32 * 4 + 8 + 4;
985 else
986 regset->size = 0;
987 break;
988 case PTRACE_GETREGSET:
989 switch (regset->nt_type)
990 {
991 case NT_PPC_PPR:
992 regset->size = (features.ppr_dscr ?
993 PPC_LINUX_SIZEOF_PPRREGSET : 0);
994 break;
995 case NT_PPC_DSCR:
996 regset->size = (features.ppr_dscr ?
997 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
998 break;
999 case NT_PPC_TAR:
1000 regset->size = (features.isa207 ?
1001 PPC_LINUX_SIZEOF_TARREGSET : 0);
1002 break;
1003 case NT_PPC_EBB:
1004 regset->size = (features.isa207 ?
1005 PPC_LINUX_SIZEOF_EBBREGSET : 0);
1006 break;
1007 case NT_PPC_PMU:
1008 regset->size = (features.isa207 ?
1009 PPC_LINUX_SIZEOF_PMUREGSET : 0);
1010 break;
1011 case NT_PPC_TM_SPR:
1012 regset->size = (features.htm ?
1013 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
1014 break;
1015 case NT_PPC_TM_CGPR:
1016 if (features.wordsize == 4)
1017 regset->size = (features.htm ?
1018 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
1019 else
1020 regset->size = (features.htm ?
1021 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
1022 break;
1023 case NT_PPC_TM_CFPR:
1024 regset->size = (features.htm ?
1025 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1026 break;
1027 case NT_PPC_TM_CVMX:
1028 regset->size = (features.htm ?
1029 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1030 break;
1031 case NT_PPC_TM_CVSX:
1032 regset->size = (features.htm ?
1033 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1034 break;
1035 case NT_PPC_TM_CPPR:
1036 regset->size = (features.htm ?
1037 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1038 break;
1039 case NT_PPC_TM_CDSCR:
1040 regset->size = (features.htm ?
1041 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1042 break;
1043 case NT_PPC_TM_CTAR:
1044 regset->size = (features.htm ?
1045 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1046 break;
1047 default:
1048 break;
1049 }
1050 break;
1051 default:
1052 break;
1053 }
1054 }
1055
1056 /* Implementation of linux_target_ops method "supports_tracepoints". */
1057
1058 static int
1059 ppc_supports_tracepoints (void)
1060 {
1061 return 1;
1062 }
1063
1064 /* Get the thread area address. This is used to recognize which
1065 thread is which when tracing with the in-process agent library. We
1066 don't read anything from the address, and treat it as opaque; it's
1067 the address itself that we assume is unique per-thread. */
1068
1069 static int
1070 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1071 {
1072 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1073 struct thread_info *thr = get_lwp_thread (lwp);
1074 struct regcache *regcache = get_thread_regcache (thr, 1);
1075 ULONGEST tp = 0;
1076
1077 #ifdef __powerpc64__
1078 if (register_size (regcache->tdesc, 0) == 8)
1079 collect_register_by_name (regcache, "r13", &tp);
1080 else
1081 #endif
1082 collect_register_by_name (regcache, "r2", &tp);
1083
1084 *addr = tp;
1085
1086 return 0;
1087 }
1088
1089 #ifdef __powerpc64__
1090
1091 /* Older glibc doesn't provide this. */
1092
1093 #ifndef EF_PPC64_ABI
1094 #define EF_PPC64_ABI 3
1095 #endif
1096
1097 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1098 inferiors. */
1099
1100 static int
1101 is_elfv2_inferior (void)
1102 {
1103 /* To be used as a fallback if we're unable to determine the right result -
1104 assume the inferior uses the same ABI as gdbserver. */
1105 #if _CALL_ELF == 2
1106 const int def_res = 1;
1107 #else
1108 const int def_res = 0;
1109 #endif
1110 CORE_ADDR phdr;
1111 Elf64_Ehdr ehdr;
1112
1113 const struct target_desc *tdesc = current_process ()->tdesc;
1114 int wordsize = register_size (tdesc, 0);
1115
1116 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1117 return def_res;
1118
1119 /* Assume ELF header is at the beginning of the page where program headers
1120 are located. If it doesn't look like one, bail. */
1121
1122 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1123 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1124 return def_res;
1125
1126 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1127 }
1128
1129 #endif
1130
1131 /* Generate a ds-form instruction in BUF and return the number of instructions (32-bit words) written.
1132
1133 0 6 11 16 30 32
1134 | OPCD | RST | RA | DS |XO| */
1135
1136 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1137 static int
1138 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1139 {
1140 uint32_t insn;
1141
1142 gdb_assert ((opcd & ~0x3f) == 0);
1143 gdb_assert ((rst & ~0x1f) == 0);
1144 gdb_assert ((ra & ~0x1f) == 0);
1145 gdb_assert ((xo & ~0x3) == 0);
1146
1147 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1148 *buf = (opcd << 26) | insn;
1149 return 1;
1150 }
1151
1152 /* The following are frequently used ds-form instructions. */
1153
1154 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1155 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1156 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1157 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
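/* For instance, GEN_STD (buf, 31, 1, 16) assembles "std r31, 16(r1)" as
   (62 << 26) | (31 << 21) | (1 << 16) | 16 = 0xfbe10010.  The low two
   bits of the displacement slot hold the XO field, which is why DS-form
   offsets must be multiples of 4.  */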
1158
1159 /* Generate a d-form instruction in BUF.
1160
1161 0 6 11 16 32
1162 | OPCD | RST | RA | D | */
1163
1164 static int
1165 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1166 {
1167 uint32_t insn;
1168
1169 gdb_assert ((opcd & ~0x3f) == 0);
1170 gdb_assert ((rst & ~0x1f) == 0);
1171 gdb_assert ((ra & ~0x1f) == 0);
1172
1173 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1174 *buf = (opcd << 26) | insn;
1175 return 1;
1176 }
1177
1178 /* The following are frequently used d-form instructions. */
1179
1180 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1181 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1182 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1183 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1184 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1185 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1186 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1187 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1188 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1189
1190 /* Generate an xfx-form instruction in BUF and return the number of
1191 instructions (32-bit words) written.
1192
1193 0 6 11 21 31 32
1194 | OPCD | RST | RI | XO |/| */
1195
1196 static int
1197 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1198 {
1199 uint32_t insn;
1200 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1201
1202 gdb_assert ((opcd & ~0x3f) == 0);
1203 gdb_assert ((rst & ~0x1f) == 0);
1204 gdb_assert ((xo & ~0x3ff) == 0);
1205
1206 insn = (rst << 21) | (n << 11) | (xo << 1);
1207 *buf = (opcd << 26) | insn;
1208 return 1;
1209 }
1210
1211 /* The following are frequently used xfx-form instructions. */
1212
1213 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1214 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1215 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1216 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1217 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1218 E & 0xf, 598)
1219 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
1220
1221
1222 /* Generate an x-form instruction in BUF and return the number of instructions (32-bit words) written.
1223
1224 0 6 11 16 21 31 32
1225 | OPCD | RST | RA | RB | XO |RC| */
1226
1227 static int
1228 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1229 {
1230 uint32_t insn;
1231
1232 gdb_assert ((opcd & ~0x3f) == 0);
1233 gdb_assert ((rst & ~0x1f) == 0);
1234 gdb_assert ((ra & ~0x1f) == 0);
1235 gdb_assert ((rb & ~0x1f) == 0);
1236 gdb_assert ((xo & ~0x3ff) == 0);
1237 gdb_assert ((rc & ~1) == 0);
1238
1239 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1240 *buf = (opcd << 26) | insn;
1241 return 1;
1242 }
1243
1244 /* The following are frequently used x-form instructions. */
1245
1246 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1247 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1248 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1249 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1250 /* Assume bf = cr7. */
1251 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1252
1253
1254 /* Generate an md-form instruction in BUF and return the number of instructions (32-bit words) written.
1255
1256 0 6 11 16 21 27 30 31 32
1257 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1258
1259 static int
1260 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1261 int xo, int rc)
1262 {
1263 uint32_t insn;
1264 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1265 unsigned int sh0_4 = sh & 0x1f;
1266 unsigned int sh5 = (sh >> 5) & 1;
1267
1268 gdb_assert ((opcd & ~0x3f) == 0);
1269 gdb_assert ((rs & ~0x1f) == 0);
1270 gdb_assert ((ra & ~0x1f) == 0);
1271 gdb_assert ((sh & ~0x3f) == 0);
1272 gdb_assert ((mb & ~0x3f) == 0);
1273 gdb_assert ((xo & ~0x7) == 0);
1274 gdb_assert ((rc & ~0x1) == 0);
1275
1276 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1277 | (sh5 << 1) | (xo << 2) | (rc & 1);
1278 *buf = (opcd << 26) | insn;
1279 return 1;
1280 }
1281
1282 /* The following are frequently used md-form instructions. */
1283
1284 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1285 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1286 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1287 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1288
1289 /* Generate an i-form instruction in BUF and return the number of instructions (32-bit words) written.
1290
1291 0 6 30 31 32
1292 | OPCD | LI |AA|LK| */
1293
1294 static int
1295 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1296 {
1297 uint32_t insn;
1298
1299 gdb_assert ((opcd & ~0x3f) == 0);
1300
1301 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1302 *buf = (opcd << 26) | insn;
1303 return 1;
1304 }
1305
1306 /* The following are frequently used i-form instructions. */
1307
1308 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1309 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1310
1311 /* Generate a b-form instruction in BUF and return the number of instructions (32-bit words) written.
1312
1313 0 6 11 16 30 31 32
1314 | OPCD | BO | BI | BD |AA|LK| */
1315
1316 static int
1317 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1318 int aa, int lk)
1319 {
1320 uint32_t insn;
1321
1322 gdb_assert ((opcd & ~0x3f) == 0);
1323 gdb_assert ((bo & ~0x1f) == 0);
1324 gdb_assert ((bi & ~0x1f) == 0);
1325
1326 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1327 *buf = (opcd << 26) | insn;
1328 return 1;
1329 }
1330
1331 /* The following are frequently used b-form instructions. */
1332 /* Assume bi = cr7. */
1333 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1334
1335 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1336 respectively. They are primarily used to save/restore GPRs in the jump pad,
1337 not for bytecode compiling. */
1338
1339 #ifdef __powerpc64__
1340 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1341 GEN_LD (buf, rt, ra, si) : \
1342 GEN_LWZ (buf, rt, ra, si))
1343 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1344 GEN_STD (buf, rt, ra, si) : \
1345 GEN_STW (buf, rt, ra, si))
1346 #else
1347 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1348 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1349 #endif
1350
1351 /* Generate a sequence of instructions to load IMM into register REG.
1352 Write the instructions in BUF and return the number of instructions (32-bit words) written. */
1353
1354 static int
1355 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1356 {
1357 uint32_t *p = buf;
1358
1359 if ((imm + 32768) < 65536)
1360 {
1361 /* li reg, imm[15:0] */
1362 p += GEN_LI (p, reg, imm);
1363 }
1364 else if ((imm >> 32) == 0)
1365 {
1366 /* lis reg, imm[31:16]
1367 ori reg, reg, imm[15:0]
1368 rldicl reg, reg, 0, 32 */
1369 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1370 if ((imm & 0xffff) != 0)
1371 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1372 /* Clear upper 32-bit if sign-bit is set. */
1373 if (imm & (1u << 31) && is_64)
1374 p += GEN_RLDICL (p, reg, reg, 0, 32);
1375 }
1376 else
1377 {
1378 gdb_assert (is_64);
1379 /* lis reg, <imm[63:48]>
1380 ori reg, reg, <imm[48:32]>
1381 rldicr reg, reg, 32, 31
1382 oris reg, reg, <imm[31:16]>
1383 ori reg, reg, <imm[15:0]> */
1384 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1385 if (((imm >> 32) & 0xffff) != 0)
1386 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1387 p += GEN_RLDICR (p, reg, reg, 32, 31);
1388 if (((imm >> 16) & 0xffff) != 0)
1389 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1390 if ((imm & 0xffff) != 0)
1391 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1392 }
1393
1394 return p - buf;
1395 }
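/* As a sketch of the 64-bit case: with is_64 set, loading the immediate
   0x123456789abcdef0 expands to

     lis    reg, 0x1234
     ori    reg, reg, 0x5678
     rldicr reg, reg, 32, 31
     oris   reg, reg, 0x9abc
     ori    reg, reg, 0xdef0

   i.e. the upper 32 bits are built first and shifted into place with
   RLDICR, then the lower halfwords are OR-ed in (the ORI/ORIS steps are
   skipped for halfwords that are zero).  */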
1396
1397 /* Generate a sequence to atomically exchange a value at location LOCK.
1398 This code sequence clobbers r6, r7, r8. LOCK is the location for
1399 the atomic-xchg, OLD_VALUE is the expected old value stored in the
1400 location, and R_NEW is a register holding the new value. */
1401
1402 static int
1403 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1404 int is_64)
1405 {
1406 const int r_lock = 6;
1407 const int r_old = 7;
1408 const int r_tmp = 8;
1409 uint32_t *p = buf;
1410
1411 /*
1412 1: lwarx TMP, 0, LOCK
1413 cmpwi TMP, OLD
1414 bne 1b
1415 stwcx. NEW, 0, LOCK
1416 bne 1b */
1417
1418 p += gen_limm (p, r_lock, lock, is_64);
1419 p += gen_limm (p, r_old, old_value, is_64);
1420
1421 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1422 p += GEN_CMPW (p, r_tmp, r_old);
1423 p += GEN_BNE (p, -8);
1424 p += GEN_STWCX (p, r_new, 0, r_lock);
1425 p += GEN_BNE (p, -16);
1426
1427 return p - buf;
1428 }
1429
1430 /* Generate a sequence of instructions for calling a function
1431 at address FN. Return the number of instructions (32-bit words) written to BUF. */
1432
1433 static int
1434 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1435 {
1436 uint32_t *p = buf;
1437
1438 /* The call must go through r12 so the callee can compute its TOC address. */
1439 p += gen_limm (p, 12, fn, is_64);
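  /* Under the ELFv1 ABI (is_opd), FN is the address of a function
     descriptor rather than of code: offset 0 holds the entry point,
     offset 8 the TOC pointer, and offset 16 the environment pointer,
     so load them into r12, r2 and r11 before branching.  */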
1440 if (is_opd)
1441 {
1442 p += GEN_LOAD (p, 11, 12, 16, is_64);
1443 p += GEN_LOAD (p, 2, 12, 8, is_64);
1444 p += GEN_LOAD (p, 12, 12, 0, is_64);
1445 }
1446 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1447 *p++ = 0x4e800421; /* bctrl */
1448
1449 return p - buf;
1450 }
1451
1452 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1453 of instruction. This function is used to adjust pc-relative instructions
1454 when copying. */
1455
1456 static void
1457 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1458 {
1459 uint32_t insn, op6;
1460 long rel, newrel;
1461
1462 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1463 op6 = PPC_OP6 (insn);
1464
1465 if (op6 == 18 && (insn & 2) == 0)
1466 {
1467 /* branch && AA = 0 */
1468 rel = PPC_LI (insn);
1469 newrel = (oldloc - *to) + rel;
1470
1471 /* Out of range. Cannot relocate instruction. */
1472 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1473 return;
1474
1475 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1476 }
1477 else if (op6 == 16 && (insn & 2) == 0)
1478 {
1479 /* conditional branch && AA = 0 */
1480
1481 /* If the new relocation is too big for even a 26-bit unconditional
1482 branch, there is nothing we can do. Just abort.
1483
1484 Otherwise, if it fits in a 16-bit conditional branch, just
1485 copy the instruction and relocate the address.
1486
1487 If it's too big for a 16-bit conditional branch, try to invert the
1488 condition and jump with a 26-bit branch. For example,
1489
1490 beq .Lgoto
1491 INSN1
1492
1493 =>
1494
1495 bne 1f (+8)
1496 b .Lgoto
1497 1:INSN1
1498
1499 After this transform, we actually jump from *TO+4 instead of *TO,
1500 so check the relocation again because it will be 1 insn farther than
1501 before if *TO is after OLDLOC.
1502
1503
1504 A BDNZT (or similar) is transformed from
1505
1506 bdnzt eq, .Lgoto
1507 INSN1
1508
1509 =>
1510
1511 bdz 1f (+12)
1512 bf eq, 1f (+8)
1513 b .Lgoto
1514 1:INSN1
1515
1516 See also "BO field encodings". */
1517
1518 rel = PPC_BD (insn);
1519 newrel = (oldloc - *to) + rel;
1520
1521 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1522 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1523 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1524 {
1525 newrel -= 4;
1526
1527 /* Out of range. Cannot relocate instruction. */
1528 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1529 return;
1530
1531 if ((PPC_BO (insn) & 0x14) == 0x4)
1532 insn ^= (1 << 24);
1533 else if ((PPC_BO (insn) & 0x14) == 0x10)
1534 insn ^= (1 << 22);
1535
1536 /* Jump over the unconditional branch. */
1537 insn = (insn & ~0xfffc) | 0x8;
1538 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1539 *to += 4;
1540
1541 /* Build an unconditional branch and copy the LK bit. */
1542 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1543 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1544 *to += 4;
1545
1546 return;
1547 }
1548 else if ((PPC_BO (insn) & 0x14) == 0)
1549 {
1550 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1551 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1552
1553 newrel -= 8;
1554
1555 /* Out of range. Cannot relocate instruction. */
1556 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1557 return;
1558
1559 /* Copy BI field. */
1560 bf_insn |= (insn & 0x1f0000);
1561
1562 /* Invert condition. */
1563 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1564 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1565
1566 write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
1567 *to += 4;
1568 write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
1569 *to += 4;
1570
1571 /* Build an unconditional branch and copy the LK bit. */
1572 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1573 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1574 *to += 4;
1575
1576 return;
1577 }
1578 else /* (BO & 0x14) == 0x14, branch always. */
1579 {
1580 /* Out of range. Cannot relocate instruction. */
1581 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1582 return;
1583
1584 /* Build an unconditional branch and copy the LK bit. */
1585 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1586 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1587 *to += 4;
1588
1589 return;
1590 }
1591 }
1592
1593 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1594 *to += 4;
1595 }
1596
1597 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1598 See target.h for details. */
1599
1600 static int
1601 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1602 CORE_ADDR collector,
1603 CORE_ADDR lockaddr,
1604 ULONGEST orig_size,
1605 CORE_ADDR *jump_entry,
1606 CORE_ADDR *trampoline,
1607 ULONGEST *trampoline_size,
1608 unsigned char *jjump_pad_insn,
1609 ULONGEST *jjump_pad_insn_size,
1610 CORE_ADDR *adjusted_insn_addr,
1611 CORE_ADDR *adjusted_insn_addr_end,
1612 char *err)
1613 {
1614 uint32_t buf[256];
1615 uint32_t *p = buf;
1616 int j, offset;
1617 CORE_ADDR buildaddr = *jump_entry;
1618 const CORE_ADDR entryaddr = *jump_entry;
1619 int rsz, min_frame, frame_size, tp_reg;
1620 #ifdef __powerpc64__
1621 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1622 int is_64 = register_size (regcache->tdesc, 0) == 8;
1623 int is_opd = is_64 && !is_elfv2_inferior ();
1624 #else
1625 int is_64 = 0, is_opd = 0;
1626 #endif
1627
1628 #ifdef __powerpc64__
1629 if (is_64)
1630 {
1631 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1632 rsz = 8;
1633 min_frame = 112;
1634 frame_size = (40 * rsz) + min_frame;
1635 tp_reg = 13;
1636 }
1637 else
1638 {
1639 #endif
1640 rsz = 4;
1641 min_frame = 16;
1642 frame_size = (40 * rsz) + min_frame;
1643 tp_reg = 2;
1644 #ifdef __powerpc64__
1645 }
1646 #endif
1647
1648 /* Stack frame layout for this jump pad,
1649
1650 High thread_area (r13/r2) |
1651 tpoint - collecting_t obj
1652 PC/<tpaddr> | +36
1653 CTR | +35
1654 LR | +34
1655 XER | +33
1656 CR | +32
1657 R31 |
1658 R29 |
1659 ... |
1660 R1 | +1
1661 R0 - collected registers
1662 ... |
1663 ... |
1664 Low Back-chain -
1665
1666
1667 The code flow of this jump pad,
1668
1669 1. Adjust SP
1670 2. Save GPR and SPR
1671 3. Prepare argument
1672 4. Call gdb_collector
1673 5. Restore GPR and SPR
1674 6. Restore SP
1675 7. Build a jump back to the program
1676 8. Copy/relocate original instruction
1677 9. Build a jump to replace the original instruction. */
1678
1679 /* Adjust stack pointer. */
1680 if (is_64)
1681 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1682 else
1683 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1684
1685 /* Store GPRs. Save R1 later, because it had just been modified, but
1686 we want the original value. */
1687 for (j = 2; j < 32; j++)
1688 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1689 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1690 /* Set r0 to the original value of r1 before adjusting stack frame,
1691 and then save it. */
1692 p += GEN_ADDI (p, 0, 1, frame_size);
1693 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1694
1695 /* Save CR, XER, LR, and CTR. */
1696 p += GEN_MFCR (p, 3); /* mfcr r3 */
1697 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1698 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1699 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1700 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1701 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1702 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1703 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1704
1705 /* Save PC<tpaddr> */
1706 p += gen_limm (p, 3, tpaddr, is_64);
1707 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1708
1709
1710 /* Set up arguments to the collector. */
1711 /* Set r4 to collected registers. */
1712 p += GEN_ADDI (p, 4, 1, min_frame);
1713 /* Set r3 to TPOINT. */
1714 p += gen_limm (p, 3, tpoint, is_64);
1715
1716 /* Prepare collecting_t object for lock. */
1717 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1718 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1719 /* Set R5 to collecting object. */
1720 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1721
1722 p += GEN_LWSYNC (p);
1723 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1724 p += GEN_LWSYNC (p);
1725
1726 /* Call to collector. */
1727 p += gen_call (p, collector, is_64, is_opd);
1728
1729 /* Simply write 0 to release the lock. */
1730 p += gen_limm (p, 3, lockaddr, is_64);
1731 p += gen_limm (p, 4, 0, is_64);
1732 p += GEN_LWSYNC (p);
1733 p += GEN_STORE (p, 4, 3, 0, is_64);
1734
1735 /* Restore stack and registers. */
1736 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1737 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1738 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1739 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1740 p += GEN_MTCR (p, 3); /* mtcr r3 */
1741 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1742 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1743 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1744
1745 /* Restore GPRs. */
1746 for (j = 2; j < 32; j++)
1747 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1748 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1749 /* Restore SP. */
1750 p += GEN_ADDI (p, 1, 1, frame_size);
1751
1752 /* Flush instructions to inferior memory. */
1753 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1754
1755 /* Now, insert the original instruction to execute in the jump pad. */
1756 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1757 *adjusted_insn_addr_end = *adjusted_insn_addr;
1758 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1759
1760 /* Verify the relocation size. It should be 4 for a normal copy,
1761 8 or 12 for some conditional branches. */
1762 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1763 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1764 {
1765 sprintf (err, "E.Unexpected instruction length = %d "
1766 "when relocating instruction.",
1767 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1768 return 1;
1769 }
1770
1771 buildaddr = *adjusted_insn_addr_end;
1772 p = buf;
1773 /* Finally, write a jump back to the program. */
1774 offset = (tpaddr + 4) - buildaddr;
1775 if (offset >= (1 << 25) || offset < -(1 << 25))
1776 {
1777 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1778 "(offset 0x%x > 26-bit).", offset);
1779 return 1;
1780 }
1781 /* b <tpaddr+4> */
1782 p += GEN_B (p, offset);
1783 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1784 *jump_entry = buildaddr + (p - buf) * 4;
1785
1786 /* The jump pad is now built. Wire in a jump to our jump pad. This
1787 is always done last (by our caller actually), so that we can
1788 install fast tracepoints with threads running. This relies on
1789 the agent's atomic write support. */
1790 offset = entryaddr - tpaddr;
1791 if (offset >= (1 << 25) || offset < -(1 << 25))
1792 {
1793 sprintf (err, "E.Jump to jump pad too far from tracepoint "
1794 "(offset 0x%x > 26-bit).", offset);
1795 return 1;
1796 }
1797 /* b <jentry> */
1798 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1799 *jjump_pad_insn_size = 4;
1800
1801 return 0;
1802 }
1803
1804 /* Returns the minimum instruction length for installing a tracepoint. */
1805
1806 static int
1807 ppc_get_min_fast_tracepoint_insn_len (void)
1808 {
1809 return 4;
1810 }
1811
1812 /* Emits a given buffer into the target at current_insn_ptr. Length
1813 is in units of 32-bit words. */
1814
1815 static void
1816 emit_insns (uint32_t *buf, int n)
1817 {
1818 n = n * sizeof (uint32_t);
1819 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1820 current_insn_ptr += n;
1821 }
1822
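/* __EMIT_ASM assembles INSNS at gdbserver build time into a dedicated
   section of gdbserver's own image, bracketed by the start_bcax_ and
   end_bcax_ labels, and emit_insns then copies that range of
   already-encoded instruction words into the inferior at
   current_insn_ptr.  The ppc_emit_* routines below are largely built
   from these canned snippets.  */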
1823 #define __EMIT_ASM(NAME, INSNS) \
1824 do \
1825 { \
1826 extern uint32_t start_bcax_ ## NAME []; \
1827 extern uint32_t end_bcax_ ## NAME []; \
1828 emit_insns (start_bcax_ ## NAME, \
1829 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1830 __asm__ (".section .text.__ppcbcax\n\t" \
1831 "start_bcax_" #NAME ":\n\t" \
1832 INSNS "\n\t" \
1833 "end_bcax_" #NAME ":\n\t" \
1834 ".previous\n\t"); \
1835 } while (0)
1836
1837 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1838 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
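
/* For illustration only (approximate; the do/while wrapper is omitted):
   EMIT_ASM ("nop") appearing on source line 1234 would expand roughly to

     extern uint32_t start_bcax_1234 [];
     extern uint32_t end_bcax_1234 [];
     emit_insns (start_bcax_1234, end_bcax_1234 - start_bcax_1234);
     __asm__ (".section .text.__ppcbcax\n\t"
              "start_bcax_1234:\n\t"
              "nop\n\t"
              "end_bcax_1234:\n\t"
              ".previous\n\t");

   i.e. the instructions are assembled into gdbserver's own
   .text.__ppcbcax section, and emit_insns then copies them from there
   into the inferior's jump pad at current_insn_ptr.  */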
1839
1840 /*
1841
1842 Bytecode execution stack frame - 32-bit
1843
1844 | LR save area (SP + 4)
1845 SP' -> +- Back chain (SP + 0)
1846 | Save r31 for accessing saved arguments
1847 | Save r30 for bytecode stack pointer
1848 | Save r4 for incoming argument *value
1849 | Save r3 for incoming argument regs
1850 r30 -> +- Bytecode execution stack
1851 |
1852 | 64 bytes (8 doublewords) initially.
1853 | Expand stack as needed.
1854 |
1855 +-
1856 | Some padding for minimum stack frame and 16-byte alignment.
1857 | 16 bytes.
1858 SP +- Back-chain (SP')
1859
1860 initial frame size
1861 = 16 + (4 * 4) + 64
1862 = 96
1863
1864 r30 is the stack pointer for the bytecode machine.
1865 It should point to the next empty slot, so we can use lwzu for pop.
1866 r3 caches the high part of the TOP value.
1867 It was the first argument, the pointer to regs.
1868 r4 caches the low part of the TOP value.
1869 It was the second argument, the pointer to the result.
1870 We should set *result = TOP before leaving this function.
1871
1872 Note:
1873 * To restore the stack at the epilogue
1874 => sp = r31
1875 * To check the stack is big enough for bytecode execution.
1876 => r30 - 8 > SP + 8
1877 * To return the execution result.
1878 => 0(r4) = TOP
1879
1880 */
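
/* In other words (a sketch only; the exact type used by the tracepoint
   machinery is defined elsewhere), the code emitted by the 32-bit
   emit_ops below behaves like a C function of roughly this shape:

     int compiled_expr (unsigned char *regs, uint64_t *value)
     {
       ... evaluate the bytecode, keeping TOP cached in r3:r4 ...
       *value = TOP;
       return 0;		// no error
     }

   with REGS arriving in r3 and VALUE in r4, as described above.  */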
1881
1882 /* Regardless of endianness, register 3 is always the high part and 4 the
1883 low part. These defines are used when the register pair is stored/loaded.
1884 Likewise, to simplify the code, have a similar define for 5:6. */
1885
1886 #if __BYTE_ORDER == __LITTLE_ENDIAN
1887 #define TOP_FIRST "4"
1888 #define TOP_SECOND "3"
1889 #define TMP_FIRST "6"
1890 #define TMP_SECOND "5"
1891 #else
1892 #define TOP_FIRST "3"
1893 #define TOP_SECOND "4"
1894 #define TMP_FIRST "5"
1895 #define TMP_SECOND "6"
1896 #endif
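
/* For example (illustrative only), a store pair such as

     "stw " TOP_FIRST ", 0(30) \n"
     "stw " TOP_SECOND ", 4(30) \n"

   always lays TOP out as one 64-bit value in target byte order: on
   little-endian the low word (r4) goes to the lower address, on
   big-endian the high word (r3) does.  What lands in memory is the
   equivalent of

     uint64_t top = ((uint64_t) hi << 32) | lo;   // hi = r3, lo = r4
     memcpy (slot, &top, sizeof top);
*/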
1897
1898 /* Emit prologue in inferior memory. See above comments. */
1899
1900 static void
1901 ppc_emit_prologue (void)
1902 {
1903 EMIT_ASM (/* Save return address. */
1904 "mflr 0 \n"
1905 "stw 0, 4(1) \n"
1906 /* Adjust SP. 96 is the initial frame size. */
1907 "stwu 1, -96(1) \n"
1908 /* Save r31, r30 and incoming arguments. */
1909 "stw 31, 96-4(1) \n"
1910 "stw 30, 96-8(1) \n"
1911 "stw 4, 96-12(1) \n"
1912 "stw 3, 96-16(1) \n"
1913 /* Point r31 to the original r1 for accessing arguments. */
1914 "addi 31, 1, 96 \n"
1915 /* Set r30 to point to the stack top. */
1916 "addi 30, 1, 64 \n"
1917 /* Initialize r3/TOP to 0. */
1918 "li 3, 0 \n"
1919 "li 4, 0 \n");
1920 }
1921
1922 /* Emit epilogue in inferior memory. See above comments. */
1923
1924 static void
1925 ppc_emit_epilogue (void)
1926 {
1927 EMIT_ASM (/* *result = TOP */
1928 "lwz 5, -12(31) \n"
1929 "stw " TOP_FIRST ", 0(5) \n"
1930 "stw " TOP_SECOND ", 4(5) \n"
1931 /* Restore registers. */
1932 "lwz 31, -4(31) \n"
1933 "lwz 30, -8(31) \n"
1934 /* Restore SP. */
1935 "lwz 1, 0(1) \n"
1936 /* Restore LR. */
1937 "lwz 0, 4(1) \n"
1938 /* Return 0 for no-error. */
1939 "li 3, 0 \n"
1940 "mtlr 0 \n"
1941 "blr \n");
1942 }
1943
1944 /* TOP = stack[--sp] + TOP */
1945
1946 static void
1947 ppc_emit_add (void)
1948 {
1949 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1950 "lwz " TMP_SECOND ", 4(30)\n"
1951 "addc 4, 6, 4 \n"
1952 "adde 3, 5, 3 \n");
1953 }
1954
1955 /* TOP = stack[--sp] - TOP */
1956
1957 static void
1958 ppc_emit_sub (void)
1959 {
1960 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1961 "lwz " TMP_SECOND ", 4(30) \n"
1962 "subfc 4, 4, 6 \n"
1963 "subfe 3, 3, 5 \n");
1964 }
1965
1966 /* TOP = stack[--sp] * TOP */
1967
1968 static void
1969 ppc_emit_mul (void)
1970 {
1971 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1972 "lwz " TMP_SECOND ", 4(30) \n"
1973 "mulhwu 7, 6, 4 \n"
1974 "mullw 3, 6, 3 \n"
1975 "mullw 5, 4, 5 \n"
1976 "mullw 4, 6, 4 \n"
1977 "add 3, 5, 3 \n"
1978 "add 3, 7, 3 \n");
1979 }
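
/* Reference-only C model of the multiply sequence above (a sketch, not
   used by gdbserver): the 64-bit product is assembled from 32-bit
   halves; the mulhwu term is the carry out of the low words, and the two
   cross products only affect the high word (everything is mod 2^64).  */

static inline void
ppc_ref_mul64 (uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo,
               uint32_t *r_hi, uint32_t *r_lo)
{
  uint64_t low = (uint64_t) a_lo * b_lo;        /* mullw + mulhwu pair.  */
  uint32_t high = (uint32_t) (low >> 32)
                  + a_lo * b_hi + a_hi * b_lo;  /* The two cross mullw's.  */

  *r_hi = high;
  *r_lo = (uint32_t) low;
}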
1980
1981 /* TOP = stack[--sp] << TOP */
1982
1983 static void
1984 ppc_emit_lsh (void)
1985 {
1986 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1987 "lwz " TMP_SECOND ", 4(30) \n"
1988 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1989 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1990 "slw 5, 5, 4\n" /* Shift high part left */
1991 "slw 4, 6, 4\n" /* Shift low part left */
1992 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1993 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1994 "or 3, 5, 3\n"
1995 "or 3, 7, 3\n"); /* Assemble high part */
1996 }
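
/* Reference-only C model of the shift-by-parts sequence above (a sketch,
   assuming shift amounts of 0..63): the "32 - n" and "n - 32" terms pick
   which "low into high" path contributes, relying on slw/srw producing
   zero for shift amounts of 32 or more.  */

static inline void
ppc_ref_lsh64 (uint32_t *hi, uint32_t *lo, unsigned int n)
{
  uint64_t v = ((uint64_t) *hi << 32) | *lo;

  v <<= (n & 63);
  *hi = (uint32_t) (v >> 32);
  *lo = (uint32_t) v;
}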
1997
1998 /* Top = stack[--sp] >> TOP
1999 (Arithmetic shift right) */
2000
2001 static void
2002 ppc_emit_rsh_signed (void)
2003 {
2004 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2005 "lwz " TMP_SECOND ", 4(30) \n"
2006 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2007 "sraw 3, 5, 4\n" /* Shift high part right */
2008 "cmpwi 7, 1\n"
2009 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
2010 "sraw 4, 5, 7\n" /* Shift high to low */
2011 "b 2f\n"
2012 "1:\n"
2013 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
2014 "srw 4, 6, 4\n" /* Shift low part right */
2015 "slw 5, 5, 7\n" /* Shift high to low */
2016 "or 4, 4, 5\n" /* Assemble low part */
2017 "2:\n");
2018 }
2019
2020 /* Top = stack[--sp] >> TOP
2021 (Logical shift right) */
2022
2023 static void
2024 ppc_emit_rsh_unsigned (void)
2025 {
2026 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2027 "lwz " TMP_SECOND ", 4(30) \n"
2028 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2029 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2030 "srw 6, 6, 4\n" /* Shift low part right */
2031 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2032 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2033 "or 6, 6, 3\n"
2034 "srw 3, 5, 4\n" /* Shift high part right */
2035 "or 4, 6, 7\n"); /* Assemble low part */
2036 }
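
/* Reference-only C model of the logical right shift above, under the
   same 0..63 shift-amount assumption as ppc_ref_lsh64.  */

static inline void
ppc_ref_rsh64u (uint32_t *hi, uint32_t *lo, unsigned int n)
{
  uint64_t v = ((uint64_t) *hi << 32) | *lo;

  v >>= (n & 63);
  *hi = (uint32_t) (v >> 32);
  *lo = (uint32_t) v;
}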
2037
2038 /* Emit code for the sign extension specified by ARG. */
2039
2040 static void
2041 ppc_emit_ext (int arg)
2042 {
2043 switch (arg)
2044 {
2045 case 8:
2046 EMIT_ASM ("extsb 4, 4\n"
2047 "srawi 3, 4, 31");
2048 break;
2049 case 16:
2050 EMIT_ASM ("extsh 4, 4\n"
2051 "srawi 3, 4, 31");
2052 break;
2053 case 32:
2054 EMIT_ASM ("srawi 3, 4, 31");
2055 break;
2056 default:
2057 emit_error = 1;
2058 }
2059 }
2060
2061 /* Emit code for zero-extension specified by ARG. */
2062
2063 static void
2064 ppc_emit_zero_ext (int arg)
2065 {
2066 switch (arg)
2067 {
2068 case 8:
2069 EMIT_ASM ("clrlwi 4,4,24\n"
2070 "li 3, 0\n");
2071 break;
2072 case 16:
2073 EMIT_ASM ("clrlwi 4,4,16\n"
2074 "li 3, 0\n");
2075 break;
2076 case 32:
2077 EMIT_ASM ("li 3, 0");
2078 break;
2079 default:
2080 emit_error = 1;
2081 }
2082 }
2083
2084 /* TOP = !TOP
2085 i.e., TOP = (TOP == 0) ? 1 : 0; */
2086
2087 static void
2088 ppc_emit_log_not (void)
2089 {
2090 EMIT_ASM ("or 4, 3, 4 \n"
2091 "cntlzw 4, 4 \n"
2092 "srwi 4, 4, 5 \n"
2093 "li 3, 0 \n");
2094 }
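
/* The sequence above relies on cntlzw returning 32 only for a zero
   input, so "cntlzw; srwi ,5" maps zero to 1 and any non-zero value to
   0.  A reference-only C model (the GCC builtin is undefined for 0, so
   it is guarded here):  */

static inline uint32_t
ppc_ref_log_not (uint32_t hi, uint32_t lo)
{
  uint32_t w = hi | lo;
  unsigned int clz = (w == 0) ? 32 : __builtin_clz (w);

  return clz >> 5;		/* 1 iff the 64-bit TOP was zero.  */
}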
2095
2096 /* TOP = stack[--sp] & TOP */
2097
2098 static void
2099 ppc_emit_bit_and (void)
2100 {
2101 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2102 "lwz " TMP_SECOND ", 4(30) \n"
2103 "and 4, 6, 4 \n"
2104 "and 3, 5, 3 \n");
2105 }
2106
2107 /* TOP = stack[--sp] | TOP */
2108
2109 static void
2110 ppc_emit_bit_or (void)
2111 {
2112 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2113 "lwz " TMP_SECOND ", 4(30) \n"
2114 "or 4, 6, 4 \n"
2115 "or 3, 5, 3 \n");
2116 }
2117
2118 /* TOP = stack[--sp] ^ TOP */
2119
2120 static void
2121 ppc_emit_bit_xor (void)
2122 {
2123 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2124 "lwz " TMP_SECOND ", 4(30) \n"
2125 "xor 4, 6, 4 \n"
2126 "xor 3, 5, 3 \n");
2127 }
2128
2129 /* TOP = ~TOP
2130 i.e., TOP = ~(TOP | TOP) */
2131
2132 static void
2133 ppc_emit_bit_not (void)
2134 {
2135 EMIT_ASM ("nor 3, 3, 3 \n"
2136 "nor 4, 4, 4 \n");
2137 }
2138
2139 /* TOP = stack[--sp] == TOP */
2140
2141 static void
2142 ppc_emit_equal (void)
2143 {
2144 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2145 "lwz " TMP_SECOND ", 4(30) \n"
2146 "xor 4, 6, 4 \n"
2147 "xor 3, 5, 3 \n"
2148 "or 4, 3, 4 \n"
2149 "cntlzw 4, 4 \n"
2150 "srwi 4, 4, 5 \n"
2151 "li 3, 0 \n");
2152 }
2153
2154 /* TOP = stack[--sp] < TOP
2155 (Signed comparison) */
2156
2157 static void
2158 ppc_emit_less_signed (void)
2159 {
2160 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2161 "lwz " TMP_SECOND ", 4(30) \n"
2162 "cmplw 6, 6, 4 \n"
2163 "cmpw 7, 5, 3 \n"
2164 /* CR6 bit 0 = low less and high equal */
2165 "crand 6*4+0, 6*4+0, 7*4+2\n"
2166 /* CR7 bit 0 = (low less and high equal) or high less */
2167 "cror 7*4+0, 7*4+0, 6*4+0\n"
2168 "mfcr 4 \n"
2169 "rlwinm 4, 4, 29, 31, 31 \n"
2170 "li 3, 0 \n");
2171 }
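
/* Reference-only C model of the double-word compare above: the low
   words are compared unsigned (cmplw) and the high words signed (cmpw),
   giving "high less, or high equal and low less".  The final rlwinm
   extracts the LT bit of CR7 from the value fetched by mfcr.  */

static inline uint32_t
ppc_ref_less_signed (int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo)
{
  return a_hi < b_hi || (a_hi == b_hi && a_lo < b_lo);
}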
2172
2173 /* TOP = stack[--sp] < TOP
2174 (Unsigned comparison) */
2175
2176 static void
2177 ppc_emit_less_unsigned (void)
2178 {
2179 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2180 "lwz " TMP_SECOND ", 4(30) \n"
2181 "cmplw 6, 6, 4 \n"
2182 "cmplw 7, 5, 3 \n"
2183 /* CR6 bit 0 = low less and high equal */
2184 "crand 6*4+0, 6*4+0, 7*4+2\n"
2185 /* CR7 bit 0 = (low less and high equal) or high less */
2186 "cror 7*4+0, 7*4+0, 6*4+0\n"
2187 "mfcr 4 \n"
2188 "rlwinm 4, 4, 29, 31, 31 \n"
2189 "li 3, 0 \n");
2190 }
2191
2192 /* Read SIZE bytes from the memory address in TOP.
2193 Zero-extend the value read. */
2194
2195 static void
2196 ppc_emit_ref (int size)
2197 {
2198 switch (size)
2199 {
2200 case 1:
2201 EMIT_ASM ("lbz 4, 0(4)\n"
2202 "li 3, 0");
2203 break;
2204 case 2:
2205 EMIT_ASM ("lhz 4, 0(4)\n"
2206 "li 3, 0");
2207 break;
2208 case 4:
2209 EMIT_ASM ("lwz 4, 0(4)\n"
2210 "li 3, 0");
2211 break;
2212 case 8:
2213 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2214 EMIT_ASM ("lwz 3, 4(4)\n"
2215 "lwz 4, 0(4)");
2216 else
2217 EMIT_ASM ("lwz 3, 0(4)\n"
2218 "lwz 4, 4(4)");
2219 break;
2220 }
2221 }
2222
2223 /* TOP = NUM */
2224
2225 static void
2226 ppc_emit_const (LONGEST num)
2227 {
2228 uint32_t buf[10];
2229 uint32_t *p = buf;
2230
2231 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2232 p += gen_limm (p, 4, num & 0xffffffff, 0);
2233
2234 emit_insns (buf, p - buf);
2235 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2236 }
2237
2238 /* Set TOP to the value of register REG by calling the get_raw_reg function
2239 with two arguments, the collected buffer and the register number. */
2240
2241 static void
2242 ppc_emit_reg (int reg)
2243 {
2244 uint32_t buf[13];
2245 uint32_t *p = buf;
2246
2247 /* fctx->regs is passed in r3 and then saved in -16(31). */
2248 p += GEN_LWZ (p, 3, 31, -16);
2249 p += GEN_LI (p, 4, reg); /* li r4, reg */
2250 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2251
2252 emit_insns (buf, p - buf);
2253 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2254
2255 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2256 {
2257 EMIT_ASM ("mr 5, 4\n"
2258 "mr 4, 3\n"
2259 "mr 3, 5\n");
2260 }
2261 }
2262
2263 /* TOP = stack[--sp] */
2264
2265 static void
2266 ppc_emit_pop (void)
2267 {
2268 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2269 "lwz " TOP_SECOND ", 4(30) \n");
2270 }
2271
2272 /* stack[sp++] = TOP
2273
2274 Because we may use up the bytecode stack, expand it by 8 more
2275 doublewords if needed. */
2276
2277 static void
2278 ppc_emit_stack_flush (void)
2279 {
2280 /* Make sure the bytecode stack is big enough before the push.
2281 Otherwise, expand it by 64 more bytes. */
2282
2283 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2284 " stw " TOP_SECOND ", 4(30)\n"
2285 " addi 5, 30, -(8 + 8) \n"
2286 " cmpw 7, 5, 1 \n"
2287 " bgt 7, 1f \n"
2288 " stwu 31, -64(1) \n"
2289 "1:addi 30, 30, -8 \n");
2290 }
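
/* A minimal C model of the push above (hypothetical names; the store
   performed by "stwu 31, -64(1)" is reduced to the pointer adjustment).
   The bytecode SP points at the next empty 8-byte slot and grows
   downward; if a push would bring it within 16 bytes of the hardware SP,
   the frame is first grown by another 64 bytes.  */

static inline void
ppc_ref_stack_flush (uint32_t **bytecode_sp, char **hw_sp,
                     uint32_t first, uint32_t second)
{
  (*bytecode_sp)[0] = first;            /* stw TOP_FIRST,  0(30) */
  (*bytecode_sp)[1] = second;           /* stw TOP_SECOND, 4(30) */
  if (!((char *) *bytecode_sp - 16 > *hw_sp))
    *hw_sp -= 64;                       /* stwu 31, -64(1) */
  *bytecode_sp -= 2;                    /* addi 30, 30, -8 */
}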
2291
2292 /* Swap TOP and stack[sp-1] */
2293
2294 static void
2295 ppc_emit_swap (void)
2296 {
2297 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2298 "lwz " TMP_SECOND ", 12(30) \n"
2299 "stw " TOP_FIRST ", 8(30) \n"
2300 "stw " TOP_SECOND ", 12(30) \n"
2301 "mr 3, 5 \n"
2302 "mr 4, 6 \n");
2303 }
2304
2305 /* Discard N elements in the stack. Also used for ppc64. */
2306
2307 static void
2308 ppc_emit_stack_adjust (int n)
2309 {
2310 uint32_t buf[6];
2311 uint32_t *p = buf;
2312
2313 n = n << 3;
2314 if ((n >> 15) != 0)
2315 {
2316 emit_error = 1;
2317 return;
2318 }
2319
2320 p += GEN_ADDI (p, 30, 30, n);
2321
2322 emit_insns (buf, p - buf);
2323 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2324 }
2325
2326 /* Call function FN. */
2327
2328 static void
2329 ppc_emit_call (CORE_ADDR fn)
2330 {
2331 uint32_t buf[11];
2332 uint32_t *p = buf;
2333
2334 p += gen_call (p, fn, 0, 0);
2335
2336 emit_insns (buf, p - buf);
2337 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2338 }
2339
2340 /* FN's prototype is `LONGEST(*fn)(int)'.
2341 TOP = fn (arg1)
2342 */
2343
2344 static void
2345 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2346 {
2347 uint32_t buf[15];
2348 uint32_t *p = buf;
2349
2350 /* Setup argument. arg1 is a 16-bit value. */
2351 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2352 p += gen_call (p, fn, 0, 0);
2353
2354 emit_insns (buf, p - buf);
2355 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2356
2357 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2358 {
2359 EMIT_ASM ("mr 5, 4\n"
2360 "mr 4, 3\n"
2361 "mr 3, 5\n");
2362 }
2363 }
2364
2365 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2366 fn (arg1, TOP)
2367
2368 TOP should be preserved/restored before/after the call. */
2369
2370 static void
2371 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2372 {
2373 uint32_t buf[21];
2374 uint32_t *p = buf;
2375
2376 /* Save TOP. 0(30) is next-empty. */
2377 p += GEN_STW (p, 3, 30, 0);
2378 p += GEN_STW (p, 4, 30, 4);
2379
2380 /* Setup argument. arg1 is a 16-bit value. */
2381 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2382 {
2383 p += GEN_MR (p, 5, 4);
2384 p += GEN_MR (p, 6, 3);
2385 }
2386 else
2387 {
2388 p += GEN_MR (p, 5, 3);
2389 p += GEN_MR (p, 6, 4);
2390 }
2391 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2392 p += gen_call (p, fn, 0, 0);
2393
2394 /* Restore TOP */
2395 p += GEN_LWZ (p, 3, 30, 0);
2396 p += GEN_LWZ (p, 4, 30, 4);
2397
2398 emit_insns (buf, p - buf);
2399 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2400 }
2401
2402 /* Note in the following goto ops:
2403
2404 When emitting a goto, the target address is later relocated by
2405 write_goto_address. OFFSET_P is the offset of the branch instruction
2406 in the code sequence, and SIZE_P is how to relocate the instruction,
2407 recognized by ppc_write_goto_address. In the current implementation,
2408 SIZE can be either 24 or 14, for a branch or a conditional-branch instruction.
2409 */
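
/* A hypothetical caller (illustrative only; the real consumer is the
   bytecode compiler in gdbserver's tracepoint code) would use the pair
   roughly like this:

     int offset, size;
     CORE_ADDR seq_start = current_insn_ptr;

     ppc_emit_if_goto (&offset, &size);
     ...					// emit the fall-through code
     ppc_write_goto_address (seq_start + offset, target_addr, size);

   i.e. OFFSET_P locates the placeholder branch ("1: bne 0, 1b" style)
   within the emitted sequence, and SIZE_P tells ppc_write_goto_address
   whether to patch a 24-bit or a 14-bit displacement field.  */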
2410
2411 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2412
2413 static void
2414 ppc_emit_if_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM ("or. 3, 3, 4 \n"
2417 "lwzu " TOP_FIRST ", 8(30) \n"
2418 "lwz " TOP_SECOND ", 4(30) \n"
2419 "1:bne 0, 1b \n");
2420
2421 if (offset_p)
2422 *offset_p = 12;
2423 if (size_p)
2424 *size_p = 14;
2425 }
2426
2427 /* Unconditional goto. Also used for ppc64. */
2428
2429 static void
2430 ppc_emit_goto (int *offset_p, int *size_p)
2431 {
2432 EMIT_ASM ("1:b 1b");
2433
2434 if (offset_p)
2435 *offset_p = 0;
2436 if (size_p)
2437 *size_p = 24;
2438 }
2439
2440 /* Goto if stack[--sp] == TOP */
2441
2442 static void
2443 ppc_emit_eq_goto (int *offset_p, int *size_p)
2444 {
2445 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2446 "lwz " TMP_SECOND ", 4(30) \n"
2447 "xor 4, 6, 4 \n"
2448 "xor 3, 5, 3 \n"
2449 "or. 3, 3, 4 \n"
2450 "lwzu " TOP_FIRST ", 8(30) \n"
2451 "lwz " TOP_SECOND ", 4(30) \n"
2452 "1:beq 0, 1b \n");
2453
2454 if (offset_p)
2455 *offset_p = 28;
2456 if (size_p)
2457 *size_p = 14;
2458 }
2459
2460 /* Goto if stack[--sp] != TOP */
2461
2462 static void
2463 ppc_emit_ne_goto (int *offset_p, int *size_p)
2464 {
2465 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2466 "lwz " TMP_SECOND ", 4(30) \n"
2467 "xor 4, 6, 4 \n"
2468 "xor 3, 5, 3 \n"
2469 "or. 3, 3, 4 \n"
2470 "lwzu " TOP_FIRST ", 8(30) \n"
2471 "lwz " TOP_SECOND ", 4(30) \n"
2472 "1:bne 0, 1b \n");
2473
2474 if (offset_p)
2475 *offset_p = 28;
2476 if (size_p)
2477 *size_p = 14;
2478 }
2479
2480 /* Goto if stack[--sp] < TOP */
2481
2482 static void
2483 ppc_emit_lt_goto (int *offset_p, int *size_p)
2484 {
2485 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2486 "lwz " TMP_SECOND ", 4(30) \n"
2487 "cmplw 6, 6, 4 \n"
2488 "cmpw 7, 5, 3 \n"
2489 /* CR6 bit 0 = low less and high equal */
2490 "crand 6*4+0, 6*4+0, 7*4+2\n"
2491 /* CR7 bit 0 = (low less and high equal) or high less */
2492 "cror 7*4+0, 7*4+0, 6*4+0\n"
2493 "lwzu " TOP_FIRST ", 8(30) \n"
2494 "lwz " TOP_SECOND ", 4(30)\n"
2495 "1:blt 7, 1b \n");
2496
2497 if (offset_p)
2498 *offset_p = 32;
2499 if (size_p)
2500 *size_p = 14;
2501 }
2502
2503 /* Goto if stack[--sp] <= TOP */
2504
2505 static void
2506 ppc_emit_le_goto (int *offset_p, int *size_p)
2507 {
2508 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2509 "lwz " TMP_SECOND ", 4(30) \n"
2510 "cmplw 6, 6, 4 \n"
2511 "cmpw 7, 5, 3 \n"
2512 /* CR6 bit 0 = low less/equal and high equal */
2513 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2514 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2515 "cror 7*4+0, 7*4+0, 6*4+0\n"
2516 "lwzu " TOP_FIRST ", 8(30) \n"
2517 "lwz " TOP_SECOND ", 4(30)\n"
2518 "1:blt 7, 1b \n");
2519
2520 if (offset_p)
2521 *offset_p = 32;
2522 if (size_p)
2523 *size_p = 14;
2524 }
2525
2526 /* Goto if stack[--sp] > TOP */
2527
2528 static void
2529 ppc_emit_gt_goto (int *offset_p, int *size_p)
2530 {
2531 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2532 "lwz " TMP_SECOND ", 4(30) \n"
2533 "cmplw 6, 6, 4 \n"
2534 "cmpw 7, 5, 3 \n"
2535 /* CR6 bit 0 = low greater and high equal */
2536 "crand 6*4+0, 6*4+1, 7*4+2\n"
2537 /* CR7 bit 0 = (low greater and high equal) or high greater */
2538 "cror 7*4+0, 7*4+1, 6*4+0\n"
2539 "lwzu " TOP_FIRST ", 8(30) \n"
2540 "lwz " TOP_SECOND ", 4(30)\n"
2541 "1:blt 7, 1b \n");
2542
2543 if (offset_p)
2544 *offset_p = 32;
2545 if (size_p)
2546 *size_p = 14;
2547 }
2548
2549 /* Goto if stack[--sp] >= TOP */
2550
2551 static void
2552 ppc_emit_ge_goto (int *offset_p, int *size_p)
2553 {
2554 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2555 "lwz " TMP_SECOND ", 4(30) \n"
2556 "cmplw 6, 6, 4 \n"
2557 "cmpw 7, 5, 3 \n"
2558 /* CR6 bit 0 = low ge and high equal */
2559 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2560 /* CR7 bit 0 = (low ge and high equal) or high greater */
2561 "cror 7*4+0, 7*4+1, 6*4+0\n"
2562 "lwzu " TOP_FIRST ", 8(30)\n"
2563 "lwz " TOP_SECOND ", 4(30)\n"
2564 "1:blt 7, 1b \n");
2565
2566 if (offset_p)
2567 *offset_p = 32;
2568 if (size_p)
2569 *size_p = 14;
2570 }
2571
2572 /* Relocate a previously emitted branch instruction. FROM is the address
2573 of the branch instruction, TO is the goto target address, and SIZE
2574 is the value we set via *SIZE_P before. Currently, it is either
2575 24 or 14, for a branch or a conditional-branch instruction.
2576 Also used for ppc64. */
2577
2578 static void
2579 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2580 {
2581 long rel = to - from;
2582 uint32_t insn;
2583 int opcd;
2584
2585 read_inferior_memory (from, (unsigned char *) &insn, 4);
2586 opcd = (insn >> 26) & 0x3f;
2587
2588 switch (size)
2589 {
2590 case 14:
2591 if (opcd != 16
2592 || (rel >= (1 << 15) || rel < -(1 << 15)))
2593 emit_error = 1;
2594 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2595 break;
2596 case 24:
2597 if (opcd != 18
2598 || (rel >= (1 << 25) || rel < -(1 << 25)))
2599 emit_error = 1;
2600 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2601 break;
2602 default:
2603 emit_error = 1;
2604 }
2605
2606 if (!emit_error)
2607 write_inferior_memory (from, (unsigned char *) &insn, 4);
2608 }
2609
2610 /* Table of emit ops for 32-bit. */
2611
2612 static struct emit_ops ppc_emit_ops_impl =
2613 {
2614 ppc_emit_prologue,
2615 ppc_emit_epilogue,
2616 ppc_emit_add,
2617 ppc_emit_sub,
2618 ppc_emit_mul,
2619 ppc_emit_lsh,
2620 ppc_emit_rsh_signed,
2621 ppc_emit_rsh_unsigned,
2622 ppc_emit_ext,
2623 ppc_emit_log_not,
2624 ppc_emit_bit_and,
2625 ppc_emit_bit_or,
2626 ppc_emit_bit_xor,
2627 ppc_emit_bit_not,
2628 ppc_emit_equal,
2629 ppc_emit_less_signed,
2630 ppc_emit_less_unsigned,
2631 ppc_emit_ref,
2632 ppc_emit_if_goto,
2633 ppc_emit_goto,
2634 ppc_write_goto_address,
2635 ppc_emit_const,
2636 ppc_emit_call,
2637 ppc_emit_reg,
2638 ppc_emit_pop,
2639 ppc_emit_stack_flush,
2640 ppc_emit_zero_ext,
2641 ppc_emit_swap,
2642 ppc_emit_stack_adjust,
2643 ppc_emit_int_call_1,
2644 ppc_emit_void_call_2,
2645 ppc_emit_eq_goto,
2646 ppc_emit_ne_goto,
2647 ppc_emit_lt_goto,
2648 ppc_emit_le_goto,
2649 ppc_emit_gt_goto,
2650 ppc_emit_ge_goto
2651 };
2652
2653 #ifdef __powerpc64__
2654
2655 /*
2656
2657 Bytecode execution stack frame - 64-bit
2658
2659 | LR save area (SP + 16)
2660 | CR save area (SP + 8)
2661 SP' -> +- Back chain (SP + 0)
2662 | Save r31 for accessing saved arguments
2663 | Save r30 for bytecode stack pointer
2664 | Save r4 for incoming argument *value
2665 | Save r3 for incoming argument regs
2666 r30 -> +- Bytecode execution stack
2667 |
2668 | 64 bytes (8 doublewords) initially.
2669 | Expand stack as needed.
2670 |
2671 +-
2672 | Some padding for minimum stack frame.
2673 | 112 for ELFv1.
2674 SP +- Back-chain (SP')
2675
2676 initial frame size
2677 = 112 + (4 * 8) + 64
2678 = 208
2679
2680 r30 is the stack pointer for the bytecode machine.
2681 It should point to the next empty slot, so we can use ldu for pop.
2682 r3 caches the TOP value.
2683 It was the first argument, the pointer to regs.
2684 r4 is the second argument, the pointer to the result.
2685 We should set *result = TOP before leaving this function.
2686
2687 Note:
2688 * To restore the stack at the epilogue
2689 => sp = r31
2690 * To check the stack is big enough for bytecode execution.
2691 => r30 - 8 > SP + 112
2692 * To return the execution result.
2693 => 0(r4) = TOP
2694
2695 */
2696
2697 /* Emit prologue in inferior memory. See above comments. */
2698
2699 static void
2700 ppc64v1_emit_prologue (void)
2701 {
2702 /* On ELFv1, function pointers really point to a function descriptor,
2703 so emit one here. We don't care about the contents of words 1 and 2,
2704 so let them just overlap our code. */
2705 uint64_t opd = current_insn_ptr + 8;
2706 uint32_t buf[2];
2707
2708 /* Mind the strict aliasing rules. */
2709 memcpy (buf, &opd, sizeof buf);
2710 emit_insns (buf, 2);
2711 EMIT_ASM (/* Save return address. */
2712 "mflr 0 \n"
2713 "std 0, 16(1) \n"
2714 /* Save r31, r30 and incoming arguments. */
2715 "std 31, -8(1) \n"
2716 "std 30, -16(1) \n"
2717 "std 4, -24(1) \n"
2718 "std 3, -32(1) \n"
2719 /* Point r31 to the current r1 for accessing arguments. */
2720 "mr 31, 1 \n"
2721 /* Adjust SP. 208 is the initial frame size. */
2722 "stdu 1, -208(1) \n"
2723 /* Set r30 to point to the stack top. */
2724 "addi 30, 1, 168 \n"
2725 /* Initialize r3/TOP to 0. */
2726 "li 3, 0 \n");
2727 }
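
/* For reference (a sketch; nothing in this file uses it), an ELFv1
   function descriptor consists of three doublewords.  The prologue above
   only emits the first one and lets the other two overlap the
   instructions that follow.  */

struct ppc64v1_ref_func_desc
{
  uint64_t entry;	/* Address of the first instruction.  */
  uint64_t toc;		/* TOC (r2) value; unused here.  */
  uint64_t env;		/* Environment pointer (r11); unused here.  */
};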
2728
2729 /* Emit prologue in inferior memory. See above comments. */
2730
2731 static void
2732 ppc64v2_emit_prologue (void)
2733 {
2734 EMIT_ASM (/* Save return address. */
2735 "mflr 0 \n"
2736 "std 0, 16(1) \n"
2737 /* Save r31, r30 and incoming arguments. */
2738 "std 31, -8(1) \n"
2739 "std 30, -16(1) \n"
2740 "std 4, -24(1) \n"
2741 "std 3, -32(1) \n"
2742 /* Point r31 to the current r1 for accessing arguments. */
2743 "mr 31, 1 \n"
2744 /* Adjust SP. 208 is the initial frame size. */
2745 "stdu 1, -208(1) \n"
2746 /* Set r30 to point to the stack top. */
2747 "addi 30, 1, 168 \n"
2748 /* Initialize r3/TOP to 0. */
2749 "li 3, 0 \n");
2750 }
2751
2752 /* Emit epilogue in inferior memory. See above comments. */
2753
2754 static void
2755 ppc64_emit_epilogue (void)
2756 {
2757 EMIT_ASM (/* Restore SP. */
2758 "ld 1, 0(1) \n"
2759 /* *result = TOP */
2760 "ld 4, -24(1) \n"
2761 "std 3, 0(4) \n"
2762 /* Restore registers. */
2763 "ld 31, -8(1) \n"
2764 "ld 30, -16(1) \n"
2765 /* Restore LR. */
2766 "ld 0, 16(1) \n"
2767 /* Return 0 for no-error. */
2768 "li 3, 0 \n"
2769 "mtlr 0 \n"
2770 "blr \n");
2771 }
2772
2773 /* TOP = stack[--sp] + TOP */
2774
2775 static void
2776 ppc64_emit_add (void)
2777 {
2778 EMIT_ASM ("ldu 4, 8(30) \n"
2779 "add 3, 4, 3 \n");
2780 }
2781
2782 /* TOP = stack[--sp] - TOP */
2783
2784 static void
2785 ppc64_emit_sub (void)
2786 {
2787 EMIT_ASM ("ldu 4, 8(30) \n"
2788 "sub 3, 4, 3 \n");
2789 }
2790
2791 /* TOP = stack[--sp] * TOP */
2792
2793 static void
2794 ppc64_emit_mul (void)
2795 {
2796 EMIT_ASM ("ldu 4, 8(30) \n"
2797 "mulld 3, 4, 3 \n");
2798 }
2799
2800 /* TOP = stack[--sp] << TOP */
2801
2802 static void
2803 ppc64_emit_lsh (void)
2804 {
2805 EMIT_ASM ("ldu 4, 8(30) \n"
2806 "sld 3, 4, 3 \n");
2807 }
2808
2809 /* Top = stack[--sp] >> TOP
2810 (Arithmetic shift right) */
2811
2812 static void
2813 ppc64_emit_rsh_signed (void)
2814 {
2815 EMIT_ASM ("ldu 4, 8(30) \n"
2816 "srad 3, 4, 3 \n");
2817 }
2818
2819 /* Top = stack[--sp] >> TOP
2820 (Logical shift right) */
2821
2822 static void
2823 ppc64_emit_rsh_unsigned (void)
2824 {
2825 EMIT_ASM ("ldu 4, 8(30) \n"
2826 "srd 3, 4, 3 \n");
2827 }
2828
2829 /* Emit code for the sign extension specified by ARG. */
2830
2831 static void
2832 ppc64_emit_ext (int arg)
2833 {
2834 switch (arg)
2835 {
2836 case 8:
2837 EMIT_ASM ("extsb 3, 3");
2838 break;
2839 case 16:
2840 EMIT_ASM ("extsh 3, 3");
2841 break;
2842 case 32:
2843 EMIT_ASM ("extsw 3, 3");
2844 break;
2845 default:
2846 emit_error = 1;
2847 }
2848 }
2849
2850 /* Emit code for zero-extension specified by ARG. */
2851
2852 static void
2853 ppc64_emit_zero_ext (int arg)
2854 {
2855 switch (arg)
2856 {
2857 case 8:
2858 EMIT_ASM ("rldicl 3,3,0,56");
2859 break;
2860 case 16:
2861 EMIT_ASM ("rldicl 3,3,0,48");
2862 break;
2863 case 32:
2864 EMIT_ASM ("rldicl 3,3,0,32");
2865 break;
2866 default:
2867 emit_error = 1;
2868 }
2869 }
2870
2871 /* TOP = !TOP
2872 i.e., TOP = (TOP == 0) ? 1 : 0; */
2873
2874 static void
2875 ppc64_emit_log_not (void)
2876 {
2877 EMIT_ASM ("cntlzd 3, 3 \n"
2878 "srdi 3, 3, 6 \n");
2879 }
2880
2881 /* TOP = stack[--sp] & TOP */
2882
2883 static void
2884 ppc64_emit_bit_and (void)
2885 {
2886 EMIT_ASM ("ldu 4, 8(30) \n"
2887 "and 3, 4, 3 \n");
2888 }
2889
2890 /* TOP = stack[--sp] | TOP */
2891
2892 static void
2893 ppc64_emit_bit_or (void)
2894 {
2895 EMIT_ASM ("ldu 4, 8(30) \n"
2896 "or 3, 4, 3 \n");
2897 }
2898
2899 /* TOP = stack[--sp] ^ TOP */
2900
2901 static void
2902 ppc64_emit_bit_xor (void)
2903 {
2904 EMIT_ASM ("ldu 4, 8(30) \n"
2905 "xor 3, 4, 3 \n");
2906 }
2907
2908 /* TOP = ~TOP
2909 i.e., TOP = ~(TOP | TOP) */
2910
2911 static void
2912 ppc64_emit_bit_not (void)
2913 {
2914 EMIT_ASM ("nor 3, 3, 3 \n");
2915 }
2916
2917 /* TOP = stack[--sp] == TOP */
2918
2919 static void
2920 ppc64_emit_equal (void)
2921 {
2922 EMIT_ASM ("ldu 4, 8(30) \n"
2923 "xor 3, 3, 4 \n"
2924 "cntlzd 3, 3 \n"
2925 "srdi 3, 3, 6 \n");
2926 }
2927
2928 /* TOP = stack[--sp] < TOP
2929 (Signed comparison) */
2930
2931 static void
2932 ppc64_emit_less_signed (void)
2933 {
2934 EMIT_ASM ("ldu 4, 8(30) \n"
2935 "cmpd 7, 4, 3 \n"
2936 "mfcr 3 \n"
2937 "rlwinm 3, 3, 29, 31, 31 \n");
2938 }
2939
2940 /* TOP = stack[--sp] < TOP
2941 (Unsigned comparison) */
2942
2943 static void
2944 ppc64_emit_less_unsigned (void)
2945 {
2946 EMIT_ASM ("ldu 4, 8(30) \n"
2947 "cmpld 7, 4, 3 \n"
2948 "mfcr 3 \n"
2949 "rlwinm 3, 3, 29, 31, 31 \n");
2950 }
2951
2952 /* Read SIZE bytes from the memory address in TOP.
2953 Zero-extend the value read. */
2954
2955 static void
2956 ppc64_emit_ref (int size)
2957 {
2958 switch (size)
2959 {
2960 case 1:
2961 EMIT_ASM ("lbz 3, 0(3)");
2962 break;
2963 case 2:
2964 EMIT_ASM ("lhz 3, 0(3)");
2965 break;
2966 case 4:
2967 EMIT_ASM ("lwz 3, 0(3)");
2968 break;
2969 case 8:
2970 EMIT_ASM ("ld 3, 0(3)");
2971 break;
2972 }
2973 }
2974
2975 /* TOP = NUM */
2976
2977 static void
2978 ppc64_emit_const (LONGEST num)
2979 {
2980 uint32_t buf[5];
2981 uint32_t *p = buf;
2982
2983 p += gen_limm (p, 3, num, 1);
2984
2985 emit_insns (buf, p - buf);
2986 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2987 }
2988
2989 /* Set TOP to the value of register REG by calling the get_raw_reg function
2990 with two arguments, the collected buffer and the register number. */
2991
2992 static void
2993 ppc64v1_emit_reg (int reg)
2994 {
2995 uint32_t buf[15];
2996 uint32_t *p = buf;
2997
2998 /* fctx->regs is passed in r3 and then saved in 176(1). */
2999 p += GEN_LD (p, 3, 31, -32);
3000 p += GEN_LI (p, 4, reg);
3001 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3002 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
3003 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3004
3005 emit_insns (buf, p - buf);
3006 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3007 }
3008
3009 /* Likewise, for ELFv2. */
3010
3011 static void
3012 ppc64v2_emit_reg (int reg)
3013 {
3014 uint32_t buf[12];
3015 uint32_t *p = buf;
3016
3017 /* fctx->regs is passed in r3 and then saved in 176(1). */
3018 p += GEN_LD (p, 3, 31, -32);
3019 p += GEN_LI (p, 4, reg);
3020 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3021 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
3022 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3023
3024 emit_insns (buf, p - buf);
3025 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3026 }
3027
3028 /* TOP = stack[--sp] */
3029
3030 static void
3031 ppc64_emit_pop (void)
3032 {
3033 EMIT_ASM ("ldu 3, 8(30)");
3034 }
3035
3036 /* stack[sp++] = TOP
3037
3038 Because we may use up the bytecode stack, expand it by 8 more
3039 doublewords if needed. */
3040
3041 static void
3042 ppc64_emit_stack_flush (void)
3043 {
3044 /* Make sure the bytecode stack is big enough before the push.
3045 Otherwise, expand it by 64 more bytes. */
3046
3047 EMIT_ASM (" std 3, 0(30) \n"
3048 " addi 4, 30, -(112 + 8) \n"
3049 " cmpd 7, 4, 1 \n"
3050 " bgt 7, 1f \n"
3051 " stdu 31, -64(1) \n"
3052 "1:addi 30, 30, -8 \n");
3053 }
3054
3055 /* Swap TOP and stack[sp-1] */
3056
3057 static void
3058 ppc64_emit_swap (void)
3059 {
3060 EMIT_ASM ("ld 4, 8(30) \n"
3061 "std 3, 8(30) \n"
3062 "mr 3, 4 \n");
3063 }
3064
3065 /* Call function FN - ELFv1. */
3066
3067 static void
3068 ppc64v1_emit_call (CORE_ADDR fn)
3069 {
3070 uint32_t buf[13];
3071 uint32_t *p = buf;
3072
3073 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3074 p += gen_call (p, fn, 1, 1);
3075 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3076
3077 emit_insns (buf, p - buf);
3078 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3079 }
3080
3081 /* Call function FN - ELFv2. */
3082
3083 static void
3084 ppc64v2_emit_call (CORE_ADDR fn)
3085 {
3086 uint32_t buf[10];
3087 uint32_t *p = buf;
3088
3089 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3090 p += gen_call (p, fn, 1, 0);
3091 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3092
3093 emit_insns (buf, p - buf);
3094 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3095 }
3096
3097 /* FN's prototype is `LONGEST(*fn)(int)'.
3098 TOP = fn (arg1)
3099 */
3100
3101 static void
3102 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3103 {
3104 uint32_t buf[13];
3105 uint32_t *p = buf;
3106
3107 /* Setup argument. arg1 is a 16-bit value. */
3108 p += gen_limm (p, 3, arg1, 1);
3109 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3110 p += gen_call (p, fn, 1, 1);
3111 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3112
3113 emit_insns (buf, p - buf);
3114 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3115 }
3116
3117 /* Likewise for ELFv2. */
3118
3119 static void
3120 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3121 {
3122 uint32_t buf[10];
3123 uint32_t *p = buf;
3124
3125 /* Setup argument. arg1 is a 16-bit value. */
3126 p += gen_limm (p, 3, arg1, 1);
3127 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3128 p += gen_call (p, fn, 1, 0);
3129 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3130
3131 emit_insns (buf, p - buf);
3132 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3133 }
3134
3135 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3136 fn (arg1, TOP)
3137
3138 TOP should be preserved/restored before/after the call. */
3139
3140 static void
3141 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3142 {
3143 uint32_t buf[17];
3144 uint32_t *p = buf;
3145
3146 /* Save TOP. 0(30) is next-empty. */
3147 p += GEN_STD (p, 3, 30, 0);
3148
3149 /* Setup argument. arg1 is a 16-bit value. */
3150 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3151 p += gen_limm (p, 3, arg1, 1);
3152 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3153 p += gen_call (p, fn, 1, 1);
3154 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3155
3156 /* Restore TOP */
3157 p += GEN_LD (p, 3, 30, 0);
3158
3159 emit_insns (buf, p - buf);
3160 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3161 }
3162
3163 /* Likewise for ELFv2. */
3164
3165 static void
3166 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3167 {
3168 uint32_t buf[14];
3169 uint32_t *p = buf;
3170
3171 /* Save TOP. 0(30) is next-empty. */
3172 p += GEN_STD (p, 3, 30, 0);
3173
3174 /* Setup argument. arg1 is a 16-bit value. */
3175 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3176 p += gen_limm (p, 3, arg1, 1);
3177 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3178 p += gen_call (p, fn, 1, 0);
3179 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3180
3181 /* Restore TOP */
3182 p += GEN_LD (p, 3, 30, 0);
3183
3184 emit_insns (buf, p - buf);
3185 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3186 }
3187
3188 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3189
3190 static void
3191 ppc64_emit_if_goto (int *offset_p, int *size_p)
3192 {
3193 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3194 "ldu 3, 8(30) \n"
3195 "1:bne 7, 1b \n");
3196
3197 if (offset_p)
3198 *offset_p = 8;
3199 if (size_p)
3200 *size_p = 14;
3201 }
3202
3203 /* Goto if stack[--sp] == TOP */
3204
3205 static void
3206 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3207 {
3208 EMIT_ASM ("ldu 4, 8(30) \n"
3209 "cmpd 7, 4, 3 \n"
3210 "ldu 3, 8(30) \n"
3211 "1:beq 7, 1b \n");
3212
3213 if (offset_p)
3214 *offset_p = 12;
3215 if (size_p)
3216 *size_p = 14;
3217 }
3218
3219 /* Goto if stack[--sp] != TOP */
3220
3221 static void
3222 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3223 {
3224 EMIT_ASM ("ldu 4, 8(30) \n"
3225 "cmpd 7, 4, 3 \n"
3226 "ldu 3, 8(30) \n"
3227 "1:bne 7, 1b \n");
3228
3229 if (offset_p)
3230 *offset_p = 12;
3231 if (size_p)
3232 *size_p = 14;
3233 }
3234
3235 /* Goto if stack[--sp] < TOP */
3236
3237 static void
3238 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3239 {
3240 EMIT_ASM ("ldu 4, 8(30) \n"
3241 "cmpd 7, 4, 3 \n"
3242 "ldu 3, 8(30) \n"
3243 "1:blt 7, 1b \n");
3244
3245 if (offset_p)
3246 *offset_p = 12;
3247 if (size_p)
3248 *size_p = 14;
3249 }
3250
3251 /* Goto if stack[--sp] <= TOP */
3252
3253 static void
3254 ppc64_emit_le_goto (int *offset_p, int *size_p)
3255 {
3256 EMIT_ASM ("ldu 4, 8(30) \n"
3257 "cmpd 7, 4, 3 \n"
3258 "ldu 3, 8(30) \n"
3259 "1:ble 7, 1b \n");
3260
3261 if (offset_p)
3262 *offset_p = 12;
3263 if (size_p)
3264 *size_p = 14;
3265 }
3266
3267 /* Goto if stack[--sp] > TOP */
3268
3269 static void
3270 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3271 {
3272 EMIT_ASM ("ldu 4, 8(30) \n"
3273 "cmpd 7, 4, 3 \n"
3274 "ldu 3, 8(30) \n"
3275 "1:bgt 7, 1b \n");
3276
3277 if (offset_p)
3278 *offset_p = 12;
3279 if (size_p)
3280 *size_p = 14;
3281 }
3282
3283 /* Goto if stack[--sp] >= TOP */
3284
3285 static void
3286 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3287 {
3288 EMIT_ASM ("ldu 4, 8(30) \n"
3289 "cmpd 7, 4, 3 \n"
3290 "ldu 3, 8(30) \n"
3291 "1:bge 7, 1b \n");
3292
3293 if (offset_p)
3294 *offset_p = 12;
3295 if (size_p)
3296 *size_p = 14;
3297 }
3298
3299 /* Table of emit ops for 64-bit ELFv1. */
3300
3301 static struct emit_ops ppc64v1_emit_ops_impl =
3302 {
3303 ppc64v1_emit_prologue,
3304 ppc64_emit_epilogue,
3305 ppc64_emit_add,
3306 ppc64_emit_sub,
3307 ppc64_emit_mul,
3308 ppc64_emit_lsh,
3309 ppc64_emit_rsh_signed,
3310 ppc64_emit_rsh_unsigned,
3311 ppc64_emit_ext,
3312 ppc64_emit_log_not,
3313 ppc64_emit_bit_and,
3314 ppc64_emit_bit_or,
3315 ppc64_emit_bit_xor,
3316 ppc64_emit_bit_not,
3317 ppc64_emit_equal,
3318 ppc64_emit_less_signed,
3319 ppc64_emit_less_unsigned,
3320 ppc64_emit_ref,
3321 ppc64_emit_if_goto,
3322 ppc_emit_goto,
3323 ppc_write_goto_address,
3324 ppc64_emit_const,
3325 ppc64v1_emit_call,
3326 ppc64v1_emit_reg,
3327 ppc64_emit_pop,
3328 ppc64_emit_stack_flush,
3329 ppc64_emit_zero_ext,
3330 ppc64_emit_swap,
3331 ppc_emit_stack_adjust,
3332 ppc64v1_emit_int_call_1,
3333 ppc64v1_emit_void_call_2,
3334 ppc64_emit_eq_goto,
3335 ppc64_emit_ne_goto,
3336 ppc64_emit_lt_goto,
3337 ppc64_emit_le_goto,
3338 ppc64_emit_gt_goto,
3339 ppc64_emit_ge_goto
3340 };
3341
3342 /* Table of emit ops for 64-bit ELFv2. */
3343
3344 static struct emit_ops ppc64v2_emit_ops_impl =
3345 {
3346 ppc64v2_emit_prologue,
3347 ppc64_emit_epilogue,
3348 ppc64_emit_add,
3349 ppc64_emit_sub,
3350 ppc64_emit_mul,
3351 ppc64_emit_lsh,
3352 ppc64_emit_rsh_signed,
3353 ppc64_emit_rsh_unsigned,
3354 ppc64_emit_ext,
3355 ppc64_emit_log_not,
3356 ppc64_emit_bit_and,
3357 ppc64_emit_bit_or,
3358 ppc64_emit_bit_xor,
3359 ppc64_emit_bit_not,
3360 ppc64_emit_equal,
3361 ppc64_emit_less_signed,
3362 ppc64_emit_less_unsigned,
3363 ppc64_emit_ref,
3364 ppc64_emit_if_goto,
3365 ppc_emit_goto,
3366 ppc_write_goto_address,
3367 ppc64_emit_const,
3368 ppc64v2_emit_call,
3369 ppc64v2_emit_reg,
3370 ppc64_emit_pop,
3371 ppc64_emit_stack_flush,
3372 ppc64_emit_zero_ext,
3373 ppc64_emit_swap,
3374 ppc_emit_stack_adjust,
3375 ppc64v2_emit_int_call_1,
3376 ppc64v2_emit_void_call_2,
3377 ppc64_emit_eq_goto,
3378 ppc64_emit_ne_goto,
3379 ppc64_emit_lt_goto,
3380 ppc64_emit_le_goto,
3381 ppc64_emit_gt_goto,
3382 ppc64_emit_ge_goto
3383 };
3384
3385 #endif
3386
3387 /* Implementation of linux_target_ops method "emit_ops". */
3388
3389 static struct emit_ops *
3390 ppc_emit_ops (void)
3391 {
3392 #ifdef __powerpc64__
3393 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3394
3395 if (register_size (regcache->tdesc, 0) == 8)
3396 {
3397 if (is_elfv2_inferior ())
3398 return &ppc64v2_emit_ops_impl;
3399 else
3400 return &ppc64v1_emit_ops_impl;
3401 }
3402 #endif
3403 return &ppc_emit_ops_impl;
3404 }
3405
3406 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3407
3408 static int
3409 ppc_get_ipa_tdesc_idx (void)
3410 {
3411 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3412 const struct target_desc *tdesc = regcache->tdesc;
3413
3414 #ifdef __powerpc64__
3415 if (tdesc == tdesc_powerpc_64l)
3416 return PPC_TDESC_BASE;
3417 if (tdesc == tdesc_powerpc_altivec64l)
3418 return PPC_TDESC_ALTIVEC;
3419 if (tdesc == tdesc_powerpc_cell64l)
3420 return PPC_TDESC_CELL;
3421 if (tdesc == tdesc_powerpc_vsx64l)
3422 return PPC_TDESC_VSX;
3423 if (tdesc == tdesc_powerpc_isa205_64l)
3424 return PPC_TDESC_ISA205;
3425 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3426 return PPC_TDESC_ISA205_ALTIVEC;
3427 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3428 return PPC_TDESC_ISA205_VSX;
3429 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3430 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3431 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3432 return PPC_TDESC_ISA207_VSX;
3433 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3434 return PPC_TDESC_ISA207_HTM_VSX;
3435 #endif
3436
3437 if (tdesc == tdesc_powerpc_32l)
3438 return PPC_TDESC_BASE;
3439 if (tdesc == tdesc_powerpc_altivec32l)
3440 return PPC_TDESC_ALTIVEC;
3441 if (tdesc == tdesc_powerpc_cell32l)
3442 return PPC_TDESC_CELL;
3443 if (tdesc == tdesc_powerpc_vsx32l)
3444 return PPC_TDESC_VSX;
3445 if (tdesc == tdesc_powerpc_isa205_32l)
3446 return PPC_TDESC_ISA205;
3447 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3448 return PPC_TDESC_ISA205_ALTIVEC;
3449 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3450 return PPC_TDESC_ISA205_VSX;
3451 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3452 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3453 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3454 return PPC_TDESC_ISA207_VSX;
3455 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3456 return PPC_TDESC_ISA207_HTM_VSX;
3457 if (tdesc == tdesc_powerpc_e500l)
3458 return PPC_TDESC_E500;
3459
3460 return 0;
3461 }
3462
3463 struct linux_target_ops the_low_target = {
3464 ppc_arch_setup,
3465 ppc_regs_info,
3466 ppc_cannot_fetch_register,
3467 ppc_cannot_store_register,
3468 NULL, /* fetch_register */
3469 ppc_get_pc,
3470 ppc_set_pc,
3471 NULL, /* breakpoint_kind_from_pc */
3472 ppc_sw_breakpoint_from_kind,
3473 NULL,
3474 0,
3475 ppc_breakpoint_at,
3476 ppc_supports_z_point_type,
3477 ppc_insert_point,
3478 ppc_remove_point,
3479 NULL,
3480 NULL,
3481 ppc_collect_ptrace_register,
3482 ppc_supply_ptrace_register,
3483 NULL, /* siginfo_fixup */
3484 NULL, /* new_process */
3485 NULL, /* delete_process */
3486 NULL, /* new_thread */
3487 NULL, /* delete_thread */
3488 NULL, /* new_fork */
3489 NULL, /* prepare_to_resume */
3490 NULL, /* process_qsupported */
3491 ppc_supports_tracepoints,
3492 ppc_get_thread_area,
3493 ppc_install_fast_tracepoint_jump_pad,
3494 ppc_emit_ops,
3495 ppc_get_min_fast_tracepoint_insn_len,
3496 NULL, /* supports_range_stepping */
3497 NULL, /* breakpoint_kind_from_current_state */
3498 ppc_supports_hardware_single_step,
3499 NULL, /* get_syscall_trapinfo */
3500 ppc_get_ipa_tdesc_idx,
3501 };
3502
3503 void
3504 initialize_low_arch (void)
3505 {
3506 /* Initialize the Linux target descriptions. */
3507
3508 init_registers_powerpc_32l ();
3509 init_registers_powerpc_altivec32l ();
3510 init_registers_powerpc_cell32l ();
3511 init_registers_powerpc_vsx32l ();
3512 init_registers_powerpc_isa205_32l ();
3513 init_registers_powerpc_isa205_altivec32l ();
3514 init_registers_powerpc_isa205_vsx32l ();
3515 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3516 init_registers_powerpc_isa207_vsx32l ();
3517 init_registers_powerpc_isa207_htm_vsx32l ();
3518 init_registers_powerpc_e500l ();
3519 #if __powerpc64__
3520 init_registers_powerpc_64l ();
3521 init_registers_powerpc_altivec64l ();
3522 init_registers_powerpc_cell64l ();
3523 init_registers_powerpc_vsx64l ();
3524 init_registers_powerpc_isa205_64l ();
3525 init_registers_powerpc_isa205_altivec64l ();
3526 init_registers_powerpc_isa205_vsx64l ();
3527 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3528 init_registers_powerpc_isa207_vsx64l ();
3529 init_registers_powerpc_isa207_htm_vsx64l ();
3530 #endif
3531
3532 initialize_regsets_info (&ppc_regsets_info);
3533 }