1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
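
/* For example, decoding 0x48000008 ("b .+8") with these macros gives
   PPC_OP6 == 18 and PPC_LI == 8, while 0x4bfffff0 ("b .-16") gives
   PPC_FIELD (insn, 6, 24) == 0xfffffc, which PPC_SEXT turns into -4,
   so PPC_LI == -16.  */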
46
47 /* Holds the AT_HWCAP auxv entry. */
48
49 static unsigned long ppc_hwcap;
50
51 /* Holds the AT_HWCAP2 auxv entry. */
52
53 static unsigned long ppc_hwcap2;
54
55
56 #define ppc_num_regs 73
57
58 #ifdef __powerpc64__
59 /* We use a constant for FPSCR instead of PT_FPSCR, because
60 many shipped PPC64 kernels had the wrong value in ptrace.h. */
61 static int ppc_regmap[] =
62 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
63 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
64 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
65 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
66 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
67 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
68 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
69 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
70 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
71 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
72 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
73 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
74 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
75 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
76 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
77 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
78 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
79 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
80 PT_ORIG_R3 * 8, PT_TRAP * 8 };
81 #else
82 /* Currently, don't check/send MQ. */
83 static int ppc_regmap[] =
84 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
85 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
86 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
87 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
88 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
89 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
90 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
91 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
92 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
93 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
94 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
95 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
96 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
97 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
98 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
99 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
100 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
101 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
102 PT_ORIG_R3 * 4, PT_TRAP * 4
103 };
104
105 static int ppc_regmap_e500[] =
106 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
107 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
108 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
109 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
110 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
111 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
112 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
113 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
114 -1, -1, -1, -1,
115 -1, -1, -1, -1,
116 -1, -1, -1, -1,
117 -1, -1, -1, -1,
118 -1, -1, -1, -1,
119 -1, -1, -1, -1,
120 -1, -1, -1, -1,
121 -1, -1, -1, -1,
122 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
123 PT_CTR * 4, PT_XER * 4, -1,
124 PT_ORIG_R3 * 4, PT_TRAP * 4
125 };
126 #endif
127
128 /* Check whether the kernel provides a register set with number
129 REGSET_ID of size REGSETSIZE for process/thread TID. */
130
131 static int
132 ppc_check_regset (int tid, int regset_id, int regsetsize)
133 {
134 void *buf = alloca (regsetsize);
135 struct iovec iov;
136
137 iov.iov_base = buf;
138 iov.iov_len = regsetsize;
139
140 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
141 || errno == ENODATA)
142 return 1;
143 return 0;
144 }
145
146 static int
147 ppc_cannot_store_register (int regno)
148 {
149 const struct target_desc *tdesc = current_process ()->tdesc;
150
151 #ifndef __powerpc64__
152 /* Some kernels do not allow us to store fpscr. */
153 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
154 && regno == find_regno (tdesc, "fpscr"))
155 return 2;
156 #endif
157
158 /* Some kernels do not allow us to store orig_r3 or trap. */
159 if (regno == find_regno (tdesc, "orig_r3")
160 || regno == find_regno (tdesc, "trap"))
161 return 2;
162
163 return 0;
164 }
165
166 static int
167 ppc_cannot_fetch_register (int regno)
168 {
169 return 0;
170 }
171
172 static void
173 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
174 {
175 memset (buf, 0, sizeof (long));
176
177 if (__BYTE_ORDER == __LITTLE_ENDIAN)
178 {
179 /* Little-endian values always sit at the left end of the buffer. */
180 collect_register (regcache, regno, buf);
181 }
182 else if (__BYTE_ORDER == __BIG_ENDIAN)
183 {
184 /* Big-endian values sit at the right end of the buffer. In case of
185         registers whose sizes are smaller than sizeof (long), we must use
186         padding to access them correctly. */
187 int size = register_size (regcache->tdesc, regno);
188
189 if (size < sizeof (long))
190 collect_register (regcache, regno, buf + sizeof (long) - size);
191 else
192 collect_register (regcache, regno, buf);
193 }
194 else
195 perror_with_name ("Unexpected byte order");
196 }
197
198 static void
199 ppc_supply_ptrace_register (struct regcache *regcache,
200 int regno, const char *buf)
201 {
202 if (__BYTE_ORDER == __LITTLE_ENDIAN)
203 {
204 /* Little-endian values always sit at the left end of the buffer. */
205 supply_register (regcache, regno, buf);
206 }
207 else if (__BYTE_ORDER == __BIG_ENDIAN)
208 {
209 /* Big-endian values sit at the right end of the buffer. In case of
210         registers whose sizes are smaller than sizeof (long), we must use
211         padding to access them correctly. */
212 int size = register_size (regcache->tdesc, regno);
213
214 if (size < sizeof (long))
215 supply_register (regcache, regno, buf + sizeof (long) - size);
216 else
217 supply_register (regcache, regno, buf);
218 }
219 else
220 perror_with_name ("Unexpected byte order");
221 }
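
/* For example, on a 64-bit big-endian kernel (sizeof (long) == 8), a
   4-byte register such as CCR is transferred at buf + 4, i.e. in the
   low-order half of the 8-byte ptrace slot, whereas on a little-endian
   host it always starts at buf + 0.  */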
222
223 static CORE_ADDR
224 ppc_get_pc (struct regcache *regcache)
225 {
226 if (register_size (regcache->tdesc, 0) == 4)
227 {
228 unsigned int pc;
229 collect_register_by_name (regcache, "pc", &pc);
230 return (CORE_ADDR) pc;
231 }
232 else
233 {
234 unsigned long pc;
235 collect_register_by_name (regcache, "pc", &pc);
236 return (CORE_ADDR) pc;
237 }
238 }
239
240 static void
241 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
242 {
243 if (register_size (regcache->tdesc, 0) == 4)
244 {
245 unsigned int newpc = pc;
246 supply_register_by_name (regcache, "pc", &newpc);
247 }
248 else
249 {
250 unsigned long newpc = pc;
251 supply_register_by_name (regcache, "pc", &newpc);
252 }
253 }
254
255 #ifndef __powerpc64__
256 static int ppc_regmap_adjusted;
257 #endif
258
259
260 /* Correct in either endianness.
261 This instruction is "twge r2, r2", which GDB uses as a software
262 breakpoint. */
263 static const unsigned int ppc_breakpoint = 0x7d821008;
264 #define ppc_breakpoint_len 4
265
266 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
267
268 static const gdb_byte *
269 ppc_sw_breakpoint_from_kind (int kind, int *size)
270 {
271 *size = ppc_breakpoint_len;
272 return (const gdb_byte *) &ppc_breakpoint;
273 }
274
275 static int
276 ppc_breakpoint_at (CORE_ADDR where)
277 {
278 unsigned int insn;
279
280 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
281 if (insn == ppc_breakpoint)
282 return 1;
283 /* If necessary, recognize more trap instructions here. GDB only uses
284 the one. */
285
286 return 0;
287 }
288
289 /* Implement supports_z_point_type target-ops.
290 Returns true if type Z_TYPE breakpoint is supported.
291
292    Software breakpoints are handled on the server side, so tracepoints
293    and breakpoints can be inserted at the same location. */
294
295 static int
296 ppc_supports_z_point_type (char z_type)
297 {
298 switch (z_type)
299 {
300 case Z_PACKET_SW_BP:
301 return 1;
302 case Z_PACKET_HW_BP:
303 case Z_PACKET_WRITE_WP:
304 case Z_PACKET_ACCESS_WP:
305 default:
306 return 0;
307 }
308 }
309
310 /* Implement insert_point target-ops.
311 Returns 0 on success, -1 on failure and 1 on unsupported. */
312
313 static int
314 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
315 int size, struct raw_breakpoint *bp)
316 {
317 switch (type)
318 {
319 case raw_bkpt_type_sw:
320 return insert_memory_breakpoint (bp);
321
322 case raw_bkpt_type_hw:
323 case raw_bkpt_type_write_wp:
324 case raw_bkpt_type_access_wp:
325 default:
326 /* Unsupported. */
327 return 1;
328 }
329 }
330
331 /* Implement remove_point target-ops.
332 Returns 0 on success, -1 on failure and 1 on unsupported. */
333
334 static int
335 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
336 int size, struct raw_breakpoint *bp)
337 {
338 switch (type)
339 {
340 case raw_bkpt_type_sw:
341 return remove_memory_breakpoint (bp);
342
343 case raw_bkpt_type_hw:
344 case raw_bkpt_type_write_wp:
345 case raw_bkpt_type_access_wp:
346 default:
347 /* Unsupported. */
348 return 1;
349 }
350 }
351
352 /* Provide only a fill function for the general register set. ps_lgetregs
353 will use this for NPTL support. */
354
355 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
356 {
357 int i;
358
359 for (i = 0; i < 32; i++)
360 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
361
362 for (i = 64; i < 70; i++)
363 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
364
365 for (i = 71; i < 73; i++)
366 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
367 }
368
369 /* Program Priority Register regset fill function. */
370
371 static void
372 ppc_fill_pprregset (struct regcache *regcache, void *buf)
373 {
374 char *ppr = (char *) buf;
375
376 collect_register_by_name (regcache, "ppr", ppr);
377 }
378
379 /* Program Priority Register regset store function. */
380
381 static void
382 ppc_store_pprregset (struct regcache *regcache, const void *buf)
383 {
384 const char *ppr = (const char *) buf;
385
386 supply_register_by_name (regcache, "ppr", ppr);
387 }
388
389 /* Data Stream Control Register regset fill function. */
390
391 static void
392 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
393 {
394 char *dscr = (char *) buf;
395
396 collect_register_by_name (regcache, "dscr", dscr);
397 }
398
399 /* Data Stream Control Register regset store function. */
400
401 static void
402 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
403 {
404 const char *dscr = (const char *) buf;
405
406 supply_register_by_name (regcache, "dscr", dscr);
407 }
408
409 /* Target Address Register regset fill function. */
410
411 static void
412 ppc_fill_tarregset (struct regcache *regcache, void *buf)
413 {
414 char *tar = (char *) buf;
415
416 collect_register_by_name (regcache, "tar", tar);
417 }
418
419 /* Target Address Register regset store function. */
420
421 static void
422 ppc_store_tarregset (struct regcache *regcache, const void *buf)
423 {
424 const char *tar = (const char *) buf;
425
426 supply_register_by_name (regcache, "tar", tar);
427 }
428
429 /* Event-Based Branching regset store function. Unless the inferior
430    has a perf event open, ptrace can fail with ENODATA when reading
431    or writing the regset.  For reading, the registers
432 will correctly show as unavailable. For writing, gdbserver
433 currently only caches any register writes from P and G packets and
434 the stub always tries to write all the regsets when resuming the
435 inferior, which would result in frequent warnings. For this
436 reason, we don't define a fill function. This also means that the
437 client-side regcache will be dirty if the user tries to write to
438 the EBB registers. G packets that the client sends to write to
439 unrelated registers will also include data for EBB registers, even
440 if they are unavailable. */
441
442 static void
443 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
444 {
445 const char *regset = (const char *) buf;
446
447   /* The order in the kernel regset is: EBBRR, EBBHR, BESCR.  In the
448      .dat file it is BESCR, EBBHR, EBBRR. */
449 supply_register_by_name (regcache, "ebbrr", &regset[0]);
450 supply_register_by_name (regcache, "ebbhr", &regset[8]);
451 supply_register_by_name (regcache, "bescr", &regset[16]);
452 }
453
454 /* Performance Monitoring Unit regset fill function. */
455
456 static void
457 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
458 {
459 char *regset = (char *) buf;
460
461   /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
462      In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
463 collect_register_by_name (regcache, "siar", &regset[0]);
464 collect_register_by_name (regcache, "sdar", &regset[8]);
465 collect_register_by_name (regcache, "sier", &regset[16]);
466 collect_register_by_name (regcache, "mmcr2", &regset[24]);
467 collect_register_by_name (regcache, "mmcr0", &regset[32]);
468 }
469
470 /* Performance Monitoring Unit regset store function. */
471
472 static void
473 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
474 {
475 const char *regset = (const char *) buf;
476
477 supply_register_by_name (regcache, "siar", &regset[0]);
478 supply_register_by_name (regcache, "sdar", &regset[8]);
479 supply_register_by_name (regcache, "sier", &regset[16]);
480 supply_register_by_name (regcache, "mmcr2", &regset[24]);
481 supply_register_by_name (regcache, "mmcr0", &regset[32]);
482 }
483
484 /* Hardware Transactional Memory special-purpose register regset fill
485 function. */
486
487 static void
488 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
489 {
490 int i, base;
491 char *regset = (char *) buf;
492
493 base = find_regno (regcache->tdesc, "tfhar");
494 for (i = 0; i < 3; i++)
495 collect_register (regcache, base + i, &regset[i * 8]);
496 }
497
498 /* Hardware Transactional Memory special-purpose register regset store
499 function. */
500
501 static void
502 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
503 {
504 int i, base;
505 const char *regset = (const char *) buf;
506
507 base = find_regno (regcache->tdesc, "tfhar");
508 for (i = 0; i < 3; i++)
509 supply_register (regcache, base + i, &regset[i * 8]);
510 }
511
512 /* For the same reasons as the EBB regset, none of the HTM
513 checkpointed regsets have a fill function. These registers are
514 only available if the inferior is in a transaction. */
515
516 /* Hardware Transactional Memory checkpointed general-purpose regset
517 store function. */
518
519 static void
520 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
521 {
522 int i, base, size, endian_offset;
523 const char *regset = (const char *) buf;
524
525 base = find_regno (regcache->tdesc, "cr0");
526 size = register_size (regcache->tdesc, base);
527
528 gdb_assert (size == 4 || size == 8);
529
530 for (i = 0; i < 32; i++)
531 supply_register (regcache, base + i, &regset[i * size]);
532
533 endian_offset = 0;
534
535 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
536 endian_offset = 4;
537
538 supply_register_by_name (regcache, "ccr",
539 &regset[PT_CCR * size + endian_offset]);
540
541 supply_register_by_name (regcache, "cxer",
542 &regset[PT_XER * size + endian_offset]);
543
544 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
545 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
546 }
547
548 /* Hardware Transactional Memory checkpointed floating-point regset
549 store function. */
550
551 static void
552 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
553 {
554 int i, base;
555 const char *regset = (const char *) buf;
556
557 base = find_regno (regcache->tdesc, "cf0");
558
559 for (i = 0; i < 32; i++)
560 supply_register (regcache, base + i, &regset[i * 8]);
561
562 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
563 }
564
565 /* Hardware Transactional Memory checkpointed vector regset store
566 function. */
567
568 static void
569 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
570 {
571 int i, base;
572 const char *regset = (const char *) buf;
573 int vscr_offset = 0;
574
575 base = find_regno (regcache->tdesc, "cvr0");
576
577 for (i = 0; i < 32; i++)
578 supply_register (regcache, base + i, &regset[i * 16]);
579
580 if (__BYTE_ORDER == __BIG_ENDIAN)
581 vscr_offset = 12;
582
583 supply_register_by_name (regcache, "cvscr",
584 &regset[32 * 16 + vscr_offset]);
585
586 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
587 }
588
589 /* Hardware Transactional Memory checkpointed vector-scalar regset
590 store function. */
591
592 static void
593 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
594 {
595 int i, base;
596 const char *regset = (const char *) buf;
597
598 base = find_regno (regcache->tdesc, "cvs0h");
599 for (i = 0; i < 32; i++)
600 supply_register (regcache, base + i, &regset[i * 8]);
601 }
602
603 /* Hardware Transactional Memory checkpointed Program Priority
604 Register regset store function. */
605
606 static void
607 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
608 {
609 const char *cppr = (const char *) buf;
610
611 supply_register_by_name (regcache, "cppr", cppr);
612 }
613
614 /* Hardware Transactional Memory checkpointed Data Stream Control
615 Register regset store function. */
616
617 static void
618 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
619 {
620 const char *cdscr = (const char *) buf;
621
622 supply_register_by_name (regcache, "cdscr", cdscr);
623 }
624
625 /* Hardware Transactional Memory checkpointed Target Address Register
626 regset store function. */
627
628 static void
629 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
630 {
631 const char *ctar = (const char *) buf;
632
633 supply_register_by_name (regcache, "ctar", ctar);
634 }
635
636 static void
637 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
638 {
639 int i, base;
640 char *regset = (char *) buf;
641
642 base = find_regno (regcache->tdesc, "vs0h");
643 for (i = 0; i < 32; i++)
644 collect_register (regcache, base + i, &regset[i * 8]);
645 }
646
647 static void
648 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
649 {
650 int i, base;
651 const char *regset = (const char *) buf;
652
653 base = find_regno (regcache->tdesc, "vs0h");
654 for (i = 0; i < 32; i++)
655 supply_register (regcache, base + i, &regset[i * 8]);
656 }
657
658 static void
659 ppc_fill_vrregset (struct regcache *regcache, void *buf)
660 {
661 int i, base;
662 char *regset = (char *) buf;
663 int vscr_offset = 0;
664
665 base = find_regno (regcache->tdesc, "vr0");
666 for (i = 0; i < 32; i++)
667 collect_register (regcache, base + i, &regset[i * 16]);
668
669 if (__BYTE_ORDER == __BIG_ENDIAN)
670 vscr_offset = 12;
671
672 collect_register_by_name (regcache, "vscr",
673 &regset[32 * 16 + vscr_offset]);
674
675 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
676 }
677
678 static void
679 ppc_store_vrregset (struct regcache *regcache, const void *buf)
680 {
681 int i, base;
682 const char *regset = (const char *) buf;
683 int vscr_offset = 0;
684
685 base = find_regno (regcache->tdesc, "vr0");
686 for (i = 0; i < 32; i++)
687 supply_register (regcache, base + i, &regset[i * 16]);
688
689 if (__BYTE_ORDER == __BIG_ENDIAN)
690 vscr_offset = 12;
691
692 supply_register_by_name (regcache, "vscr",
693 &regset[32 * 16 + vscr_offset]);
694 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
695 }
696
697 struct gdb_evrregset_t
698 {
699 unsigned long evr[32];
700 unsigned long long acc;
701 unsigned long spefscr;
702 };
703
704 static void
705 ppc_fill_evrregset (struct regcache *regcache, void *buf)
706 {
707 int i, ev0;
708 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
709
710 ev0 = find_regno (regcache->tdesc, "ev0h");
711 for (i = 0; i < 32; i++)
712 collect_register (regcache, ev0 + i, &regset->evr[i]);
713
714 collect_register_by_name (regcache, "acc", &regset->acc);
715 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
716 }
717
718 static void
719 ppc_store_evrregset (struct regcache *regcache, const void *buf)
720 {
721 int i, ev0;
722 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
723
724 ev0 = find_regno (regcache->tdesc, "ev0h");
725 for (i = 0; i < 32; i++)
726 supply_register (regcache, ev0 + i, &regset->evr[i]);
727
728 supply_register_by_name (regcache, "acc", &regset->acc);
729 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
730 }
731
732 /* Support for hardware single step. */
733
734 static int
735 ppc_supports_hardware_single_step (void)
736 {
737 return 1;
738 }
739
740 static struct regset_info ppc_regsets[] = {
741 /* List the extra register sets before GENERAL_REGS. That way we will
742 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
743 general registers. Some kernels support these, but not the newer
744 PPC_PTRACE_GETREGS. */
745 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
746 NULL, ppc_store_tm_ctarregset },
747 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
748 NULL, ppc_store_tm_cdscrregset },
749 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
750 NULL, ppc_store_tm_cpprregset },
751 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
752 NULL, ppc_store_tm_cvsxregset },
753 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
754 NULL, ppc_store_tm_cvrregset },
755 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
756 NULL, ppc_store_tm_cfprregset },
757 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
758 NULL, ppc_store_tm_cgprregset },
759 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
760 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
761 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
762 NULL, ppc_store_ebbregset },
763 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
764 ppc_fill_pmuregset, ppc_store_pmuregset },
765 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
766 ppc_fill_tarregset, ppc_store_tarregset },
767 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
768 ppc_fill_pprregset, ppc_store_pprregset },
769 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
770 ppc_fill_dscrregset, ppc_store_dscrregset },
771 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
772 ppc_fill_vsxregset, ppc_store_vsxregset },
773 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
774 ppc_fill_vrregset, ppc_store_vrregset },
775 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
776 ppc_fill_evrregset, ppc_store_evrregset },
777 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
778 NULL_REGSET
779 };
780
781 static struct usrregs_info ppc_usrregs_info =
782 {
783 ppc_num_regs,
784 ppc_regmap,
785 };
786
787 static struct regsets_info ppc_regsets_info =
788 {
789 ppc_regsets, /* regsets */
790 0, /* num_regsets */
791 NULL, /* disabled_regsets */
792 };
793
794 static struct regs_info regs_info =
795 {
796 NULL, /* regset_bitmap */
797 &ppc_usrregs_info,
798 &ppc_regsets_info
799 };
800
801 static const struct regs_info *
802 ppc_regs_info (void)
803 {
804 return &regs_info;
805 }
806
807 static void
808 ppc_arch_setup (void)
809 {
810 const struct target_desc *tdesc;
811 struct regset_info *regset;
812 struct ppc_linux_features features = ppc_linux_no_features;
813
814 int tid = lwpid_of (current_thread);
815
816 features.wordsize = ppc_linux_target_wordsize (tid);
817
818 if (features.wordsize == 4)
819 tdesc = tdesc_powerpc_32l;
820 else
821 tdesc = tdesc_powerpc_64l;
822
823 current_process ()->tdesc = tdesc;
824
825 /* The value of current_process ()->tdesc needs to be set for this
826 call. */
827 ppc_hwcap = linux_get_hwcap (features.wordsize);
828 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
829
830 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
831
832 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
833 features.vsx = true;
834
835 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
836 features.altivec = true;
837
838 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
839 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
840 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
841 {
842 features.ppr_dscr = true;
843 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
844 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
845 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
846 && ppc_check_regset (tid, NT_PPC_TAR,
847 PPC_LINUX_SIZEOF_TARREGSET)
848 && ppc_check_regset (tid, NT_PPC_EBB,
849 PPC_LINUX_SIZEOF_EBBREGSET)
850 && ppc_check_regset (tid, NT_PPC_PMU,
851 PPC_LINUX_SIZEOF_PMUREGSET))
852 {
853 features.isa207 = true;
854 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
855 && ppc_check_regset (tid, NT_PPC_TM_SPR,
856 PPC_LINUX_SIZEOF_TM_SPRREGSET))
857 features.htm = true;
858 }
859 }
860
861 tdesc = ppc_linux_match_description (features);
862
863 /* On 32-bit machines, check for SPE registers.
864      Set the low target's regmap field as appropriate. */
865 #ifndef __powerpc64__
866 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
867 tdesc = tdesc_powerpc_e500l;
868
869 if (!ppc_regmap_adjusted)
870 {
871 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
872 ppc_usrregs_info.regmap = ppc_regmap_e500;
873
874 /* If the FPSCR is 64-bit wide, we need to fetch the whole
875 64-bit slot and not just its second word. The PT_FPSCR
876 supplied in a 32-bit GDB compilation doesn't reflect
877 this. */
878 if (register_size (tdesc, 70) == 8)
879 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
880
881 ppc_regmap_adjusted = 1;
882 }
883 #endif
884
885 current_process ()->tdesc = tdesc;
886
887 for (regset = ppc_regsets; regset->size >= 0; regset++)
888 switch (regset->get_request)
889 {
890 case PTRACE_GETVRREGS:
891 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
892 break;
893 case PTRACE_GETVSXREGS:
894 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
895 break;
896 case PTRACE_GETEVRREGS:
897 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
898 regset->size = 32 * 4 + 8 + 4;
899 else
900 regset->size = 0;
901 break;
902 case PTRACE_GETREGSET:
903 switch (regset->nt_type)
904 {
905 case NT_PPC_PPR:
906 regset->size = (features.ppr_dscr ?
907 PPC_LINUX_SIZEOF_PPRREGSET : 0);
908 break;
909 case NT_PPC_DSCR:
910 regset->size = (features.ppr_dscr ?
911 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
912 break;
913 case NT_PPC_TAR:
914 regset->size = (features.isa207 ?
915 PPC_LINUX_SIZEOF_TARREGSET : 0);
916 break;
917 case NT_PPC_EBB:
918 regset->size = (features.isa207 ?
919 PPC_LINUX_SIZEOF_EBBREGSET : 0);
920 break;
921 case NT_PPC_PMU:
922 regset->size = (features.isa207 ?
923 PPC_LINUX_SIZEOF_PMUREGSET : 0);
924 break;
925 case NT_PPC_TM_SPR:
926 regset->size = (features.htm ?
927 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
928 break;
929 case NT_PPC_TM_CGPR:
930 if (features.wordsize == 4)
931 regset->size = (features.htm ?
932 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
933 else
934 regset->size = (features.htm ?
935 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
936 break;
937 case NT_PPC_TM_CFPR:
938 regset->size = (features.htm ?
939 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
940 break;
941 case NT_PPC_TM_CVMX:
942 regset->size = (features.htm ?
943 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
944 break;
945 case NT_PPC_TM_CVSX:
946 regset->size = (features.htm ?
947 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
948 break;
949 case NT_PPC_TM_CPPR:
950 regset->size = (features.htm ?
951 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
952 break;
953 case NT_PPC_TM_CDSCR:
954 regset->size = (features.htm ?
955 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
956 break;
957 case NT_PPC_TM_CTAR:
958 regset->size = (features.htm ?
959 PPC_LINUX_SIZEOF_CTARREGSET : 0);
960 break;
961 default:
962 break;
963 }
964 break;
965 default:
966 break;
967 }
968 }
969
970 /* Implementation of linux_target_ops method "supports_tracepoints". */
971
972 static int
973 ppc_supports_tracepoints (void)
974 {
975 return 1;
976 }
977
978 /* Get the thread area address. This is used to recognize which
979 thread is which when tracing with the in-process agent library. We
980 don't read anything from the address, and treat it as opaque; it's
981 the address itself that we assume is unique per-thread. */
982
983 static int
984 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
985 {
986 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
987 struct thread_info *thr = get_lwp_thread (lwp);
988 struct regcache *regcache = get_thread_regcache (thr, 1);
989 ULONGEST tp = 0;
990
991 #ifdef __powerpc64__
992 if (register_size (regcache->tdesc, 0) == 8)
993 collect_register_by_name (regcache, "r13", &tp);
994 else
995 #endif
996 collect_register_by_name (regcache, "r2", &tp);
997
998 *addr = tp;
999
1000 return 0;
1001 }
1002
1003 #ifdef __powerpc64__
1004
1005 /* Older glibc doesn't provide this. */
1006
1007 #ifndef EF_PPC64_ABI
1008 #define EF_PPC64_ABI 3
1009 #endif
1010
1011 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1012 inferiors. */
1013
1014 static int
1015 is_elfv2_inferior (void)
1016 {
1017  /* To be used as a fallback if we're unable to determine the right result;
1018     assume the inferior uses the same ABI as gdbserver. */
1019 #if _CALL_ELF == 2
1020 const int def_res = 1;
1021 #else
1022 const int def_res = 0;
1023 #endif
1024 CORE_ADDR phdr;
1025 Elf64_Ehdr ehdr;
1026
1027 const struct target_desc *tdesc = current_process ()->tdesc;
1028 int wordsize = register_size (tdesc, 0);
1029
1030 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1031 return def_res;
1032
1033 /* Assume ELF header is at the beginning of the page where program headers
1034 are located. If it doesn't look like one, bail. */
1035
1036 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1037 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1038 return def_res;
1039
1040 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1041 }
1042
1043 #endif
1044
1045 /* Generate a ds-form instruction in BUF and return the number of bytes written
1046
1047 0 6 11 16 30 32
1048 | OPCD | RST | RA | DS |XO| */
1049
1050 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1051 static int
1052 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1053 {
1054 uint32_t insn;
1055
1056 gdb_assert ((opcd & ~0x3f) == 0);
1057 gdb_assert ((rst & ~0x1f) == 0);
1058 gdb_assert ((ra & ~0x1f) == 0);
1059 gdb_assert ((xo & ~0x3) == 0);
1060
1061 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1062 *buf = (opcd << 26) | insn;
1063 return 1;
1064 }
1065
1066 /* The following are frequently used ds-form instructions. */
1067
1068 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1069 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1070 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1071 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
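
/* For example, GEN_STD (buf, 1, 1, -32) encodes "std r1, -32(r1)" as
   0xf821ffe0: opcode 62 in the top six bits, RST = RA = 1, the DS field
   -32 masked to 0xffe0, and XO = 0 (XO = 1 would give the update form,
   "stdu r1, -32(r1)" == 0xf821ffe1).  */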
1072
1073 /* Generate a d-form instruction in BUF.
1074
1075 0 6 11 16 32
1076 | OPCD | RST | RA | D | */
1077
1078 static int
1079 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1080 {
1081 uint32_t insn;
1082
1083 gdb_assert ((opcd & ~0x3f) == 0);
1084 gdb_assert ((rst & ~0x1f) == 0);
1085 gdb_assert ((ra & ~0x1f) == 0);
1086
1087 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1088 *buf = (opcd << 26) | insn;
1089 return 1;
1090 }
1091
1092 /* The following are frequently used d-form instructions. */
1093
1094 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1095 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1096 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1097 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1098 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1099 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1100 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1101 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1102 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
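
/* For example, GEN_ADDI (buf, 1, 1, 16) encodes "addi r1, r1, 16" as
   0x38210010, and GEN_LI (buf, 3, 0) encodes "li r3, 0" (addi with
   RA = 0) as 0x38600000.  */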
1103
1104 /* Generate an xfx-form instruction in BUF and return the number of bytes
1105 written.
1106
1107 0 6 11 21 31 32
1108 | OPCD | RST | RI | XO |/| */
1109
1110 static int
1111 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1112 {
1113 uint32_t insn;
1114 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1115
1116 gdb_assert ((opcd & ~0x3f) == 0);
1117 gdb_assert ((rst & ~0x1f) == 0);
1118 gdb_assert ((xo & ~0x3ff) == 0);
1119
1120 insn = (rst << 21) | (n << 11) | (xo << 1);
1121 *buf = (opcd << 26) | insn;
1122 return 1;
1123 }
1124
1125 /* The following are frequently used xfx-form instructions. */
1126
1127 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1128 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1129 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1130 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1131 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1132 E & 0xf, 598)
1133 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
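
/* For example, GEN_MFSPR (buf, 5, 8) encodes "mflr r5" as 0x7ca802a6.
   Note how gen_xfx_form swaps the low and high halves of the SPR number
   (8 becomes 0x100 in the split SPR field), as the ISA requires.  */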
1134
1135
1136 /* Generate an x-form instruction in BUF and return the number of bytes written.
1137
1138 0 6 11 16 21 31 32
1139 | OPCD | RST | RA | RB | XO |RC| */
1140
1141 static int
1142 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1143 {
1144 uint32_t insn;
1145
1146 gdb_assert ((opcd & ~0x3f) == 0);
1147 gdb_assert ((rst & ~0x1f) == 0);
1148 gdb_assert ((ra & ~0x1f) == 0);
1149 gdb_assert ((rb & ~0x1f) == 0);
1150 gdb_assert ((xo & ~0x3ff) == 0);
1151 gdb_assert ((rc & ~1) == 0);
1152
1153 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1154 *buf = (opcd << 26) | insn;
1155 return 1;
1156 }
1157
1158 /* The following are frequently used x-form instructions. */
1159
1160 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1161 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1162 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1163 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1164 /* Assume bf = cr7. */
1165 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
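
/* For example, GEN_MR (buf, 3, 4) expands to GEN_OR (buf, 3, 4, 4) and
   encodes "mr r3, r4" (i.e. "or r3, r4, r4") as 0x7c832378.  */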
1166
1167
1168 /* Generate an md-form instruction in BUF and return the number of bytes written.
1169
1170 0 6 11 16 21 27 30 31 32
1171 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1172
1173 static int
1174 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1175 int xo, int rc)
1176 {
1177 uint32_t insn;
1178 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1179 unsigned int sh0_4 = sh & 0x1f;
1180 unsigned int sh5 = (sh >> 5) & 1;
1181
1182 gdb_assert ((opcd & ~0x3f) == 0);
1183 gdb_assert ((rs & ~0x1f) == 0);
1184 gdb_assert ((ra & ~0x1f) == 0);
1185 gdb_assert ((sh & ~0x3f) == 0);
1186 gdb_assert ((mb & ~0x3f) == 0);
1187 gdb_assert ((xo & ~0x7) == 0);
1188 gdb_assert ((rc & ~0x1) == 0);
1189
1190 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1191 | (sh5 << 1) | (xo << 2) | (rc & 1);
1192 *buf = (opcd << 26) | insn;
1193 return 1;
1194 }
1195
1196 /* The following are frequently used md-form instructions. */
1197
1198 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1199 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1200 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1201 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
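
/* For example, GEN_RLDICL (buf, 3, 3, 0, 32) encodes
   "rldicl r3, r3, 0, 32" (the "clrldi r3, r3, 32" that gen_limm below
   uses to clear the upper 32 bits) as 0x78630020.  */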
1202
1203 /* Generate an i-form instruction in BUF and return the number of bytes written.
1204
1205 0 6 30 31 32
1206 | OPCD | LI |AA|LK| */
1207
1208 static int
1209 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1210 {
1211 uint32_t insn;
1212
1213 gdb_assert ((opcd & ~0x3f) == 0);
1214
1215 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1216 *buf = (opcd << 26) | insn;
1217 return 1;
1218 }
1219
1220 /* The following are frequently used i-form instructions. */
1221
1222 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1223 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1224
1225 /* Generate a b-form instruction in BUF and return the number of bytes written.
1226
1227 0 6 11 16 30 31 32
1228 | OPCD | BO | BI | BD |AA|LK| */
1229
1230 static int
1231 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1232 int aa, int lk)
1233 {
1234 uint32_t insn;
1235
1236 gdb_assert ((opcd & ~0x3f) == 0);
1237 gdb_assert ((bo & ~0x1f) == 0);
1238 gdb_assert ((bi & ~0x1f) == 0);
1239
1240 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1241 *buf = (opcd << 26) | insn;
1242 return 1;
1243 }
1244
1245 /* The following are frequently used b-form instructions. */
1246 /* Assume bi = cr7. */
1247 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
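
/* For example, GEN_BNE (buf, -8), as used in gen_atomic_xchg below,
   encodes "bne cr7, .-8" as 0x409efff8: BO = 0x04 (branch if the CR bit
   is 0), BI = 30 (the EQ bit of cr7), BD = -8.  */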
1248
1249 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1250    respectively.  They are primarily used to save/restore GPRs in the
1251    jump pad, not for bytecode compilation. */
1252
1253 #ifdef __powerpc64__
1254 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1255 GEN_LD (buf, rt, ra, si) : \
1256 GEN_LWZ (buf, rt, ra, si))
1257 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1258 GEN_STD (buf, rt, ra, si) : \
1259 GEN_STW (buf, rt, ra, si))
1260 #else
1261 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1262 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1263 #endif
1264
1265 /* Generate a sequence of instructions to load IMM in the register REG.
1266 Write the instructions in BUF and return the number of bytes written. */
1267
1268 static int
1269 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1270 {
1271 uint32_t *p = buf;
1272
1273 if ((imm + 32768) < 65536)
1274 {
1275 /* li reg, imm[15:0] */
1276 p += GEN_LI (p, reg, imm);
1277 }
1278 else if ((imm >> 32) == 0)
1279 {
1280 /* lis reg, imm[31:16]
1281 ori reg, reg, imm[15:0]
1282 rldicl reg, reg, 0, 32 */
1283 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1284 if ((imm & 0xffff) != 0)
1285 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1286 /* Clear upper 32-bit if sign-bit is set. */
1287 if (imm & (1u << 31) && is_64)
1288 p += GEN_RLDICL (p, reg, reg, 0, 32);
1289 }
1290 else
1291 {
1292 gdb_assert (is_64);
1293 /* lis reg, <imm[63:48]>
1294 ori reg, reg, <imm[48:32]>
1295 rldicr reg, reg, 32, 31
1296 oris reg, reg, <imm[31:16]>
1297 ori reg, reg, <imm[15:0]> */
1298 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1299 if (((imm >> 32) & 0xffff) != 0)
1300 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1301 p += GEN_RLDICR (p, reg, reg, 32, 31);
1302 if (((imm >> 16) & 0xffff) != 0)
1303 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1304 if ((imm & 0xffff) != 0)
1305 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1306 }
1307
1308 return p - buf;
1309 }
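
/* For example, with IS_64 set, loading the full 64-bit immediate
   0x123456789abcdef0 emits five instructions: lis/ori build 0x12345678
   in the low word, rldicr shifts it into the high word, and oris/ori
   fill in 0x9abcdef0.  A small immediate in the range -32768..32767
   needs only a single "li".  */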
1310
1311 /* Generate a sequence for atomically exchange at location LOCK.
1312 This code sequence clobbers r6, r7, r8. LOCK is the location for
1313 the atomic-xchg, OLD_VALUE is expected old value stored in the
1314 location, and R_NEW is a register for the new value. */
1315
1316 static int
1317 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1318 int is_64)
1319 {
1320 const int r_lock = 6;
1321 const int r_old = 7;
1322 const int r_tmp = 8;
1323 uint32_t *p = buf;
1324
1325 /*
1326 1: lwarx TMP, 0, LOCK
1327 cmpwi TMP, OLD
1328 bne 1b
1329 stwcx. NEW, 0, LOCK
1330 bne 1b */
1331
1332 p += gen_limm (p, r_lock, lock, is_64);
1333 p += gen_limm (p, r_old, old_value, is_64);
1334
1335 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1336 p += GEN_CMPW (p, r_tmp, r_old);
1337 p += GEN_BNE (p, -8);
1338 p += GEN_STWCX (p, r_new, 0, r_lock);
1339 p += GEN_BNE (p, -16);
1340
1341 return p - buf;
1342 }
1343
1344 /* Generate a sequence of instructions for calling a function
1345    at the address FN.  Return the number of instructions written to BUF. */
1346
1347 static int
1348 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1349 {
1350 uint32_t *p = buf;
1351
1352   /* Must be called via r12, so that the callee can calculate the TOC address. */
1353 p += gen_limm (p, 12, fn, is_64);
1354 if (is_opd)
1355 {
1356 p += GEN_LOAD (p, 11, 12, 16, is_64);
1357 p += GEN_LOAD (p, 2, 12, 8, is_64);
1358 p += GEN_LOAD (p, 12, 12, 0, is_64);
1359 }
1360 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1361 *p++ = 0x4e800421; /* bctrl */
1362
1363 return p - buf;
1364 }
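
/* For the ELFv1 ABI (IS_OPD), FN is the address of a function
   descriptor rather than of code: the three loads above fetch the
   environment pointer (offset 16) into r11, the TOC pointer (offset 8)
   into r2, and the actual entry point (offset 0) into r12 before the
   "bctrl".  */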
1365
1366 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1367 of instruction. This function is used to adjust pc-relative instructions
1368 when copying. */
1369
1370 static void
1371 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1372 {
1373 uint32_t insn, op6;
1374 long rel, newrel;
1375
1376 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1377 op6 = PPC_OP6 (insn);
1378
1379 if (op6 == 18 && (insn & 2) == 0)
1380 {
1381 /* branch && AA = 0 */
1382 rel = PPC_LI (insn);
1383 newrel = (oldloc - *to) + rel;
1384
1385 /* Out of range. Cannot relocate instruction. */
1386 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1387 return;
1388
1389 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1390 }
1391 else if (op6 == 16 && (insn & 2) == 0)
1392 {
1393 /* conditional branch && AA = 0 */
1394
1395 /* If the new relocation is too big for even a 26-bit unconditional
1396 branch, there is nothing we can do. Just abort.
1397
1398 Otherwise, if it can be fit in 16-bit conditional branch, just
1399 copy the instruction and relocate the address.
1400
1401    If it's too big for a conditional branch (16-bit), try to invert the
1402    condition and jump with a 26-bit branch.  For example,
1403
1404 beq .Lgoto
1405 INSN1
1406
1407 =>
1408
1409 bne 1f (+8)
1410 b .Lgoto
1411 1:INSN1
1412
1413    After this transform, we actually jump from *TO+4 instead of *TO,
1414    so check the relocation again, because it will be one instruction
1415    farther than before if *TO is after OLDLOC.
1416
1417
1418    A BDNZT (or similar) is transformed from
1419
1420 bdnzt eq, .Lgoto
1421 INSN1
1422
1423 =>
1424
1425 bdz 1f (+12)
1426 bf eq, 1f (+8)
1427 b .Lgoto
1428 1:INSN1
1429
1430 See also "BO field encodings". */
1431
1432 rel = PPC_BD (insn);
1433 newrel = (oldloc - *to) + rel;
1434
1435 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1436 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1437 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1438 {
1439 newrel -= 4;
1440
1441 /* Out of range. Cannot relocate instruction. */
1442 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1443 return;
1444
1445 if ((PPC_BO (insn) & 0x14) == 0x4)
1446 insn ^= (1 << 24);
1447 else if ((PPC_BO (insn) & 0x14) == 0x10)
1448 insn ^= (1 << 22);
1449
1450 /* Jump over the unconditional branch. */
1451 insn = (insn & ~0xfffc) | 0x8;
1452 target_write_memory (*to, (unsigned char *) &insn, 4);
1453 *to += 4;
1454
1455         /* Build an unconditional branch and copy the LK bit. */
1456 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1457 target_write_memory (*to, (unsigned char *) &insn, 4);
1458 *to += 4;
1459
1460 return;
1461 }
1462 else if ((PPC_BO (insn) & 0x14) == 0)
1463 {
1464 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1465 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1466
1467 newrel -= 8;
1468
1469 /* Out of range. Cannot relocate instruction. */
1470 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1471 return;
1472
1473 /* Copy BI field. */
1474 bf_insn |= (insn & 0x1f0000);
1475
1476 /* Invert condition. */
1477 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1478 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1479
1480 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1481 *to += 4;
1482 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1483 *to += 4;
1484
1485         /* Build an unconditional branch and copy the LK bit. */
1486 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1487 target_write_memory (*to, (unsigned char *) &insn, 4);
1488 *to += 4;
1489
1490 return;
1491 }
1492 else /* (BO & 0x14) == 0x14, branch always. */
1493 {
1494 /* Out of range. Cannot relocate instruction. */
1495 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1496 return;
1497
1498         /* Build an unconditional branch and copy the LK bit. */
1499 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1500 target_write_memory (*to, (unsigned char *) &insn, 4);
1501 *to += 4;
1502
1503 return;
1504 }
1505 }
1506
1507 target_write_memory (*to, (unsigned char *) &insn, 4);
1508 *to += 4;
1509 }
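
/* As an illustration of the conditional-branch case above: suppose a
   "beq cr7, .+0x20" is copied from OLDLOC to a jump pad 0x18000 bytes
   higher in memory.  The new displacement, -0x18000 + 0x20 = -0x17fe0,
   no longer fits in the 16-bit BD field, and since the BO of "beq" is
   0x0c (BO & 0x14 == 0x4) the code inverts it to a "bne cr7, .+8" and
   follows it with an unconditional "b" carrying the 26-bit displacement
   (further adjusted by -4).  */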
1510
1511 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1512 See target.h for details. */
1513
1514 static int
1515 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1516 CORE_ADDR collector,
1517 CORE_ADDR lockaddr,
1518 ULONGEST orig_size,
1519 CORE_ADDR *jump_entry,
1520 CORE_ADDR *trampoline,
1521 ULONGEST *trampoline_size,
1522 unsigned char *jjump_pad_insn,
1523 ULONGEST *jjump_pad_insn_size,
1524 CORE_ADDR *adjusted_insn_addr,
1525 CORE_ADDR *adjusted_insn_addr_end,
1526 char *err)
1527 {
1528 uint32_t buf[256];
1529 uint32_t *p = buf;
1530 int j, offset;
1531 CORE_ADDR buildaddr = *jump_entry;
1532 const CORE_ADDR entryaddr = *jump_entry;
1533 int rsz, min_frame, frame_size, tp_reg;
1534 #ifdef __powerpc64__
1535 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1536 int is_64 = register_size (regcache->tdesc, 0) == 8;
1537 int is_opd = is_64 && !is_elfv2_inferior ();
1538 #else
1539 int is_64 = 0, is_opd = 0;
1540 #endif
1541
1542 #ifdef __powerpc64__
1543 if (is_64)
1544 {
1545 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1546 rsz = 8;
1547 min_frame = 112;
1548 frame_size = (40 * rsz) + min_frame;
1549 tp_reg = 13;
1550 }
1551 else
1552 {
1553 #endif
1554 rsz = 4;
1555 min_frame = 16;
1556 frame_size = (40 * rsz) + min_frame;
1557 tp_reg = 2;
1558 #ifdef __powerpc64__
1559 }
1560 #endif
1561
1562 /* Stack frame layout for this jump pad,
1563
1564 High thread_area (r13/r2) |
1565 tpoint - collecting_t obj
1566 PC/<tpaddr> | +36
1567 CTR | +35
1568 LR | +34
1569 XER | +33
1570 CR | +32
1571 R31 |
1572 R29 |
1573 ... |
1574 R1 | +1
1575 R0 - collected registers
1576 ... |
1577 ... |
1578 Low Back-chain -
1579
1580
1581 The code flow of this jump pad,
1582
1583 1. Adjust SP
1584 2. Save GPR and SPR
1585 3. Prepare argument
1586 4. Call gdb_collector
1587 5. Restore GPR and SPR
1588 6. Restore SP
1589 7. Build a jump for back to the program
1590 8. Copy/relocate original instruction
1591 9. Build a jump for replacing original instruction. */
1592
1593 /* Adjust stack pointer. */
1594 if (is_64)
1595 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1596 else
1597 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1598
1599 /* Store GPRs. Save R1 later, because it had just been modified, but
1600 we want the original value. */
1601 for (j = 2; j < 32; j++)
1602 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1603 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1604 /* Set r0 to the original value of r1 before adjusting stack frame,
1605 and then save it. */
1606 p += GEN_ADDI (p, 0, 1, frame_size);
1607 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1608
1609 /* Save CR, XER, LR, and CTR. */
1610 p += GEN_MFCR (p, 3); /* mfcr r3 */
1611 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1612 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1613 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1614 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1615 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1616 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1617 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1618
1619 /* Save PC<tpaddr> */
1620 p += gen_limm (p, 3, tpaddr, is_64);
1621 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1622
1623
1624 /* Setup arguments to collector. */
1625 /* Set r4 to collected registers. */
1626 p += GEN_ADDI (p, 4, 1, min_frame);
1627 /* Set r3 to TPOINT. */
1628 p += gen_limm (p, 3, tpoint, is_64);
1629
1630 /* Prepare collecting_t object for lock. */
1631 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1632 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1633 /* Set R5 to collecting object. */
1634 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1635
1636 p += GEN_LWSYNC (p);
1637 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1638 p += GEN_LWSYNC (p);
1639
1640 /* Call to collector. */
1641 p += gen_call (p, collector, is_64, is_opd);
1642
1643 /* Simply write 0 to release the lock. */
1644 p += gen_limm (p, 3, lockaddr, is_64);
1645 p += gen_limm (p, 4, 0, is_64);
1646 p += GEN_LWSYNC (p);
1647 p += GEN_STORE (p, 4, 3, 0, is_64);
1648
1649 /* Restore stack and registers. */
1650 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1651 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1652 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1653 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1654 p += GEN_MTCR (p, 3); /* mtcr r3 */
1655 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1656 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1657 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1658
1659 /* Restore GPRs. */
1660 for (j = 2; j < 32; j++)
1661 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1662 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1663 /* Restore SP. */
1664 p += GEN_ADDI (p, 1, 1, frame_size);
1665
1666 /* Flush instructions to inferior memory. */
1667 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1668
1669 /* Now, insert the original instruction to execute in the jump pad. */
1670 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1671 *adjusted_insn_addr_end = *adjusted_insn_addr;
1672 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1673
1674   /* Verify the relocation size.  It should be 4 for a normal copy,
1675      or 8 or 12 for some conditional branches. */
1676 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1677 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1678 {
1679       sprintf (err, "E.Unexpected instruction length = %d "
1680                     "when relocating instruction.",
1681 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1682 return 1;
1683 }
1684
1685 buildaddr = *adjusted_insn_addr_end;
1686 p = buf;
1687 /* Finally, write a jump back to the program. */
1688 offset = (tpaddr + 4) - buildaddr;
1689 if (offset >= (1 << 25) || offset < -(1 << 25))
1690 {
1691 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1692 "(offset 0x%x > 26-bit).", offset);
1693 return 1;
1694 }
1695 /* b <tpaddr+4> */
1696 p += GEN_B (p, offset);
1697 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1698 *jump_entry = buildaddr + (p - buf) * 4;
1699
1700 /* The jump pad is now built. Wire in a jump to our jump pad. This
1701 is always done last (by our caller actually), so that we can
1702 install fast tracepoints with threads running. This relies on
1703 the agent's atomic write support. */
1704 offset = entryaddr - tpaddr;
1705 if (offset >= (1 << 25) || offset < -(1 << 25))
1706 {
1707       sprintf (err, "E.Jump pad too far from tracepoint "
1708 "(offset 0x%x > 26-bit).", offset);
1709 return 1;
1710 }
1711 /* b <jentry> */
1712 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1713 *jjump_pad_insn_size = 4;
1714
1715 return 0;
1716 }
1717
1718 /* Returns the minimum instruction length for installing a tracepoint. */
1719
1720 static int
1721 ppc_get_min_fast_tracepoint_insn_len (void)
1722 {
1723 return 4;
1724 }
1725
1726 /* Emits a given buffer into the target at current_insn_ptr. Length
1727 is in units of 32-bit words. */
1728
1729 static void
1730 emit_insns (uint32_t *buf, int n)
1731 {
1732 n = n * sizeof (uint32_t);
1733 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1734 current_insn_ptr += n;
1735 }
1736
1737 #define __EMIT_ASM(NAME, INSNS) \
1738 do \
1739 { \
1740 extern uint32_t start_bcax_ ## NAME []; \
1741 extern uint32_t end_bcax_ ## NAME []; \
1742 emit_insns (start_bcax_ ## NAME, \
1743 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1744 __asm__ (".section .text.__ppcbcax\n\t" \
1745 "start_bcax_" #NAME ":\n\t" \
1746 INSNS "\n\t" \
1747 "end_bcax_" #NAME ":\n\t" \
1748 ".previous\n\t"); \
1749 } while (0)
1750
1751 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1752 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
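
/* The EMIT_ASM macro above assembles INSNS into gdbserver's own
   .text.__ppcbcax section at build time, bracketed by the start_bcax_
   and end_bcax_ labels; emit_insns then simply copies that ready-made
   machine code into the inferior at current_insn_ptr when the agent
   expression is compiled.  */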
1753
1754 /*
1755
1756 Bytecode execution stack frame - 32-bit
1757
1758 | LR save area (SP + 4)
1759 SP' -> +- Back chain (SP + 0)
1760            | Save r31 for accessing saved arguments
1761 | Save r30 for bytecode stack pointer
1762 | Save r4 for incoming argument *value
1763 | Save r3 for incoming argument regs
1764 r30 -> +- Bytecode execution stack
1765 |
1766            | 64 bytes (8 doublewords) initially.
1767 | Expand stack as needed.
1768 |
1769 +-
1770 | Some padding for minimum stack frame and 16-byte alignment.
1771 | 16 bytes.
1772 SP +- Back-chain (SP')
1773
1774 initial frame size
1775 = 16 + (4 * 4) + 64
1776 = 96
1777
1778    r30 is the stack pointer for the bytecode machine.
1779    It should point to the next empty slot, so we can use LDU for pop.
1780 r3 is used for cache of the high part of TOP value.
1781 It was the first argument, pointer to regs.
1782 r4 is used for cache of the low part of TOP value.
1783 It was the second argument, pointer to the result.
1784 We should set *result = TOP after leaving this function.
1785
1786 Note:
1787 * To restore stack at epilogue
1788 => sp = r31
1789 * To check stack is big enough for bytecode execution.
1790 => r30 - 8 > SP + 8
1791 * To return execution result.
1792 => 0(r4) = TOP
1793
1794 */
1795
1796 /* Regardless of endianness, register 3 is always the high part and 4 the
1797    low part.  These defines are used when the register pair is stored/loaded.
1798    Likewise, to simplify the code, there is a similar define for 5:6. */
1799
1800 #if __BYTE_ORDER == __LITTLE_ENDIAN
1801 #define TOP_FIRST "4"
1802 #define TOP_SECOND "3"
1803 #define TMP_FIRST "6"
1804 #define TMP_SECOND "5"
1805 #else
1806 #define TOP_FIRST "3"
1807 #define TOP_SECOND "4"
1808 #define TMP_FIRST "5"
1809 #define TMP_SECOND "6"
1810 #endif
1811
1812 /* Emit prologue in inferior memory. See above comments. */
1813
1814 static void
1815 ppc_emit_prologue (void)
1816 {
1817 EMIT_ASM (/* Save return address. */
1818 "mflr 0 \n"
1819 "stw 0, 4(1) \n"
1820 /* Adjust SP. 96 is the initial frame size. */
1821 "stwu 1, -96(1) \n"
1822 /* Save r30 and incoming arguments. */
1823 "stw 31, 96-4(1) \n"
1824 "stw 30, 96-8(1) \n"
1825 "stw 4, 96-12(1) \n"
1826 "stw 3, 96-16(1) \n"
1827             /* Point r31 to original r1 for accessing arguments. */
1828 "addi 31, 1, 96 \n"
1829             /* Set r30 to point to the stack top. */
1830 "addi 30, 1, 64 \n"
1831 /* Initial r3/TOP to 0. */
1832 "li 3, 0 \n"
1833 "li 4, 0 \n");
1834 }
1835
1836 /* Emit epilogue in inferior memory. See above comments. */
1837
1838 static void
1839 ppc_emit_epilogue (void)
1840 {
1841 EMIT_ASM (/* *result = TOP */
1842 "lwz 5, -12(31) \n"
1843 "stw " TOP_FIRST ", 0(5) \n"
1844 "stw " TOP_SECOND ", 4(5) \n"
1845 /* Restore registers. */
1846 "lwz 31, -4(31) \n"
1847 "lwz 30, -8(31) \n"
1848 /* Restore SP. */
1849 "lwz 1, 0(1) \n"
1850 /* Restore LR. */
1851 "lwz 0, 4(1) \n"
1852 /* Return 0 for no-error. */
1853 "li 3, 0 \n"
1854 "mtlr 0 \n"
1855 "blr \n");
1856 }
1857
1858 /* TOP = stack[--sp] + TOP */
1859
1860 static void
1861 ppc_emit_add (void)
1862 {
1863 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1864 "lwz " TMP_SECOND ", 4(30)\n"
1865 "addc 4, 6, 4 \n"
1866 "adde 3, 5, 3 \n");
1867 }
1868
1869 /* TOP = stack[--sp] - TOP */
1870
1871 static void
1872 ppc_emit_sub (void)
1873 {
1874 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1875 "lwz " TMP_SECOND ", 4(30) \n"
1876 "subfc 4, 4, 6 \n"
1877 "subfe 3, 3, 5 \n");
1878 }
1879
1880 /* TOP = stack[--sp] * TOP */
1881
1882 static void
1883 ppc_emit_mul (void)
1884 {
1885 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1886 "lwz " TMP_SECOND ", 4(30) \n"
1887 "mulhwu 7, 6, 4 \n"
1888 "mullw 3, 6, 3 \n"
1889 "mullw 5, 4, 5 \n"
1890 "mullw 4, 6, 4 \n"
1891 "add 3, 5, 3 \n"
1892 "add 3, 7, 3 \n");
1893 }
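
/* The sequence above is the usual 64x64 -> low-64 multiply built from
   32-bit multiplies: the low word is a_lo * b_lo, and the high word
   accumulates the two cross products plus the carry (the high half of
   a_lo * b_lo); the a_hi * b_hi term only affects bits above 63 and is
   dropped.  A minimal C model as a cross-check (the function below is
   purely illustrative and is not referenced anywhere in this file):  */

static uint64_t
ppc_example_mul_model (uint32_t a_hi, uint32_t a_lo,
		       uint32_t b_hi, uint32_t b_lo)
{
  uint32_t lo = a_lo * b_lo;				/* mullw 4, 6, 4 */
  uint32_t carry = (uint32_t) (((uint64_t) a_lo * b_lo) >> 32); /* mulhwu */
  uint32_t hi = a_lo * b_hi + a_hi * b_lo + carry;	/* mullw, mullw, adds */

  return ((uint64_t) hi << 32) | lo;
}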
1894
1895 /* TOP = stack[--sp] << TOP */
1896
1897 static void
1898 ppc_emit_lsh (void)
1899 {
1900 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1901 "lwz " TMP_SECOND ", 4(30) \n"
1902 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1903 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1904 "slw 5, 5, 4\n" /* Shift high part left */
1905 "slw 4, 6, 4\n" /* Shift low part left */
1906 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1907 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1908 "or 3, 5, 3\n"
1909 "or 3, 7, 3\n"); /* Assemble high part */
1910 }
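
/* The code above composes a 64-bit logical shift left from 32-bit shifts:
   the low word is simply shifted (becoming zero once the count reaches 32),
   while the high word is the shifted high word OR'ed with whatever spills
   out of the low word; "srw" supplies that spill for counts below 32 and
   "slw ... (count - 32)" for counts of 32 and up (slw/srw produce zero when
   the 6-bit shift amount has bit 0x20 set, so the unused term vanishes).
   A minimal C model, assuming a shift count below 64 (purely illustrative;
   not referenced anywhere in this file):  */

static uint64_t
ppc_example_lsh_model (uint32_t hi, uint32_t lo, unsigned int count)
{
  uint32_t new_lo = count < 32 ? lo << count : 0;
  uint32_t new_hi;

  if (count == 0)
    new_hi = hi;
  else if (count < 32)
    new_hi = (hi << count) | (lo >> (32 - count));
  else
    new_hi = lo << (count - 32);

  return ((uint64_t) new_hi << 32) | new_lo;
}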
1911
1912 /* Top = stack[--sp] >> TOP
1913 (Arithmetic shift right) */
1914
1915 static void
1916 ppc_emit_rsh_signed (void)
1917 {
1918 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1919 "lwz " TMP_SECOND ", 4(30) \n"
1920 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1921 "sraw 3, 5, 4\n" /* Shift high part right */
1922 "cmpwi 7, 1\n"
1923 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1924 "sraw 4, 5, 7\n" /* Shift high to low */
1925 "b 2f\n"
1926 "1:\n"
1927 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1928 "srw 4, 6, 4\n" /* Shift low part right */
1929 "slw 5, 5, 7\n" /* Shift high to low */
1930 "or 4, 4, 5\n" /* Assemble low part */
1931 "2:\n");
1932 }
1933
1934 /* Top = stack[--sp] >> TOP
1935 (Logical shift right) */
1936
1937 static void
1938 ppc_emit_rsh_unsigned (void)
1939 {
1940 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1941 "lwz " TMP_SECOND ", 4(30) \n"
1942 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1943 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1944 "srw 6, 6, 4\n" /* Shift low part right */
1945 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1946 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1947 "or 6, 6, 3\n"
1948 "srw 3, 5, 4\n" /* Shift high part right */
1949 "or 4, 6, 7\n"); /* Assemble low part */
1950 }
1951
1952 /* Emit code for signed-extension specified by ARG. */
1953
1954 static void
1955 ppc_emit_ext (int arg)
1956 {
1957 switch (arg)
1958 {
1959 case 8:
1960 EMIT_ASM ("extsb 4, 4\n"
1961 "srawi 3, 4, 31");
1962 break;
1963 case 16:
1964 EMIT_ASM ("extsh 4, 4\n"
1965 "srawi 3, 4, 31");
1966 break;
1967 case 32:
1968 EMIT_ASM ("srawi 3, 4, 31");
1969 break;
1970 default:
1971 emit_error = 1;
1972 }
1973 }
1974
1975 /* Emit code for zero-extension specified by ARG. */
1976
1977 static void
1978 ppc_emit_zero_ext (int arg)
1979 {
1980 switch (arg)
1981 {
1982 case 8:
1983 EMIT_ASM ("clrlwi 4,4,24\n"
1984 "li 3, 0\n");
1985 break;
1986 case 16:
1987 EMIT_ASM ("clrlwi 4,4,16\n"
1988 "li 3, 0\n");
1989 break;
1990 case 32:
1991 EMIT_ASM ("li 3, 0");
1992 break;
1993 default:
1994 emit_error = 1;
1995 }
1996 }
1997
1998 /* TOP = !TOP
1999 i.e., TOP = (TOP == 0) ? 1 : 0; */
2000
2001 static void
2002 ppc_emit_log_not (void)
2003 {
2004 EMIT_ASM ("or 4, 3, 4 \n"
2005 "cntlzw 4, 4 \n"
2006 "srwi 4, 4, 5 \n"
2007 "li 3, 0 \n");
2008 }
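
/* Why this works (explanatory note): "or 4, 3, 4" folds the 64-bit TOP into
   a single word that is zero iff TOP was zero; "cntlzw" returns 32 for a
   zero operand and 0..31 otherwise, so shifting right by 5 yields exactly 1
   for a zero input and 0 for everything else, which is the C "!" of TOP.  */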
2009
2010 /* TOP = stack[--sp] & TOP */
2011
2012 static void
2013 ppc_emit_bit_and (void)
2014 {
2015 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2016 "lwz " TMP_SECOND ", 4(30) \n"
2017 "and 4, 6, 4 \n"
2018 "and 3, 5, 3 \n");
2019 }
2020
2021 /* TOP = stack[--sp] | TOP */
2022
2023 static void
2024 ppc_emit_bit_or (void)
2025 {
2026 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2027 "lwz " TMP_SECOND ", 4(30) \n"
2028 "or 4, 6, 4 \n"
2029 "or 3, 5, 3 \n");
2030 }
2031
2032 /* TOP = stack[--sp] ^ TOP */
2033
2034 static void
2035 ppc_emit_bit_xor (void)
2036 {
2037 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2038 "lwz " TMP_SECOND ", 4(30) \n"
2039 "xor 4, 6, 4 \n"
2040 "xor 3, 5, 3 \n");
2041 }
2042
2043 /* TOP = ~TOP
2044 i.e., TOP = ~(TOP | TOP) */
2045
2046 static void
2047 ppc_emit_bit_not (void)
2048 {
2049 EMIT_ASM ("nor 3, 3, 3 \n"
2050 "nor 4, 4, 4 \n");
2051 }
2052
2053 /* TOP = stack[--sp] == TOP */
2054
2055 static void
2056 ppc_emit_equal (void)
2057 {
2058 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2059 "lwz " TMP_SECOND ", 4(30) \n"
2060 "xor 4, 6, 4 \n"
2061 "xor 3, 5, 3 \n"
2062 "or 4, 3, 4 \n"
2063 "cntlzw 4, 4 \n"
2064 "srwi 4, 4, 5 \n"
2065 "li 3, 0 \n");
2066 }
2067
2068 /* TOP = stack[--sp] < TOP
2069 (Signed comparison) */
2070
2071 static void
2072 ppc_emit_less_signed (void)
2073 {
2074 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2075 "lwz " TMP_SECOND ", 4(30) \n"
2076 "cmplw 6, 6, 4 \n"
2077 "cmpw 7, 5, 3 \n"
2078 /* CR6 bit 0 = low less and high equal */
2079 "crand 6*4+0, 6*4+0, 7*4+2\n"
2080 /* CR7 bit 0 = (low less and high equal) or high less */
2081 "cror 7*4+0, 7*4+0, 6*4+0\n"
2082 "mfcr 4 \n"
2083 "rlwinm 4, 4, 29, 31, 31 \n"
2084 "li 3, 0 \n");
2085 }
2086
2087 /* TOP = stack[--sp] < TOP
2088 (Unsigned comparison) */
2089
2090 static void
2091 ppc_emit_less_unsigned (void)
2092 {
2093 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2094 "lwz " TMP_SECOND ", 4(30) \n"
2095 "cmplw 6, 6, 4 \n"
2096 "cmplw 7, 5, 3 \n"
2097 /* CR6 bit 0 = low less and high equal */
2098 "crand 6*4+0, 6*4+0, 7*4+2\n"
2099 /* CR7 bit 0 = (low less and high equal) or high less */
2100 "cror 7*4+0, 7*4+0, 6*4+0\n"
2101 "mfcr 4 \n"
2102 "rlwinm 4, 4, 29, 31, 31 \n"
2103 "li 3, 0 \n");
2104 }
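
/* Both comparisons above build a 64-bit "less than" from two 32-bit
   compares: the result is "high words less" OR ("high words equal" AND
   "low words unsigned-less"); the only difference between the signed and
   unsigned variants is whether the high words are compared with cmpw or
   cmplw.  A minimal C model of the signed case as a cross-check (the
   function below is purely illustrative and is not referenced anywhere in
   this file):  */

static int
ppc_example_less_signed_model (int32_t lhs_hi, uint32_t lhs_lo,
			       int32_t rhs_hi, uint32_t rhs_lo)
{
  /* LHS is stack[--sp] (r5:r6), RHS is TOP (r3:r4); the emitted code
     leaves the boolean in r4 and clears r3.  */
  return lhs_hi < rhs_hi || (lhs_hi == rhs_hi && lhs_lo < rhs_lo);
}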
2105
2106 /* Dereference the memory address in TOP, reading SIZE bytes.
2107 Zero-extend the read value. */
2108
2109 static void
2110 ppc_emit_ref (int size)
2111 {
2112 switch (size)
2113 {
2114 case 1:
2115 EMIT_ASM ("lbz 4, 0(4)\n"
2116 "li 3, 0");
2117 break;
2118 case 2:
2119 EMIT_ASM ("lhz 4, 0(4)\n"
2120 "li 3, 0");
2121 break;
2122 case 4:
2123 EMIT_ASM ("lwz 4, 0(4)\n"
2124 "li 3, 0");
2125 break;
2126 case 8:
2127 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2128 EMIT_ASM ("lwz 3, 4(4)\n"
2129 "lwz 4, 0(4)");
2130 else
2131 EMIT_ASM ("lwz 3, 0(4)\n"
2132 "lwz 4, 4(4)");
2133 break;
2134 }
2135 }
2136
2137 /* TOP = NUM */
2138
2139 static void
2140 ppc_emit_const (LONGEST num)
2141 {
2142 uint32_t buf[10];
2143 uint32_t *p = buf;
2144
2145 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2146 p += gen_limm (p, 4, num & 0xffffffff, 0);
2147
2148 emit_insns (buf, p - buf);
2149 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2150 }
2151
2152 /* Set TOP to the value of register REG by calling the get_raw_reg function
2153 with two arguments: the collected buffer and the register number. */
2154
2155 static void
2156 ppc_emit_reg (int reg)
2157 {
2158 uint32_t buf[13];
2159 uint32_t *p = buf;
2160
2161 /* fctx->regs is passed in r3 and then saved in -16(31). */
2162 p += GEN_LWZ (p, 3, 31, -16);
2163 p += GEN_LI (p, 4, reg); /* li r4, reg */
2164 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2165
2166 emit_insns (buf, p - buf);
2167 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2168
2169 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2170 {
2171 EMIT_ASM ("mr 5, 4\n"
2172 "mr 4, 3\n"
2173 "mr 3, 5\n");
2174 }
2175 }
2176
2177 /* TOP = stack[--sp] */
2178
2179 static void
2180 ppc_emit_pop (void)
2181 {
2182 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2183 "lwz " TOP_SECOND ", 4(30) \n");
2184 }
2185
2186 /* stack[sp++] = TOP
2187
2188 Because we may run out of bytecode stack, expand it by 8 more
2189 doublewords if needed. */
2190
2191 static void
2192 ppc_emit_stack_flush (void)
2193 {
2194 /* Make sure the bytecode stack is big enough before the push.
2195 Otherwise, expand it by 64 more bytes. */
2196
2197 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2198 " stw " TOP_SECOND ", 4(30)\n"
2199 " addi 5, 30, -(8 + 8) \n"
2200 " cmpw 7, 5, 1 \n"
2201 " bgt 7, 1f \n"
2202 " stwu 31, -64(1) \n"
2203 "1:addi 30, 30, -8 \n");
2204 }
2205
2206 /* Swap TOP and stack[sp-1] */
2207
2208 static void
2209 ppc_emit_swap (void)
2210 {
2211 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2212 "lwz " TMP_SECOND ", 12(30) \n"
2213 "stw " TOP_FIRST ", 8(30) \n"
2214 "stw " TOP_SECOND ", 12(30) \n"
2215 "mr 3, 5 \n"
2216 "mr 4, 6 \n");
2217 }
2218
2219 /* Discard N elements in the stack. Also used for ppc64. */
2220
2221 static void
2222 ppc_emit_stack_adjust (int n)
2223 {
2224 uint32_t buf[6];
2225 uint32_t *p = buf;
2226
2227 n = n << 3;
2228 if ((n >> 15) != 0)
2229 {
2230 emit_error = 1;
2231 return;
2232 }
2233
2234 p += GEN_ADDI (p, 30, 30, n);
2235
2236 emit_insns (buf, p - buf);
2237 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2238 }
2239
2240 /* Call function FN. */
2241
2242 static void
2243 ppc_emit_call (CORE_ADDR fn)
2244 {
2245 uint32_t buf[11];
2246 uint32_t *p = buf;
2247
2248 p += gen_call (p, fn, 0, 0);
2249
2250 emit_insns (buf, p - buf);
2251 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2252 }
2253
2254 /* FN's prototype is `LONGEST(*fn)(int)'.
2255 TOP = fn (arg1)
2256 */
2257
2258 static void
2259 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2260 {
2261 uint32_t buf[15];
2262 uint32_t *p = buf;
2263
2264 /* Setup argument. arg1 is a 16-bit value. */
2265 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2266 p += gen_call (p, fn, 0, 0);
2267
2268 emit_insns (buf, p - buf);
2269 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2270
2271 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2272 {
2273 EMIT_ASM ("mr 5, 4\n"
2274 "mr 4, 3\n"
2275 "mr 3, 5\n");
2276 }
2277 }
2278
2279 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2280 fn (arg1, TOP)
2281
2282 TOP should be preserved/restored before/after the call. */
2283
2284 static void
2285 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2286 {
2287 uint32_t buf[21];
2288 uint32_t *p = buf;
2289
2290 /* Save TOP. 0(30) is next-empty. */
2291 p += GEN_STW (p, 3, 30, 0);
2292 p += GEN_STW (p, 4, 30, 4);
2293
2294 /* Setup argument. arg1 is a 16-bit value. */
2295 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2296 {
2297 p += GEN_MR (p, 5, 4);
2298 p += GEN_MR (p, 6, 3);
2299 }
2300 else
2301 {
2302 p += GEN_MR (p, 5, 3);
2303 p += GEN_MR (p, 6, 4);
2304 }
2305 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2306 p += gen_call (p, fn, 0, 0);
2307
2308 /* Restore TOP */
2309 p += GEN_LWZ (p, 3, 30, 0);
2310 p += GEN_LWZ (p, 4, 30, 4);
2311
2312 emit_insns (buf, p - buf);
2313 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2314 }
2315
2316 /* Note in the following goto ops:
2317
2318 When emitting a goto, the target address is relocated later by
2319 write_goto_address. OFFSET_P is the offset of the branch instruction
2320 in the code sequence, and SIZE_P tells ppc_write_goto_address how to
2321 relocate the instruction. In the current implementation, SIZE can be
2322 either 24 or 14, for branch or conditional-branch instructions.
2323 */
2324
2325 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2326
2327 static void
2328 ppc_emit_if_goto (int *offset_p, int *size_p)
2329 {
2330 EMIT_ASM ("or. 3, 3, 4 \n"
2331 "lwzu " TOP_FIRST ", 8(30) \n"
2332 "lwz " TOP_SECOND ", 4(30) \n"
2333 "1:bne 0, 1b \n");
2334
2335 if (offset_p)
2336 *offset_p = 12;
2337 if (size_p)
2338 *size_p = 14;
2339 }
2340
2341 /* Unconditional goto. Also used for ppc64. */
2342
2343 static void
2344 ppc_emit_goto (int *offset_p, int *size_p)
2345 {
2346 EMIT_ASM ("1:b 1b");
2347
2348 if (offset_p)
2349 *offset_p = 0;
2350 if (size_p)
2351 *size_p = 24;
2352 }
2353
2354 /* Goto if stack[--sp] == TOP */
2355
2356 static void
2357 ppc_emit_eq_goto (int *offset_p, int *size_p)
2358 {
2359 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2360 "lwz " TMP_SECOND ", 4(30) \n"
2361 "xor 4, 6, 4 \n"
2362 "xor 3, 5, 3 \n"
2363 "or. 3, 3, 4 \n"
2364 "lwzu " TOP_FIRST ", 8(30) \n"
2365 "lwz " TOP_SECOND ", 4(30) \n"
2366 "1:beq 0, 1b \n");
2367
2368 if (offset_p)
2369 *offset_p = 28;
2370 if (size_p)
2371 *size_p = 14;
2372 }
2373
2374 /* Goto if stack[--sp] != TOP */
2375
2376 static void
2377 ppc_emit_ne_goto (int *offset_p, int *size_p)
2378 {
2379 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2380 "lwz " TMP_SECOND ", 4(30) \n"
2381 "xor 4, 6, 4 \n"
2382 "xor 3, 5, 3 \n"
2383 "or. 3, 3, 4 \n"
2384 "lwzu " TOP_FIRST ", 8(30) \n"
2385 "lwz " TOP_SECOND ", 4(30) \n"
2386 "1:bne 0, 1b \n");
2387
2388 if (offset_p)
2389 *offset_p = 28;
2390 if (size_p)
2391 *size_p = 14;
2392 }
2393
2394 /* Goto if stack[--sp] < TOP */
2395
2396 static void
2397 ppc_emit_lt_goto (int *offset_p, int *size_p)
2398 {
2399 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2400 "lwz " TMP_SECOND ", 4(30) \n"
2401 "cmplw 6, 6, 4 \n"
2402 "cmpw 7, 5, 3 \n"
2403 /* CR6 bit 0 = low less and high equal */
2404 "crand 6*4+0, 6*4+0, 7*4+2\n"
2405 /* CR7 bit 0 = (low less and high equal) or high less */
2406 "cror 7*4+0, 7*4+0, 6*4+0\n"
2407 "lwzu " TOP_FIRST ", 8(30) \n"
2408 "lwz " TOP_SECOND ", 4(30)\n"
2409 "1:blt 7, 1b \n");
2410
2411 if (offset_p)
2412 *offset_p = 32;
2413 if (size_p)
2414 *size_p = 14;
2415 }
2416
2417 /* Goto if stack[--sp] <= TOP */
2418
2419 static void
2420 ppc_emit_le_goto (int *offset_p, int *size_p)
2421 {
2422 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2423 "lwz " TMP_SECOND ", 4(30) \n"
2424 "cmplw 6, 6, 4 \n"
2425 "cmpw 7, 5, 3 \n"
2426 /* CR6 bit 0 = low less/equal and high equal */
2427 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2428 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2429 "cror 7*4+0, 7*4+0, 6*4+0\n"
2430 "lwzu " TOP_FIRST ", 8(30) \n"
2431 "lwz " TOP_SECOND ", 4(30)\n"
2432 "1:blt 7, 1b \n");
2433
2434 if (offset_p)
2435 *offset_p = 32;
2436 if (size_p)
2437 *size_p = 14;
2438 }
2439
2440 /* Goto if stack[--sp] > TOP */
2441
2442 static void
2443 ppc_emit_gt_goto (int *offset_p, int *size_p)
2444 {
2445 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2446 "lwz " TMP_SECOND ", 4(30) \n"
2447 "cmplw 6, 6, 4 \n"
2448 "cmpw 7, 5, 3 \n"
2449 /* CR6 bit 0 = low greater and high equal */
2450 "crand 6*4+0, 6*4+1, 7*4+2\n"
2451 /* CR7 bit 0 = (low greater and high equal) or high greater */
2452 "cror 7*4+0, 7*4+1, 6*4+0\n"
2453 "lwzu " TOP_FIRST ", 8(30) \n"
2454 "lwz " TOP_SECOND ", 4(30)\n"
2455 "1:blt 7, 1b \n");
2456
2457 if (offset_p)
2458 *offset_p = 32;
2459 if (size_p)
2460 *size_p = 14;
2461 }
2462
2463 /* Goto if stack[--sp] >= TOP */
2464
2465 static void
2466 ppc_emit_ge_goto (int *offset_p, int *size_p)
2467 {
2468 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2469 "lwz " TMP_SECOND ", 4(30) \n"
2470 "cmplw 6, 6, 4 \n"
2471 "cmpw 7, 5, 3 \n"
2472 /* CR6 bit 0 = low ge and high equal */
2473 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2474 /* CR7 bit 0 = (low ge and high equal) or high greater */
2475 "cror 7*4+0, 7*4+1, 6*4+0\n"
2476 "lwzu " TOP_FIRST ", 8(30)\n"
2477 "lwz " TOP_SECOND ", 4(30)\n"
2478 "1:blt 7, 1b \n");
2479
2480 if (offset_p)
2481 *offset_p = 32;
2482 if (size_p)
2483 *size_p = 14;
2484 }
2485
2486 /* Relocate a previously emitted branch instruction. FROM is the address
2487 of the branch instruction, TO is the goto target address, and SIZE
2488 is the value we set via *SIZE_P before. Currently, it is either
2489 24 or 14, for branch and conditional-branch instructions respectively.
2490 Also used for ppc64. */
2491
2492 static void
2493 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2494 {
2495 long rel = to - from;
2496 uint32_t insn;
2497 int opcd;
2498
2499 read_inferior_memory (from, (unsigned char *) &insn, 4);
2500 opcd = (insn >> 26) & 0x3f;
2501
2502 switch (size)
2503 {
2504 case 14:
2505 if (opcd != 16
2506 || (rel >= (1 << 15) || rel < -(1 << 15)))
2507 emit_error = 1;
2508 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2509 break;
2510 case 24:
2511 if (opcd != 18
2512 || (rel >= (1 << 25) || rel < -(1 << 25)))
2513 emit_error = 1;
2514 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2515 break;
2516 default:
2517 emit_error = 1;
2518 }
2519
2520 if (!emit_error)
2521 target_write_memory (from, (unsigned char *) &insn, 4);
2522 }
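
/* A worked example (the addresses are made up): suppose a 14-bit
   conditional branch emitted by ppc_emit_eq_goto sits at FROM = 0x10000000
   and the goto target ends up at TO = 0x10000040.  Then rel = 0x40, the
   opcode check sees opcd == 16, rel fits in the signed 16-bit range, and
   the BD field is patched with
       insn = (insn & ~0xfffc) | (0x40 & 0xfffc);
   leaving the AA/LK bits untouched (both are zero in the "1:beq 7, 1b"
   placeholder).  A 24-bit "b" is patched the same way, using the
   0x3fffffc mask and the signed 26-bit range check.  */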
2523
2524 /* Table of emit ops for 32-bit. */
2525
2526 static struct emit_ops ppc_emit_ops_impl =
2527 {
2528 ppc_emit_prologue,
2529 ppc_emit_epilogue,
2530 ppc_emit_add,
2531 ppc_emit_sub,
2532 ppc_emit_mul,
2533 ppc_emit_lsh,
2534 ppc_emit_rsh_signed,
2535 ppc_emit_rsh_unsigned,
2536 ppc_emit_ext,
2537 ppc_emit_log_not,
2538 ppc_emit_bit_and,
2539 ppc_emit_bit_or,
2540 ppc_emit_bit_xor,
2541 ppc_emit_bit_not,
2542 ppc_emit_equal,
2543 ppc_emit_less_signed,
2544 ppc_emit_less_unsigned,
2545 ppc_emit_ref,
2546 ppc_emit_if_goto,
2547 ppc_emit_goto,
2548 ppc_write_goto_address,
2549 ppc_emit_const,
2550 ppc_emit_call,
2551 ppc_emit_reg,
2552 ppc_emit_pop,
2553 ppc_emit_stack_flush,
2554 ppc_emit_zero_ext,
2555 ppc_emit_swap,
2556 ppc_emit_stack_adjust,
2557 ppc_emit_int_call_1,
2558 ppc_emit_void_call_2,
2559 ppc_emit_eq_goto,
2560 ppc_emit_ne_goto,
2561 ppc_emit_lt_goto,
2562 ppc_emit_le_goto,
2563 ppc_emit_gt_goto,
2564 ppc_emit_ge_goto
2565 };
2566
2567 #ifdef __powerpc64__
2568
2569 /*
2570
2571 Bytecode execution stack frame - 64-bit
2572
2573 | LR save area (SP + 16)
2574 | CR save area (SP + 8)
2575 SP' -> +- Back chain (SP + 0)
2576 | Save r31 for accessing saved arguments
2577 | Save r30 for bytecode stack pointer
2578 | Save r4 for incoming argument *value
2579 | Save r3 for incoming argument regs
2580 r30 -> +- Bytecode execution stack
2581 |
2582 | 64 bytes (8 doublewords) initially.
2583 | Expand stack as needed.
2584 |
2585 +-
2586 | Some padding for minimum stack frame.
2587 | 112 for ELFv1.
2588 SP +- Back-chain (SP')
2589
2590 initial frame size
2591 = 112 + (4 * 8) + 64
2592 = 208
2593
2594 r30 is the stack pointer for the bytecode machine.
2595 It should point to the next empty slot, so we can use LDU for pop.
2596 r3 caches the TOP value.
2597 On entry it held the first argument, the pointer to regs.
2598 r4 is the second argument, the pointer to the result.
2599 We should set *result = TOP when leaving this function.
2600
2601 Note:
2602 * To restore stack at epilogue
2603 => sp = r31
2604 * To check stack is big enough for bytecode execution.
2605 => r30 - 8 > SP + 112
2606 * To return execution result.
2607 => 0(r4) = TOP
2608
2609 */
2610
2611 /* Emit prologue in inferior memory. See above comments. */
2612
2613 static void
2614 ppc64v1_emit_prologue (void)
2615 {
2616 /* On ELFv1, function pointers really point to a function descriptor,
2617 so emit one here. We don't care about the contents of words 1 and 2,
2618 so let them just overlap our code. */
2619 uint64_t opd = current_insn_ptr + 8;
2620 uint32_t buf[2];
2621
2622 /* Mind the strict aliasing rules. */
2623 memcpy (buf, &opd, sizeof buf);
2624 emit_insns (buf, 2);
2625 EMIT_ASM (/* Save return address. */
2626 "mflr 0 \n"
2627 "std 0, 16(1) \n"
2628 /* Save r31, r30, and the incoming arguments. */
2629 "std 31, -8(1) \n"
2630 "std 30, -16(1) \n"
2631 "std 4, -24(1) \n"
2632 "std 3, -32(1) \n"
2633 /* Point r31 to the current r1 for accessing arguments. */
2634 "mr 31, 1 \n"
2635 /* Adjust SP. 208 is the initial frame size. */
2636 "stdu 1, -208(1) \n"
2637 /* Set r30 to point to the stack top. */
2638 "addi 30, 1, 168 \n"
2639 /* Initialize r3/TOP to 0. */
2640 "li 3, 0 \n");
2641 }
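
/* Background for the descriptor emitted at the top of the prologue above
   (explanatory note): an ELFv1 "function pointer" addresses a descriptor
   of three doublewords,

       0:  entry-point address      <- the only word filled in here
       8:  TOC base (r2) for the function
      16:  environment pointer (unused by C)

   Only the first doubleword is materialized; it holds current_insn_ptr + 8,
   which is exactly where the first emitted instruction lands, so the TOC
   and environment slots simply alias the first instructions.  That is
   harmless here because the emitted code never performs TOC-relative
   addressing of its own; it only saves and restores the caller's r2 around
   outgoing calls.  */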
2642
2643 /* Emit prologue in inferior memory. See above comments. */
2644
2645 static void
2646 ppc64v2_emit_prologue (void)
2647 {
2648 EMIT_ASM (/* Save return address. */
2649 "mflr 0 \n"
2650 "std 0, 16(1) \n"
2651 /* Save r31, r30, and the incoming arguments. */
2652 "std 31, -8(1) \n"
2653 "std 30, -16(1) \n"
2654 "std 4, -24(1) \n"
2655 "std 3, -32(1) \n"
2656 /* Point r31 to the current r1 for accessing arguments. */
2657 "mr 31, 1 \n"
2658 /* Adjust SP. 208 is the initial frame size. */
2659 "stdu 1, -208(1) \n"
2660 /* Set r30 to point to the stack top. */
2661 "addi 30, 1, 168 \n"
2662 /* Initialize r3/TOP to 0. */
2663 "li 3, 0 \n");
2664 }
2665
2666 /* Emit epilogue in inferior memory. See above comments. */
2667
2668 static void
2669 ppc64_emit_epilogue (void)
2670 {
2671 EMIT_ASM (/* Restore SP. */
2672 "ld 1, 0(1) \n"
2673 /* *result = TOP */
2674 "ld 4, -24(1) \n"
2675 "std 3, 0(4) \n"
2676 /* Restore registers. */
2677 "ld 31, -8(1) \n"
2678 "ld 30, -16(1) \n"
2679 /* Restore LR. */
2680 "ld 0, 16(1) \n"
2681 /* Return 0 for no-error. */
2682 "li 3, 0 \n"
2683 "mtlr 0 \n"
2684 "blr \n");
2685 }
2686
2687 /* TOP = stack[--sp] + TOP */
2688
2689 static void
2690 ppc64_emit_add (void)
2691 {
2692 EMIT_ASM ("ldu 4, 8(30) \n"
2693 "add 3, 4, 3 \n");
2694 }
2695
2696 /* TOP = stack[--sp] - TOP */
2697
2698 static void
2699 ppc64_emit_sub (void)
2700 {
2701 EMIT_ASM ("ldu 4, 8(30) \n"
2702 "sub 3, 4, 3 \n");
2703 }
2704
2705 /* TOP = stack[--sp] * TOP */
2706
2707 static void
2708 ppc64_emit_mul (void)
2709 {
2710 EMIT_ASM ("ldu 4, 8(30) \n"
2711 "mulld 3, 4, 3 \n");
2712 }
2713
2714 /* TOP = stack[--sp] << TOP */
2715
2716 static void
2717 ppc64_emit_lsh (void)
2718 {
2719 EMIT_ASM ("ldu 4, 8(30) \n"
2720 "sld 3, 4, 3 \n");
2721 }
2722
2723 /* Top = stack[--sp] >> TOP
2724 (Arithmetic shift right) */
2725
2726 static void
2727 ppc64_emit_rsh_signed (void)
2728 {
2729 EMIT_ASM ("ldu 4, 8(30) \n"
2730 "srad 3, 4, 3 \n");
2731 }
2732
2733 /* Top = stack[--sp] >> TOP
2734 (Logical shift right) */
2735
2736 static void
2737 ppc64_emit_rsh_unsigned (void)
2738 {
2739 EMIT_ASM ("ldu 4, 8(30) \n"
2740 "srd 3, 4, 3 \n");
2741 }
2742
2743 /* Emit code for signed-extension specified by ARG. */
2744
2745 static void
2746 ppc64_emit_ext (int arg)
2747 {
2748 switch (arg)
2749 {
2750 case 8:
2751 EMIT_ASM ("extsb 3, 3");
2752 break;
2753 case 16:
2754 EMIT_ASM ("extsh 3, 3");
2755 break;
2756 case 32:
2757 EMIT_ASM ("extsw 3, 3");
2758 break;
2759 default:
2760 emit_error = 1;
2761 }
2762 }
2763
2764 /* Emit code for zero-extension specified by ARG. */
2765
2766 static void
2767 ppc64_emit_zero_ext (int arg)
2768 {
2769 switch (arg)
2770 {
2771 case 8:
2772 EMIT_ASM ("rldicl 3,3,0,56");
2773 break;
2774 case 16:
2775 EMIT_ASM ("rldicl 3,3,0,48");
2776 break;
2777 case 32:
2778 EMIT_ASM ("rldicl 3,3,0,32");
2779 break;
2780 default:
2781 emit_error = 1;
2782 }
2783 }
2784
2785 /* TOP = !TOP
2786 i.e., TOP = (TOP == 0) ? 1 : 0; */
2787
2788 static void
2789 ppc64_emit_log_not (void)
2790 {
2791 EMIT_ASM ("cntlzd 3, 3 \n"
2792 "srdi 3, 3, 6 \n");
2793 }
2794
2795 /* TOP = stack[--sp] & TOP */
2796
2797 static void
2798 ppc64_emit_bit_and (void)
2799 {
2800 EMIT_ASM ("ldu 4, 8(30) \n"
2801 "and 3, 4, 3 \n");
2802 }
2803
2804 /* TOP = stack[--sp] | TOP */
2805
2806 static void
2807 ppc64_emit_bit_or (void)
2808 {
2809 EMIT_ASM ("ldu 4, 8(30) \n"
2810 "or 3, 4, 3 \n");
2811 }
2812
2813 /* TOP = stack[--sp] ^ TOP */
2814
2815 static void
2816 ppc64_emit_bit_xor (void)
2817 {
2818 EMIT_ASM ("ldu 4, 8(30) \n"
2819 "xor 3, 4, 3 \n");
2820 }
2821
2822 /* TOP = ~TOP
2823 i.e., TOP = ~(TOP | TOP) */
2824
2825 static void
2826 ppc64_emit_bit_not (void)
2827 {
2828 EMIT_ASM ("nor 3, 3, 3 \n");
2829 }
2830
2831 /* TOP = stack[--sp] == TOP */
2832
2833 static void
2834 ppc64_emit_equal (void)
2835 {
2836 EMIT_ASM ("ldu 4, 8(30) \n"
2837 "xor 3, 3, 4 \n"
2838 "cntlzd 3, 3 \n"
2839 "srdi 3, 3, 6 \n");
2840 }
2841
2842 /* TOP = stack[--sp] < TOP
2843 (Signed comparison) */
2844
2845 static void
2846 ppc64_emit_less_signed (void)
2847 {
2848 EMIT_ASM ("ldu 4, 8(30) \n"
2849 "cmpd 7, 4, 3 \n"
2850 "mfcr 3 \n"
2851 "rlwinm 3, 3, 29, 31, 31 \n");
2852 }
2853
2854 /* TOP = stack[--sp] < TOP
2855 (Unsigned comparison) */
2856
2857 static void
2858 ppc64_emit_less_unsigned (void)
2859 {
2860 EMIT_ASM ("ldu 4, 8(30) \n"
2861 "cmpld 7, 4, 3 \n"
2862 "mfcr 3 \n"
2863 "rlwinm 3, 3, 29, 31, 31 \n");
2864 }
2865
2866 /* Dereference the memory address in TOP, reading SIZE bytes.
2867 Zero-extend the read value. */
2868
2869 static void
2870 ppc64_emit_ref (int size)
2871 {
2872 switch (size)
2873 {
2874 case 1:
2875 EMIT_ASM ("lbz 3, 0(3)");
2876 break;
2877 case 2:
2878 EMIT_ASM ("lhz 3, 0(3)");
2879 break;
2880 case 4:
2881 EMIT_ASM ("lwz 3, 0(3)");
2882 break;
2883 case 8:
2884 EMIT_ASM ("ld 3, 0(3)");
2885 break;
2886 }
2887 }
2888
2889 /* TOP = NUM */
2890
2891 static void
2892 ppc64_emit_const (LONGEST num)
2893 {
2894 uint32_t buf[5];
2895 uint32_t *p = buf;
2896
2897 p += gen_limm (p, 3, num, 1);
2898
2899 emit_insns (buf, p - buf);
2900 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2901 }
2902
2903 /* Set TOP to the value of register REG by calling the get_raw_reg function
2904 with two arguments: the collected buffer and the register number. */
2905
2906 static void
2907 ppc64v1_emit_reg (int reg)
2908 {
2909 uint32_t buf[15];
2910 uint32_t *p = buf;
2911
2912 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2913 p += GEN_LD (p, 3, 31, -32);
2914 p += GEN_LI (p, 4, reg);
2915 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2916 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2917 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2918
2919 emit_insns (buf, p - buf);
2920 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2921 }
2922
2923 /* Likewise, for ELFv2. */
2924
2925 static void
2926 ppc64v2_emit_reg (int reg)
2927 {
2928 uint32_t buf[12];
2929 uint32_t *p = buf;
2930
2931 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2932 p += GEN_LD (p, 3, 31, -32);
2933 p += GEN_LI (p, 4, reg);
2934 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2935 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2936 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2937
2938 emit_insns (buf, p - buf);
2939 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2940 }
2941
2942 /* TOP = stack[--sp] */
2943
2944 static void
2945 ppc64_emit_pop (void)
2946 {
2947 EMIT_ASM ("ldu 3, 8(30)");
2948 }
2949
2950 /* stack[sp++] = TOP
2951
2952 Because we may run out of bytecode stack, expand it by 8 more
2953 doublewords if needed. */
2954
2955 static void
2956 ppc64_emit_stack_flush (void)
2957 {
2958 /* Make sure the bytecode stack is big enough before the push.
2959 Otherwise, expand it by 64 more bytes. */
2960
2961 EMIT_ASM (" std 3, 0(30) \n"
2962 " addi 4, 30, -(112 + 8) \n"
2963 " cmpd 7, 4, 1 \n"
2964 " bgt 7, 1f \n"
2965 " stdu 31, -64(1) \n"
2966 "1:addi 30, 30, -8 \n");
2967 }
2968
2969 /* Swap TOP and stack[sp-1] */
2970
2971 static void
2972 ppc64_emit_swap (void)
2973 {
2974 EMIT_ASM ("ld 4, 8(30) \n"
2975 "std 3, 8(30) \n"
2976 "mr 3, 4 \n");
2977 }
2978
2979 /* Call function FN - ELFv1. */
2980
2981 static void
2982 ppc64v1_emit_call (CORE_ADDR fn)
2983 {
2984 uint32_t buf[13];
2985 uint32_t *p = buf;
2986
2987 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2988 p += gen_call (p, fn, 1, 1);
2989 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2990
2991 emit_insns (buf, p - buf);
2992 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2993 }
2994
2995 /* Call function FN - ELFv2. */
2996
2997 static void
2998 ppc64v2_emit_call (CORE_ADDR fn)
2999 {
3000 uint32_t buf[10];
3001 uint32_t *p = buf;
3002
3003 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3004 p += gen_call (p, fn, 1, 0);
3005 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3006
3007 emit_insns (buf, p - buf);
3008 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3009 }
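
/* Why 40(1) for ELFv1 but 24(1) for ELFv2 (explanatory note): both ABIs
   reserve a doubleword in the caller's stack-frame header for saving the
   TOC pointer across calls.  In ELFv1 that slot sits at offset 40, after
   the back chain, CR save, LR save, and the reserved compiler and linker
   doublewords; ELFv2 dropped the two reserved doublewords, moving the TOC
   save slot down to offset 24.  The emitted code stores r2 there before
   each call and reloads it afterwards, mirroring what compiled code does
   for calls through function pointers.  */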
3010
3011 /* FN's prototype is `LONGEST(*fn)(int)'.
3012 TOP = fn (arg1)
3013 */
3014
3015 static void
3016 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3017 {
3018 uint32_t buf[13];
3019 uint32_t *p = buf;
3020
3021 /* Setup argument. arg1 is a 16-bit value. */
3022 p += gen_limm (p, 3, arg1, 1);
3023 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3024 p += gen_call (p, fn, 1, 1);
3025 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3026
3027 emit_insns (buf, p - buf);
3028 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3029 }
3030
3031 /* Likewise for ELFv2. */
3032
3033 static void
3034 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3035 {
3036 uint32_t buf[10];
3037 uint32_t *p = buf;
3038
3039 /* Setup argument. arg1 is a 16-bit value. */
3040 p += gen_limm (p, 3, arg1, 1);
3041 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3042 p += gen_call (p, fn, 1, 0);
3043 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3044
3045 emit_insns (buf, p - buf);
3046 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3047 }
3048
3049 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3050 fn (arg1, TOP)
3051
3052 TOP should be preserved/restored before/after the call. */
3053
3054 static void
3055 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3056 {
3057 uint32_t buf[17];
3058 uint32_t *p = buf;
3059
3060 /* Save TOP. 0(30) is next-empty. */
3061 p += GEN_STD (p, 3, 30, 0);
3062
3063 /* Setup argument. arg1 is a 16-bit value. */
3064 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3065 p += gen_limm (p, 3, arg1, 1);
3066 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3067 p += gen_call (p, fn, 1, 1);
3068 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3069
3070 /* Restore TOP */
3071 p += GEN_LD (p, 3, 30, 0);
3072
3073 emit_insns (buf, p - buf);
3074 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3075 }
3076
3077 /* Likewise for ELFv2. */
3078
3079 static void
3080 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3081 {
3082 uint32_t buf[14];
3083 uint32_t *p = buf;
3084
3085 /* Save TOP. 0(30) is next-empty. */
3086 p += GEN_STD (p, 3, 30, 0);
3087
3088 /* Setup argument. arg1 is a 16-bit value. */
3089 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3090 p += gen_limm (p, 3, arg1, 1);
3091 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3092 p += gen_call (p, fn, 1, 0);
3093 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3094
3095 /* Restore TOP */
3096 p += GEN_LD (p, 3, 30, 0);
3097
3098 emit_insns (buf, p - buf);
3099 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3100 }
3101
3102 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3103
3104 static void
3105 ppc64_emit_if_goto (int *offset_p, int *size_p)
3106 {
3107 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3108 "ldu 3, 8(30) \n"
3109 "1:bne 7, 1b \n");
3110
3111 if (offset_p)
3112 *offset_p = 8;
3113 if (size_p)
3114 *size_p = 14;
3115 }
3116
3117 /* Goto if stack[--sp] == TOP */
3118
3119 static void
3120 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3121 {
3122 EMIT_ASM ("ldu 4, 8(30) \n"
3123 "cmpd 7, 4, 3 \n"
3124 "ldu 3, 8(30) \n"
3125 "1:beq 7, 1b \n");
3126
3127 if (offset_p)
3128 *offset_p = 12;
3129 if (size_p)
3130 *size_p = 14;
3131 }
3132
3133 /* Goto if stack[--sp] != TOP */
3134
3135 static void
3136 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3137 {
3138 EMIT_ASM ("ldu 4, 8(30) \n"
3139 "cmpd 7, 4, 3 \n"
3140 "ldu 3, 8(30) \n"
3141 "1:bne 7, 1b \n");
3142
3143 if (offset_p)
3144 *offset_p = 12;
3145 if (size_p)
3146 *size_p = 14;
3147 }
3148
3149 /* Goto if stack[--sp] < TOP */
3150
3151 static void
3152 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3153 {
3154 EMIT_ASM ("ldu 4, 8(30) \n"
3155 "cmpd 7, 4, 3 \n"
3156 "ldu 3, 8(30) \n"
3157 "1:blt 7, 1b \n");
3158
3159 if (offset_p)
3160 *offset_p = 12;
3161 if (size_p)
3162 *size_p = 14;
3163 }
3164
3165 /* Goto if stack[--sp] <= TOP */
3166
3167 static void
3168 ppc64_emit_le_goto (int *offset_p, int *size_p)
3169 {
3170 EMIT_ASM ("ldu 4, 8(30) \n"
3171 "cmpd 7, 4, 3 \n"
3172 "ldu 3, 8(30) \n"
3173 "1:ble 7, 1b \n");
3174
3175 if (offset_p)
3176 *offset_p = 12;
3177 if (size_p)
3178 *size_p = 14;
3179 }
3180
3181 /* Goto if stack[--sp] > TOP */
3182
3183 static void
3184 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3185 {
3186 EMIT_ASM ("ldu 4, 8(30) \n"
3187 "cmpd 7, 4, 3 \n"
3188 "ldu 3, 8(30) \n"
3189 "1:bgt 7, 1b \n");
3190
3191 if (offset_p)
3192 *offset_p = 12;
3193 if (size_p)
3194 *size_p = 14;
3195 }
3196
3197 /* Goto if stack[--sp] >= TOP */
3198
3199 static void
3200 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3201 {
3202 EMIT_ASM ("ldu 4, 8(30) \n"
3203 "cmpd 7, 4, 3 \n"
3204 "ldu 3, 8(30) \n"
3205 "1:bge 7, 1b \n");
3206
3207 if (offset_p)
3208 *offset_p = 12;
3209 if (size_p)
3210 *size_p = 14;
3211 }
3212
3213 /* Table of emit ops for 64-bit ELFv1. */
3214
3215 static struct emit_ops ppc64v1_emit_ops_impl =
3216 {
3217 ppc64v1_emit_prologue,
3218 ppc64_emit_epilogue,
3219 ppc64_emit_add,
3220 ppc64_emit_sub,
3221 ppc64_emit_mul,
3222 ppc64_emit_lsh,
3223 ppc64_emit_rsh_signed,
3224 ppc64_emit_rsh_unsigned,
3225 ppc64_emit_ext,
3226 ppc64_emit_log_not,
3227 ppc64_emit_bit_and,
3228 ppc64_emit_bit_or,
3229 ppc64_emit_bit_xor,
3230 ppc64_emit_bit_not,
3231 ppc64_emit_equal,
3232 ppc64_emit_less_signed,
3233 ppc64_emit_less_unsigned,
3234 ppc64_emit_ref,
3235 ppc64_emit_if_goto,
3236 ppc_emit_goto,
3237 ppc_write_goto_address,
3238 ppc64_emit_const,
3239 ppc64v1_emit_call,
3240 ppc64v1_emit_reg,
3241 ppc64_emit_pop,
3242 ppc64_emit_stack_flush,
3243 ppc64_emit_zero_ext,
3244 ppc64_emit_swap,
3245 ppc_emit_stack_adjust,
3246 ppc64v1_emit_int_call_1,
3247 ppc64v1_emit_void_call_2,
3248 ppc64_emit_eq_goto,
3249 ppc64_emit_ne_goto,
3250 ppc64_emit_lt_goto,
3251 ppc64_emit_le_goto,
3252 ppc64_emit_gt_goto,
3253 ppc64_emit_ge_goto
3254 };
3255
3256 /* Table of emit ops for 64-bit ELFv2. */
3257
3258 static struct emit_ops ppc64v2_emit_ops_impl =
3259 {
3260 ppc64v2_emit_prologue,
3261 ppc64_emit_epilogue,
3262 ppc64_emit_add,
3263 ppc64_emit_sub,
3264 ppc64_emit_mul,
3265 ppc64_emit_lsh,
3266 ppc64_emit_rsh_signed,
3267 ppc64_emit_rsh_unsigned,
3268 ppc64_emit_ext,
3269 ppc64_emit_log_not,
3270 ppc64_emit_bit_and,
3271 ppc64_emit_bit_or,
3272 ppc64_emit_bit_xor,
3273 ppc64_emit_bit_not,
3274 ppc64_emit_equal,
3275 ppc64_emit_less_signed,
3276 ppc64_emit_less_unsigned,
3277 ppc64_emit_ref,
3278 ppc64_emit_if_goto,
3279 ppc_emit_goto,
3280 ppc_write_goto_address,
3281 ppc64_emit_const,
3282 ppc64v2_emit_call,
3283 ppc64v2_emit_reg,
3284 ppc64_emit_pop,
3285 ppc64_emit_stack_flush,
3286 ppc64_emit_zero_ext,
3287 ppc64_emit_swap,
3288 ppc_emit_stack_adjust,
3289 ppc64v2_emit_int_call_1,
3290 ppc64v2_emit_void_call_2,
3291 ppc64_emit_eq_goto,
3292 ppc64_emit_ne_goto,
3293 ppc64_emit_lt_goto,
3294 ppc64_emit_le_goto,
3295 ppc64_emit_gt_goto,
3296 ppc64_emit_ge_goto
3297 };
3298
3299 #endif
3300
3301 /* Implementation of linux_target_ops method "emit_ops". */
3302
3303 static struct emit_ops *
3304 ppc_emit_ops (void)
3305 {
3306 #ifdef __powerpc64__
3307 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3308
3309 if (register_size (regcache->tdesc, 0) == 8)
3310 {
3311 if (is_elfv2_inferior ())
3312 return &ppc64v2_emit_ops_impl;
3313 else
3314 return &ppc64v1_emit_ops_impl;
3315 }
3316 #endif
3317 return &ppc_emit_ops_impl;
3318 }
3319
3320 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3321
3322 static int
3323 ppc_get_ipa_tdesc_idx (void)
3324 {
3325 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3326 const struct target_desc *tdesc = regcache->tdesc;
3327
3328 #ifdef __powerpc64__
3329 if (tdesc == tdesc_powerpc_64l)
3330 return PPC_TDESC_BASE;
3331 if (tdesc == tdesc_powerpc_altivec64l)
3332 return PPC_TDESC_ALTIVEC;
3333 if (tdesc == tdesc_powerpc_vsx64l)
3334 return PPC_TDESC_VSX;
3335 if (tdesc == tdesc_powerpc_isa205_64l)
3336 return PPC_TDESC_ISA205;
3337 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3338 return PPC_TDESC_ISA205_ALTIVEC;
3339 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3340 return PPC_TDESC_ISA205_VSX;
3341 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3342 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3343 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3344 return PPC_TDESC_ISA207_VSX;
3345 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3346 return PPC_TDESC_ISA207_HTM_VSX;
3347 #endif
3348
3349 if (tdesc == tdesc_powerpc_32l)
3350 return PPC_TDESC_BASE;
3351 if (tdesc == tdesc_powerpc_altivec32l)
3352 return PPC_TDESC_ALTIVEC;
3353 if (tdesc == tdesc_powerpc_vsx32l)
3354 return PPC_TDESC_VSX;
3355 if (tdesc == tdesc_powerpc_isa205_32l)
3356 return PPC_TDESC_ISA205;
3357 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3358 return PPC_TDESC_ISA205_ALTIVEC;
3359 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3360 return PPC_TDESC_ISA205_VSX;
3361 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3362 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3363 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3364 return PPC_TDESC_ISA207_VSX;
3365 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3366 return PPC_TDESC_ISA207_HTM_VSX;
3367 if (tdesc == tdesc_powerpc_e500l)
3368 return PPC_TDESC_E500;
3369
3370 return 0;
3371 }
3372
3373 struct linux_target_ops the_low_target = {
3374 ppc_arch_setup,
3375 ppc_regs_info,
3376 ppc_cannot_fetch_register,
3377 ppc_cannot_store_register,
3378 NULL, /* fetch_register */
3379 ppc_get_pc,
3380 ppc_set_pc,
3381 NULL, /* breakpoint_kind_from_pc */
3382 ppc_sw_breakpoint_from_kind,
3383 NULL,
3384 0,
3385 ppc_breakpoint_at,
3386 ppc_supports_z_point_type,
3387 ppc_insert_point,
3388 ppc_remove_point,
3389 NULL,
3390 NULL,
3391 ppc_collect_ptrace_register,
3392 ppc_supply_ptrace_register,
3393 NULL, /* siginfo_fixup */
3394 NULL, /* new_process */
3395 NULL, /* delete_process */
3396 NULL, /* new_thread */
3397 NULL, /* delete_thread */
3398 NULL, /* new_fork */
3399 NULL, /* prepare_to_resume */
3400 NULL, /* process_qsupported */
3401 ppc_supports_tracepoints,
3402 ppc_get_thread_area,
3403 ppc_install_fast_tracepoint_jump_pad,
3404 ppc_emit_ops,
3405 ppc_get_min_fast_tracepoint_insn_len,
3406 NULL, /* supports_range_stepping */
3407 NULL, /* breakpoint_kind_from_current_state */
3408 ppc_supports_hardware_single_step,
3409 NULL, /* get_syscall_trapinfo */
3410 ppc_get_ipa_tdesc_idx,
3411 };
3412
3413 void
3414 initialize_low_arch (void)
3415 {
3416 /* Initialize the Linux target descriptions. */
3417
3418 init_registers_powerpc_32l ();
3419 init_registers_powerpc_altivec32l ();
3420 init_registers_powerpc_vsx32l ();
3421 init_registers_powerpc_isa205_32l ();
3422 init_registers_powerpc_isa205_altivec32l ();
3423 init_registers_powerpc_isa205_vsx32l ();
3424 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3425 init_registers_powerpc_isa207_vsx32l ();
3426 init_registers_powerpc_isa207_htm_vsx32l ();
3427 init_registers_powerpc_e500l ();
3428 #if __powerpc64__
3429 init_registers_powerpc_64l ();
3430 init_registers_powerpc_altivec64l ();
3431 init_registers_powerpc_vsx64l ();
3432 init_registers_powerpc_isa205_64l ();
3433 init_registers_powerpc_isa205_altivec64l ();
3434 init_registers_powerpc_isa205_vsx64l ();
3435 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3436 init_registers_powerpc_isa207_vsx64l ();
3437 init_registers_powerpc_isa207_htm_vsx64l ();
3438 #endif
3439
3440 initialize_regsets_info (&ppc_regsets_info);
3441 }