tcg/ppc/tcg-target.inc.c
tcg/ppc: Update vector support for v3.00 load/store
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "elf.h"
26 #include "tcg-pool.inc.c"
27
28 #if defined _CALL_DARWIN || defined __APPLE__
29 #define TCG_TARGET_CALL_DARWIN
30 #endif
31 #ifdef _CALL_SYSV
32 # define TCG_TARGET_CALL_ALIGN_ARGS 1
33 #endif
34
35 /* For some memory operations, we need a scratch that isn't R0. For the AIX
36 calling convention, we can re-use the TOC register since we'll be reloading
37 it at every call. Otherwise R12 will do nicely as neither a call-saved
38 register nor a parameter register. */
39 #ifdef _CALL_AIX
40 # define TCG_REG_TMP1 TCG_REG_R2
41 #else
42 # define TCG_REG_TMP1 TCG_REG_R12
43 #endif
44
45 #define TCG_VEC_TMP1 TCG_REG_V0
46 #define TCG_VEC_TMP2 TCG_REG_V1
47
48 #define TCG_REG_TB TCG_REG_R31
49 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
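/*
 * On 64-bit hosts, TCG_REG_TB is kept pointing at the start of the current
 * TB, so addresses and pooled constants near the TB can be formed with a
 * small displacement (see tcg_out_movi_int and tcg_out_dupi_vec below).
 */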
50
51 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
52 #define SZP ((int)sizeof(void *))
53
54 /* Shorthand for size of a register. */
55 #define SZR (TCG_TARGET_REG_BITS / 8)
56
57 #define TCG_CT_CONST_S16 0x100
58 #define TCG_CT_CONST_U16 0x200
59 #define TCG_CT_CONST_S32 0x400
60 #define TCG_CT_CONST_U32 0x800
61 #define TCG_CT_CONST_ZERO 0x1000
62 #define TCG_CT_CONST_MONE 0x2000
63 #define TCG_CT_CONST_WSZ 0x4000
64
65 static tcg_insn_unit *tb_ret_addr;
66
67 TCGPowerISA have_isa;
68 static bool have_isel;
69 bool have_altivec;
70 bool have_vsx;
71
72 #ifndef CONFIG_SOFTMMU
73 #define TCG_GUEST_BASE_REG 30
74 #endif
75
76 #ifdef CONFIG_DEBUG_TCG
77 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
78 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
80 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
81 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
82 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
83 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
84 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
85 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
86 };
87 #endif
88
89 static const int tcg_target_reg_alloc_order[] = {
90 TCG_REG_R14, /* call saved registers */
91 TCG_REG_R15,
92 TCG_REG_R16,
93 TCG_REG_R17,
94 TCG_REG_R18,
95 TCG_REG_R19,
96 TCG_REG_R20,
97 TCG_REG_R21,
98 TCG_REG_R22,
99 TCG_REG_R23,
100 TCG_REG_R24,
101 TCG_REG_R25,
102 TCG_REG_R26,
103 TCG_REG_R27,
104 TCG_REG_R28,
105 TCG_REG_R29,
106 TCG_REG_R30,
107 TCG_REG_R31,
108 TCG_REG_R12, /* call clobbered, non-arguments */
109 TCG_REG_R11,
110 TCG_REG_R2,
111 TCG_REG_R13,
112 TCG_REG_R10, /* call clobbered, arguments */
113 TCG_REG_R9,
114 TCG_REG_R8,
115 TCG_REG_R7,
116 TCG_REG_R6,
117 TCG_REG_R5,
118 TCG_REG_R4,
119 TCG_REG_R3,
120
121 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
122 TCG_REG_V2, /* call clobbered, vectors */
123 TCG_REG_V3,
124 TCG_REG_V4,
125 TCG_REG_V5,
126 TCG_REG_V6,
127 TCG_REG_V7,
128 TCG_REG_V8,
129 TCG_REG_V9,
130 TCG_REG_V10,
131 TCG_REG_V11,
132 TCG_REG_V12,
133 TCG_REG_V13,
134 TCG_REG_V14,
135 TCG_REG_V15,
136 TCG_REG_V16,
137 TCG_REG_V17,
138 TCG_REG_V18,
139 TCG_REG_V19,
140 };
141
142 static const int tcg_target_call_iarg_regs[] = {
143 TCG_REG_R3,
144 TCG_REG_R4,
145 TCG_REG_R5,
146 TCG_REG_R6,
147 TCG_REG_R7,
148 TCG_REG_R8,
149 TCG_REG_R9,
150 TCG_REG_R10
151 };
152
153 static const int tcg_target_call_oarg_regs[] = {
154 TCG_REG_R3,
155 TCG_REG_R4
156 };
157
158 static const int tcg_target_callee_save_regs[] = {
159 #ifdef TCG_TARGET_CALL_DARWIN
160 TCG_REG_R11,
161 #endif
162 TCG_REG_R14,
163 TCG_REG_R15,
164 TCG_REG_R16,
165 TCG_REG_R17,
166 TCG_REG_R18,
167 TCG_REG_R19,
168 TCG_REG_R20,
169 TCG_REG_R21,
170 TCG_REG_R22,
171 TCG_REG_R23,
172 TCG_REG_R24,
173 TCG_REG_R25,
174 TCG_REG_R26,
175 TCG_REG_R27, /* currently used for the global env */
176 TCG_REG_R28,
177 TCG_REG_R29,
178 TCG_REG_R30,
179 TCG_REG_R31
180 };
181
182 static inline bool in_range_b(tcg_target_long target)
183 {
184 return target == sextract64(target, 0, 26);
185 }
186
187 static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
188 {
189 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
190 tcg_debug_assert(in_range_b(disp));
191 return disp & 0x3fffffc;
192 }
193
194 static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
195 {
196 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
197 if (in_range_b(disp)) {
198 *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
199 return true;
200 }
201 return false;
202 }
203
204 static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
205 {
206 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
207 tcg_debug_assert(disp == (int16_t) disp);
208 return disp & 0xfffc;
209 }
210
211 static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
212 {
213 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
214 if (disp == (int16_t) disp) {
215 *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
216 return true;
217 }
218 return false;
219 }
220
221 /* parse target specific constraints */
222 static const char *target_parse_constraint(TCGArgConstraint *ct,
223 const char *ct_str, TCGType type)
224 {
225 switch (*ct_str++) {
226 case 'A': case 'B': case 'C': case 'D':
227 ct->ct |= TCG_CT_REG;
228 tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
229 break;
230 case 'r':
231 ct->ct |= TCG_CT_REG;
232 ct->u.regs = 0xffffffff;
233 break;
234 case 'v':
235 ct->ct |= TCG_CT_REG;
236 ct->u.regs = 0xffffffff00000000ull;
237 break;
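        /*
         * Note: the 'L' and 'S' cases below exclude registers that the
         * qemu_ld/st expansion needs for itself, e.g. the TLB lookup
         * results and the slow-path helper-call arguments under softmmu,
         * so those registers are never allocated to the operands.
         */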
238 case 'L': /* qemu_ld constraint */
239 ct->ct |= TCG_CT_REG;
240 ct->u.regs = 0xffffffff;
241 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
242 #ifdef CONFIG_SOFTMMU
243 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
244 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
245 #endif
246 break;
247 case 'S': /* qemu_st constraint */
248 ct->ct |= TCG_CT_REG;
249 ct->u.regs = 0xffffffff;
250 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
251 #ifdef CONFIG_SOFTMMU
252 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
253 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
254 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
255 #endif
256 break;
257 case 'I':
258 ct->ct |= TCG_CT_CONST_S16;
259 break;
260 case 'J':
261 ct->ct |= TCG_CT_CONST_U16;
262 break;
263 case 'M':
264 ct->ct |= TCG_CT_CONST_MONE;
265 break;
266 case 'T':
267 ct->ct |= TCG_CT_CONST_S32;
268 break;
269 case 'U':
270 ct->ct |= TCG_CT_CONST_U32;
271 break;
272 case 'W':
273 ct->ct |= TCG_CT_CONST_WSZ;
274 break;
275 case 'Z':
276 ct->ct |= TCG_CT_CONST_ZERO;
277 break;
278 default:
279 return NULL;
280 }
281 return ct_str;
282 }
283
284 /* test if a constant matches the constraint */
285 static int tcg_target_const_match(tcg_target_long val, TCGType type,
286 const TCGArgConstraint *arg_ct)
287 {
288 int ct = arg_ct->ct;
289 if (ct & TCG_CT_CONST) {
290 return 1;
291 }
292
293 /* The only 32-bit constraint we use aside from
294 TCG_CT_CONST is TCG_CT_CONST_S16. */
295 if (type == TCG_TYPE_I32) {
296 val = (int32_t)val;
297 }
298
299 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
300 return 1;
301 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
302 return 1;
303 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
304 return 1;
305 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
306 return 1;
307 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
308 return 1;
309 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
310 return 1;
311 } else if ((ct & TCG_CT_CONST_WSZ)
312 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
313 return 1;
314 }
315 return 0;
316 }
317
318 #define OPCD(opc) ((opc)<<26)
319 #define XO19(opc) (OPCD(19)|((opc)<<1))
320 #define MD30(opc) (OPCD(30)|((opc)<<2))
321 #define MDS30(opc) (OPCD(30)|((opc)<<1))
322 #define XO31(opc) (OPCD(31)|((opc)<<1))
323 #define XO58(opc) (OPCD(58)|(opc))
324 #define XO62(opc) (OPCD(62)|(opc))
325 #define VX4(opc) (OPCD(4)|(opc))
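/*
 * These form macros build the fixed opcode bits: the primary opcode always
 * occupies the top six bits (OPCD), with the extended opcode placed in the
 * form's XO field.  For example, ADD below is XO31(266), i.e.
 * (31 << 26) | (266 << 1); the register fields are OR'ed in separately via
 * RT/RA/RB etc.
 */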
326
327 #define B OPCD( 18)
328 #define BC OPCD( 16)
329 #define LBZ OPCD( 34)
330 #define LHZ OPCD( 40)
331 #define LHA OPCD( 42)
332 #define LWZ OPCD( 32)
333 #define LWZUX XO31( 55)
334 #define STB OPCD( 38)
335 #define STH OPCD( 44)
336 #define STW OPCD( 36)
337
338 #define STD XO62( 0)
339 #define STDU XO62( 1)
340 #define STDX XO31(149)
341
342 #define LD XO58( 0)
343 #define LDX XO31( 21)
344 #define LDU XO58( 1)
345 #define LDUX XO31( 53)
346 #define LWA XO58( 2)
347 #define LWAX XO31(341)
348
349 #define ADDIC OPCD( 12)
350 #define ADDI OPCD( 14)
351 #define ADDIS OPCD( 15)
352 #define ORI OPCD( 24)
353 #define ORIS OPCD( 25)
354 #define XORI OPCD( 26)
355 #define XORIS OPCD( 27)
356 #define ANDI OPCD( 28)
357 #define ANDIS OPCD( 29)
358 #define MULLI OPCD( 7)
359 #define CMPLI OPCD( 10)
360 #define CMPI OPCD( 11)
361 #define SUBFIC OPCD( 8)
362
363 #define LWZU OPCD( 33)
364 #define STWU OPCD( 37)
365
366 #define RLWIMI OPCD( 20)
367 #define RLWINM OPCD( 21)
368 #define RLWNM OPCD( 23)
369
370 #define RLDICL MD30( 0)
371 #define RLDICR MD30( 1)
372 #define RLDIMI MD30( 3)
373 #define RLDCL MDS30( 8)
374
375 #define BCLR XO19( 16)
376 #define BCCTR XO19(528)
377 #define CRAND XO19(257)
378 #define CRANDC XO19(129)
379 #define CRNAND XO19(225)
380 #define CROR XO19(449)
381 #define CRNOR XO19( 33)
382
383 #define EXTSB XO31(954)
384 #define EXTSH XO31(922)
385 #define EXTSW XO31(986)
386 #define ADD XO31(266)
387 #define ADDE XO31(138)
388 #define ADDME XO31(234)
389 #define ADDZE XO31(202)
390 #define ADDC XO31( 10)
391 #define AND XO31( 28)
392 #define SUBF XO31( 40)
393 #define SUBFC XO31( 8)
394 #define SUBFE XO31(136)
395 #define SUBFME XO31(232)
396 #define SUBFZE XO31(200)
397 #define OR XO31(444)
398 #define XOR XO31(316)
399 #define MULLW XO31(235)
400 #define MULHW XO31( 75)
401 #define MULHWU XO31( 11)
402 #define DIVW XO31(491)
403 #define DIVWU XO31(459)
404 #define CMP XO31( 0)
405 #define CMPL XO31( 32)
406 #define LHBRX XO31(790)
407 #define LWBRX XO31(534)
408 #define LDBRX XO31(532)
409 #define STHBRX XO31(918)
410 #define STWBRX XO31(662)
411 #define STDBRX XO31(660)
412 #define MFSPR XO31(339)
413 #define MTSPR XO31(467)
414 #define SRAWI XO31(824)
415 #define NEG XO31(104)
416 #define MFCR XO31( 19)
417 #define MFOCRF (MFCR | (1u << 20))
418 #define NOR XO31(124)
419 #define CNTLZW XO31( 26)
420 #define CNTLZD XO31( 58)
421 #define CNTTZW XO31(538)
422 #define CNTTZD XO31(570)
423 #define CNTPOPW XO31(378)
424 #define CNTPOPD XO31(506)
425 #define ANDC XO31( 60)
426 #define ORC XO31(412)
427 #define EQV XO31(284)
428 #define NAND XO31(476)
429 #define ISEL XO31( 15)
430
431 #define MULLD XO31(233)
432 #define MULHD XO31( 73)
433 #define MULHDU XO31( 9)
434 #define DIVD XO31(489)
435 #define DIVDU XO31(457)
436
437 #define LBZX XO31( 87)
438 #define LHZX XO31(279)
439 #define LHAX XO31(343)
440 #define LWZX XO31( 23)
441 #define STBX XO31(215)
442 #define STHX XO31(407)
443 #define STWX XO31(151)
444
445 #define EIEIO XO31(854)
446 #define HWSYNC XO31(598)
447 #define LWSYNC (HWSYNC | (1u << 21))
448
449 #define SPR(a, b) ((((a)<<5)|(b))<<11)
450 #define LR SPR(8, 0)
451 #define CTR SPR(9, 0)
452
453 #define SLW XO31( 24)
454 #define SRW XO31(536)
455 #define SRAW XO31(792)
456
457 #define SLD XO31( 27)
458 #define SRD XO31(539)
459 #define SRAD XO31(794)
460 #define SRADI XO31(413<<1)
461
462 #define TW XO31( 4)
463 #define TRAP (TW | TO(31))
464
465 #define NOP ORI /* ori 0,0,0 */
466
467 #define LVX XO31(103)
468 #define LVEBX XO31(7)
469 #define LVEHX XO31(39)
470 #define LVEWX XO31(71)
471 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
472 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
473 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
474 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
475 #define LXSD (OPCD(57) | 2) /* v3.00 */
476 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
477
478 #define STVX XO31(231)
479 #define STVEWX XO31(199)
480 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
481 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
482 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
483 #define STXSD (OPCD(61) | 2) /* v3.00 */
484
485 #define VADDSBS VX4(768)
486 #define VADDUBS VX4(512)
487 #define VADDUBM VX4(0)
488 #define VADDSHS VX4(832)
489 #define VADDUHS VX4(576)
490 #define VADDUHM VX4(64)
491 #define VADDSWS VX4(896)
492 #define VADDUWS VX4(640)
493 #define VADDUWM VX4(128)
494 #define VADDUDM VX4(192) /* v2.07 */
495
496 #define VSUBSBS VX4(1792)
497 #define VSUBUBS VX4(1536)
498 #define VSUBUBM VX4(1024)
499 #define VSUBSHS VX4(1856)
500 #define VSUBUHS VX4(1600)
501 #define VSUBUHM VX4(1088)
502 #define VSUBSWS VX4(1920)
503 #define VSUBUWS VX4(1664)
504 #define VSUBUWM VX4(1152)
505 #define VSUBUDM VX4(1216) /* v2.07 */
506
507 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
508 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
509
510 #define VMAXSB VX4(258)
511 #define VMAXSH VX4(322)
512 #define VMAXSW VX4(386)
513 #define VMAXSD VX4(450) /* v2.07 */
514 #define VMAXUB VX4(2)
515 #define VMAXUH VX4(66)
516 #define VMAXUW VX4(130)
517 #define VMAXUD VX4(194) /* v2.07 */
518 #define VMINSB VX4(770)
519 #define VMINSH VX4(834)
520 #define VMINSW VX4(898)
521 #define VMINSD VX4(962) /* v2.07 */
522 #define VMINUB VX4(514)
523 #define VMINUH VX4(578)
524 #define VMINUW VX4(642)
525 #define VMINUD VX4(706) /* v2.07 */
526
527 #define VCMPEQUB VX4(6)
528 #define VCMPEQUH VX4(70)
529 #define VCMPEQUW VX4(134)
530 #define VCMPEQUD VX4(199) /* v2.07 */
531 #define VCMPGTSB VX4(774)
532 #define VCMPGTSH VX4(838)
533 #define VCMPGTSW VX4(902)
534 #define VCMPGTSD VX4(967) /* v2.07 */
535 #define VCMPGTUB VX4(518)
536 #define VCMPGTUH VX4(582)
537 #define VCMPGTUW VX4(646)
538 #define VCMPGTUD VX4(711) /* v2.07 */
539 #define VCMPNEB VX4(7) /* v3.00 */
540 #define VCMPNEH VX4(71) /* v3.00 */
541 #define VCMPNEW VX4(135) /* v3.00 */
542
543 #define VSLB VX4(260)
544 #define VSLH VX4(324)
545 #define VSLW VX4(388)
546 #define VSLD VX4(1476) /* v2.07 */
547 #define VSRB VX4(516)
548 #define VSRH VX4(580)
549 #define VSRW VX4(644)
550 #define VSRD VX4(1732) /* v2.07 */
551 #define VSRAB VX4(772)
552 #define VSRAH VX4(836)
553 #define VSRAW VX4(900)
554 #define VSRAD VX4(964) /* v2.07 */
555 #define VRLB VX4(4)
556 #define VRLH VX4(68)
557 #define VRLW VX4(132)
558 #define VRLD VX4(196) /* v2.07 */
559
560 #define VMULEUB VX4(520)
561 #define VMULEUH VX4(584)
562 #define VMULEUW VX4(648) /* v2.07 */
563 #define VMULOUB VX4(8)
564 #define VMULOUH VX4(72)
565 #define VMULOUW VX4(136) /* v2.07 */
566 #define VMULUWM VX4(137) /* v2.07 */
567 #define VMSUMUHM VX4(38)
568
569 #define VMRGHB VX4(12)
570 #define VMRGHH VX4(76)
571 #define VMRGHW VX4(140)
572 #define VMRGLB VX4(268)
573 #define VMRGLH VX4(332)
574 #define VMRGLW VX4(396)
575
576 #define VPKUHUM VX4(14)
577 #define VPKUWUM VX4(78)
578
579 #define VAND VX4(1028)
580 #define VANDC VX4(1092)
581 #define VNOR VX4(1284)
582 #define VOR VX4(1156)
583 #define VXOR VX4(1220)
584 #define VEQV VX4(1668) /* v2.07 */
585 #define VNAND VX4(1412) /* v2.07 */
586 #define VORC VX4(1348) /* v2.07 */
587
588 #define VSPLTB VX4(524)
589 #define VSPLTH VX4(588)
590 #define VSPLTW VX4(652)
591 #define VSPLTISB VX4(780)
592 #define VSPLTISH VX4(844)
593 #define VSPLTISW VX4(908)
594
595 #define VSLDOI VX4(44)
596
597 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
598 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
599
600 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
601 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
602 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
603 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
604
605 #define RT(r) ((r)<<21)
606 #define RS(r) ((r)<<21)
607 #define RA(r) ((r)<<16)
608 #define RB(r) ((r)<<11)
609 #define TO(t) ((t)<<21)
610 #define SH(s) ((s)<<11)
611 #define MB(b) ((b)<<6)
612 #define ME(e) ((e)<<1)
613 #define BO(o) ((o)<<21)
614 #define MB64(b) ((b)<<5)
615 #define FXM(b) (1 << (19 - (b)))
616
617 #define VRT(r) (((r) & 31) << 21)
618 #define VRA(r) (((r) & 31) << 16)
619 #define VRB(r) (((r) & 31) << 11)
620 #define VRC(r) (((r) & 31) << 6)
621
622 #define LK 1
623
624 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
625 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
626 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
627 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
628
629 #define BF(n) ((n)<<23)
630 #define BI(n, c) (((c)+((n)*4))<<16)
631 #define BT(n, c) (((c)+((n)*4))<<21)
632 #define BA(n, c) (((c)+((n)*4))<<16)
633 #define BB(n, c) (((c)+((n)*4))<<11)
634 #define BC_(n, c) (((c)+((n)*4))<<6)
635
636 #define BO_COND_TRUE BO(12)
637 #define BO_COND_FALSE BO( 4)
638 #define BO_ALWAYS BO(20)
639
640 enum {
641 CR_LT,
642 CR_GT,
643 CR_EQ,
644 CR_SO
645 };
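/*
 * CR_LT..CR_SO are the bit positions within a single 4-bit condition
 * register field; BI(n, c), BT/BA/BB and BC_ above turn (field, bit) into
 * the absolute CR bit number n * 4 + c expected by bc, crand/cror and isel.
 */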
646
647 static const uint32_t tcg_to_bc[] = {
648 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
649 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
650 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
651 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
652 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
653 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
654 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
655 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
656 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
657 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
658 };
659
660 /* The low bit here is set if the RA and RB fields must be inverted. */
661 static const uint32_t tcg_to_isel[] = {
662 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
663 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
664 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
665 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
666 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
667 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
668 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
669 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
670 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
671 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
672 };
673
674 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
675 intptr_t value, intptr_t addend)
676 {
677 tcg_insn_unit *target;
678 int16_t lo;
679 int32_t hi;
680
681 value += addend;
682 target = (tcg_insn_unit *)value;
683
684 switch (type) {
685 case R_PPC_REL14:
686 return reloc_pc14(code_ptr, target);
687 case R_PPC_REL24:
688 return reloc_pc24(code_ptr, target);
689 case R_PPC_ADDR16:
690 /*
691 * We are (slightly) abusing this relocation type. In particular,
692 * assert that the low 2 bits are zero, and do not modify them.
693 * That way we can use this with LD et al that have opcode bits
694 * in the low 2 bits of the insn.
695 */
696 if ((value & 3) || value != (int16_t)value) {
697 return false;
698 }
699 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
700 break;
701 case R_PPC_ADDR32:
702 /*
703 * We are abusing this relocation type. Again, this points to
704 * a pair of insns, lis + load. This is an absolute address
705 * relocation for PPC32 so the lis cannot be removed.
706 */
707 lo = value;
708 hi = value - lo;
709 if (hi + lo != value) {
710 return false;
711 }
712 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
713 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
714 break;
715 default:
716 g_assert_not_reached();
717 }
718 return true;
719 }
720
721 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
722 TCGReg base, tcg_target_long offset);
723
724 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
725 {
726 if (ret == arg) {
727 return true;
728 }
729 switch (type) {
730 case TCG_TYPE_I64:
731 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
732 /* fallthru */
733 case TCG_TYPE_I32:
734 if (ret < TCG_REG_V0) {
735 if (arg < TCG_REG_V0) {
736 tcg_out32(s, OR | SAB(arg, ret, arg));
737 break;
738 } else if (have_isa_2_07) {
739 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
740 | VRT(arg) | RA(ret));
741 break;
742 } else {
743 /* Altivec does not support vector->integer moves. */
744 return false;
745 }
746 } else if (arg < TCG_REG_V0) {
747 if (have_isa_2_07) {
748 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
749 | VRT(ret) | RA(arg));
750 break;
751 } else {
752 /* Altivec does not support integer->vector moves. */
753 return false;
754 }
755 }
756 /* fallthru */
757 case TCG_TYPE_V64:
758 case TCG_TYPE_V128:
759 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
760 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
761 break;
762 default:
763 g_assert_not_reached();
764 }
765 return true;
766 }
767
768 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
769 int sh, int mb)
770 {
771 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
772 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
773 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
774 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
775 }
776
777 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
778 int sh, int mb, int me)
779 {
780 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
781 }
782
783 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
784 {
785 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
786 }
787
788 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
789 {
790 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
791 }
792
793 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
794 {
795 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
796 }
797
798 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
799 {
800 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
801 }
802
803 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
804 {
805 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
806 }
807
 808 /* Emit a move of arg into ret, if it can be done in one insn.  */
809 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
810 {
811 if (arg == (int16_t)arg) {
812 tcg_out32(s, ADDI | TAI(ret, 0, arg));
813 return true;
814 }
815 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
816 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
817 return true;
818 }
819 return false;
820 }
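/*
 * The two single-insn forms above are "li" (addi with RA=0, giving a
 * sign-extended 16-bit value) and "lis" (addis with RA=0, giving a 16-bit
 * value shifted left by 16).  For example, 0x12340000 is loaded with a
 * single lis.
 */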
821
822 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
823 tcg_target_long arg, bool in_prologue)
824 {
825 intptr_t tb_diff;
826 tcg_target_long tmp;
827 int shift;
828
829 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
830
831 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
832 arg = (int32_t)arg;
833 }
834
835 /* Load 16-bit immediates with one insn. */
836 if (tcg_out_movi_one(s, ret, arg)) {
837 return;
838 }
839
840 /* Load addresses within the TB with one insn. */
841 tb_diff = arg - (intptr_t)s->code_gen_ptr;
842 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
843 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
844 return;
845 }
846
847 /* Load 32-bit immediates with two insns. Note that we've already
848 eliminated bare ADDIS, so we know both insns are required. */
849 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
850 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
851 tcg_out32(s, ORI | SAI(ret, ret, arg));
852 return;
853 }
854 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
855 tcg_out32(s, ADDI | TAI(ret, 0, arg));
856 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
857 return;
858 }
859
860 /* Load masked 16-bit value. */
861 if (arg > 0 && (arg & 0x8000)) {
862 tmp = arg | 0x7fff;
863 if ((tmp & (tmp + 1)) == 0) {
864 int mb = clz64(tmp + 1) + 1;
865 tcg_out32(s, ADDI | TAI(ret, 0, arg));
866 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
867 return;
868 }
869 }
870
871 /* Load common masks with 2 insns. */
872 shift = ctz64(arg);
873 tmp = arg >> shift;
874 if (tmp == (int16_t)tmp) {
875 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
876 tcg_out_shli64(s, ret, ret, shift);
877 return;
878 }
879 shift = clz64(arg);
880 if (tcg_out_movi_one(s, ret, arg << shift)) {
881 tcg_out_shri64(s, ret, ret, shift);
882 return;
883 }
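    /*
     * For example, 0x7fff00000000 is handled just above as li of 0x7fff
     * followed by a left shift of 32, while 0xffff8000 (positive, with bit
     * 15 set) never reaches these paths: it is caught earlier by the masked
     * 16-bit case as li of -0x8000 followed by rldicl clearing the upper
     * 32 bits.
     */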
884
885 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
886 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
887 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
888 return;
889 }
890
891 /* Use the constant pool, if possible. */
892 if (!in_prologue && USE_REG_TB) {
893 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
894 -(intptr_t)s->code_gen_ptr);
895 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
896 return;
897 }
898
899 tmp = arg >> 31 >> 1;
900 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
901 if (tmp) {
902 tcg_out_shli64(s, ret, ret, 32);
903 }
904 if (arg & 0xffff0000) {
905 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
906 }
907 if (arg & 0xffff) {
908 tcg_out32(s, ORI | SAI(ret, ret, arg));
909 }
910 }
911
912 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
913 tcg_target_long val)
914 {
915 uint32_t load_insn;
916 int rel, low;
917 intptr_t add;
918
919 low = (int8_t)val;
920 if (low >= -16 && low < 16) {
921 if (val == (tcg_target_long)dup_const(MO_8, low)) {
922 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
923 return;
924 }
925 if (val == (tcg_target_long)dup_const(MO_16, low)) {
926 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
927 return;
928 }
929 if (val == (tcg_target_long)dup_const(MO_32, low)) {
930 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
931 return;
932 }
933 }
934
935 /*
936 * Otherwise we must load the value from the constant pool.
937 */
938 if (USE_REG_TB) {
939 rel = R_PPC_ADDR16;
940 add = -(intptr_t)s->code_gen_ptr;
941 } else {
942 rel = R_PPC_ADDR32;
943 add = 0;
944 }
945
946 if (have_vsx) {
947 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
948 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
949 if (TCG_TARGET_REG_BITS == 64) {
950 new_pool_label(s, val, rel, s->code_ptr, add);
951 } else {
952 new_pool_l2(s, rel, s->code_ptr, add, val, val);
953 }
954 } else {
955 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
956 if (TCG_TARGET_REG_BITS == 64) {
957 new_pool_l2(s, rel, s->code_ptr, add, val, val);
958 } else {
959 new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
960 }
961 }
962
963 if (USE_REG_TB) {
964 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
965 load_insn |= RA(TCG_REG_TB);
966 } else {
967 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
968 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
969 }
970 tcg_out32(s, load_insn);
971 }
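/*
 * The vspltis{b,h,w} immediate (SIM) is a 5-bit signed field, hence the
 * -16..15 window checked above.  Anything else is loaded from the constant
 * pool, addressed either relative to TCG_REG_TB (R_PPC_ADDR16) or through a
 * lis/addi pair filled in by the R_PPC_ADDR32 relocation.
 */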
972
973 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
974 tcg_target_long arg)
975 {
976 switch (type) {
977 case TCG_TYPE_I32:
978 case TCG_TYPE_I64:
979 tcg_debug_assert(ret < TCG_REG_V0);
980 tcg_out_movi_int(s, type, ret, arg, false);
981 break;
982
983 case TCG_TYPE_V64:
984 case TCG_TYPE_V128:
985 tcg_debug_assert(ret >= TCG_REG_V0);
986 tcg_out_dupi_vec(s, type, ret, arg);
987 break;
988
989 default:
990 g_assert_not_reached();
991 }
992 }
993
994 static bool mask_operand(uint32_t c, int *mb, int *me)
995 {
996 uint32_t lsb, test;
997
998 /* Accept a bit pattern like:
999 0....01....1
1000 1....10....0
1001 0..01..10..0
1002 Keep track of the transitions. */
1003 if (c == 0 || c == -1) {
1004 return false;
1005 }
1006 test = c;
1007 lsb = test & -test;
1008 test += lsb;
1009 if (test & (test - 1)) {
1010 return false;
1011 }
1012
1013 *me = clz32(lsb);
1014 *mb = test ? clz32(test & -test) + 1 : 0;
1015 return true;
1016 }
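/*
 * For example, c = 0x00ffff00: lsb = 0x100 and test becomes 0x01000000,
 * a power of two, so the ones are contiguous.  Then me = clz32(0x100) = 23
 * and mb = clz32(0x01000000) + 1 = 8, i.e. the rlwinm mask covering
 * bits 8..23 (0x00ffff00).
 */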
1017
1018 static bool mask64_operand(uint64_t c, int *mb, int *me)
1019 {
1020 uint64_t lsb;
1021
1022 if (c == 0) {
1023 return false;
1024 }
1025
1026 lsb = c & -c;
1027 /* Accept 1..10..0. */
1028 if (c == -lsb) {
1029 *mb = 0;
1030 *me = clz64(lsb);
1031 return true;
1032 }
1033 /* Accept 0..01..1. */
1034 if (lsb == 1 && (c & (c + 1)) == 0) {
1035 *mb = clz64(c + 1) + 1;
1036 *me = 63;
1037 return true;
1038 }
1039 return false;
1040 }
1041
1042 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1043 {
1044 int mb, me;
1045
1046 if (mask_operand(c, &mb, &me)) {
1047 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1048 } else if ((c & 0xffff) == c) {
1049 tcg_out32(s, ANDI | SAI(src, dst, c));
1050 return;
1051 } else if ((c & 0xffff0000) == c) {
1052 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1053 return;
1054 } else {
1055 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1056 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1057 }
1058 }
1059
1060 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1061 {
1062 int mb, me;
1063
1064 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1065 if (mask64_operand(c, &mb, &me)) {
1066 if (mb == 0) {
1067 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1068 } else {
1069 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1070 }
1071 } else if ((c & 0xffff) == c) {
1072 tcg_out32(s, ANDI | SAI(src, dst, c));
1073 return;
1074 } else if ((c & 0xffff0000) == c) {
1075 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1076 return;
1077 } else {
1078 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1079 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1080 }
1081 }
1082
1083 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1084 int op_lo, int op_hi)
1085 {
1086 if (c >> 16) {
1087 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1088 src = dst;
1089 }
1090 if (c & 0xffff) {
1091 tcg_out32(s, op_lo | SAI(src, dst, c));
1092 src = dst;
1093 }
1094 }
1095
1096 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1097 {
1098 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1099 }
1100
1101 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1102 {
1103 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1104 }
1105
1106 static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
1107 {
1108 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1109 if (in_range_b(disp)) {
1110 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1111 } else {
1112 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1113 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1114 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1115 }
1116 }
1117
1118 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1119 TCGReg base, tcg_target_long offset)
1120 {
1121 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1122 bool is_int_store = false;
1123 TCGReg rs = TCG_REG_TMP1;
1124
1125 switch (opi) {
1126 case LD: case LWA:
1127 align = 3;
1128 /* FALLTHRU */
1129 default:
1130 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1131 rs = rt;
1132 break;
1133 }
1134 break;
1135 case LXSD:
1136 case STXSD:
1137 align = 3;
1138 break;
1139 case LXV:
1140 case STXV:
1141 align = 15;
1142 break;
1143 case STD:
1144 align = 3;
1145 /* FALLTHRU */
1146 case STB: case STH: case STW:
1147 is_int_store = true;
1148 break;
1149 }
1150
1151 /* For unaligned, or very large offsets, use the indexed form. */
1152 if (offset & align || offset != (int32_t)offset || opi == 0) {
1153 if (rs == base) {
1154 rs = TCG_REG_R0;
1155 }
1156 tcg_debug_assert(!is_int_store || rs != rt);
1157 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1158 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1159 return;
1160 }
1161
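    /*
     * Split an in-range offset into a high part for addis plus a
     * sign-extended 16-bit displacement: e.g. 0x12348000 becomes an addis
     * of 0x1235 and a displacement of -0x8000.  The "extra" step below
     * keeps the high part non-negative for non-negative offsets near the
     * top of the 2GB range.
     */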
1162 l0 = (int16_t)offset;
1163 offset = (offset - l0) >> 16;
1164 l1 = (int16_t)offset;
1165
1166 if (l1 < 0 && orig >= 0) {
1167 extra = 0x4000;
1168 l1 = (int16_t)(offset - 0x4000);
1169 }
1170 if (l1) {
1171 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1172 base = rs;
1173 }
1174 if (extra) {
1175 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1176 base = rs;
1177 }
1178 if (opi != ADDI || base != rt || l0 != 0) {
1179 tcg_out32(s, opi | TAI(rt & 31, base, l0));
1180 }
1181 }
1182
1183 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1184 TCGReg va, TCGReg vb, int shb)
1185 {
1186 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1187 }
1188
1189 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1190 TCGReg base, intptr_t offset)
1191 {
1192 int shift;
1193
1194 switch (type) {
1195 case TCG_TYPE_I32:
1196 if (ret < TCG_REG_V0) {
1197 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1198 break;
1199 }
1200 if (have_isa_2_07 && have_vsx) {
1201 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1202 break;
1203 }
1204 tcg_debug_assert((offset & 3) == 0);
1205 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1206 shift = (offset - 4) & 0xc;
1207 if (shift) {
1208 tcg_out_vsldoi(s, ret, ret, ret, shift);
1209 }
1210 break;
1211 case TCG_TYPE_I64:
1212 if (ret < TCG_REG_V0) {
1213 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1214 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1215 break;
1216 }
1217 /* fallthru */
1218 case TCG_TYPE_V64:
1219 tcg_debug_assert(ret >= TCG_REG_V0);
1220 if (have_vsx) {
1221 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1222 ret, base, offset);
1223 break;
1224 }
1225 tcg_debug_assert((offset & 7) == 0);
1226 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1227 if (offset & 8) {
1228 tcg_out_vsldoi(s, ret, ret, ret, 8);
1229 }
1230 break;
1231 case TCG_TYPE_V128:
1232 tcg_debug_assert(ret >= TCG_REG_V0);
1233 tcg_debug_assert((offset & 15) == 0);
1234 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1235 LVX, ret, base, offset);
1236 break;
1237 default:
1238 g_assert_not_reached();
1239 }
1240 }
1241
1242 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1243 TCGReg base, intptr_t offset)
1244 {
1245 int shift;
1246
1247 switch (type) {
1248 case TCG_TYPE_I32:
1249 if (arg < TCG_REG_V0) {
1250 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1251 break;
1252 }
1253 if (have_isa_2_07 && have_vsx) {
1254 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1255 break;
1256 }
1257         tcg_debug_assert((offset & 3) == 0);
1259 shift = (offset - 4) & 0xc;
1260 if (shift) {
1261 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1262 arg = TCG_VEC_TMP1;
1263 }
1264 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1265 break;
1266 case TCG_TYPE_I64:
1267 if (arg < TCG_REG_V0) {
1268 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1269 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1270 break;
1271 }
1272 /* fallthru */
1273 case TCG_TYPE_V64:
1274 tcg_debug_assert(arg >= TCG_REG_V0);
1275 if (have_vsx) {
1276 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1277 STXSDX, arg, base, offset);
1278 break;
1279 }
1280 tcg_debug_assert((offset & 7) == 0);
1281 if (offset & 8) {
1282 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1283 arg = TCG_VEC_TMP1;
1284 }
1285 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1286 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1287 break;
1288 case TCG_TYPE_V128:
1289 tcg_debug_assert(arg >= TCG_REG_V0);
1290 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1291 STVX, arg, base, offset);
1292 break;
1293 default:
1294 g_assert_not_reached();
1295 }
1296 }
1297
1298 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1299 TCGReg base, intptr_t ofs)
1300 {
1301 return false;
1302 }
1303
1304 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1305 int const_arg2, int cr, TCGType type)
1306 {
1307 int imm;
1308 uint32_t op;
1309
1310 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1311
1312 /* Simplify the comparisons below wrt CMPI. */
1313 if (type == TCG_TYPE_I32) {
1314 arg2 = (int32_t)arg2;
1315 }
1316
1317 switch (cond) {
1318 case TCG_COND_EQ:
1319 case TCG_COND_NE:
1320 if (const_arg2) {
1321 if ((int16_t) arg2 == arg2) {
1322 op = CMPI;
1323 imm = 1;
1324 break;
1325 } else if ((uint16_t) arg2 == arg2) {
1326 op = CMPLI;
1327 imm = 1;
1328 break;
1329 }
1330 }
1331 op = CMPL;
1332 imm = 0;
1333 break;
1334
1335 case TCG_COND_LT:
1336 case TCG_COND_GE:
1337 case TCG_COND_LE:
1338 case TCG_COND_GT:
1339 if (const_arg2) {
1340 if ((int16_t) arg2 == arg2) {
1341 op = CMPI;
1342 imm = 1;
1343 break;
1344 }
1345 }
1346 op = CMP;
1347 imm = 0;
1348 break;
1349
1350 case TCG_COND_LTU:
1351 case TCG_COND_GEU:
1352 case TCG_COND_LEU:
1353 case TCG_COND_GTU:
1354 if (const_arg2) {
1355 if ((uint16_t) arg2 == arg2) {
1356 op = CMPLI;
1357 imm = 1;
1358 break;
1359 }
1360 }
1361 op = CMPL;
1362 imm = 0;
1363 break;
1364
1365 default:
1366 tcg_abort();
1367 }
1368 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1369
1370 if (imm) {
1371 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1372 } else {
1373 if (const_arg2) {
1374 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1375 arg2 = TCG_REG_R0;
1376 }
1377 tcg_out32(s, op | RA(arg1) | RB(arg2));
1378 }
1379 }
1380
1381 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1382 TCGReg dst, TCGReg src)
1383 {
1384 if (type == TCG_TYPE_I32) {
1385 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1386 tcg_out_shri32(s, dst, dst, 5);
1387 } else {
1388 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1389 tcg_out_shri64(s, dst, dst, 6);
1390 }
1391 }
1392
1393 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1394 {
1395 /* X != 0 implies X + -1 generates a carry. Extra addition
1396 trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
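    /* (subfe computes RT = ~RA + RB + CA, so with RA = X - 1 and RB = X the
       result is exactly the carry out of the addic: 0 when X == 0, and 1
       otherwise.)  */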
1397 if (dst != src) {
1398 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1399 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1400 } else {
1401 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1402 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1403 }
1404 }
1405
1406 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1407 bool const_arg2)
1408 {
1409 if (const_arg2) {
1410 if ((uint32_t)arg2 == arg2) {
1411 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1412 } else {
1413 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1414 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1415 }
1416 } else {
1417 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1418 }
1419 return TCG_REG_R0;
1420 }
1421
1422 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1423 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1424 int const_arg2)
1425 {
1426 int crop, sh;
1427
1428 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1429
1430 /* Ignore high bits of a potential constant arg2. */
1431 if (type == TCG_TYPE_I32) {
1432 arg2 = (uint32_t)arg2;
1433 }
1434
1435 /* Handle common and trivial cases before handling anything else. */
1436 if (arg2 == 0) {
1437 switch (cond) {
1438 case TCG_COND_EQ:
1439 tcg_out_setcond_eq0(s, type, arg0, arg1);
1440 return;
1441 case TCG_COND_NE:
1442 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1443 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1444 arg1 = TCG_REG_R0;
1445 }
1446 tcg_out_setcond_ne0(s, arg0, arg1);
1447 return;
1448 case TCG_COND_GE:
1449 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1450 arg1 = arg0;
1451 /* FALLTHRU */
1452 case TCG_COND_LT:
1453 /* Extract the sign bit. */
1454 if (type == TCG_TYPE_I32) {
1455 tcg_out_shri32(s, arg0, arg1, 31);
1456 } else {
1457 tcg_out_shri64(s, arg0, arg1, 63);
1458 }
1459 return;
1460 default:
1461 break;
1462 }
1463 }
1464
1465 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1466 All other cases below are also at least 3 insns, so speed up the
1467 code generator by not considering them and always using ISEL. */
1468 if (have_isel) {
1469 int isel, tab;
1470
1471 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1472
1473 isel = tcg_to_isel[cond];
1474
1475 tcg_out_movi(s, type, arg0, 1);
1476 if (isel & 1) {
1477 /* arg0 = (bc ? 0 : 1) */
1478 tab = TAB(arg0, 0, arg0);
1479 isel &= ~1;
1480 } else {
1481 /* arg0 = (bc ? 1 : 0) */
1482 tcg_out_movi(s, type, TCG_REG_R0, 0);
1483 tab = TAB(arg0, arg0, TCG_REG_R0);
1484 }
1485 tcg_out32(s, isel | tab);
1486 return;
1487 }
1488
1489 switch (cond) {
1490 case TCG_COND_EQ:
1491 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1492 tcg_out_setcond_eq0(s, type, arg0, arg1);
1493 return;
1494
1495 case TCG_COND_NE:
1496 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1497 /* Discard the high bits only once, rather than both inputs. */
1498 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1499 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1500 arg1 = TCG_REG_R0;
1501 }
1502 tcg_out_setcond_ne0(s, arg0, arg1);
1503 return;
1504
1505 case TCG_COND_GT:
1506 case TCG_COND_GTU:
1507 sh = 30;
1508 crop = 0;
1509 goto crtest;
1510
1511 case TCG_COND_LT:
1512 case TCG_COND_LTU:
1513 sh = 29;
1514 crop = 0;
1515 goto crtest;
1516
1517 case TCG_COND_GE:
1518 case TCG_COND_GEU:
1519 sh = 31;
1520 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1521 goto crtest;
1522
1523 case TCG_COND_LE:
1524 case TCG_COND_LEU:
1525 sh = 31;
1526 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1527 crtest:
1528 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1529 if (crop) {
1530 tcg_out32(s, crop);
1531 }
1532 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1533 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1534 break;
1535
1536 default:
1537 tcg_abort();
1538 }
1539 }
1540
1541 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1542 {
1543 if (l->has_value) {
1544 bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
1545 } else {
1546 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1547 }
1548 tcg_out32(s, bc);
1549 }
1550
1551 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1552 TCGArg arg1, TCGArg arg2, int const_arg2,
1553 TCGLabel *l, TCGType type)
1554 {
1555 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1556 tcg_out_bc(s, tcg_to_bc[cond], l);
1557 }
1558
1559 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1560 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1561 TCGArg v2, bool const_c2)
1562 {
1563 /* If for some reason both inputs are zero, don't produce bad code. */
1564 if (v1 == 0 && v2 == 0) {
1565 tcg_out_movi(s, type, dest, 0);
1566 return;
1567 }
1568
1569 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1570
1571 if (have_isel) {
1572 int isel = tcg_to_isel[cond];
1573
1574 /* Swap the V operands if the operation indicates inversion. */
1575 if (isel & 1) {
1576 int t = v1;
1577 v1 = v2;
1578 v2 = t;
1579 isel &= ~1;
1580 }
1581 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1582 if (v2 == 0) {
1583 tcg_out_movi(s, type, TCG_REG_R0, 0);
1584 }
1585 tcg_out32(s, isel | TAB(dest, v1, v2));
1586 } else {
1587 if (dest == v2) {
1588 cond = tcg_invert_cond(cond);
1589 v2 = v1;
1590 } else if (dest != v1) {
1591 if (v1 == 0) {
1592 tcg_out_movi(s, type, dest, 0);
1593 } else {
1594 tcg_out_mov(s, type, dest, v1);
1595 }
1596 }
1597 /* Branch forward over one insn */
1598 tcg_out32(s, tcg_to_bc[cond] | 8);
1599 if (v2 == 0) {
1600 tcg_out_movi(s, type, dest, 0);
1601 } else {
1602 tcg_out_mov(s, type, dest, v2);
1603 }
1604 }
1605 }
1606
1607 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1608 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1609 {
1610 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1611 tcg_out32(s, opc | RA(a0) | RS(a1));
1612 } else {
1613 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1614 /* Note that the only other valid constant for a2 is 0. */
1615 if (have_isel) {
1616 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1617 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1618 } else if (!const_a2 && a0 == a2) {
1619 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1620 tcg_out32(s, opc | RA(a0) | RS(a1));
1621 } else {
1622 tcg_out32(s, opc | RA(a0) | RS(a1));
1623 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1624 if (const_a2) {
1625 tcg_out_movi(s, type, a0, 0);
1626 } else {
1627 tcg_out_mov(s, type, a0, a2);
1628 }
1629 }
1630 }
1631 }
1632
1633 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1634 const int *const_args)
1635 {
1636 static const struct { uint8_t bit1, bit2; } bits[] = {
1637 [TCG_COND_LT ] = { CR_LT, CR_LT },
1638 [TCG_COND_LE ] = { CR_LT, CR_GT },
1639 [TCG_COND_GT ] = { CR_GT, CR_GT },
1640 [TCG_COND_GE ] = { CR_GT, CR_LT },
1641 [TCG_COND_LTU] = { CR_LT, CR_LT },
1642 [TCG_COND_LEU] = { CR_LT, CR_GT },
1643 [TCG_COND_GTU] = { CR_GT, CR_GT },
1644 [TCG_COND_GEU] = { CR_GT, CR_LT },
1645 };
1646
1647 TCGCond cond = args[4], cond2;
1648 TCGArg al, ah, bl, bh;
1649 int blconst, bhconst;
1650 int op, bit1, bit2;
1651
1652 al = args[0];
1653 ah = args[1];
1654 bl = args[2];
1655 bh = args[3];
1656 blconst = const_args[2];
1657 bhconst = const_args[3];
1658
1659 switch (cond) {
1660 case TCG_COND_EQ:
1661 op = CRAND;
1662 goto do_equality;
1663 case TCG_COND_NE:
1664 op = CRNAND;
1665 do_equality:
1666 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1667 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1668 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1669 break;
1670
1671 case TCG_COND_LT:
1672 case TCG_COND_LE:
1673 case TCG_COND_GT:
1674 case TCG_COND_GE:
1675 case TCG_COND_LTU:
1676 case TCG_COND_LEU:
1677 case TCG_COND_GTU:
1678 case TCG_COND_GEU:
1679 bit1 = bits[cond].bit1;
1680 bit2 = bits[cond].bit2;
1681 op = (bit1 != bit2 ? CRANDC : CRAND);
1682 cond2 = tcg_unsigned_cond(cond);
1683
1684 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1685 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1686 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1687 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1688 break;
1689
1690 default:
1691 tcg_abort();
1692 }
1693 }
1694
1695 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1696 const int *const_args)
1697 {
1698 tcg_out_cmp2(s, args + 1, const_args + 1);
1699 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1700 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1701 }
1702
1703 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1704 const int *const_args)
1705 {
1706 tcg_out_cmp2(s, args, const_args);
1707 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1708 }
1709
1710 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1711 {
1712 uint32_t insn = HWSYNC;
1713 a0 &= TCG_MO_ALL;
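    /*
     * hwsync is a full barrier; lwsync orders everything except store-load,
     * which is enough for a pure load-load barrier; eieio orders stores to
     * cacheable memory, which is enough for a pure store-store barrier.
     */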
1714 if (a0 == TCG_MO_LD_LD) {
1715 insn = LWSYNC;
1716 } else if (a0 == TCG_MO_ST_ST) {
1717 insn = EIEIO;
1718 }
1719 tcg_out32(s, insn);
1720 }
1721
1722 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
1723 uintptr_t addr)
1724 {
1725 if (TCG_TARGET_REG_BITS == 64) {
1726 tcg_insn_unit i1, i2;
1727 intptr_t tb_diff = addr - tc_ptr;
1728 intptr_t br_diff = addr - (jmp_addr + 4);
1729 uint64_t pair;
1730
1731         /* The branch itself could reach much farther, but we must also
1732            reload TCG_REG_TB, and a single ADDI limits us to a 16-bit
1733            displacement from the TB; that case still happens quite often. */
1734 if (tb_diff == (int16_t)tb_diff) {
1735 i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
1736 i2 = B | (br_diff & 0x3fffffc);
1737 } else {
1738 intptr_t lo = (int16_t)tb_diff;
1739 intptr_t hi = (int32_t)(tb_diff - lo);
1740 assert(tb_diff == hi + lo);
1741 i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
1742 i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
1743 }
1744 #ifdef HOST_WORDS_BIGENDIAN
1745 pair = (uint64_t)i1 << 32 | i2;
1746 #else
1747 pair = (uint64_t)i2 << 32 | i1;
1748 #endif
1749
1750         /* As per the enclosing if, this is ppc64.  Avoid the _Static_assert
1751            within atomic_set that would fail to build on a ppc32 host. */
1752 atomic_set__nocheck((uint64_t *)jmp_addr, pair);
1753 flush_icache_range(jmp_addr, jmp_addr + 8);
1754 } else {
1755 intptr_t diff = addr - jmp_addr;
1756 tcg_debug_assert(in_range_b(diff));
1757 atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
1758 flush_icache_range(jmp_addr, jmp_addr + 4);
1759 }
1760 }
1761
1762 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
1763 {
1764 #ifdef _CALL_AIX
1765     /* Look through the function descriptor; if the branch is in range and
1766        the TOC value is cheap to build, we can branch there directly.  */
1767 void *tgt = ((void **)target)[0];
1768 uintptr_t toc = ((uintptr_t *)target)[1];
1769 intptr_t diff = tcg_pcrel_diff(s, tgt);
1770
1771 if (in_range_b(diff) && toc == (uint32_t)toc) {
1772 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1773 tcg_out_b(s, LK, tgt);
1774 } else {
1775 /* Fold the low bits of the constant into the addresses below. */
1776 intptr_t arg = (intptr_t)target;
1777 int ofs = (int16_t)arg;
1778
1779 if (ofs + 8 < 0x8000) {
1780 arg -= ofs;
1781 } else {
1782 ofs = 0;
1783 }
1784 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1785 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1786 tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
1787 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1788 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1789 }
1790 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1791 intptr_t diff;
1792
1793 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1794 address, which the callee uses to compute its TOC address. */
1795 /* FIXME: when the branch is in range, we could avoid r12 load if we
1796 knew that the destination uses the same TOC, and what its local
1797 entry point offset is. */
1798 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1799
1800 diff = tcg_pcrel_diff(s, target);
1801 if (in_range_b(diff)) {
1802 tcg_out_b(s, LK, target);
1803 } else {
1804 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1805 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1806 }
1807 #else
1808 tcg_out_b(s, LK, target);
1809 #endif
1810 }
1811
1812 static const uint32_t qemu_ldx_opc[16] = {
1813 [MO_UB] = LBZX,
1814 [MO_UW] = LHZX,
1815 [MO_UL] = LWZX,
1816 [MO_Q] = LDX,
1817 [MO_SW] = LHAX,
1818 [MO_SL] = LWAX,
1819 [MO_BSWAP | MO_UB] = LBZX,
1820 [MO_BSWAP | MO_UW] = LHBRX,
1821 [MO_BSWAP | MO_UL] = LWBRX,
1822 [MO_BSWAP | MO_Q] = LDBRX,
1823 };
1824
1825 static const uint32_t qemu_stx_opc[16] = {
1826 [MO_UB] = STBX,
1827 [MO_UW] = STHX,
1828 [MO_UL] = STWX,
1829 [MO_Q] = STDX,
1830 [MO_BSWAP | MO_UB] = STBX,
1831 [MO_BSWAP | MO_UW] = STHBRX,
1832 [MO_BSWAP | MO_UL] = STWBRX,
1833 [MO_BSWAP | MO_Q] = STDBRX,
1834 };
1835
1836 static const uint32_t qemu_exts_opc[4] = {
1837 EXTSB, EXTSH, EXTSW, 0
1838 };
1839
1840 #if defined (CONFIG_SOFTMMU)
1841 #include "tcg-ldst.inc.c"
1842
1843 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1844 * int mmu_idx, uintptr_t ra)
1845 */
1846 static void * const qemu_ld_helpers[16] = {
1847 [MO_UB] = helper_ret_ldub_mmu,
1848 [MO_LEUW] = helper_le_lduw_mmu,
1849 [MO_LEUL] = helper_le_ldul_mmu,
1850 [MO_LEQ] = helper_le_ldq_mmu,
1851 [MO_BEUW] = helper_be_lduw_mmu,
1852 [MO_BEUL] = helper_be_ldul_mmu,
1853 [MO_BEQ] = helper_be_ldq_mmu,
1854 };
1855
1856 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1857 * uintxx_t val, int mmu_idx, uintptr_t ra)
1858 */
1859 static void * const qemu_st_helpers[16] = {
1860 [MO_UB] = helper_ret_stb_mmu,
1861 [MO_LEUW] = helper_le_stw_mmu,
1862 [MO_LEUL] = helper_le_stl_mmu,
1863 [MO_LEQ] = helper_le_stq_mmu,
1864 [MO_BEUW] = helper_be_stw_mmu,
1865 [MO_BEUL] = helper_be_stl_mmu,
1866 [MO_BEQ] = helper_be_stq_mmu,
1867 };
1868
1869 /* We expect to use a 16-bit negative offset from ENV. */
1870 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1871 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1872
1873 /* Perform the TLB load and compare. Places the result of the comparison
1874 in CR7, loads the addend of the TLB into R3, and returns the register
1875 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1876
1877 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1878 TCGReg addrlo, TCGReg addrhi,
1879 int mem_index, bool is_read)
1880 {
1881 int cmp_off
1882 = (is_read
1883 ? offsetof(CPUTLBEntry, addr_read)
1884 : offsetof(CPUTLBEntry, addr_write));
1885 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1886 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1887 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1888 unsigned s_bits = opc & MO_SIZE;
1889 unsigned a_bits = get_alignment_bits(opc);
1890
1891 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1892 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
1893 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
1894
1895 /* Extract the page index, shifted into place for tlb index. */
1896 if (TCG_TARGET_REG_BITS == 32) {
1897 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
1898 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1899 } else {
1900 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
1901 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1902 }
1903 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
1904
1905 /* Load the TLB comparator. */
1906 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
1907 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
1908 ? LWZUX : LDUX);
1909 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
1910 } else {
1911 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
1912 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1913 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
1914 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
1915 } else {
1916 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
1917 }
1918 }
1919
1920 /* Load the TLB addend for use on the fast path. Do this asap
1921 to minimize any load use delay. */
1922 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
1923 offsetof(CPUTLBEntry, addend));
1924
1925 /* Clear the non-page, non-alignment bits from the address */
1926 if (TCG_TARGET_REG_BITS == 32) {
1927         /* We don't support unaligned accesses on 32-bit hosts.
1928 * Preserve the bottom bits and thus trigger a comparison
1929 * failure on unaligned accesses.
1930 */
1931 if (a_bits < s_bits) {
1932 a_bits = s_bits;
1933 }
1934 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
1935 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1936 } else {
1937 TCGReg t = addrlo;
1938
1939 /* If the access is unaligned, we need to make sure we fail if we
1940 * cross a page boundary. The trick is to add the access size-1
1941 * to the address before masking the low bits. That will make the
1942 * address overflow to the next page if we cross a page boundary,
1943 * which will then force a mismatch of the TLB compare.
1944 */
1945 if (a_bits < s_bits) {
1946 unsigned a_mask = (1 << a_bits) - 1;
1947 unsigned s_mask = (1 << s_bits) - 1;
1948 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
1949 t = TCG_REG_R0;
1950 }
1951
1952 /* Mask the address for the requested alignment. */
1953 if (TARGET_LONG_BITS == 32) {
1954 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
1955 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1956 /* Zero-extend the address for use in the final address. */
1957 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
1958 addrlo = TCG_REG_R4;
1959 } else if (a_bits == 0) {
1960 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
1961 } else {
1962 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
1963 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
1964 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
1965 }
1966 }
1967
1968 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1969 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1970 0, 7, TCG_TYPE_I32);
1971 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
1972 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1973 } else {
1974 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1975 0, 7, TCG_TYPE_TL);
1976 }
1977
1978 return addrlo;
1979 }
1980
1981 /* Record the context of a call to the out-of-line helper code for the
1982    slow path of a load or store, so that we can later generate the correct
1983    helper code. */
1984 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1985 TCGReg datalo_reg, TCGReg datahi_reg,
1986 TCGReg addrlo_reg, TCGReg addrhi_reg,
1987 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
1988 {
1989 TCGLabelQemuLdst *label = new_ldst_label(s);
1990
1991 label->is_ld = is_ld;
1992 label->oi = oi;
1993 label->datalo_reg = datalo_reg;
1994 label->datahi_reg = datahi_reg;
1995 label->addrlo_reg = addrlo_reg;
1996 label->addrhi_reg = addrhi_reg;
1997 label->raddr = raddr;
1998 label->label_ptr[0] = lptr;
1999 }
2000
2001 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2002 {
2003 TCGMemOpIdx oi = lb->oi;
2004 MemOp opc = get_memop(oi);
2005 TCGReg hi, lo, arg = TCG_REG_R3;
2006
2007 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
2008 return false;
2009 }
2010
2011 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2012
2013 lo = lb->addrlo_reg;
2014 hi = lb->addrhi_reg;
2015 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2016 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2017 arg |= 1;
2018 #endif
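/*
 * Editorial note: "arg" holds a GPR number starting at R3, so
 * "arg |= 1" rounds it up to the next odd-numbered register.  On the
 * 32-bit SysV ABI a 64-bit argument must occupy an aligned register
 * pair (r3:r4, r5:r6, ...), which is what this achieves.
 */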
2019 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2020 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2021 } else {
2022 /* If the address needed to be zero-extended, we'll have already
2023 placed it in R4. The only remaining case is 64-bit guest. */
2024 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2025 }
2026
2027 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2028 tcg_out32(s, MFSPR | RT(arg) | LR);
2029
2030 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2031
2032 lo = lb->datalo_reg;
2033 hi = lb->datahi_reg;
2034 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2035 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
2036 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
2037 } else if (opc & MO_SIGN) {
2038 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
2039 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
2040 } else {
2041 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
2042 }
2043
2044 tcg_out_b(s, 0, lb->raddr);
2045 return true;
2046 }
2047
2048 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2049 {
2050 TCGMemOpIdx oi = lb->oi;
2051 MemOp opc = get_memop(oi);
2052 MemOp s_bits = opc & MO_SIZE;
2053 TCGReg hi, lo, arg = TCG_REG_R3;
2054
2055 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
2056 return false;
2057 }
2058
2059 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2060
2061 lo = lb->addrlo_reg;
2062 hi = lb->addrhi_reg;
2063 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2064 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2065 arg |= 1;
2066 #endif
2067 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2068 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2069 } else {
2070 /* If the address needed to be zero-extended, we'll have already
2071 placed it in R4. The only remaining case is 64-bit guest. */
2072 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2073 }
2074
2075 lo = lb->datalo_reg;
2076 hi = lb->datahi_reg;
2077 if (TCG_TARGET_REG_BITS == 32) {
2078 switch (s_bits) {
2079 case MO_64:
2080 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2081 arg |= 1;
2082 #endif
2083 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2084 /* FALLTHRU */
2085 case MO_32:
2086 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2087 break;
2088 default:
2089 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
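/* For MO_8/MO_16 the RLWINM above keeps only the low (8 << s_bits)
 * bits (e.g. the mask starts at bit 24 for MO_8), zero-extending the
 * store data as expected by the helper's argument type.
 */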
2090 break;
2091 }
2092 } else {
2093 if (s_bits == MO_64) {
2094 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
2095 } else {
2096 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
2097 }
2098 }
2099
2100 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2101 tcg_out32(s, MFSPR | RT(arg) | LR);
2102
2103 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2104
2105 tcg_out_b(s, 0, lb->raddr);
2106 return true;
2107 }
2108 #endif /* SOFTMMU */
2109
2110 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2111 {
2112 TCGReg datalo, datahi, addrlo, rbase;
2113 TCGReg addrhi __attribute__((unused));
2114 TCGMemOpIdx oi;
2115 MemOp opc, s_bits;
2116 #ifdef CONFIG_SOFTMMU
2117 int mem_index;
2118 tcg_insn_unit *label_ptr;
2119 #endif
2120
2121 datalo = *args++;
2122 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2123 addrlo = *args++;
2124 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2125 oi = *args++;
2126 opc = get_memop(oi);
2127 s_bits = opc & MO_SIZE;
2128
2129 #ifdef CONFIG_SOFTMMU
2130 mem_index = get_mmuidx(oi);
2131 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2132
2133 /* Emit a conditional branch-link, patched later (reloc_pc14) to reach the slow path; LK leaves the fast-path return address in LR for the helper. */
2134 label_ptr = s->code_ptr;
2135 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2136
2137 rbase = TCG_REG_R3;
2138 #else /* !CONFIG_SOFTMMU */
2139 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2140 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2141 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2142 addrlo = TCG_REG_TMP1;
2143 }
2144 #endif
2145
2146 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2147 if (opc & MO_BSWAP) {
2148 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2149 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2150 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
2151 } else if (rbase != 0) {
2152 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2153 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
2154 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
2155 } else if (addrlo == datahi) {
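/* The high destination register aliases the address register, so
 * load the low word (offset 4) first to avoid clobbering the
 * address before the second load. */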
2156 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2157 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2158 } else {
2159 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2160 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2161 }
2162 } else {
2163 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2164 if (!have_isa_2_06 && insn == LDBRX) {
2165 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2166 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2167 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2168 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
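/*
 * Sketch of the fallback above: two byte-reversed word loads fetch the
 * low half into datalo and the high half into R0; RLDIMI with SH = 32,
 * MB = 0 then inserts R0 into the upper 32 bits of datalo, reproducing
 * what a single LDBRX would have produced.
 */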
2169 } else if (insn) {
2170 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2171 } else {
2172 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2173 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2174 insn = qemu_exts_opc[s_bits];
2175 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2176 }
2177 }
2178
2179 #ifdef CONFIG_SOFTMMU
2180 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2181 s->code_ptr, label_ptr);
2182 #endif
2183 }
2184
2185 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2186 {
2187 TCGReg datalo, datahi, addrlo, rbase;
2188 TCGReg addrhi __attribute__((unused));
2189 TCGMemOpIdx oi;
2190 MemOp opc, s_bits;
2191 #ifdef CONFIG_SOFTMMU
2192 int mem_index;
2193 tcg_insn_unit *label_ptr;
2194 #endif
2195
2196 datalo = *args++;
2197 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2198 addrlo = *args++;
2199 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2200 oi = *args++;
2201 opc = get_memop(oi);
2202 s_bits = opc & MO_SIZE;
2203
2204 #ifdef CONFIG_SOFTMMU
2205 mem_index = get_mmuidx(oi);
2206 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2207
2208 /* Emit a conditional branch-link, patched later (reloc_pc14) to reach the slow path; LK leaves the fast-path return address in LR for the helper. */
2209 label_ptr = s->code_ptr;
2210 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2211
2212 rbase = TCG_REG_R3;
2213 #else /* !CONFIG_SOFTMMU */
2214 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2215 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2216 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2217 addrlo = TCG_REG_TMP1;
2218 }
2219 #endif
2220
2221 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2222 if (opc & MO_BSWAP) {
2223 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2224 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2225 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2226 } else if (rbase != 0) {
2227 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2228 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2229 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2230 } else {
2231 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2232 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2233 }
2234 } else {
2235 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2236 if (!have_isa_2_06 && insn == STDBRX) {
2237 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2238 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2239 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2240 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
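/* Equivalent of STDBRX: store the low 32 bits byte-reversed at the
 * address and the high 32 bits byte-reversed at address + 4.
 */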
2241 } else {
2242 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2243 }
2244 }
2245
2246 #ifdef CONFIG_SOFTMMU
2247 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2248 s->code_ptr, label_ptr);
2249 #endif
2250 }
2251
2252 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2253 {
2254 int i;
2255 for (i = 0; i < count; ++i) {
2256 p[i] = NOP;
2257 }
2258 }
2259
2260 /* Parameters for function call generation, used in tcg.c. */
2261 #define TCG_TARGET_STACK_ALIGN 16
2262 #define TCG_TARGET_EXTEND_ARGS 1
2263
2264 #ifdef _CALL_AIX
2265 # define LINK_AREA_SIZE (6 * SZR)
2266 # define LR_OFFSET (1 * SZR)
2267 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2268 #elif defined(TCG_TARGET_CALL_DARWIN)
2269 # define LINK_AREA_SIZE (6 * SZR)
2270 # define LR_OFFSET (2 * SZR)
2271 #elif TCG_TARGET_REG_BITS == 64
2272 # if defined(_CALL_ELF) && _CALL_ELF == 2
2273 # define LINK_AREA_SIZE (4 * SZR)
2274 # define LR_OFFSET (1 * SZR)
2275 # endif
2276 #else /* TCG_TARGET_REG_BITS == 32 */
2277 # if defined(_CALL_SYSV)
2278 # define LINK_AREA_SIZE (2 * SZR)
2279 # define LR_OFFSET (1 * SZR)
2280 # endif
2281 #endif
2282 #ifndef LR_OFFSET
2283 # error "Unhandled abi"
2284 #endif
2285 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2286 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2287 #endif
2288
2289 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2290 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2291
2292 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2293 + TCG_STATIC_CALL_ARGS_SIZE \
2294 + CPU_TEMP_BUF_SIZE \
2295 + REG_SAVE_SIZE \
2296 + TCG_TARGET_STACK_ALIGN - 1) \
2297 & -TCG_TARGET_STACK_ALIGN)
2298
2299 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
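/*
 * Frame layout implied by the definitions above (a sketch, low
 * addresses first), relative to the stack pointer after the prologue's
 * STDU/STWU:
 *
 *   SP + 0                             back chain / link area
 *   SP + TCG_TARGET_CALL_STACK_OFFSET  outgoing call arguments
 *   ...                                CPU temp buffer (tcg_set_frame)
 *   SP + REG_SAVE_BOT                  callee-saved GPRs
 *   SP + FRAME_SIZE                    caller's frame (LR at +LR_OFFSET)
 */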
2300
2301 static void tcg_target_qemu_prologue(TCGContext *s)
2302 {
2303 int i;
2304
2305 #ifdef _CALL_AIX
2306 void **desc = (void **)s->code_ptr;
2307 desc[0] = desc + 2; /* entry point */
2308 desc[1] = 0; /* environment pointer */
2309 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2310 #endif
2311
2312 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2313 CPU_TEMP_BUF_SIZE);
2314
2315 /* Prologue */
2316 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2317 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2318 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2319
2320 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2321 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2322 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2323 }
2324 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2325
2326 #ifndef CONFIG_SOFTMMU
2327 if (guest_base) {
2328 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2329 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2330 }
2331 #endif
2332
2333 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2334 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2335 if (USE_REG_TB) {
2336 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2337 }
2338 tcg_out32(s, BCCTR | BO_ALWAYS);
2339
2340 /* Epilogue */
2341 s->code_gen_epilogue = tb_ret_addr = s->code_ptr;
2342
2343 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2344 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2345 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2346 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2347 }
2348 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2349 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2350 tcg_out32(s, BCLR | BO_ALWAYS);
2351 }
2352
2353 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
2354 const int *const_args)
2355 {
2356 TCGArg a0, a1, a2;
2357 int c;
2358
2359 switch (opc) {
2360 case INDEX_op_exit_tb:
2361 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
2362 tcg_out_b(s, 0, tb_ret_addr);
2363 break;
2364 case INDEX_op_goto_tb:
2365 if (s->tb_jmp_insn_offset) {
2366 /* Direct jump. */
2367 if (TCG_TARGET_REG_BITS == 64) {
2368 /* Ensure the next insns are 8-byte aligned. */
2369 if ((uintptr_t)s->code_ptr & 7) {
2370 tcg_out32(s, NOP);
2371 }
2372 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2373 tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2374 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
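/*
 * The ADDIS/ADDI pair above adds a (currently zero) 32-bit
 * displacement, split into high and low 16-bit halves, to TCG_REG_TB.
 * The immediates are filled in when the TB is linked; the 8-byte
 * alignment enforced above is presumably what allows both
 * instructions to be rewritten with a single atomic store.
 */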
2375 } else {
2376 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2377 tcg_out32(s, B);
2378 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
2379 break;
2380 }
2381 } else {
2382 /* Indirect jump. */
2383 tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
2384 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
2385 (intptr_t)(s->tb_jmp_target_addr + args[0]));
2386 }
2387 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2388 tcg_out32(s, BCCTR | BO_ALWAYS);
2389 set_jmp_reset_offset(s, args[0]);
2390 if (USE_REG_TB) {
2391 /* For the unlinked case, need to reset TCG_REG_TB. */
2392 c = -tcg_current_code_size(s);
2393 assert(c == (int16_t)c);
2394 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
2395 }
2396 break;
2397 case INDEX_op_goto_ptr:
2398 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2399 if (USE_REG_TB) {
2400 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2401 }
2402 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2403 tcg_out32(s, BCCTR | BO_ALWAYS);
2404 break;
2405 case INDEX_op_br:
2406 {
2407 TCGLabel *l = arg_label(args[0]);
2408 uint32_t insn = B;
2409
2410 if (l->has_value) {
2411 insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
2412 } else {
2413 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2414 }
2415 tcg_out32(s, insn);
2416 }
2417 break;
2418 case INDEX_op_ld8u_i32:
2419 case INDEX_op_ld8u_i64:
2420 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2421 break;
2422 case INDEX_op_ld8s_i32:
2423 case INDEX_op_ld8s_i64:
2424 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2425 tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
2426 break;
2427 case INDEX_op_ld16u_i32:
2428 case INDEX_op_ld16u_i64:
2429 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2430 break;
2431 case INDEX_op_ld16s_i32:
2432 case INDEX_op_ld16s_i64:
2433 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2434 break;
2435 case INDEX_op_ld_i32:
2436 case INDEX_op_ld32u_i64:
2437 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2438 break;
2439 case INDEX_op_ld32s_i64:
2440 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2441 break;
2442 case INDEX_op_ld_i64:
2443 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2444 break;
2445 case INDEX_op_st8_i32:
2446 case INDEX_op_st8_i64:
2447 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2448 break;
2449 case INDEX_op_st16_i32:
2450 case INDEX_op_st16_i64:
2451 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2452 break;
2453 case INDEX_op_st_i32:
2454 case INDEX_op_st32_i64:
2455 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2456 break;
2457 case INDEX_op_st_i64:
2458 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2459 break;
2460
2461 case INDEX_op_add_i32:
2462 a0 = args[0], a1 = args[1], a2 = args[2];
2463 if (const_args[2]) {
2464 do_addi_32:
2465 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2466 } else {
2467 tcg_out32(s, ADD | TAB(a0, a1, a2));
2468 }
2469 break;
2470 case INDEX_op_sub_i32:
2471 a0 = args[0], a1 = args[1], a2 = args[2];
2472 if (const_args[1]) {
2473 if (const_args[2]) {
2474 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2475 } else {
2476 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2477 }
2478 } else if (const_args[2]) {
2479 a2 = -a2;
2480 goto do_addi_32;
2481 } else {
2482 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2483 }
2484 break;
2485
2486 case INDEX_op_and_i32:
2487 a0 = args[0], a1 = args[1], a2 = args[2];
2488 if (const_args[2]) {
2489 tcg_out_andi32(s, a0, a1, a2);
2490 } else {
2491 tcg_out32(s, AND | SAB(a1, a0, a2));
2492 }
2493 break;
2494 case INDEX_op_and_i64:
2495 a0 = args[0], a1 = args[1], a2 = args[2];
2496 if (const_args[2]) {
2497 tcg_out_andi64(s, a0, a1, a2);
2498 } else {
2499 tcg_out32(s, AND | SAB(a1, a0, a2));
2500 }
2501 break;
2502 case INDEX_op_or_i64:
2503 case INDEX_op_or_i32:
2504 a0 = args[0], a1 = args[1], a2 = args[2];
2505 if (const_args[2]) {
2506 tcg_out_ori32(s, a0, a1, a2);
2507 } else {
2508 tcg_out32(s, OR | SAB(a1, a0, a2));
2509 }
2510 break;
2511 case INDEX_op_xor_i64:
2512 case INDEX_op_xor_i32:
2513 a0 = args[0], a1 = args[1], a2 = args[2];
2514 if (const_args[2]) {
2515 tcg_out_xori32(s, a0, a1, a2);
2516 } else {
2517 tcg_out32(s, XOR | SAB(a1, a0, a2));
2518 }
2519 break;
2520 case INDEX_op_andc_i32:
2521 a0 = args[0], a1 = args[1], a2 = args[2];
2522 if (const_args[2]) {
2523 tcg_out_andi32(s, a0, a1, ~a2);
2524 } else {
2525 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2526 }
2527 break;
2528 case INDEX_op_andc_i64:
2529 a0 = args[0], a1 = args[1], a2 = args[2];
2530 if (const_args[2]) {
2531 tcg_out_andi64(s, a0, a1, ~a2);
2532 } else {
2533 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2534 }
2535 break;
2536 case INDEX_op_orc_i32:
2537 if (const_args[2]) {
2538 tcg_out_ori32(s, args[0], args[1], ~args[2]);
2539 break;
2540 }
2541 /* FALLTHRU */
2542 case INDEX_op_orc_i64:
2543 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2544 break;
2545 case INDEX_op_eqv_i32:
2546 if (const_args[2]) {
2547 tcg_out_xori32(s, args[0], args[1], ~args[2]);
2548 break;
2549 }
2550 /* FALLTHRU */
2551 case INDEX_op_eqv_i64:
2552 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2553 break;
2554 case INDEX_op_nand_i32:
2555 case INDEX_op_nand_i64:
2556 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2557 break;
2558 case INDEX_op_nor_i32:
2559 case INDEX_op_nor_i64:
2560 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2561 break;
2562
2563 case INDEX_op_clz_i32:
2564 tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2565 args[2], const_args[2]);
2566 break;
2567 case INDEX_op_ctz_i32:
2568 tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2569 args[2], const_args[2]);
2570 break;
2571 case INDEX_op_ctpop_i32:
2572 tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2573 break;
2574
2575 case INDEX_op_clz_i64:
2576 tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2577 args[2], const_args[2]);
2578 break;
2579 case INDEX_op_ctz_i64:
2580 tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2581 args[2], const_args[2]);
2582 break;
2583 case INDEX_op_ctpop_i64:
2584 tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2585 break;
2586
2587 case INDEX_op_mul_i32:
2588 a0 = args[0], a1 = args[1], a2 = args[2];
2589 if (const_args[2]) {
2590 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2591 } else {
2592 tcg_out32(s, MULLW | TAB(a0, a1, a2));
2593 }
2594 break;
2595
2596 case INDEX_op_div_i32:
2597 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2598 break;
2599
2600 case INDEX_op_divu_i32:
2601 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2602 break;
2603
2604 case INDEX_op_shl_i32:
2605 if (const_args[2]) {
2606 tcg_out_shli32(s, args[0], args[1], args[2]);
2607 } else {
2608 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2609 }
2610 break;
2611 case INDEX_op_shr_i32:
2612 if (const_args[2]) {
2613 tcg_out_shri32(s, args[0], args[1], args[2]);
2614 } else {
2615 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2616 }
2617 break;
2618 case INDEX_op_sar_i32:
2619 if (const_args[2]) {
2620 tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
2621 } else {
2622 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2623 }
2624 break;
2625 case INDEX_op_rotl_i32:
2626 if (const_args[2]) {
2627 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2628 } else {
2629 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2630 | MB(0) | ME(31));
2631 }
2632 break;
2633 case INDEX_op_rotr_i32:
2634 if (const_args[2]) {
2635 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2636 } else {
2637 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2638 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2639 | MB(0) | ME(31));
2640 }
2641 break;
2642
2643 case INDEX_op_brcond_i32:
2644 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2645 arg_label(args[3]), TCG_TYPE_I32);
2646 break;
2647 case INDEX_op_brcond_i64:
2648 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2649 arg_label(args[3]), TCG_TYPE_I64);
2650 break;
2651 case INDEX_op_brcond2_i32:
2652 tcg_out_brcond2(s, args, const_args);
2653 break;
2654
2655 case INDEX_op_neg_i32:
2656 case INDEX_op_neg_i64:
2657 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2658 break;
2659
2660 case INDEX_op_not_i32:
2661 case INDEX_op_not_i64:
2662 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2663 break;
2664
2665 case INDEX_op_add_i64:
2666 a0 = args[0], a1 = args[1], a2 = args[2];
2667 if (const_args[2]) {
2668 do_addi_64:
2669 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2670 } else {
2671 tcg_out32(s, ADD | TAB(a0, a1, a2));
2672 }
2673 break;
2674 case INDEX_op_sub_i64:
2675 a0 = args[0], a1 = args[1], a2 = args[2];
2676 if (const_args[1]) {
2677 if (const_args[2]) {
2678 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2679 } else {
2680 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2681 }
2682 } else if (const_args[2]) {
2683 a2 = -a2;
2684 goto do_addi_64;
2685 } else {
2686 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2687 }
2688 break;
2689
2690 case INDEX_op_shl_i64:
2691 if (const_args[2]) {
2692 tcg_out_shli64(s, args[0], args[1], args[2]);
2693 } else {
2694 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2695 }
2696 break;
2697 case INDEX_op_shr_i64:
2698 if (const_args[2]) {
2699 tcg_out_shri64(s, args[0], args[1], args[2]);
2700 } else {
2701 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2702 }
2703 break;
2704 case INDEX_op_sar_i64:
2705 if (const_args[2]) {
2706 int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
2707 tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
2708 } else {
2709 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2710 }
2711 break;
2712 case INDEX_op_rotl_i64:
2713 if (const_args[2]) {
2714 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2715 } else {
2716 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2717 }
2718 break;
2719 case INDEX_op_rotr_i64:
2720 if (const_args[2]) {
2721 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2722 } else {
2723 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2724 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2725 }
2726 break;
2727
2728 case INDEX_op_mul_i64:
2729 a0 = args[0], a1 = args[1], a2 = args[2];
2730 if (const_args[2]) {
2731 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2732 } else {
2733 tcg_out32(s, MULLD | TAB(a0, a1, a2));
2734 }
2735 break;
2736 case INDEX_op_div_i64:
2737 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2738 break;
2739 case INDEX_op_divu_i64:
2740 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2741 break;
2742
2743 case INDEX_op_qemu_ld_i32:
2744 tcg_out_qemu_ld(s, args, false);
2745 break;
2746 case INDEX_op_qemu_ld_i64:
2747 tcg_out_qemu_ld(s, args, true);
2748 break;
2749 case INDEX_op_qemu_st_i32:
2750 tcg_out_qemu_st(s, args, false);
2751 break;
2752 case INDEX_op_qemu_st_i64:
2753 tcg_out_qemu_st(s, args, true);
2754 break;
2755
2756 case INDEX_op_ext8s_i32:
2757 case INDEX_op_ext8s_i64:
2758 c = EXTSB;
2759 goto gen_ext;
2760 case INDEX_op_ext16s_i32:
2761 case INDEX_op_ext16s_i64:
2762 c = EXTSH;
2763 goto gen_ext;
2764 case INDEX_op_ext_i32_i64:
2765 case INDEX_op_ext32s_i64:
2766 c = EXTSW;
2767 goto gen_ext;
2768 gen_ext:
2769 tcg_out32(s, c | RS(args[1]) | RA(args[0]));
2770 break;
2771 case INDEX_op_extu_i32_i64:
2772 tcg_out_ext32u(s, args[0], args[1]);
2773 break;
2774
2775 case INDEX_op_setcond_i32:
2776 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2777 const_args[2]);
2778 break;
2779 case INDEX_op_setcond_i64:
2780 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2781 const_args[2]);
2782 break;
2783 case INDEX_op_setcond2_i32:
2784 tcg_out_setcond2(s, args, const_args);
2785 break;
2786
2787 case INDEX_op_bswap16_i32:
2788 case INDEX_op_bswap16_i64:
2789 a0 = args[0], a1 = args[1];
2790 /* a1 = abcd */
2791 if (a0 != a1) {
2792 /* a0 = (a1 r<< 24) & 0xff # 000c */
2793 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
2794 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
2795 tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
2796 } else {
2797 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
2798 tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
2799 /* a0 = (a1 r<< 24) & 0xff # 000c */
2800 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
2801 /* a0 = a0 | r0 # 00dc */
2802 tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
2803 }
2804 break;
2805
2806 case INDEX_op_bswap32_i32:
2807 case INDEX_op_bswap32_i64:
2808 /* Stolen from gcc's builtin_bswap32 */
2809 a1 = args[1];
2810 a0 = args[0] == a1 ? TCG_REG_R0 : args[0];
2811
2812 /* a1 = args[1] # abcd */
2813 /* a0 = rotate_left (a1, 8) # bcda */
2814 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
2815 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
2816 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
2817 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
2818 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
2819
2820 if (a0 == TCG_REG_R0) {
2821 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2822 }
2823 break;
2824
2825 case INDEX_op_bswap64_i64:
2826 a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
2827 if (a0 == a1) {
2828 a0 = TCG_REG_R0;
2829 a2 = a1;
2830 }
2831
2832 /* a1 = # abcd efgh */
2833 /* a0 = rl32(a1, 8) # 0000 fghe */
2834 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
2835 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
2836 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
2837 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
2838 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
2839
2840 /* a0 = rl64(a0, 32) # hgfe 0000 */
2841 /* a2 = rl64(a1, 32) # efgh abcd */
2842 tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
2843 tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
2844
2845 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
2846 tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
2847 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
2848 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
2849 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
2850 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
2851
2852 if (a0 == TCG_REG_R0) {
2853 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2854 }
2855 break;
2856
2857 case INDEX_op_deposit_i32:
2858 if (const_args[2]) {
2859 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
2860 tcg_out_andi32(s, args[0], args[0], ~mask);
2861 } else {
2862 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
2863 32 - args[3] - args[4], 31 - args[3]);
2864 }
2865 break;
2866 case INDEX_op_deposit_i64:
2867 if (const_args[2]) {
2868 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
2869 tcg_out_andi64(s, args[0], args[0], ~mask);
2870 } else {
2871 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
2872 64 - args[3] - args[4]);
2873 }
2874 break;
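/*
 * Worked example (illustrative) for the non-constant deposit_i32 case:
 * with ofs = args[3] = 8 and len = args[4] = 16 this emits
 * RLWIMI dst,src,8,8,23 -- rotate the source left by 8 and insert it
 * under a 16-bit mask, landing in bits 8..23 of the destination as
 * deposit requires.
 */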
2875
2876 case INDEX_op_extract_i32:
2877 tcg_out_rlw(s, RLWINM, args[0], args[1],
2878 32 - args[2], 32 - args[3], 31);
2879 break;
2880 case INDEX_op_extract_i64:
2881 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
2882 break;
2883
2884 case INDEX_op_movcond_i32:
2885 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
2886 args[3], args[4], const_args[2]);
2887 break;
2888 case INDEX_op_movcond_i64:
2889 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
2890 args[3], args[4], const_args[2]);
2891 break;
2892
2893 #if TCG_TARGET_REG_BITS == 64
2894 case INDEX_op_add2_i64:
2895 #else
2896 case INDEX_op_add2_i32:
2897 #endif
2898 /* Note that the CA bit is defined based on the word size of the
2899 environment. So in 64-bit mode it's always carry-out of bit 63.
2900 The fallback code using deposit works just as well for 32-bit. */
2901 a0 = args[0], a1 = args[1];
2902 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
2903 a0 = TCG_REG_R0;
2904 }
2905 if (const_args[4]) {
2906 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
2907 } else {
2908 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
2909 }
2910 if (const_args[5]) {
2911 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
2912 } else {
2913 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
2914 }
2915 if (a0 != args[0]) {
2916 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2917 }
2918 break;
2919
2920 #if TCG_TARGET_REG_BITS == 64
2921 case INDEX_op_sub2_i64:
2922 #else
2923 case INDEX_op_sub2_i32:
2924 #endif
2925 a0 = args[0], a1 = args[1];
2926 if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
2927 a0 = TCG_REG_R0;
2928 }
2929 if (const_args[2]) {
2930 tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
2931 } else {
2932 tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
2933 }
2934 if (const_args[3]) {
2935 tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
2936 } else {
2937 tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
2938 }
2939 if (a0 != args[0]) {
2940 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2941 }
2942 break;
2943
2944 case INDEX_op_muluh_i32:
2945 tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
2946 break;
2947 case INDEX_op_mulsh_i32:
2948 tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
2949 break;
2950 case INDEX_op_muluh_i64:
2951 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
2952 break;
2953 case INDEX_op_mulsh_i64:
2954 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
2955 break;
2956
2957 case INDEX_op_mb:
2958 tcg_out_mb(s, args[0]);
2959 break;
2960
2961 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2962 case INDEX_op_mov_i64:
2963 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2964 case INDEX_op_movi_i64:
2965 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2966 default:
2967 tcg_abort();
2968 }
2969 }
2970
2971 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2972 {
2973 switch (opc) {
2974 case INDEX_op_and_vec:
2975 case INDEX_op_or_vec:
2976 case INDEX_op_xor_vec:
2977 case INDEX_op_andc_vec:
2978 case INDEX_op_not_vec:
2979 return 1;
2980 case INDEX_op_orc_vec:
2981 return have_isa_2_07;
2982 case INDEX_op_add_vec:
2983 case INDEX_op_sub_vec:
2984 case INDEX_op_smax_vec:
2985 case INDEX_op_smin_vec:
2986 case INDEX_op_umax_vec:
2987 case INDEX_op_umin_vec:
2988 case INDEX_op_shlv_vec:
2989 case INDEX_op_shrv_vec:
2990 case INDEX_op_sarv_vec:
2991 return vece <= MO_32 || have_isa_2_07;
2992 case INDEX_op_ssadd_vec:
2993 case INDEX_op_sssub_vec:
2994 case INDEX_op_usadd_vec:
2995 case INDEX_op_ussub_vec:
2996 return vece <= MO_32;
2997 case INDEX_op_cmp_vec:
2998 case INDEX_op_shli_vec:
2999 case INDEX_op_shri_vec:
3000 case INDEX_op_sari_vec:
3001 return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3002 case INDEX_op_neg_vec:
3003 return vece >= MO_32 && have_isa_3_00;
3004 case INDEX_op_mul_vec:
3005 switch (vece) {
3006 case MO_8:
3007 case MO_16:
3008 return -1;
3009 case MO_32:
3010 return have_isa_2_07 ? 1 : -1;
3011 }
3012 return 0;
3013 case INDEX_op_bitsel_vec:
3014 return have_vsx;
3015 default:
3016 return 0;
3017 }
3018 }
3019
3020 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3021 TCGReg dst, TCGReg src)
3022 {
3023 tcg_debug_assert(dst >= TCG_REG_V0);
3024 tcg_debug_assert(src >= TCG_REG_V0);
3025
3026 /*
3027 * Recall we use (or emulate) VSX integer loads, so the integer is
3028 * right justified within the left (zero-index) double-word.
3029 */
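/*
 * Hence the element numbers used below: with the value right-justified
 * in the zero-index doubleword it sits in byte element 7 (MO_8),
 * halfword element 3 (MO_16) or word element 1 (MO_32), which is the
 * immediate encoded via the (N << 16) term in each VSPLT*.
 */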
3030 switch (vece) {
3031 case MO_8:
3032 tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3033 break;
3034 case MO_16:
3035 tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3036 break;
3037 case MO_32:
3038 tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3039 break;
3040 case MO_64:
3041 if (have_vsx) {
3042 tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3043 break;
3044 }
3045 tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3046 tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3047 break;
3048 default:
3049 g_assert_not_reached();
3050 }
3051 return true;
3052 }
3053
3054 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3055 TCGReg out, TCGReg base, intptr_t offset)
3056 {
3057 int elt;
3058
3059 tcg_debug_assert(out >= TCG_REG_V0);
3060 switch (vece) {
3061 case MO_8:
3062 if (have_isa_3_00) {
3063 tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3064 } else {
3065 tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3066 }
3067 elt = extract32(offset, 0, 4);
3068 #ifndef HOST_WORDS_BIGENDIAN
3069 elt ^= 15;
3070 #endif
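/*
 * "elt" is the byte's position within its 16-byte-aligned quadword,
 * which is where the LVEBX (or LXV) load leaves it in the vector
 * register; on a little-endian host the in-register byte numbering is
 * reversed, hence the "elt ^= 15" above.
 */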
3071 tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3072 break;
3073 case MO_16:
3074 tcg_debug_assert((offset & 1) == 0);
3075 if (have_isa_3_00) {
3076 tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3077 } else {
3078 tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3079 }
3080 elt = extract32(offset, 1, 3);
3081 #ifndef HOST_WORDS_BIGENDIAN
3082 elt ^= 7;
3083 #endif
3084 tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3085 break;
3086 case MO_32:
3087 if (have_isa_3_00) {
3088 tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3089 break;
3090 }
3091 tcg_debug_assert((offset & 3) == 0);
3092 tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3093 elt = extract32(offset, 2, 2);
3094 #ifndef HOST_WORDS_BIGENDIAN
3095 elt ^= 3;
3096 #endif
3097 tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3098 break;
3099 case MO_64:
3100 if (have_vsx) {
3101 tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3102 break;
3103 }
3104 tcg_debug_assert((offset & 7) == 0);
3105 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3106 tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3107 elt = extract32(offset, 3, 1);
3108 #ifndef HOST_WORDS_BIGENDIAN
3109 elt = !elt;
3110 #endif
3111 if (elt) {
3112 tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3113 } else {
3114 tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3115 }
3116 break;
3117 default:
3118 g_assert_not_reached();
3119 }
3120 return true;
3121 }
3122
3123 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3124 unsigned vecl, unsigned vece,
3125 const TCGArg *args, const int *const_args)
3126 {
3127 static const uint32_t
3128 add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3129 sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3130 neg_op[4] = { 0, 0, VNEGW, VNEGD },
3131 eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3132 ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3133 gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3134 gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3135 ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3136 usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3137 sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3138 ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3139 umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3140 smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3141 umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3142 smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3143 shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3144 shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3145 sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3146 mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3147 mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3148 muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3149 mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3150 pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3151 rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3152
3153 TCGType type = vecl + TCG_TYPE_V64;
3154 TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3155 uint32_t insn;
3156
3157 switch (opc) {
3158 case INDEX_op_ld_vec:
3159 tcg_out_ld(s, type, a0, a1, a2);
3160 return;
3161 case INDEX_op_st_vec:
3162 tcg_out_st(s, type, a0, a1, a2);
3163 return;
3164 case INDEX_op_dupm_vec:
3165 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3166 return;
3167
3168 case INDEX_op_add_vec:
3169 insn = add_op[vece];
3170 break;
3171 case INDEX_op_sub_vec:
3172 insn = sub_op[vece];
3173 break;
3174 case INDEX_op_neg_vec:
3175 insn = neg_op[vece];
3176 a2 = a1;
3177 a1 = 0;
3178 break;
3179 case INDEX_op_mul_vec:
3180 tcg_debug_assert(vece == MO_32 && have_isa_2_07);
3181 insn = VMULUWM;
3182 break;
3183 case INDEX_op_ssadd_vec:
3184 insn = ssadd_op[vece];
3185 break;
3186 case INDEX_op_sssub_vec:
3187 insn = sssub_op[vece];
3188 break;
3189 case INDEX_op_usadd_vec:
3190 insn = usadd_op[vece];
3191 break;
3192 case INDEX_op_ussub_vec:
3193 insn = ussub_op[vece];
3194 break;
3195 case INDEX_op_smin_vec:
3196 insn = smin_op[vece];
3197 break;
3198 case INDEX_op_umin_vec:
3199 insn = umin_op[vece];
3200 break;
3201 case INDEX_op_smax_vec:
3202 insn = smax_op[vece];
3203 break;
3204 case INDEX_op_umax_vec:
3205 insn = umax_op[vece];
3206 break;
3207 case INDEX_op_shlv_vec:
3208 insn = shlv_op[vece];
3209 break;
3210 case INDEX_op_shrv_vec:
3211 insn = shrv_op[vece];
3212 break;
3213 case INDEX_op_sarv_vec:
3214 insn = sarv_op[vece];
3215 break;
3216 case INDEX_op_and_vec:
3217 insn = VAND;
3218 break;
3219 case INDEX_op_or_vec:
3220 insn = VOR;
3221 break;
3222 case INDEX_op_xor_vec:
3223 insn = VXOR;
3224 break;
3225 case INDEX_op_andc_vec:
3226 insn = VANDC;
3227 break;
3228 case INDEX_op_not_vec:
3229 insn = VNOR;
3230 a2 = a1;
3231 break;
3232 case INDEX_op_orc_vec:
3233 insn = VORC;
3234 break;
3235
3236 case INDEX_op_cmp_vec:
3237 switch (args[3]) {
3238 case TCG_COND_EQ:
3239 insn = eq_op[vece];
3240 break;
3241 case TCG_COND_NE:
3242 insn = ne_op[vece];
3243 break;
3244 case TCG_COND_GT:
3245 insn = gts_op[vece];
3246 break;
3247 case TCG_COND_GTU:
3248 insn = gtu_op[vece];
3249 break;
3250 default:
3251 g_assert_not_reached();
3252 }
3253 break;
3254
3255 case INDEX_op_bitsel_vec:
3256 tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3257 return;
3258
3259 case INDEX_op_dup2_vec:
3260 assert(TCG_TARGET_REG_BITS == 32);
3261 /* With inputs a1 = xLxx, a2 = xHxx */
3262 tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */
3263 tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */
3264 tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */
3265 return;
3266
3267 case INDEX_op_ppc_mrgh_vec:
3268 insn = mrgh_op[vece];
3269 break;
3270 case INDEX_op_ppc_mrgl_vec:
3271 insn = mrgl_op[vece];
3272 break;
3273 case INDEX_op_ppc_muleu_vec:
3274 insn = muleu_op[vece];
3275 break;
3276 case INDEX_op_ppc_mulou_vec:
3277 insn = mulou_op[vece];
3278 break;
3279 case INDEX_op_ppc_pkum_vec:
3280 insn = pkum_op[vece];
3281 break;
3282 case INDEX_op_ppc_rotl_vec:
3283 insn = rotl_op[vece];
3284 break;
3285 case INDEX_op_ppc_msum_vec:
3286 tcg_debug_assert(vece == MO_16);
3287 tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3288 return;
3289
3290 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
3291 case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
3292 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
3293 default:
3294 g_assert_not_reached();
3295 }
3296
3297 tcg_debug_assert(insn != 0);
3298 tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3299 }
3300
3301 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3302 TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3303 {
3304 TCGv_vec t1 = tcg_temp_new_vec(type);
3305
3306 /* Splat w/bytes for xxspltib. */
3307 tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
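/* Only the low log2(element width) bits of each element's shift count
 * are used by the vector shift instructions, so splatting the masked
 * count into every byte works for every element size.
 */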
3308 vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3309 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3310 tcg_temp_free_vec(t1);
3311 }
3312
3313 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3314 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3315 {
3316 bool need_swap = false, need_inv = false;
3317
3318 tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3319
3320 switch (cond) {
3321 case TCG_COND_EQ:
3322 case TCG_COND_GT:
3323 case TCG_COND_GTU:
3324 break;
3325 case TCG_COND_NE:
3326 if (have_isa_3_00 && vece <= MO_32) {
3327 break;
3328 }
3329 /* fall through */
3330 case TCG_COND_LE:
3331 case TCG_COND_LEU:
3332 need_inv = true;
3333 break;
3334 case TCG_COND_LT:
3335 case TCG_COND_LTU:
3336 need_swap = true;
3337 break;
3338 case TCG_COND_GE:
3339 case TCG_COND_GEU:
3340 need_swap = need_inv = true;
3341 break;
3342 default:
3343 g_assert_not_reached();
3344 }
3345
3346 if (need_inv) {
3347 cond = tcg_invert_cond(cond);
3348 }
3349 if (need_swap) {
3350 TCGv_vec t1;
3351 t1 = v1, v1 = v2, v2 = t1;
3352 cond = tcg_swap_cond(cond);
3353 }
3354
3355 vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3356 tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3357
3358 if (need_inv) {
3359 tcg_gen_not_vec(vece, v0, v0);
3360 }
3361 }
3362
3363 static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3364 TCGv_vec v1, TCGv_vec v2)
3365 {
3366 TCGv_vec t1 = tcg_temp_new_vec(type);
3367 TCGv_vec t2 = tcg_temp_new_vec(type);
3368 TCGv_vec t3, t4;
3369
3370 switch (vece) {
3371 case MO_8:
3372 case MO_16:
3373 vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3374 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3375 vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3376 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3377 vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3378 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3379 vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3380 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3381 vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3382 tcgv_vec_arg(v0), tcgv_vec_arg(t1));
3383 break;
3384
3385 case MO_32:
3386 tcg_debug_assert(!have_isa_2_07);
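/*
 * Decomposition used below (a, b are 32-bit lanes split into 16-bit
 * halves a = ah:al, b = bh:bl):
 *   a * b (mod 2^32) = al*bl + ((ah*bl + al*bh) << 16)
 * t1 = b with halves swapped (rotate by 16), t2 = al*bl (VMULOUH),
 * t3 = ah*bl + al*bh (VMSUMUHM of a with t1), v0 = t2 + (t3 << 16).
 */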
3387 t3 = tcg_temp_new_vec(type);
3388 t4 = tcg_temp_new_vec(type);
3389 tcg_gen_dupi_vec(MO_8, t4, -16);
3390 vec_gen_3(INDEX_op_ppc_rotl_vec, type, MO_32, tcgv_vec_arg(t1),
3391 tcgv_vec_arg(v2), tcgv_vec_arg(t4));
3392 vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
3393 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3394 tcg_gen_dupi_vec(MO_8, t3, 0);
3395 vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
3396 tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3397 vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
3398 tcgv_vec_arg(t3), tcgv_vec_arg(t4));
3399 tcg_gen_add_vec(MO_32, v0, t2, t3);
3400 tcg_temp_free_vec(t3);
3401 tcg_temp_free_vec(t4);
3402 break;
3403
3404 default:
3405 g_assert_not_reached();
3406 }
3407 tcg_temp_free_vec(t1);
3408 tcg_temp_free_vec(t2);
3409 }
3410
3411 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3412 TCGArg a0, ...)
3413 {
3414 va_list va;
3415 TCGv_vec v0, v1, v2;
3416 TCGArg a2;
3417
3418 va_start(va, a0);
3419 v0 = temp_tcgv_vec(arg_temp(a0));
3420 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3421 a2 = va_arg(va, TCGArg);
3422
3423 switch (opc) {
3424 case INDEX_op_shli_vec:
3425 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
3426 break;
3427 case INDEX_op_shri_vec:
3428 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
3429 break;
3430 case INDEX_op_sari_vec:
3431 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
3432 break;
3433 case INDEX_op_cmp_vec:
3434 v2 = temp_tcgv_vec(arg_temp(a2));
3435 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3436 break;
3437 case INDEX_op_mul_vec:
3438 v2 = temp_tcgv_vec(arg_temp(a2));
3439 expand_vec_mul(type, vece, v0, v1, v2);
3440 break;
3441 default:
3442 g_assert_not_reached();
3443 }
3444 va_end(va);
3445 }
3446
3447 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
3448 {
3449 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
3450 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
3451 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
3452 static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
3453 static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
3454 static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
3455 static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
3456 static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
3457 static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
3458 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
3459 static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
3460 static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
3461 static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
3462 static const TCGTargetOpDef r_rI_ri
3463 = { .args_ct_str = { "r", "rI", "ri" } };
3464 static const TCGTargetOpDef r_rI_rT
3465 = { .args_ct_str = { "r", "rI", "rT" } };
3466 static const TCGTargetOpDef r_r_rZW
3467 = { .args_ct_str = { "r", "r", "rZW" } };
3468 static const TCGTargetOpDef L_L_L_L
3469 = { .args_ct_str = { "L", "L", "L", "L" } };
3470 static const TCGTargetOpDef S_S_S_S
3471 = { .args_ct_str = { "S", "S", "S", "S" } };
3472 static const TCGTargetOpDef movc
3473 = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
3474 static const TCGTargetOpDef dep
3475 = { .args_ct_str = { "r", "0", "rZ" } };
3476 static const TCGTargetOpDef br2
3477 = { .args_ct_str = { "r", "r", "ri", "ri" } };
3478 static const TCGTargetOpDef setc2
3479 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
3480 static const TCGTargetOpDef add2
3481 = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
3482 static const TCGTargetOpDef sub2
3483 = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
3484 static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
3485 static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
3486 static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
3487 static const TCGTargetOpDef v_v_v_v
3488 = { .args_ct_str = { "v", "v", "v", "v" } };
3489
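/*
 * Reminder on the constraint letters used below (decoded by the
 * constraint parser earlier in this file, not reproduced here): "r" is
 * any GPR, "v" any vector register, "i" any immediate, "0" an alias of
 * output operand 0, "L"/"S" are the restricted GPR sets for qemu_ld /
 * qemu_st addresses, and the remaining capital letters select the
 * TCG_CT_CONST_* immediate classes defined near the top of the file.
 */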
3490 switch (op) {
3491 case INDEX_op_goto_ptr:
3492 return &r;
3493
3494 case INDEX_op_ld8u_i32:
3495 case INDEX_op_ld8s_i32:
3496 case INDEX_op_ld16u_i32:
3497 case INDEX_op_ld16s_i32:
3498 case INDEX_op_ld_i32:
3499 case INDEX_op_st8_i32:
3500 case INDEX_op_st16_i32:
3501 case INDEX_op_st_i32:
3502 case INDEX_op_ctpop_i32:
3503 case INDEX_op_neg_i32:
3504 case INDEX_op_not_i32:
3505 case INDEX_op_ext8s_i32:
3506 case INDEX_op_ext16s_i32:
3507 case INDEX_op_bswap16_i32:
3508 case INDEX_op_bswap32_i32:
3509 case INDEX_op_extract_i32:
3510 case INDEX_op_ld8u_i64:
3511 case INDEX_op_ld8s_i64:
3512 case INDEX_op_ld16u_i64:
3513 case INDEX_op_ld16s_i64:
3514 case INDEX_op_ld32u_i64:
3515 case INDEX_op_ld32s_i64:
3516 case INDEX_op_ld_i64:
3517 case INDEX_op_st8_i64:
3518 case INDEX_op_st16_i64:
3519 case INDEX_op_st32_i64:
3520 case INDEX_op_st_i64:
3521 case INDEX_op_ctpop_i64:
3522 case INDEX_op_neg_i64:
3523 case INDEX_op_not_i64:
3524 case INDEX_op_ext8s_i64:
3525 case INDEX_op_ext16s_i64:
3526 case INDEX_op_ext32s_i64:
3527 case INDEX_op_ext_i32_i64:
3528 case INDEX_op_extu_i32_i64:
3529 case INDEX_op_bswap16_i64:
3530 case INDEX_op_bswap32_i64:
3531 case INDEX_op_bswap64_i64:
3532 case INDEX_op_extract_i64:
3533 return &r_r;
3534
3535 case INDEX_op_add_i32:
3536 case INDEX_op_and_i32:
3537 case INDEX_op_or_i32:
3538 case INDEX_op_xor_i32:
3539 case INDEX_op_andc_i32:
3540 case INDEX_op_orc_i32:
3541 case INDEX_op_eqv_i32:
3542 case INDEX_op_shl_i32:
3543 case INDEX_op_shr_i32:
3544 case INDEX_op_sar_i32:
3545 case INDEX_op_rotl_i32:
3546 case INDEX_op_rotr_i32:
3547 case INDEX_op_setcond_i32:
3548 case INDEX_op_and_i64:
3549 case INDEX_op_andc_i64:
3550 case INDEX_op_shl_i64:
3551 case INDEX_op_shr_i64:
3552 case INDEX_op_sar_i64:
3553 case INDEX_op_rotl_i64:
3554 case INDEX_op_rotr_i64:
3555 case INDEX_op_setcond_i64:
3556 return &r_r_ri;
3557 case INDEX_op_mul_i32:
3558 case INDEX_op_mul_i64:
3559 return &r_r_rI;
3560 case INDEX_op_div_i32:
3561 case INDEX_op_divu_i32:
3562 case INDEX_op_nand_i32:
3563 case INDEX_op_nor_i32:
3564 case INDEX_op_muluh_i32:
3565 case INDEX_op_mulsh_i32:
3566 case INDEX_op_orc_i64:
3567 case INDEX_op_eqv_i64:
3568 case INDEX_op_nand_i64:
3569 case INDEX_op_nor_i64:
3570 case INDEX_op_div_i64:
3571 case INDEX_op_divu_i64:
3572 case INDEX_op_mulsh_i64:
3573 case INDEX_op_muluh_i64:
3574 return &r_r_r;
3575 case INDEX_op_sub_i32:
3576 return &r_rI_ri;
3577 case INDEX_op_add_i64:
3578 return &r_r_rT;
3579 case INDEX_op_or_i64:
3580 case INDEX_op_xor_i64:
3581 return &r_r_rU;
3582 case INDEX_op_sub_i64:
3583 return &r_rI_rT;
3584 case INDEX_op_clz_i32:
3585 case INDEX_op_ctz_i32:
3586 case INDEX_op_clz_i64:
3587 case INDEX_op_ctz_i64:
3588 return &r_r_rZW;
3589
3590 case INDEX_op_brcond_i32:
3591 case INDEX_op_brcond_i64:
3592 return &r_ri;
3593
3594 case INDEX_op_movcond_i32:
3595 case INDEX_op_movcond_i64:
3596 return &movc;
3597 case INDEX_op_deposit_i32:
3598 case INDEX_op_deposit_i64:
3599 return &dep;
3600 case INDEX_op_brcond2_i32:
3601 return &br2;
3602 case INDEX_op_setcond2_i32:
3603 return &setc2;
3604 case INDEX_op_add2_i64:
3605 case INDEX_op_add2_i32:
3606 return &add2;
3607 case INDEX_op_sub2_i64:
3608 case INDEX_op_sub2_i32:
3609 return &sub2;
3610
3611 case INDEX_op_qemu_ld_i32:
3612 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3613 ? &r_L : &r_L_L);
3614 case INDEX_op_qemu_st_i32:
3615 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3616 ? &S_S : &S_S_S);
3617 case INDEX_op_qemu_ld_i64:
3618 return (TCG_TARGET_REG_BITS == 64 ? &r_L
3619 : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
3620 case INDEX_op_qemu_st_i64:
3621 return (TCG_TARGET_REG_BITS == 64 ? &S_S
3622 : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
3623
3624 case INDEX_op_add_vec:
3625 case INDEX_op_sub_vec:
3626 case INDEX_op_mul_vec:
3627 case INDEX_op_and_vec:
3628 case INDEX_op_or_vec:
3629 case INDEX_op_xor_vec:
3630 case INDEX_op_andc_vec:
3631 case INDEX_op_orc_vec:
3632 case INDEX_op_cmp_vec:
3633 case INDEX_op_ssadd_vec:
3634 case INDEX_op_sssub_vec:
3635 case INDEX_op_usadd_vec:
3636 case INDEX_op_ussub_vec:
3637 case INDEX_op_smax_vec:
3638 case INDEX_op_smin_vec:
3639 case INDEX_op_umax_vec:
3640 case INDEX_op_umin_vec:
3641 case INDEX_op_shlv_vec:
3642 case INDEX_op_shrv_vec:
3643 case INDEX_op_sarv_vec:
3644 case INDEX_op_ppc_mrgh_vec:
3645 case INDEX_op_ppc_mrgl_vec:
3646 case INDEX_op_ppc_muleu_vec:
3647 case INDEX_op_ppc_mulou_vec:
3648 case INDEX_op_ppc_pkum_vec:
3649 case INDEX_op_ppc_rotl_vec:
3650 case INDEX_op_dup2_vec:
3651 return &v_v_v;
3652 case INDEX_op_not_vec:
3653 case INDEX_op_neg_vec:
3654 case INDEX_op_dup_vec:
3655 return &v_v;
3656 case INDEX_op_ld_vec:
3657 case INDEX_op_st_vec:
3658 case INDEX_op_dupm_vec:
3659 return &v_r;
3660 case INDEX_op_bitsel_vec:
3661 case INDEX_op_ppc_msum_vec:
3662 return &v_v_v_v;
3663
3664 default:
3665 return NULL;
3666 }
3667 }
3668
3669 static void tcg_target_init(TCGContext *s)
3670 {
3671 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3672 unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3673
3674 have_isa = tcg_isa_base;
3675 if (hwcap & PPC_FEATURE_ARCH_2_06) {
3676 have_isa = tcg_isa_2_06;
3677 }
3678 #ifdef PPC_FEATURE2_ARCH_2_07
3679 if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
3680 have_isa = tcg_isa_2_07;
3681 }
3682 #endif
3683 #ifdef PPC_FEATURE2_ARCH_3_00
3684 if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
3685 have_isa = tcg_isa_3_00;
3686 }
3687 #endif
3688
3689 #ifdef PPC_FEATURE2_HAS_ISEL
3690 /* Prefer an explicit indication from the kernel. */
3691 have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
3692 #else
3693 /* Fall back to knowing Power7 (2.06) has ISEL. */
3694 have_isel = have_isa_2_06;
3695 #endif
3696
3697 if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
3698 have_altivec = true;
3699 /* We only care about the portion of VSX that overlaps Altivec. */
3700 if (hwcap & PPC_FEATURE_HAS_VSX) {
3701 have_vsx = true;
3702 }
3703 }
3704
3705 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3706 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3707 if (have_altivec) {
3708 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3709 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3710 }
3711
3712 tcg_target_call_clobber_regs = 0;
3713 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3714 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3715 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3716 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3717 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3718 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3719 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
3720 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3721 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3722 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3723 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3724 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
3725
3726 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3727 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3728 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3729 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3730 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3731 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3732 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3733 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3734 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
3735 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
3736 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
3737 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
3738 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
3739 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
3740 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
3741 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
3742 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3743 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3744 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3745 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3746
3747 s->reserved_regs = 0;
3748 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
3749 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
3750 #if defined(_CALL_SYSV)
3751 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
3752 #endif
3753 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
3754 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
3755 #endif
3756 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
3757 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
3758 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
3759 if (USE_REG_TB) {
3760 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
3761 }
3762 }
3763
3764 #ifdef __ELF__
3765 typedef struct {
3766 DebugFrameCIE cie;
3767 DebugFrameFDEHeader fde;
3768 uint8_t fde_def_cfa[4];
3769 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
3770 } DebugFrame;
3771
3772 /* We're expecting a 2 byte uleb128 encoded value. */
3773 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3774
3775 #if TCG_TARGET_REG_BITS == 64
3776 # define ELF_HOST_MACHINE EM_PPC64
3777 #else
3778 # define ELF_HOST_MACHINE EM_PPC
3779 #endif
3780
3781 static DebugFrame debug_frame = {
3782 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3783 .cie.id = -1,
3784 .cie.version = 1,
3785 .cie.code_align = 1,
3786 .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
3787 .cie.return_column = 65,
3788
3789 /* Total FDE size does not include the "len" member. */
3790 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
3791
3792 .fde_def_cfa = {
3793 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */
3794 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3795 (FRAME_SIZE >> 7)
3796 },
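/*
 * The two bytes above are FRAME_SIZE as a uleb128 (low 7 bits with the
 * continuation bit set, then the next 7 bits); the BUILD_BUG_ON above
 * guarantees two bytes are enough.  E.g. a frame of 416 bytes would
 * encode as 0xa0, 0x03.
 */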
3797 .fde_reg_ofs = {
3798 /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
3799 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
3800 }
3801 };
3802
3803 void tcg_register_jit(void *buf, size_t buf_size)
3804 {
3805 uint8_t *p = &debug_frame.fde_reg_ofs[3];
3806 int i;
3807
3808 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
3809 p[0] = 0x80 + tcg_target_callee_save_regs[i];
3810 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
3811 }
3812
3813 debug_frame.fde.func_start = (uintptr_t)buf;
3814 debug_frame.fde.func_len = buf_size;
3815
3816 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3817 }
3818 #endif /* __ELF__ */
3819
3820 void flush_icache_range(uintptr_t start, uintptr_t stop)
3821 {
3822 uintptr_t p, start1, stop1;
3823 size_t dsize = qemu_dcache_linesize;
3824 size_t isize = qemu_icache_linesize;
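/*
 * Standard PowerPC self-modifying-code sequence: flush each data cache
 * line covering the range to memory (dcbst), wait for completion
 * (sync), invalidate the corresponding instruction cache lines (icbi),
 * then sync + isync before executing the new code.
 */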
3825
3826 start1 = start & ~(dsize - 1);
3827 stop1 = (stop + dsize - 1) & ~(dsize - 1);
3828 for (p = start1; p < stop1; p += dsize) {
3829 asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
3830 }
3831 asm volatile ("sync" : : : "memory");
3832
3833 start1 = start & ~(isize - 1);
3834 stop1 = (stop + isize - 1) & ~(isize - 1);
3835 for (p = start1; p < stop1; p += isize) {
3836 asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
3837 }
3838 asm volatile ("sync" : : : "memory");
3839 asm volatile ("isync" : : : "memory");
3840 }