/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"


#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
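
/*
 * Worked example (added for clarity, not in the upstream source):
 * GET_FIELD numbers bits big-endian style, so "bit 0" is the MSB (2^31).
 * GET_FIELD(insn, 3, 6) therefore expands to (insn >> 25) & 0xf, i.e.
 * conventional (LSB = 0) instruction bits 28..25 -- the branch cond field.
 * GET_FIELD_SP uses manual-style numbering: GET_FIELD_SP(insn, 25, 27)
 * yields (insn >> 25) & 0x7, conventional bits 27..25.
 */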

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
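
/*
 * Example (added for clarity): SPARC V9 encodes double/quad registers
 * above %f31 by setting bit 0 of the 5-bit register field, so
 * DFPREG(1) == 32 (%f32) and DFPREG(3) == 34 (%f34), while
 * DFPREG(2) == 2 (%f2).
 */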

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
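
/*
 * Example (added for clarity): for a 13-bit simm field,
 * sign_extend(0x1000, 13) == -4096 and sign_extend(0x0fff, 13) == 4095.
 */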

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = (TCGv_i64)v;
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->base.singlestep_enabled || singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
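
/*
 * Note (added for clarity): direct TB chaining is restricted to targets
 * on the same guest page as the current TB, so that invalidating or
 * remapping another page cannot leave a stale direct jump behind;
 * cross-page branches exit to the main loop instead.
 */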

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(NULL, 0);
    }
}

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
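
/*
 * Worked example (added for clarity): after a 32-bit add, dst wraps
 * modulo 2^32, so carry-out is exactly (dst < src1) unsigned; e.g.
 * 0xffffffff + 1 gives dst == 0 < 0xffffffff, hence carry == 1.
 */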

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
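
/*
 * Background (added for clarity): MULScc performs one step of the
 * SPARC V8 iterative multiply. The Y register holds the remaining
 * multiplier bits, one conditional partial-product add happens per
 * step, and a full 32x32 multiply is a sequence of 32 MULScc steps
 * followed by a final add.
 */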

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
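
/*
 * Worked example (added for clarity): the 64-bit product is split
 * between the destination and %y. For UMUL with 0xffffffff * 2 the
 * product is 0x1fffffffe, so dst receives 0xfffffffe on a 32-bit
 * target (the full 64-bit value on a 64-bit one) and %y receives 1.
 */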

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
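
/*
 * Note (added for clarity): JUMP_PC implements lazy branch resolution.
 * A non-annulled conditional branch only records the two candidate
 * next-PCs in jump_pc[] and tags npc with the JUMP_PC sentinel; the
 * movcond in gen_generic_branch is emitted later, and only if some
 * instruction actually needs a concrete npc or clobbers cpu_cond.
 */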

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }
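
    /*
     * Note (added for clarity): the offset is the distance from the
     * fcc0 field (FSR bits 11:10, i.e. FSR_FCC0_SHIFT == 10) to the
     * selected field: on V9, fcc1 lives at FSR bits 33:32, fcc2 at
     * 35:34 and fcc3 at 37:36, hence 32 - 10, 34 - 10 and 36 - 10.
     */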

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
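
/*
 * Note (added for clarity): the annul bit 'a' (instruction bit 29)
 * cancels the delay slot of an untaken conditional branch, and always
 * cancels it for "ba,a"; that is why the untaken+annulled path above
 * skips straight to npc + 4, while "ba,a" jumps directly to the target.
 */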

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
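
/*
 * Note (added for clarity): SPARC's LDSTUB atomically loads a byte and
 * stores all-ones back to it, which maps exactly onto an atomic
 * exchange with the constant 0xff; SWAP likewise maps onto a plain
 * atomic xchg.
 */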

/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;
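
/*
 * Rough meaning of each ASIType (summary added for clarity, inferred
 * from the handlers below): HELPER = slow path through the ld/st_asi
 * helpers; EXCP = an exception was already raised while decoding;
 * DIRECT = ordinary load/store with a chosen mmu_idx; DTWINX = 128-bit
 * twin loads/stores; BLOCK = block-transfer ASIs; SHORT = 8/16-bit FP
 * loads/stores; BCOPY/BFILL = the SPARC32 block-copy and block-fill
 * ASIs handled inline.
 */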
2047
2048 typedef struct {
2049 ASIType type;
2050 int asi;
2051 int mem_idx;
2052 MemOp memop;
2053 } DisasASI;
2054
2055 static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
2056 {
2057 int asi = GET_FIELD(insn, 19, 26);
2058 ASIType type = GET_ASI_HELPER;
2059 int mem_idx = dc->mem_idx;
2060
2061 #ifndef TARGET_SPARC64
2062 /* Before v9, all asis are immediate and privileged. */
2063 if (IS_IMM) {
2064 gen_exception(dc, TT_ILL_INSN);
2065 type = GET_ASI_EXCP;
2066 } else if (supervisor(dc)
2067 /* Note that LEON accepts ASI_USERDATA in user mode, for
2068 use with CASA. Also note that previous versions of
2069 QEMU allowed (and old versions of gcc emitted) ASI_P
2070 for LEON, which is incorrect. */
2071 || (asi == ASI_USERDATA
2072 && (dc->def->features & CPU_FEATURE_CASA))) {
2073 switch (asi) {
2074 case ASI_USERDATA: /* User data access */
2075 mem_idx = MMU_USER_IDX;
2076 type = GET_ASI_DIRECT;
2077 break;
2078 case ASI_KERNELDATA: /* Supervisor data access */
2079 mem_idx = MMU_KERNEL_IDX;
2080 type = GET_ASI_DIRECT;
2081 break;
2082 case ASI_M_BYPASS: /* MMU passthrough */
2083 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
2084 mem_idx = MMU_PHYS_IDX;
2085 type = GET_ASI_DIRECT;
2086 break;
2087 case ASI_M_BCOPY: /* Block copy, sta access */
2088 mem_idx = MMU_KERNEL_IDX;
2089 type = GET_ASI_BCOPY;
2090 break;
2091 case ASI_M_BFILL: /* Block fill, stda access */
2092 mem_idx = MMU_KERNEL_IDX;
2093 type = GET_ASI_BFILL;
2094 break;
2095 }
2096
2097 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
2098 * permissions check in get_physical_address(..).
2099 */
2100 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
2101 } else {
2102 gen_exception(dc, TT_PRIV_INSN);
2103 type = GET_ASI_EXCP;
2104 }
2105 #else
2106 if (IS_IMM) {
2107 asi = dc->asi;
2108 }
2109 /* With v9, all asis below 0x80 are privileged. */
2110 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2111 down that bit into DisasContext. For the moment that's ok,
2112 since the direct implementations below doesn't have any ASIs
2113 in the restricted [0x30, 0x7f] range, and the check will be
2114 done properly in the helper. */
2115 if (!supervisor(dc) && asi < 0x80) {
2116 gen_exception(dc, TT_PRIV_ACT);
2117 type = GET_ASI_EXCP;
2118 } else {
2119 switch (asi) {
2120 case ASI_REAL: /* Bypass */
2121 case ASI_REAL_IO: /* Bypass, non-cacheable */
2122 case ASI_REAL_L: /* Bypass LE */
2123 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
2124 case ASI_TWINX_REAL: /* Real address, twinx */
2125 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2126 case ASI_QUAD_LDD_PHYS:
2127 case ASI_QUAD_LDD_PHYS_L:
2128 mem_idx = MMU_PHYS_IDX;
2129 break;
2130 case ASI_N: /* Nucleus */
2131 case ASI_NL: /* Nucleus LE */
2132 case ASI_TWINX_N:
2133 case ASI_TWINX_NL:
2134 case ASI_NUCLEUS_QUAD_LDD:
2135 case ASI_NUCLEUS_QUAD_LDD_L:
2136 if (hypervisor(dc)) {
2137 mem_idx = MMU_PHYS_IDX;
2138 } else {
2139 mem_idx = MMU_NUCLEUS_IDX;
2140 }
2141 break;
2142 case ASI_AIUP: /* As if user primary */
2143 case ASI_AIUPL: /* As if user primary LE */
2144 case ASI_TWINX_AIUP:
2145 case ASI_TWINX_AIUP_L:
2146 case ASI_BLK_AIUP_4V:
2147 case ASI_BLK_AIUP_L_4V:
2148 case ASI_BLK_AIUP:
2149 case ASI_BLK_AIUPL:
2150 mem_idx = MMU_USER_IDX;
2151 break;
2152 case ASI_AIUS: /* As if user secondary */
2153 case ASI_AIUSL: /* As if user secondary LE */
2154 case ASI_TWINX_AIUS:
2155 case ASI_TWINX_AIUS_L:
2156 case ASI_BLK_AIUS_4V:
2157 case ASI_BLK_AIUS_L_4V:
2158 case ASI_BLK_AIUS:
2159 case ASI_BLK_AIUSL:
2160 mem_idx = MMU_USER_SECONDARY_IDX;
2161 break;
2162 case ASI_S: /* Secondary */
2163 case ASI_SL: /* Secondary LE */
2164 case ASI_TWINX_S:
2165 case ASI_TWINX_SL:
2166 case ASI_BLK_COMMIT_S:
2167 case ASI_BLK_S:
2168 case ASI_BLK_SL:
2169 case ASI_FL8_S:
2170 case ASI_FL8_SL:
2171 case ASI_FL16_S:
2172 case ASI_FL16_SL:
2173 if (mem_idx == MMU_USER_IDX) {
2174 mem_idx = MMU_USER_SECONDARY_IDX;
2175 } else if (mem_idx == MMU_KERNEL_IDX) {
2176 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2177 }
2178 break;
2179 case ASI_P: /* Primary */
2180 case ASI_PL: /* Primary LE */
2181 case ASI_TWINX_P:
2182 case ASI_TWINX_PL:
2183 case ASI_BLK_COMMIT_P:
2184 case ASI_BLK_P:
2185 case ASI_BLK_PL:
2186 case ASI_FL8_P:
2187 case ASI_FL8_PL:
2188 case ASI_FL16_P:
2189 case ASI_FL16_PL:
2190 break;
2191 }
2192 switch (asi) {
2193 case ASI_REAL:
2194 case ASI_REAL_IO:
2195 case ASI_REAL_L:
2196 case ASI_REAL_IO_L:
2197 case ASI_N:
2198 case ASI_NL:
2199 case ASI_AIUP:
2200 case ASI_AIUPL:
2201 case ASI_AIUS:
2202 case ASI_AIUSL:
2203 case ASI_S:
2204 case ASI_SL:
2205 case ASI_P:
2206 case ASI_PL:
2207 type = GET_ASI_DIRECT;
2208 break;
2209 case ASI_TWINX_REAL:
2210 case ASI_TWINX_REAL_L:
2211 case ASI_TWINX_N:
2212 case ASI_TWINX_NL:
2213 case ASI_TWINX_AIUP:
2214 case ASI_TWINX_AIUP_L:
2215 case ASI_TWINX_AIUS:
2216 case ASI_TWINX_AIUS_L:
2217 case ASI_TWINX_P:
2218 case ASI_TWINX_PL:
2219 case ASI_TWINX_S:
2220 case ASI_TWINX_SL:
2221 case ASI_QUAD_LDD_PHYS:
2222 case ASI_QUAD_LDD_PHYS_L:
2223 case ASI_NUCLEUS_QUAD_LDD:
2224 case ASI_NUCLEUS_QUAD_LDD_L:
2225 type = GET_ASI_DTWINX;
2226 break;
2227 case ASI_BLK_COMMIT_P:
2228 case ASI_BLK_COMMIT_S:
2229 case ASI_BLK_AIUP_4V:
2230 case ASI_BLK_AIUP_L_4V:
2231 case ASI_BLK_AIUP:
2232 case ASI_BLK_AIUPL:
2233 case ASI_BLK_AIUS_4V:
2234 case ASI_BLK_AIUS_L_4V:
2235 case ASI_BLK_AIUS:
2236 case ASI_BLK_AIUSL:
2237 case ASI_BLK_S:
2238 case ASI_BLK_SL:
2239 case ASI_BLK_P:
2240 case ASI_BLK_PL:
2241 type = GET_ASI_BLOCK;
2242 break;
2243 case ASI_FL8_S:
2244 case ASI_FL8_SL:
2245 case ASI_FL8_P:
2246 case ASI_FL8_PL:
2247 memop = MO_UB;
2248 type = GET_ASI_SHORT;
2249 break;
2250 case ASI_FL16_S:
2251 case ASI_FL16_SL:
2252 case ASI_FL16_P:
2253 case ASI_FL16_PL:
2254 memop = MO_TEUW;
2255 type = GET_ASI_SHORT;
2256 break;
2257 }
2258 /* The little-endian asis all have bit 3 set. */
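/* E.g. ASI_PL (0x88) == ASI_P (0x80) | 8; XOR-ing MO_BSWAP into the
   memop flips it between target endianness and its byte-swapped form. */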
2259 if (asi & 8) {
2260 memop ^= MO_BSWAP;
2261 }
2262 }
2263 #endif
2264
2265 return (DisasASI){ type, asi, mem_idx, memop };
2266 }
2267
2268 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2269 int insn, MemOp memop)
2270 {
2271 DisasASI da = get_asi(dc, insn, memop);
2272
2273 switch (da.type) {
2274 case GET_ASI_EXCP:
2275 break;
2276 case GET_ASI_DTWINX: /* Reserved for ldda. */
2277 gen_exception(dc, TT_ILL_INSN);
2278 break;
2279 case GET_ASI_DIRECT:
2280 gen_address_mask(dc, addr);
2281 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
2282 break;
2283 default:
2284 {
2285 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2286 TCGv_i32 r_mop = tcg_const_i32(memop);
2287
2288 save_state(dc);
2289 #ifdef TARGET_SPARC64
2290 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
2291 #else
2292 {
2293 TCGv_i64 t64 = tcg_temp_new_i64();
2294 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2295 tcg_gen_trunc_i64_tl(dst, t64);
2296 tcg_temp_free_i64(t64);
2297 }
2298 #endif
2299 tcg_temp_free_i32(r_mop);
2300 tcg_temp_free_i32(r_asi);
2301 }
2302 break;
2303 }
2304 }
2305
2306 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2307 int insn, MemOp memop)
2308 {
2309 DisasASI da = get_asi(dc, insn, memop);
2310
2311 switch (da.type) {
2312 case GET_ASI_EXCP:
2313 break;
2314 case GET_ASI_DTWINX: /* Reserved for stda. */
2315 #ifndef TARGET_SPARC64
2316 gen_exception(dc, TT_ILL_INSN);
2317 break;
2318 #else
2319 if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2320 /* Pre-OpenSPARC CPUs don't have these. */
2321 gen_exception(dc, TT_ILL_INSN);
2322 return;
2323 }
2324 /* In OpenSPARC T1+ CPUs, TWINX ASIs in store instructions
2325  * are ST_BLKINIT_ ASIs. */
2326 #endif
2327 /* fall through */
2328 case GET_ASI_DIRECT:
2329 gen_address_mask(dc, addr);
2330 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
2331 break;
2332 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2333 case GET_ASI_BCOPY:
2334 /* Copy 32 bytes from the address in SRC to ADDR. */
2335 /* ??? The original qemu code suggests 4-byte alignment, dropping
2336 the low bits, but the only place I can see this used is in the
2337 Linux kernel with 32-byte alignment, which would make more sense
2338 as a cacheline-style operation. */
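/* The copy below is unrolled at translate time into eight 4-byte
   load/store pairs. */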
2339 {
2340 TCGv saddr = tcg_temp_new();
2341 TCGv daddr = tcg_temp_new();
2342 TCGv four = tcg_const_tl(4);
2343 TCGv_i32 tmp = tcg_temp_new_i32();
2344 int i;
2345
2346 tcg_gen_andi_tl(saddr, src, -4);
2347 tcg_gen_andi_tl(daddr, addr, -4);
2348 for (i = 0; i < 32; i += 4) {
2349 /* Since the loads and stores are paired, allow the
2350 copy to happen in the host endianness. */
2351 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2352 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2353 tcg_gen_add_tl(saddr, saddr, four);
2354 tcg_gen_add_tl(daddr, daddr, four);
2355 }
2356
2357 tcg_temp_free(saddr);
2358 tcg_temp_free(daddr);
2359 tcg_temp_free(four);
2360 tcg_temp_free_i32(tmp);
2361 }
2362 break;
2363 #endif
2364 default:
2365 {
2366 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2367 TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
2368
2369 save_state(dc);
2370 #ifdef TARGET_SPARC64
2371 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
2372 #else
2373 {
2374 TCGv_i64 t64 = tcg_temp_new_i64();
2375 tcg_gen_extu_tl_i64(t64, src);
2376 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2377 tcg_temp_free_i64(t64);
2378 }
2379 #endif
2380 tcg_temp_free_i32(r_mop);
2381 tcg_temp_free_i32(r_asi);
2382
2383 /* A write to a TLB register may alter page maps. End the TB. */
2384 dc->npc = DYNAMIC_PC;
2385 }
2386 break;
2387 }
2388 }
2389
2390 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2391 TCGv addr, int insn)
2392 {
2393 DisasASI da = get_asi(dc, insn, MO_TEUL);
2394
2395 switch (da.type) {
2396 case GET_ASI_EXCP:
2397 break;
2398 case GET_ASI_DIRECT:
2399 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2400 break;
2401 default:
2402 /* ??? Should be DAE_invalid_asi. */
2403 gen_exception(dc, TT_DATA_ACCESS);
2404 break;
2405 }
2406 }
2407
2408 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2409 int insn, int rd)
2410 {
2411 DisasASI da = get_asi(dc, insn, MO_TEUL);
2412 TCGv oldv;
2413
2414 switch (da.type) {
2415 case GET_ASI_EXCP:
2416 return;
2417 case GET_ASI_DIRECT:
2418 oldv = tcg_temp_new();
2419 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2420 da.mem_idx, da.memop);
2421 gen_store_gpr(dc, rd, oldv);
2422 tcg_temp_free(oldv);
2423 break;
2424 default:
2425 /* ??? Should be DAE_invalid_asi. */
2426 gen_exception(dc, TT_DATA_ACCESS);
2427 break;
2428 }
2429 }
2430
2431 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2432 {
2433 DisasASI da = get_asi(dc, insn, MO_UB);
2434
2435 switch (da.type) {
2436 case GET_ASI_EXCP:
2437 break;
2438 case GET_ASI_DIRECT:
2439 gen_ldstub(dc, dst, addr, da.mem_idx);
2440 break;
2441 default:
2442 /* ??? In theory, this should raise DAE_invalid_asi.
2443 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2444 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2445 gen_helper_exit_atomic(cpu_env);
2446 } else {
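/* Non-parallel context: atomicity is not required, so emulate ldstub
   as a helper load followed by a helper store of 0xff. */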
2447 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2448 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2449 TCGv_i64 s64, t64;
2450
2451 save_state(dc);
2452 t64 = tcg_temp_new_i64();
2453 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2454
2455 s64 = tcg_const_i64(0xff);
2456 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2457 tcg_temp_free_i64(s64);
2458 tcg_temp_free_i32(r_mop);
2459 tcg_temp_free_i32(r_asi);
2460
2461 tcg_gen_trunc_i64_tl(dst, t64);
2462 tcg_temp_free_i64(t64);
2463
2464 /* End the TB. */
2465 dc->npc = DYNAMIC_PC;
2466 }
2467 break;
2468 }
2469 }
2470 #endif
2471
2472 #ifdef TARGET_SPARC64
2473 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2474 int insn, int size, int rd)
2475 {
2476 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2477 TCGv_i32 d32;
2478 TCGv_i64 d64;
2479
2480 switch (da.type) {
2481 case GET_ASI_EXCP:
2482 break;
2483
2484 case GET_ASI_DIRECT:
2485 gen_address_mask(dc, addr);
2486 switch (size) {
2487 case 4:
2488 d32 = gen_dest_fpr_F(dc);
2489 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2490 gen_store_fpr_F(dc, rd, d32);
2491 break;
2492 case 8:
2493 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2494 da.memop | MO_ALIGN_4);
2495 break;
2496 case 16:
2497 d64 = tcg_temp_new_i64();
2498 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2499 tcg_gen_addi_tl(addr, addr, 8);
2500 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2501 da.memop | MO_ALIGN_4);
2502 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2503 tcg_temp_free_i64(d64);
2504 break;
2505 default:
2506 g_assert_not_reached();
2507 }
2508 break;
2509
2510 case GET_ASI_BLOCK:
2511 /* Valid for lddfa on aligned registers only. */
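/* A block load transfers 64 bytes: eight consecutive double-precision
   registers starting at rd. */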
2512 if (size == 8 && (rd & 7) == 0) {
2513 MemOp memop;
2514 TCGv eight;
2515 int i;
2516
2517 gen_address_mask(dc, addr);
2518
2519 /* The first operation checks required alignment. */
2520 memop = da.memop | MO_ALIGN_64;
2521 eight = tcg_const_tl(8);
2522 for (i = 0; ; ++i) {
2523 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2524 da.mem_idx, memop);
2525 if (i == 7) {
2526 break;
2527 }
2528 tcg_gen_add_tl(addr, addr, eight);
2529 memop = da.memop;
2530 }
2531 tcg_temp_free(eight);
2532 } else {
2533 gen_exception(dc, TT_ILL_INSN);
2534 }
2535 break;
2536
2537 case GET_ASI_SHORT:
2538 /* Valid for lddfa only. */
2539 if (size == 8) {
2540 gen_address_mask(dc, addr);
2541 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2542 } else {
2543 gen_exception(dc, TT_ILL_INSN);
2544 }
2545 break;
2546
2547 default:
2548 {
2549 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2550 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2551
2552 save_state(dc);
2553 /* According to the table in the UA2011 manual, the only
2554 other asis that are valid for ldfa/lddfa/ldqfa are
2555 the NO_FAULT asis. We still need a helper for these,
2556 but we can just use the integer asi helper for them. */
2557 switch (size) {
2558 case 4:
2559 d64 = tcg_temp_new_i64();
2560 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2561 d32 = gen_dest_fpr_F(dc);
2562 tcg_gen_extrl_i64_i32(d32, d64);
2563 tcg_temp_free_i64(d64);
2564 gen_store_fpr_F(dc, rd, d32);
2565 break;
2566 case 8:
2567 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2568 break;
2569 case 16:
2570 d64 = tcg_temp_new_i64();
2571 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2572 tcg_gen_addi_tl(addr, addr, 8);
2573 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2574 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2575 tcg_temp_free_i64(d64);
2576 break;
2577 default:
2578 g_assert_not_reached();
2579 }
2580 tcg_temp_free_i32(r_mop);
2581 tcg_temp_free_i32(r_asi);
2582 }
2583 break;
2584 }
2585 }
2586
2587 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2588 int insn, int size, int rd)
2589 {
2590 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2591 TCGv_i32 d32;
2592
2593 switch (da.type) {
2594 case GET_ASI_EXCP:
2595 break;
2596
2597 case GET_ASI_DIRECT:
2598 gen_address_mask(dc, addr);
2599 switch (size) {
2600 case 4:
2601 d32 = gen_load_fpr_F(dc, rd);
2602 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2603 break;
2604 case 8:
2605 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2606 da.memop | MO_ALIGN_4);
2607 break;
2608 case 16:
2609 /* Only 4-byte alignment is required. However, it is legal for the
2610 cpu to signal the alignment fault, and the OS trap handler is
2611 required to fix it up. Requiring 16-byte alignment here avoids
2612 having to probe the second page before performing the first
2613 write. */
2614 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2615 da.memop | MO_ALIGN_16);
2616 tcg_gen_addi_tl(addr, addr, 8);
2617 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2618 break;
2619 default:
2620 g_assert_not_reached();
2621 }
2622 break;
2623
2624 case GET_ASI_BLOCK:
2625 /* Valid for stdfa on aligned registers only. */
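/* A block store likewise transfers 64 bytes, from eight consecutive
   double-precision registers starting at rd. */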
2626 if (size == 8 && (rd & 7) == 0) {
2627 MemOp memop;
2628 TCGv eight;
2629 int i;
2630
2631 gen_address_mask(dc, addr);
2632
2633 /* The first operation checks required alignment. */
2634 memop = da.memop | MO_ALIGN_64;
2635 eight = tcg_const_tl(8);
2636 for (i = 0; ; ++i) {
2637 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2638 da.mem_idx, memop);
2639 if (i == 7) {
2640 break;
2641 }
2642 tcg_gen_add_tl(addr, addr, eight);
2643 memop = da.memop;
2644 }
2645 tcg_temp_free(eight);
2646 } else {
2647 gen_exception(dc, TT_ILL_INSN);
2648 }
2649 break;
2650
2651 case GET_ASI_SHORT:
2652 /* Valid for stdfa only. */
2653 if (size == 8) {
2654 gen_address_mask(dc, addr);
2655 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2656 } else {
2657 gen_exception(dc, TT_ILL_INSN);
2658 }
2659 break;
2660
2661 default:
2662 /* According to the table in the UA2011 manual, the only
2663 other asis that are valid for stfa/stdfa/stqfa are
2664 the PST* asis, which aren't currently handled. */
2665 gen_exception(dc, TT_ILL_INSN);
2666 break;
2667 }
2668 }
2669
2670 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2671 {
2672 DisasASI da = get_asi(dc, insn, MO_TEQ);
2673 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2674 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2675
2676 switch (da.type) {
2677 case GET_ASI_EXCP:
2678 return;
2679
2680 case GET_ASI_DTWINX:
2681 gen_address_mask(dc, addr);
2682 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2683 tcg_gen_addi_tl(addr, addr, 8);
2684 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2685 break;
2686
2687 case GET_ASI_DIRECT:
2688 {
2689 TCGv_i64 tmp = tcg_temp_new_i64();
2690
2691 gen_address_mask(dc, addr);
2692 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2693
2694 /* Note that LE ldda acts as if each 32-bit register
2695  result is byte-swapped. Having just performed one
2696  64-bit bswap, we now need to swap the writebacks. */
2697 if ((da.memop & MO_BSWAP) == MO_TE) {
2698 tcg_gen_extr32_i64(lo, hi, tmp);
2699 } else {
2700 tcg_gen_extr32_i64(hi, lo, tmp);
2701 }
2702 tcg_temp_free_i64(tmp);
2703 }
2704 break;
2705
2706 default:
2707 /* ??? In theory we've handled all of the ASIs that are valid
2708 for ldda, and this should raise DAE_invalid_asi. However,
2709 real hardware allows others. This can be seen with e.g.
2710 FreeBSD 10.3 wrt ASI_IC_TAG. */
2711 {
2712 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2713 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2714 TCGv_i64 tmp = tcg_temp_new_i64();
2715
2716 save_state(dc);
2717 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2718 tcg_temp_free_i32(r_asi);
2719 tcg_temp_free_i32(r_mop);
2720
2721 /* See above. */
2722 if ((da.memop & MO_BSWAP) == MO_TE) {
2723 tcg_gen_extr32_i64(lo, hi, tmp);
2724 } else {
2725 tcg_gen_extr32_i64(hi, lo, tmp);
2726 }
2727 tcg_temp_free_i64(tmp);
2728 }
2729 break;
2730 }
2731
2732 gen_store_gpr(dc, rd, hi);
2733 gen_store_gpr(dc, rd + 1, lo);
2734 }
2735
2736 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2737 int insn, int rd)
2738 {
2739 DisasASI da = get_asi(dc, insn, MO_TEQ);
2740 TCGv lo = gen_load_gpr(dc, rd + 1);
2741
2742 switch (da.type) {
2743 case GET_ASI_EXCP:
2744 break;
2745
2746 case GET_ASI_DTWINX:
2747 gen_address_mask(dc, addr);
2748 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2749 tcg_gen_addi_tl(addr, addr, 8);
2750 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2751 break;
2752
2753 case GET_ASI_DIRECT:
2754 {
2755 TCGv_i64 t64 = tcg_temp_new_i64();
2756
2757 /* Note that LE stda acts as if each 32-bit register result is
2758 byte-swapped. We will perform one 64-bit LE store, so now
2759 we must swap the order of the construction. */
2760 if ((da.memop & MO_BSWAP) == MO_TE) {
2761 tcg_gen_concat32_i64(t64, lo, hi);
2762 } else {
2763 tcg_gen_concat32_i64(t64, hi, lo);
2764 }
2765 gen_address_mask(dc, addr);
2766 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2767 tcg_temp_free_i64(t64);
2768 }
2769 break;
2770
2771 default:
2772 /* ??? In theory we've handled all of the ASIs that are valid
2773 for stda, and this should raise DAE_invalid_asi. */
2774 {
2775 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2776 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2777 TCGv_i64 t64 = tcg_temp_new_i64();
2778
2779 /* See above. */
2780 if ((da.memop & MO_BSWAP) == MO_TE) {
2781 tcg_gen_concat32_i64(t64, lo, hi);
2782 } else {
2783 tcg_gen_concat32_i64(t64, hi, lo);
2784 }
2785
2786 save_state(dc);
2787 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2788 tcg_temp_free_i32(r_mop);
2789 tcg_temp_free_i32(r_asi);
2790 tcg_temp_free_i64(t64);
2791 }
2792 break;
2793 }
2794 }
2795
2796 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2797 int insn, int rd)
2798 {
2799 DisasASI da = get_asi(dc, insn, MO_TEQ);
2800 TCGv oldv;
2801
2802 switch (da.type) {
2803 case GET_ASI_EXCP:
2804 return;
2805 case GET_ASI_DIRECT:
2806 oldv = tcg_temp_new();
2807 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2808 da.mem_idx, da.memop);
2809 gen_store_gpr(dc, rd, oldv);
2810 tcg_temp_free(oldv);
2811 break;
2812 default:
2813 /* ??? Should be DAE_invalid_asi. */
2814 gen_exception(dc, TT_DATA_ACCESS);
2815 break;
2816 }
2817 }
2818
2819 #elif !defined(CONFIG_USER_ONLY)
2820 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2821 {
2822 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2823 whereby "rd + 1" elicits "error: array subscript is above array".
2824 Since we have already asserted that rd is even, the semantics
2825 are unchanged. */
2826 TCGv lo = gen_dest_gpr(dc, rd | 1);
2827 TCGv hi = gen_dest_gpr(dc, rd);
2828 TCGv_i64 t64 = tcg_temp_new_i64();
2829 DisasASI da = get_asi(dc, insn, MO_TEQ);
2830
2831 switch (da.type) {
2832 case GET_ASI_EXCP:
2833 tcg_temp_free_i64(t64);
2834 return;
2835 case GET_ASI_DIRECT:
2836 gen_address_mask(dc, addr);
2837 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2838 break;
2839 default:
2840 {
2841 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2842 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2843
2844 save_state(dc);
2845 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2846 tcg_temp_free_i32(r_mop);
2847 tcg_temp_free_i32(r_asi);
2848 }
2849 break;
2850 }
2851
2852 tcg_gen_extr_i64_i32(lo, hi, t64);
2853 tcg_temp_free_i64(t64);
2854 gen_store_gpr(dc, rd | 1, lo);
2855 gen_store_gpr(dc, rd, hi);
2856 }
2857
2858 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2859 int insn, int rd)
2860 {
2861 DisasASI da = get_asi(dc, insn, MO_TEQ);
2862 TCGv lo = gen_load_gpr(dc, rd + 1);
2863 TCGv_i64 t64 = tcg_temp_new_i64();
2864
2865 tcg_gen_concat_tl_i64(t64, lo, hi);
2866
2867 switch (da.type) {
2868 case GET_ASI_EXCP:
2869 break;
2870 case GET_ASI_DIRECT:
2871 gen_address_mask(dc, addr);
2872 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2873 break;
2874 case GET_ASI_BFILL:
2875 /* Store 32 bytes of T64 to ADDR. */
2876 /* ??? The original qemu code suggests 8-byte alignment, dropping
2877 the low bits, but the only place I can see this used is in the
2878 Linux kernel with 32-byte alignment, which would make more sense
2879 as a cacheline-style operation. */
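/* The fill below is unrolled at translate time into four 8-byte
   stores of the same doubleword. */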
2880 {
2881 TCGv d_addr = tcg_temp_new();
2882 TCGv eight = tcg_const_tl(8);
2883 int i;
2884
2885 tcg_gen_andi_tl(d_addr, addr, -8);
2886 for (i = 0; i < 32; i += 8) {
2887 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2888 tcg_gen_add_tl(d_addr, d_addr, eight);
2889 }
2890
2891 tcg_temp_free(d_addr);
2892 tcg_temp_free(eight);
2893 }
2894 break;
2895 default:
2896 {
2897 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2898 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2899
2900 save_state(dc);
2901 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2902 tcg_temp_free_i32(r_mop);
2903 tcg_temp_free_i32(r_asi);
2904 }
2905 break;
2906 }
2907
2908 tcg_temp_free_i64(t64);
2909 }
2910 #endif
2911
2912 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2913 {
2914 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2915 return gen_load_gpr(dc, rs1);
2916 }
2917
2918 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2919 {
2920 if (IS_IMM) { /* immediate */
2921 target_long simm = GET_FIELDs(insn, 19, 31);
2922 TCGv t = get_temp_tl(dc);
2923 tcg_gen_movi_tl(t, simm);
2924 return t;
2925 } else { /* register */
2926 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2927 return gen_load_gpr(dc, rs2);
2928 }
2929 }
2930
2931 #ifdef TARGET_SPARC64
2932 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2933 {
2934 TCGv_i32 c32, zero, dst, s1, s2;
2935
2936 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2937  or fold the comparison down to 32 bits and use movcond_i32. Choose
2938  the latter. */
2939 c32 = tcg_temp_new_i32();
2940 if (cmp->is_bool) {
2941 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2942 } else {
2943 TCGv_i64 c64 = tcg_temp_new_i64();
2944 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2945 tcg_gen_extrl_i64_i32(c32, c64);
2946 tcg_temp_free_i64(c64);
2947 }
2948
2949 s1 = gen_load_fpr_F(dc, rs);
2950 s2 = gen_load_fpr_F(dc, rd);
2951 dst = gen_dest_fpr_F(dc);
2952 zero = tcg_const_i32(0);
2953
2954 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2955
2956 tcg_temp_free_i32(c32);
2957 tcg_temp_free_i32(zero);
2958 gen_store_fpr_F(dc, rd, dst);
2959 }
2960
2961 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2962 {
2963 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2964 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2965 gen_load_fpr_D(dc, rs),
2966 gen_load_fpr_D(dc, rd));
2967 gen_store_fpr_D(dc, rd, dst);
2968 }
2969
2970 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2971 {
2972 int qd = QFPREG(rd);
2973 int qs = QFPREG(rs);
2974
2975 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2976 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2977 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2978 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2979
2980 gen_update_fprs_dirty(dc, qd);
2981 }
2982
2983 #ifndef CONFIG_USER_ONLY
2984 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2985 {
2986 TCGv_i32 r_tl = tcg_temp_new_i32();
2987
2988 /* load env->tl into r_tl */
2989 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2990
2991 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2992 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2993
2994 /* calculate offset to current trap state from env->ts, reuse r_tl */
2995 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2996 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2997
2998 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2999 {
3000 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
3001 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
3002 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
3003 tcg_temp_free_ptr(r_tl_tmp);
3004 }
3005
3006 tcg_temp_free_i32(r_tl);
3007 }
3008 #endif
3009
3010 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
3011 int width, bool cc, bool left)
3012 {
3013 TCGv lo1, lo2, t1, t2;
3014 uint64_t amask, tabl, tabr;
3015 int shift, imask, omask;
3016
3017 if (cc) {
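/* The "cc" forms also set the integer condition codes, exactly as
   subcc s1, s2 would. */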
3018 tcg_gen_mov_tl(cpu_cc_src, s1);
3019 tcg_gen_mov_tl(cpu_cc_src2, s2);
3020 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3021 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3022 dc->cc_op = CC_OP_SUB;
3023 }
3024
3025 /* Theory of operation: there are two tables, left and right (not to
3026 be confused with the left and right versions of the opcode). These
3027 are indexed by the low 3 bits of the inputs. To make things "easy",
3028 these tables are loaded into two constants, TABL and TABR below.
3029 The operation index = (input & imask) << shift calculates the index
3030 into the constant, while val = (table >> index) & omask calculates
3031 the value we're looking for. */
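/* Worked example: width 8, left, (s1 & 7) == 2 gives
   index = 2 << 3 = 16, and (0x80c0e0f0f8fcfeffULL >> 16) & 0xff == 0xfc. */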
3032 switch (width) {
3033 case 8:
3034 imask = 0x7;
3035 shift = 3;
3036 omask = 0xff;
3037 if (left) {
3038 tabl = 0x80c0e0f0f8fcfeffULL;
3039 tabr = 0xff7f3f1f0f070301ULL;
3040 } else {
3041 tabl = 0x0103070f1f3f7fffULL;
3042 tabr = 0xfffefcf8f0e0c080ULL;
3043 }
3044 break;
3045 case 16:
3046 imask = 0x6;
3047 shift = 1;
3048 omask = 0xf;
3049 if (left) {
3050 tabl = 0x8cef;
3051 tabr = 0xf731;
3052 } else {
3053 tabl = 0x137f;
3054 tabr = 0xfec8;
3055 }
3056 break;
3057 case 32:
3058 imask = 0x4;
3059 shift = 0;
3060 omask = 0x3;
3061 if (left) {
3062 tabl = (2 << 2) | 3;
3063 tabr = (3 << 2) | 1;
3064 } else {
3065 tabl = (1 << 2) | 3;
3066 tabr = (3 << 2) | 2;
3067 }
3068 break;
3069 default:
3070 abort();
3071 }
3072
3073 lo1 = tcg_temp_new();
3074 lo2 = tcg_temp_new();
3075 tcg_gen_andi_tl(lo1, s1, imask);
3076 tcg_gen_andi_tl(lo2, s2, imask);
3077 tcg_gen_shli_tl(lo1, lo1, shift);
3078 tcg_gen_shli_tl(lo2, lo2, shift);
3079
3080 t1 = tcg_const_tl(tabl);
3081 t2 = tcg_const_tl(tabr);
3082 tcg_gen_shr_tl(lo1, t1, lo1);
3083 tcg_gen_shr_tl(lo2, t2, lo2);
3084 tcg_gen_andi_tl(dst, lo1, omask);
3085 tcg_gen_andi_tl(lo2, lo2, omask);
3086
3087 amask = -8;
3088 if (AM_CHECK(dc)) {
3089 amask &= 0xffffffffULL;
3090 }
3091 tcg_gen_andi_tl(s1, s1, amask);
3092 tcg_gen_andi_tl(s2, s2, amask);
3093
3094 /* We want to compute
3095 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3096 We've already done dst = lo1, so this reduces to
3097 dst &= (s1 == s2 ? -1 : lo2)
3098 Which we perform by
3099 lo2 |= -(s1 == s2)
3100 dst &= lo2
3101 */
3102 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3103 tcg_gen_neg_tl(t1, t1);
3104 tcg_gen_or_tl(lo2, lo2, t1);
3105 tcg_gen_and_tl(dst, dst, lo2);
3106
3107 tcg_temp_free(lo1);
3108 tcg_temp_free(lo2);
3109 tcg_temp_free(t1);
3110 tcg_temp_free(t2);
3111 }
3112
3113 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3114 {
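/* alignaddress: dst = (s1 + s2) & ~7, with the low three bits of the
   sum (negated for the "little" variant) deposited into GSR.align
   (bits 2:0) for use by a subsequent faligndata. */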
3115 TCGv tmp = tcg_temp_new();
3116
3117 tcg_gen_add_tl(tmp, s1, s2);
3118 tcg_gen_andi_tl(dst, tmp, -8);
3119 if (left) {
3120 tcg_gen_neg_tl(tmp, tmp);
3121 }
3122 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3123
3124 tcg_temp_free(tmp);
3125 }
3126
3127 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3128 {
3129 TCGv t1, t2, shift;
3130
3131 t1 = tcg_temp_new();
3132 t2 = tcg_temp_new();
3133 shift = tcg_temp_new();
3134
3135 tcg_gen_andi_tl(shift, gsr, 7);
3136 tcg_gen_shli_tl(shift, shift, 3);
3137 tcg_gen_shl_tl(t1, s1, shift);
3138
3139 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3140 shift of (up to 63) followed by a constant shift of 1. */
3141 tcg_gen_xori_tl(shift, shift, 63);
3142 tcg_gen_shr_tl(t2, s2, shift);
3143 tcg_gen_shri_tl(t2, t2, 1);
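/* Net effect: dst = (s1 << shift) | (s2 >> (64 - shift)), where the
   xor/extra-shift trick above makes the s2 term zero when shift == 0. */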
3144
3145 tcg_gen_or_tl(dst, t1, t2);
3146
3147 tcg_temp_free(t1);
3148 tcg_temp_free(t2);
3149 tcg_temp_free(shift);
3150 }
3151 #endif
3152
3153 #define CHECK_IU_FEATURE(dc, FEATURE) \
3154 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3155 goto illegal_insn;
3156 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3157 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3158 goto nfpu_insn;
3159
3160 /* before an instruction, dc->pc must be static */
3161 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3162 {
3163 unsigned int opc, rs1, rs2, rd;
3164 TCGv cpu_src1, cpu_src2;
3165 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3166 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3167 target_long simm;
3168
3169 opc = GET_FIELD(insn, 0, 1);
3170 rd = GET_FIELD(insn, 2, 6);
3171
3172 switch (opc) {
3173 case 0: /* branches/sethi */
3174 {
3175 unsigned int xop = GET_FIELD(insn, 7, 9);
3176 int32_t target;
3177 switch (xop) {
3178 #ifdef TARGET_SPARC64
3179 case 0x1: /* V9 BPcc */
3180 {
3181 int cc;
3182
3183 target = GET_FIELD_SP(insn, 0, 18);
3184 target = sign_extend(target, 19);
3185 target <<= 2;
3186 cc = GET_FIELD_SP(insn, 20, 21);
3187 if (cc == 0)
3188 do_branch(dc, target, insn, 0);
3189 else if (cc == 2)
3190 do_branch(dc, target, insn, 1);
3191 else
3192 goto illegal_insn;
3193 goto jmp_insn;
3194 }
3195 case 0x3: /* V9 BPr */
3196 {
3197 target = GET_FIELD_SP(insn, 0, 13) |
3198 (GET_FIELD_SP(insn, 20, 21) << 14);
3199 target = sign_extend(target, 16);
3200 target <<= 2;
3201 cpu_src1 = get_src1(dc, insn);
3202 do_branch_reg(dc, target, insn, cpu_src1);
3203 goto jmp_insn;
3204 }
3205 case 0x5: /* V9 FBPcc */
3206 {
3207 int cc = GET_FIELD_SP(insn, 20, 21);
3208 if (gen_trap_ifnofpu(dc)) {
3209 goto jmp_insn;
3210 }
3211 target = GET_FIELD_SP(insn, 0, 18);
3212 target = sign_extend(target, 19);
3213 target <<= 2;
3214 do_fbranch(dc, target, insn, cc);
3215 goto jmp_insn;
3216 }
3217 #else
3218 case 0x7: /* CBN+x */
3219 {
3220 goto ncp_insn;
3221 }
3222 #endif
3223 case 0x2: /* BN+x */
3224 {
3225 target = GET_FIELD(insn, 10, 31);
3226 target = sign_extend(target, 22);
3227 target <<= 2;
3228 do_branch(dc, target, insn, 0);
3229 goto jmp_insn;
3230 }
3231 case 0x6: /* FBN+x */
3232 {
3233 if (gen_trap_ifnofpu(dc)) {
3234 goto jmp_insn;
3235 }
3236 target = GET_FIELD(insn, 10, 31);
3237 target = sign_extend(target, 22);
3238 target <<= 2;
3239 do_fbranch(dc, target, insn, 0);
3240 goto jmp_insn;
3241 }
3242 case 0x4: /* SETHI */
3243 /* Special-case %g0 because that's the canonical nop. */
3244 if (rd) {
3245 uint32_t value = GET_FIELD(insn, 10, 31);
3246 TCGv t = gen_dest_gpr(dc, rd);
3247 tcg_gen_movi_tl(t, value << 10);
3248 gen_store_gpr(dc, rd, t);
3249 }
3250 break;
3251 case 0x0: /* UNIMPL */
3252 default:
3253 goto illegal_insn;
3254 }
3255 break;
3256 }
3257 break;
3258 case 1: /*CALL*/
3259 {
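/* CALL writes its own address into %o7 (r15) and branches
   PC-relative by the sign-extended 30-bit word displacement. */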
3260 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3261 TCGv o7 = gen_dest_gpr(dc, 15);
3262
3263 tcg_gen_movi_tl(o7, dc->pc);
3264 gen_store_gpr(dc, 15, o7);
3265 target += dc->pc;
3266 gen_mov_pc_npc(dc);
3267 #ifdef TARGET_SPARC64
3268 if (unlikely(AM_CHECK(dc))) {
3269 target &= 0xffffffffULL;
3270 }
3271 #endif
3272 dc->npc = target;
3273 }
3274 goto jmp_insn;
3275 case 2: /* FPU & Logical Operations */
3276 {
3277 unsigned int xop = GET_FIELD(insn, 7, 12);
3278 TCGv cpu_dst = get_temp_tl(dc);
3279 TCGv cpu_tmp0;
3280
3281 if (xop == 0x3a) { /* generate trap */
3282 int cond = GET_FIELD(insn, 3, 6);
3283 TCGv_i32 trap;
3284 TCGLabel *l1 = NULL;
3285 int mask;
3286
3287 if (cond == 0) {
3288 /* Trap never. */
3289 break;
3290 }
3291
3292 save_state(dc);
3293
3294 if (cond != 8) {
3295 /* Conditional trap. */
3296 DisasCompare cmp;
3297 #ifdef TARGET_SPARC64
3298 /* V9 icc/xcc */
3299 int cc = GET_FIELD_SP(insn, 11, 12);
3300 if (cc == 0) {
3301 gen_compare(&cmp, 0, cond, dc);
3302 } else if (cc == 2) {
3303 gen_compare(&cmp, 1, cond, dc);
3304 } else {
3305 goto illegal_insn;
3306 }
3307 #else
3308 gen_compare(&cmp, 0, cond, dc);
3309 #endif
3310 l1 = gen_new_label();
3311 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3312 cmp.c1, cmp.c2, l1);
3313 free_compare(&cmp);
3314 }
3315
3316 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3317 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
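/* The software trap number computed below is
   ((rs1 + rs2_or_imm) & mask) + TT_TRAP. */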
3318
3319 /* Don't use the normal temporaries, as they may well have
3320 gone out of scope with the branch above. While we're
3321 doing that we might as well pre-truncate to 32-bit. */
3322 trap = tcg_temp_new_i32();
3323
3324 rs1 = GET_FIELD_SP(insn, 14, 18);
3325 if (IS_IMM) {
3326 rs2 = GET_FIELD_SP(insn, 0, 7);
3327 if (rs1 == 0) {
3328 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3329 /* Signal that the trap value is fully constant. */
3330 mask = 0;
3331 } else {
3332 TCGv t1 = gen_load_gpr(dc, rs1);
3333 tcg_gen_trunc_tl_i32(trap, t1);
3334 tcg_gen_addi_i32(trap, trap, rs2);
3335 }
3336 } else {
3337 TCGv t1, t2;
3338 rs2 = GET_FIELD_SP(insn, 0, 4);
3339 t1 = gen_load_gpr(dc, rs1);
3340 t2 = gen_load_gpr(dc, rs2);
3341 tcg_gen_add_tl(t1, t1, t2);
3342 tcg_gen_trunc_tl_i32(trap, t1);
3343 }
3344 if (mask != 0) {
3345 tcg_gen_andi_i32(trap, trap, mask);
3346 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3347 }
3348
3349 gen_helper_raise_exception(cpu_env, trap);
3350 tcg_temp_free_i32(trap);
3351
3352 if (cond == 8) {
3353 /* An unconditional trap ends the TB. */
3354 dc->base.is_jmp = DISAS_NORETURN;
3355 goto jmp_insn;
3356 } else {
3357 /* A conditional trap falls through to the next insn. */
3358 gen_set_label(l1);
3359 break;
3360 }
3361 } else if (xop == 0x28) {
3362 rs1 = GET_FIELD(insn, 13, 17);
3363 switch(rs1) {
3364 case 0: /* rdy */
3365 #ifndef TARGET_SPARC64
3366 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3367 manual, rdy on the microSPARC
3368 II */
3369 case 0x0f: /* stbar in the SPARCv8 manual,
3370 rdy on the microSPARC II */
3371 case 0x10 ... 0x1f: /* implementation-dependent in the
3372 SPARCv8 manual, rdy on the
3373 microSPARC II */
3374 /* Read Asr17 */
3375 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3376 TCGv t = gen_dest_gpr(dc, rd);
3377 /* Read Asr17 for a Leon3 monoprocessor */
3378 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3379 gen_store_gpr(dc, rd, t);
3380 break;
3381 }
3382 #endif
3383 gen_store_gpr(dc, rd, cpu_y);
3384 break;
3385 #ifdef TARGET_SPARC64
3386 case 0x2: /* V9 rdccr */
3387 update_psr(dc);
3388 gen_helper_rdccr(cpu_dst, cpu_env);
3389 gen_store_gpr(dc, rd, cpu_dst);
3390 break;
3391 case 0x3: /* V9 rdasi */
3392 tcg_gen_movi_tl(cpu_dst, dc->asi);
3393 gen_store_gpr(dc, rd, cpu_dst);
3394 break;
3395 case 0x4: /* V9 rdtick */
3396 {
3397 TCGv_ptr r_tickptr;
3398 TCGv_i32 r_const;
3399
3400 r_tickptr = tcg_temp_new_ptr();
3401 r_const = tcg_const_i32(dc->mem_idx);
3402 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3403 offsetof(CPUSPARCState, tick));
3404 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3405 gen_io_start();
3406 }
3407 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3408 r_const);
3409 tcg_temp_free_ptr(r_tickptr);
3410 tcg_temp_free_i32(r_const);
3411 gen_store_gpr(dc, rd, cpu_dst);
3412 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3413 gen_io_end();
3414 }
3415 }
3416 break;
3417 case 0x5: /* V9 rdpc */
3418 {
3419 TCGv t = gen_dest_gpr(dc, rd);
3420 if (unlikely(AM_CHECK(dc))) {
3421 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3422 } else {
3423 tcg_gen_movi_tl(t, dc->pc);
3424 }
3425 gen_store_gpr(dc, rd, t);
3426 }
3427 break;
3428 case 0x6: /* V9 rdfprs */
3429 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3430 gen_store_gpr(dc, rd, cpu_dst);
3431 break;
3432 case 0xf: /* V9 membar */
3433 break; /* no effect */
3434 case 0x13: /* Graphics Status */
3435 if (gen_trap_ifnofpu(dc)) {
3436 goto jmp_insn;
3437 }
3438 gen_store_gpr(dc, rd, cpu_gsr);
3439 break;
3440 case 0x16: /* Softint */
3441 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3442 offsetof(CPUSPARCState, softint));
3443 gen_store_gpr(dc, rd, cpu_dst);
3444 break;
3445 case 0x17: /* Tick compare */
3446 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3447 break;
3448 case 0x18: /* System tick */
3449 {
3450 TCGv_ptr r_tickptr;
3451 TCGv_i32 r_const;
3452
3453 r_tickptr = tcg_temp_new_ptr();
3454 r_const = tcg_const_i32(dc->mem_idx);
3455 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3456 offsetof(CPUSPARCState, stick));
3457 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3458 gen_io_start();
3459 }
3460 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3461 r_const);
3462 tcg_temp_free_ptr(r_tickptr);
3463 tcg_temp_free_i32(r_const);
3464 gen_store_gpr(dc, rd, cpu_dst);
3465 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3466 gen_io_end();
3467 }
3468 }
3469 break;
3470 case 0x19: /* System tick compare */
3471 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3472 break;
3473 case 0x1a: /* UltraSPARC-T1 Strand status */
3474 /* XXX The HYPV check may not be enough: UA2005 & UA2007 describe
3475  * this ASR as implementation dependent.
3476  */
3477 CHECK_IU_FEATURE(dc, HYPV);
3478 {
3479 TCGv t = gen_dest_gpr(dc, rd);
3480 tcg_gen_movi_tl(t, 1UL);
3481 gen_store_gpr(dc, rd, t);
3482 }
3483 break;
3484 case 0x10: /* Performance Control */
3485 case 0x11: /* Performance Instrumentation Counter */
3486 case 0x12: /* Dispatch Control */
3487 case 0x14: /* Softint set, WO */
3488 case 0x15: /* Softint clear, WO */
3489 #endif
3490 default:
3491 goto illegal_insn;
3492 }
3493 #if !defined(CONFIG_USER_ONLY)
3494 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3495 #ifndef TARGET_SPARC64
3496 if (!supervisor(dc)) {
3497 goto priv_insn;
3498 }
3499 update_psr(dc);
3500 gen_helper_rdpsr(cpu_dst, cpu_env);
3501 #else
3502 CHECK_IU_FEATURE(dc, HYPV);
3503 if (!hypervisor(dc))
3504 goto priv_insn;
3505 rs1 = GET_FIELD(insn, 13, 17);
3506 switch (rs1) {
3507 case 0: // hpstate
3508 tcg_gen_ld_i64(cpu_dst, cpu_env,
3509 offsetof(CPUSPARCState, hpstate));
3510 break;
3511 case 1: // htstate
3512 // gen_op_rdhtstate();
3513 break;
3514 case 3: // hintp
3515 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3516 break;
3517 case 5: // htba
3518 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3519 break;
3520 case 6: // hver
3521 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3522 break;
3523 case 31: // hstick_cmpr
3524 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3525 break;
3526 default:
3527 goto illegal_insn;
3528 }
3529 #endif
3530 gen_store_gpr(dc, rd, cpu_dst);
3531 break;
3532 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3533 if (!supervisor(dc)) {
3534 goto priv_insn;
3535 }
3536 cpu_tmp0 = get_temp_tl(dc);
3537 #ifdef TARGET_SPARC64
3538 rs1 = GET_FIELD(insn, 13, 17);
3539 switch (rs1) {
3540 case 0: // tpc
3541 {
3542 TCGv_ptr r_tsptr;
3543
3544 r_tsptr = tcg_temp_new_ptr();
3545 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3546 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3547 offsetof(trap_state, tpc));
3548 tcg_temp_free_ptr(r_tsptr);
3549 }
3550 break;
3551 case 1: // tnpc
3552 {
3553 TCGv_ptr r_tsptr;
3554
3555 r_tsptr = tcg_temp_new_ptr();
3556 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3557 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3558 offsetof(trap_state, tnpc));
3559 tcg_temp_free_ptr(r_tsptr);
3560 }
3561 break;
3562 case 2: // tstate
3563 {
3564 TCGv_ptr r_tsptr;
3565
3566 r_tsptr = tcg_temp_new_ptr();
3567 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3568 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3569 offsetof(trap_state, tstate));
3570 tcg_temp_free_ptr(r_tsptr);
3571 }
3572 break;
3573 case 3: // tt
3574 {
3575 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3576
3577 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3578 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3579 offsetof(trap_state, tt));
3580 tcg_temp_free_ptr(r_tsptr);
3581 }
3582 break;
3583 case 4: // tick
3584 {
3585 TCGv_ptr r_tickptr;
3586 TCGv_i32 r_const;
3587
3588 r_tickptr = tcg_temp_new_ptr();
3589 r_const = tcg_const_i32(dc->mem_idx);
3590 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3591 offsetof(CPUSPARCState, tick));
3592 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3593 gen_io_start();
3594 }
3595 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3596 r_tickptr, r_const);
3597 tcg_temp_free_ptr(r_tickptr);
3598 tcg_temp_free_i32(r_const);
3599 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
3600 gen_io_end();
3601 }
3602 }
3603 break;
3604 case 5: // tba
3605 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3606 break;
3607 case 6: // pstate
3608 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3609 offsetof(CPUSPARCState, pstate));
3610 break;
3611 case 7: // tl
3612 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3613 offsetof(CPUSPARCState, tl));
3614 break;
3615 case 8: // pil
3616 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3617 offsetof(CPUSPARCState, psrpil));
3618 break;
3619 case 9: // cwp
3620 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3621 break;
3622 case 10: // cansave
3623 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3624 offsetof(CPUSPARCState, cansave));
3625 break;
3626 case 11: // canrestore
3627 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3628 offsetof(CPUSPARCState, canrestore));
3629 break;
3630 case 12: // cleanwin
3631 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3632 offsetof(CPUSPARCState, cleanwin));
3633 break;
3634 case 13: // otherwin
3635 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3636 offsetof(CPUSPARCState, otherwin));
3637 break;
3638 case 14: // wstate
3639 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3640 offsetof(CPUSPARCState, wstate));
3641 break;
3642 case 16: // UA2005 gl
3643 CHECK_IU_FEATURE(dc, GL);
3644 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3645 offsetof(CPUSPARCState, gl));
3646 break;
3647 case 26: // UA2005 strand status
3648 CHECK_IU_FEATURE(dc, HYPV);
3649 if (!hypervisor(dc))
3650 goto priv_insn;
3651 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3652 break;
3653 case 31: // ver
3654 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3655 break;
3656 case 15: // fq
3657 default:
3658 goto illegal_insn;
3659 }
3660 #else
3661 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3662 #endif
3663 gen_store_gpr(dc, rd, cpu_tmp0);
3664 break;
3665 #endif
3666 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3667 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3668 #ifdef TARGET_SPARC64
3669 gen_helper_flushw(cpu_env);
3670 #else
3671 if (!supervisor(dc))
3672 goto priv_insn;
3673 gen_store_gpr(dc, rd, cpu_tbr);
3674 #endif
3675 break;
3676 #endif
3677 } else if (xop == 0x34) { /* FPU Operations */
3678 if (gen_trap_ifnofpu(dc)) {
3679 goto jmp_insn;
3680 }
3681 gen_op_clear_ieee_excp_and_FTT();
3682 rs1 = GET_FIELD(insn, 13, 17);
3683 rs2 = GET_FIELD(insn, 27, 31);
3684 xop = GET_FIELD(insn, 18, 26);
3685
3686 switch (xop) {
3687 case 0x1: /* fmovs */
3688 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3689 gen_store_fpr_F(dc, rd, cpu_src1_32);
3690 break;
3691 case 0x5: /* fnegs */
3692 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3693 break;
3694 case 0x9: /* fabss */
3695 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3696 break;
3697 case 0x29: /* fsqrts */
3698 CHECK_FPU_FEATURE(dc, FSQRT);
3699 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3700 break;
3701 case 0x2a: /* fsqrtd */
3702 CHECK_FPU_FEATURE(dc, FSQRT);
3703 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3704 break;
3705 case 0x2b: /* fsqrtq */
3706 CHECK_FPU_FEATURE(dc, FLOAT128);
3707 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3708 break;
3709 case 0x41: /* fadds */
3710 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3711 break;
3712 case 0x42: /* faddd */
3713 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3714 break;
3715 case 0x43: /* faddq */
3716 CHECK_FPU_FEATURE(dc, FLOAT128);
3717 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3718 break;
3719 case 0x45: /* fsubs */
3720 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3721 break;
3722 case 0x46: /* fsubd */
3723 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3724 break;
3725 case 0x47: /* fsubq */
3726 CHECK_FPU_FEATURE(dc, FLOAT128);
3727 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3728 break;
3729 case 0x49: /* fmuls */
3730 CHECK_FPU_FEATURE(dc, FMUL);
3731 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3732 break;
3733 case 0x4a: /* fmuld */
3734 CHECK_FPU_FEATURE(dc, FMUL);
3735 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3736 break;
3737 case 0x4b: /* fmulq */
3738 CHECK_FPU_FEATURE(dc, FLOAT128);
3739 CHECK_FPU_FEATURE(dc, FMUL);
3740 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3741 break;
3742 case 0x4d: /* fdivs */
3743 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3744 break;
3745 case 0x4e: /* fdivd */
3746 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3747 break;
3748 case 0x4f: /* fdivq */
3749 CHECK_FPU_FEATURE(dc, FLOAT128);
3750 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3751 break;
3752 case 0x69: /* fsmuld */
3753 CHECK_FPU_FEATURE(dc, FSMULD);
3754 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3755 break;
3756 case 0x6e: /* fdmulq */
3757 CHECK_FPU_FEATURE(dc, FLOAT128);
3758 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3759 break;
3760 case 0xc4: /* fitos */
3761 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3762 break;
3763 case 0xc6: /* fdtos */
3764 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3765 break;
3766 case 0xc7: /* fqtos */
3767 CHECK_FPU_FEATURE(dc, FLOAT128);
3768 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3769 break;
3770 case 0xc8: /* fitod */
3771 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3772 break;
3773 case 0xc9: /* fstod */
3774 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3775 break;
3776 case 0xcb: /* fqtod */
3777 CHECK_FPU_FEATURE(dc, FLOAT128);
3778 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3779 break;
3780 case 0xcc: /* fitoq */
3781 CHECK_FPU_FEATURE(dc, FLOAT128);
3782 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3783 break;
3784 case 0xcd: /* fstoq */
3785 CHECK_FPU_FEATURE(dc, FLOAT128);
3786 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3787 break;
3788 case 0xce: /* fdtoq */
3789 CHECK_FPU_FEATURE(dc, FLOAT128);
3790 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3791 break;
3792 case 0xd1: /* fstoi */
3793 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3794 break;
3795 case 0xd2: /* fdtoi */
3796 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3797 break;
3798 case 0xd3: /* fqtoi */
3799 CHECK_FPU_FEATURE(dc, FLOAT128);
3800 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3801 break;
3802 #ifdef TARGET_SPARC64
3803 case 0x2: /* V9 fmovd */
3804 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3805 gen_store_fpr_D(dc, rd, cpu_src1_64);
3806 break;
3807 case 0x3: /* V9 fmovq */
3808 CHECK_FPU_FEATURE(dc, FLOAT128);
3809 gen_move_Q(dc, rd, rs2);
3810 break;
3811 case 0x6: /* V9 fnegd */
3812 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3813 break;
3814 case 0x7: /* V9 fnegq */
3815 CHECK_FPU_FEATURE(dc, FLOAT128);
3816 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3817 break;
3818 case 0xa: /* V9 fabsd */
3819 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3820 break;
3821 case 0xb: /* V9 fabsq */
3822 CHECK_FPU_FEATURE(dc, FLOAT128);
3823 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3824 break;
3825 case 0x81: /* V9 fstox */
3826 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3827 break;
3828 case 0x82: /* V9 fdtox */
3829 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3830 break;
3831 case 0x83: /* V9 fqtox */
3832 CHECK_FPU_FEATURE(dc, FLOAT128);
3833 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3834 break;
3835 case 0x84: /* V9 fxtos */
3836 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3837 break;
3838 case 0x88: /* V9 fxtod */
3839 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3840 break;
3841 case 0x8c: /* V9 fxtoq */
3842 CHECK_FPU_FEATURE(dc, FLOAT128);
3843 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3844 break;
3845 #endif
3846 default:
3847 goto illegal_insn;
3848 }
3849 } else if (xop == 0x35) { /* FPU conditional moves and compares */
3850 #ifdef TARGET_SPARC64
3851 int cond;
3852 #endif
3853 if (gen_trap_ifnofpu(dc)) {
3854 goto jmp_insn;
3855 }
3856 gen_op_clear_ieee_excp_and_FTT();
3857 rs1 = GET_FIELD(insn, 13, 17);
3858 rs2 = GET_FIELD(insn, 27, 31);
3859 xop = GET_FIELD(insn, 18, 26);
3860
3861 #ifdef TARGET_SPARC64
3862 #define FMOVR(sz) \
3863 do { \
3864 DisasCompare cmp; \
3865 cond = GET_FIELD_SP(insn, 10, 12); \
3866 cpu_src1 = get_src1(dc, insn); \
3867 gen_compare_reg(&cmp, cond, cpu_src1); \
3868 gen_fmov##sz(dc, &cmp, rd, rs2); \
3869 free_compare(&cmp); \
3870 } while (0)
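/* FMOVR: conditionally move an FP register; the condition is a
   register-against-zero test on rs1 (fmovr{s,d,q}). */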
3871
3872 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3873 FMOVR(s);
3874 break;
3875 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3876 FMOVR(d);
3877 break;
3878 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3879 CHECK_FPU_FEATURE(dc, FLOAT128);
3880 FMOVR(q);
3881 break;
3882 }
3883 #undef FMOVR
3884 #endif
3885 switch (xop) {
3886 #ifdef TARGET_SPARC64
3887 #define FMOVCC(fcc, sz) \
3888 do { \
3889 DisasCompare cmp; \
3890 cond = GET_FIELD_SP(insn, 14, 17); \
3891 gen_fcompare(&cmp, fcc, cond); \
3892 gen_fmov##sz(dc, &cmp, rd, rs2); \
3893 free_compare(&cmp); \
3894 } while (0)
3895
3896 case 0x001: /* V9 fmovscc %fcc0 */
3897 FMOVCC(0, s);
3898 break;
3899 case 0x002: /* V9 fmovdcc %fcc0 */
3900 FMOVCC(0, d);
3901 break;
3902 case 0x003: /* V9 fmovqcc %fcc0 */
3903 CHECK_FPU_FEATURE(dc, FLOAT128);
3904 FMOVCC(0, q);
3905 break;
3906 case 0x041: /* V9 fmovscc %fcc1 */
3907 FMOVCC(1, s);
3908 break;
3909 case 0x042: /* V9 fmovdcc %fcc1 */
3910 FMOVCC(1, d);
3911 break;
3912 case 0x043: /* V9 fmovqcc %fcc1 */
3913 CHECK_FPU_FEATURE(dc, FLOAT128);
3914 FMOVCC(1, q);
3915 break;
3916 case 0x081: /* V9 fmovscc %fcc2 */
3917 FMOVCC(2, s);
3918 break;
3919 case 0x082: /* V9 fmovdcc %fcc2 */
3920 FMOVCC(2, d);
3921 break;
3922 case 0x083: /* V9 fmovqcc %fcc2 */
3923 CHECK_FPU_FEATURE(dc, FLOAT128);
3924 FMOVCC(2, q);
3925 break;
3926 case 0x0c1: /* V9 fmovscc %fcc3 */
3927 FMOVCC(3, s);
3928 break;
3929 case 0x0c2: /* V9 fmovdcc %fcc3 */
3930 FMOVCC(3, d);
3931 break;
3932 case 0x0c3: /* V9 fmovqcc %fcc3 */
3933 CHECK_FPU_FEATURE(dc, FLOAT128);
3934 FMOVCC(3, q);
3935 break;
3936 #undef FMOVCC
3937 #define FMOVCC(xcc, sz) \
3938 do { \
3939 DisasCompare cmp; \
3940 cond = GET_FIELD_SP(insn, 14, 17); \
3941 gen_compare(&cmp, xcc, cond, dc); \
3942 gen_fmov##sz(dc, &cmp, rd, rs2); \
3943 free_compare(&cmp); \
3944 } while (0)
3945
3946 case 0x101: /* V9 fmovscc %icc */
3947 FMOVCC(0, s);
3948 break;
3949 case 0x102: /* V9 fmovdcc %icc */
3950 FMOVCC(0, d);
3951 break;
3952 case 0x103: /* V9 fmovqcc %icc */
3953 CHECK_FPU_FEATURE(dc, FLOAT128);
3954 FMOVCC(0, q);
3955 break;
3956 case 0x181: /* V9 fmovscc %xcc */
3957 FMOVCC(1, s);
3958 break;
3959 case 0x182: /* V9 fmovdcc %xcc */
3960 FMOVCC(1, d);
3961 break;
3962 case 0x183: /* V9 fmovqcc %xcc */
3963 CHECK_FPU_FEATURE(dc, FLOAT128);
3964 FMOVCC(1, q);
3965 break;
3966 #undef FMOVCC
3967 #endif
3968 case 0x51: /* fcmps, V9 %fcc */
3969 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3970 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3971 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3972 break;
3973 case 0x52: /* fcmpd, V9 %fcc */
3974 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3975 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3976 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3977 break;
3978 case 0x53: /* fcmpq, V9 %fcc */
3979 CHECK_FPU_FEATURE(dc, FLOAT128);
3980 gen_op_load_fpr_QT0(QFPREG(rs1));
3981 gen_op_load_fpr_QT1(QFPREG(rs2));
3982 gen_op_fcmpq(rd & 3);
3983 break;
3984 case 0x55: /* fcmpes, V9 %fcc */
3985 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3986 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3987 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3988 break;
3989 case 0x56: /* fcmped, V9 %fcc */
3990 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3991 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3992 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3993 break;
3994 case 0x57: /* fcmpeq, V9 %fcc */
3995 CHECK_FPU_FEATURE(dc, FLOAT128);
3996 gen_op_load_fpr_QT0(QFPREG(rs1));
3997 gen_op_load_fpr_QT1(QFPREG(rs2));
3998 gen_op_fcmpeq(rd & 3);
3999 break;
4000 default:
4001 goto illegal_insn;
4002 }
4003 } else if (xop == 0x2) {
4004 TCGv dst = gen_dest_gpr(dc, rd);
4005 rs1 = GET_FIELD(insn, 13, 17);
4006 if (rs1 == 0) {
4007 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
4008 if (IS_IMM) { /* immediate */
4009 simm = GET_FIELDs(insn, 19, 31);
4010 tcg_gen_movi_tl(dst, simm);
4011 gen_store_gpr(dc, rd, dst);
4012 } else { /* register */
4013 rs2 = GET_FIELD(insn, 27, 31);
4014 if (rs2 == 0) {
4015 tcg_gen_movi_tl(dst, 0);
4016 gen_store_gpr(dc, rd, dst);
4017 } else {
4018 cpu_src2 = gen_load_gpr(dc, rs2);
4019 gen_store_gpr(dc, rd, cpu_src2);
4020 }
4021 }
4022 } else {
4023 cpu_src1 = get_src1(dc, insn);
4024 if (IS_IMM) { /* immediate */
4025 simm = GET_FIELDs(insn, 19, 31);
4026 tcg_gen_ori_tl(dst, cpu_src1, simm);
4027 gen_store_gpr(dc, rd, dst);
4028 } else { /* register */
4029 rs2 = GET_FIELD(insn, 27, 31);
4030 if (rs2 == 0) {
4031 /* mov shortcut: or x, %g0, y -> mov x, y */
4032 gen_store_gpr(dc, rd, cpu_src1);
4033 } else {
4034 cpu_src2 = gen_load_gpr(dc, rs2);
4035 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4036 gen_store_gpr(dc, rd, dst);
4037 }
4038 }
4039 }
4040 #ifdef TARGET_SPARC64
4041 } else if (xop == 0x25) { /* sll, V9 sllx */
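/* Insn bit 12 selects the 64-bit form (sllx/srlx/srax) with a 6-bit
   shift count; the 32-bit forms mask the count to 5 bits and, for
   srl/sra, first zero- or sign-extend the source from 32 bits. */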
4042 cpu_src1 = get_src1(dc, insn);
4043 if (IS_IMM) { /* immediate */
4044 simm = GET_FIELDs(insn, 20, 31);
4045 if (insn & (1 << 12)) {
4046 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4047 } else {
4048 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4049 }
4050 } else { /* register */
4051 rs2 = GET_FIELD(insn, 27, 31);
4052 cpu_src2 = gen_load_gpr(dc, rs2);
4053 cpu_tmp0 = get_temp_tl(dc);
4054 if (insn & (1 << 12)) {
4055 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4056 } else {
4057 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4058 }
4059 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4060 }
4061 gen_store_gpr(dc, rd, cpu_dst);
4062 } else if (xop == 0x26) { /* srl, V9 srlx */
4063 cpu_src1 = get_src1(dc, insn);
4064 if (IS_IMM) { /* immediate */
4065 simm = GET_FIELDs(insn, 20, 31);
4066 if (insn & (1 << 12)) {
4067 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4068 } else {
4069 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4070 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4071 }
4072 } else { /* register */
4073 rs2 = GET_FIELD(insn, 27, 31);
4074 cpu_src2 = gen_load_gpr(dc, rs2);
4075 cpu_tmp0 = get_temp_tl(dc);
4076 if (insn & (1 << 12)) {
4077 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4078 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4079 } else {
4080 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4081 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4082 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4083 }
4084 }
4085 gen_store_gpr(dc, rd, cpu_dst);
4086 } else if (xop == 0x27) { /* sra, V9 srax */
4087 cpu_src1 = get_src1(dc, insn);
4088 if (IS_IMM) { /* immediate */
4089 simm = GET_FIELDs(insn, 20, 31);
4090 if (insn & (1 << 12)) {
4091 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4092 } else {
4093 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4094 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4095 }
4096 } else { /* register */
4097 rs2 = GET_FIELD(insn, 27, 31);
4098 cpu_src2 = gen_load_gpr(dc, rs2);
4099 cpu_tmp0 = get_temp_tl(dc);
4100 if (insn & (1 << 12)) {
4101 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4102 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4103 } else {
4104 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4105 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4106 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4107 }
4108 }
4109 gen_store_gpr(dc, rd, cpu_dst);
4110 #endif
4111 } else if (xop < 0x36) {
4112 if (xop < 0x20) {
4113 cpu_src1 = get_src1(dc, insn);
4114 cpu_src2 = get_src2(dc, insn);
4115 switch (xop & ~0x10) {
4116 case 0x0: /* add */
4117 if (xop & 0x10) {
4118 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4119 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4120 dc->cc_op = CC_OP_ADD;
4121 } else {
4122 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4123 }
4124 break;
4125 case 0x1: /* and */
4126 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4127 if (xop & 0x10) {
4128 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4129 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4130 dc->cc_op = CC_OP_LOGIC;
4131 }
4132 break;
4133 case 0x2: /* or */
4134 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4135 if (xop & 0x10) {
4136 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4137 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4138 dc->cc_op = CC_OP_LOGIC;
4139 }
4140 break;
4141 case 0x3: /* xor */
4142 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4143 if (xop & 0x10) {
4144 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4145 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4146 dc->cc_op = CC_OP_LOGIC;
4147 }
4148 break;
4149 case 0x4: /* sub */
4150 if (xop & 0x10) {
4151 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4152 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4153 dc->cc_op = CC_OP_SUB;
4154 } else {
4155 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4156 }
4157 break;
4158 case 0x5: /* andn */
4159 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4160 if (xop & 0x10) {
4161 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4162 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4163 dc->cc_op = CC_OP_LOGIC;
4164 }
4165 break;
4166 case 0x6: /* orn */
4167 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4168 if (xop & 0x10) {
4169 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4170 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4171 dc->cc_op = CC_OP_LOGIC;
4172 }
4173 break;
4174 case 0x7: /* xorn */
4175 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4176 if (xop & 0x10) {
4177 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4178 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4179 dc->cc_op = CC_OP_LOGIC;
4180 }
4181 break;
4182 case 0x8: /* addx, V9 addc */
4183 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4184 (xop & 0x10));
4185 break;
4186 #ifdef TARGET_SPARC64
4187 case 0x9: /* V9 mulx */
4188 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4189 break;
4190 #endif
4191 case 0xa: /* umul */
4192 CHECK_IU_FEATURE(dc, MUL);
4193 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4194 if (xop & 0x10) {
4195 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4196 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4197 dc->cc_op = CC_OP_LOGIC;
4198 }
4199 break;
4200 case 0xb: /* smul */
4201 CHECK_IU_FEATURE(dc, MUL);
4202 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4203 if (xop & 0x10) {
4204 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4205 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4206 dc->cc_op = CC_OP_LOGIC;
4207 }
4208 break;
4209 case 0xc: /* subx, V9 subc */
4210 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4211 (xop & 0x10));
4212 break;
4213 #ifdef TARGET_SPARC64
4214 case 0xd: /* V9 udivx */
4215 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4216 break;
4217 #endif
4218 case 0xe: /* udiv */
4219 CHECK_IU_FEATURE(dc, DIV);
4220 if (xop & 0x10) {
4221 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4222 cpu_src2);
4223 dc->cc_op = CC_OP_DIV;
4224 } else {
4225 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4226 cpu_src2);
4227 }
4228 break;
4229 case 0xf: /* sdiv */
4230 CHECK_IU_FEATURE(dc, DIV);
4231 if (xop & 0x10) {
4232 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4233 cpu_src2);
4234 dc->cc_op = CC_OP_DIV;
4235 } else {
4236 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4237 cpu_src2);
4238 }
4239 break;
4240 default:
4241 goto illegal_insn;
4242 }
4243 gen_store_gpr(dc, rd, cpu_dst);
4244 } else {
4245 cpu_src1 = get_src1(dc, insn);
4246 cpu_src2 = get_src2(dc, insn);
4247 switch (xop) {
4248 case 0x20: /* taddcc */
4249 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4250 gen_store_gpr(dc, rd, cpu_dst);
4251 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4252 dc->cc_op = CC_OP_TADD;
4253 break;
4254 case 0x21: /* tsubcc */
4255 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4256 gen_store_gpr(dc, rd, cpu_dst);
4257 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4258 dc->cc_op = CC_OP_TSUB;
4259 break;
4260 case 0x22: /* taddcctv */
4261 gen_helper_taddcctv(cpu_dst, cpu_env,
4262 cpu_src1, cpu_src2);
4263 gen_store_gpr(dc, rd, cpu_dst);
4264 dc->cc_op = CC_OP_TADDTV;
4265 break;
4266 case 0x23: /* tsubcctv */
4267 gen_helper_tsubcctv(cpu_dst, cpu_env,
4268 cpu_src1, cpu_src2);
4269 gen_store_gpr(dc, rd, cpu_dst);
4270 dc->cc_op = CC_OP_TSUBTV;
4271 break;
4272 case 0x24: /* mulscc */
4273 update_psr(dc);
4274 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4275 gen_store_gpr(dc, rd, cpu_dst);
4276 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4277 dc->cc_op = CC_OP_ADD;
4278 break;
4279 #ifndef TARGET_SPARC64
4280 case 0x25: /* sll */
4281 if (IS_IMM) { /* immediate */
4282 simm = GET_FIELDs(insn, 20, 31);
4283 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4284 } else { /* register */
4285 cpu_tmp0 = get_temp_tl(dc);
4286 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4287 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4288 }
4289 gen_store_gpr(dc, rd, cpu_dst);
4290 break;
4291 case 0x26: /* srl */
4292 if (IS_IMM) { /* immediate */
4293 simm = GET_FIELDs(insn, 20, 31);
4294 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4295 } else { /* register */
4296 cpu_tmp0 = get_temp_tl(dc);
4297 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4298 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4299 }
4300 gen_store_gpr(dc, rd, cpu_dst);
4301 break;
4302 case 0x27: /* sra */
4303 if (IS_IMM) { /* immediate */
4304 simm = GET_FIELDs(insn, 20, 31);
4305 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4306 } else { /* register */
4307 cpu_tmp0 = get_temp_tl(dc);
4308 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4309 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4310 }
4311 gen_store_gpr(dc, rd, cpu_dst);
4312 break;
4313 #endif
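/* xop 0x30 is wrasr: rd selects which ancillary state register is
   written; rd == 0 is the classic wry. */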
4314 case 0x30:
4315 {
4316 cpu_tmp0 = get_temp_tl(dc);
4317 switch(rd) {
4318 case 0: /* wry */
4319 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4320 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4321 break;
4322 #ifndef TARGET_SPARC64
4323 case 0x01 ... 0x0f: /* undefined in the
4324 SPARCv8 manual, nop
4325 on the microSPARC
4326 II */
4327 case 0x10 ... 0x1f: /* implementation-dependent
4328 in the SPARCv8
4329 manual, nop on the
4330 microSPARC II */
4331 if ((rd == 0x13) && (dc->def->features &
4332 CPU_FEATURE_POWERDOWN)) {
4333 /* LEON3 power-down */
4334 save_state(dc);
4335 gen_helper_power_down(cpu_env);
4336 }
4337 break;
4338 #else
4339 case 0x2: /* V9 wrccr */
4340 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4341 gen_helper_wrccr(cpu_env, cpu_tmp0);
4342 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4343 dc->cc_op = CC_OP_FLAGS;
4344 break;
4345 case 0x3: /* V9 wrasi */
4346 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4347 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4348 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4349 offsetof(CPUSPARCState, asi));
4350 /* End TB to notice changed ASI. */
4351 save_state(dc);
4352 gen_op_next_insn();
4353 tcg_gen_exit_tb(NULL, 0);
4354 dc->base.is_jmp = DISAS_NORETURN;
4355 break;
4356 case 0x6: /* V9 wrfprs */
4357 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4358 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4359 dc->fprs_dirty = 0;
4360 save_state(dc);
4361 gen_op_next_insn();
4362 tcg_gen_exit_tb(NULL, 0);
4363 dc->base.is_jmp = DISAS_NORETURN;
4364 break;
4365 case 0xf: /* V9 sir, nop if user */
4366 #if !defined(CONFIG_USER_ONLY)
4367 if (supervisor(dc)) {
4368 ; // XXX
4369 }
4370 #endif
4371 break;
4372 case 0x13: /* Graphics Status */
4373 if (gen_trap_ifnofpu(dc)) {
4374 goto jmp_insn;
4375 }
4376 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4377 break;
4378 case 0x14: /* Softint set */
4379 if (!supervisor(dc))
4380 goto illegal_insn;
4381 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4382 gen_helper_set_softint(cpu_env, cpu_tmp0);
4383 break;
4384 case 0x15: /* Softint clear */
4385 if (!supervisor(dc))
4386 goto illegal_insn;
4387 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4388 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4389 break;
4390 case 0x16: /* Softint write */
4391 if (!supervisor(dc))
4392 goto illegal_insn;
4393 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4394 gen_helper_write_softint(cpu_env, cpu_tmp0);
4395 break;
4396 case 0x17: /* Tick compare */
4397 #if !defined(CONFIG_USER_ONLY)
4398 if (!supervisor(dc))
4399 goto illegal_insn;
4400 #endif
4401 {
4402 TCGv_ptr r_tickptr;
4403
4404 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4405 cpu_src2);
4406 r_tickptr = tcg_temp_new_ptr();
4407 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4408 offsetof(CPUSPARCState, tick));
4409 if (tb_cflags(dc->base.tb) &
4410 CF_USE_ICOUNT) {
4411 gen_io_start();
4412 }
4413 gen_helper_tick_set_limit(r_tickptr,
4414 cpu_tick_cmpr);
4415 tcg_temp_free_ptr(r_tickptr);
4416 /* End TB to handle timer interrupt */
4417 dc->base.is_jmp = DISAS_EXIT;
4418 }
4419 break;
4420 case 0x18: /* System tick */
4421 #if !defined(CONFIG_USER_ONLY)
4422 if (!supervisor(dc))
4423 goto illegal_insn;
4424 #endif
4425 {
4426 TCGv_ptr r_tickptr;
4427
4428 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4429 cpu_src2);
4430 r_tickptr = tcg_temp_new_ptr();
4431 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4432 offsetof(CPUSPARCState, stick));
4433 if (tb_cflags(dc->base.tb) &
4434 CF_USE_ICOUNT) {
4435 gen_io_start();
4436 }
4437 gen_helper_tick_set_count(r_tickptr,
4438 cpu_tmp0);
4439 tcg_temp_free_ptr(r_tickptr);
4440 /* End TB to handle timer interrupt */
4441 dc->base.is_jmp = DISAS_EXIT;
4442 }
4443 break;
4444 case 0x19: /* System tick compare */
4445 #if !defined(CONFIG_USER_ONLY)
4446 if (!supervisor(dc))
4447 goto illegal_insn;
4448 #endif
4449 {
4450 TCGv_ptr r_tickptr;
4451
4452 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4453 cpu_src2);
4454 r_tickptr = tcg_temp_new_ptr();
4455 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4456 offsetof(CPUSPARCState, stick));
4457 if (tb_cflags(dc->base.tb) &
4458 CF_USE_ICOUNT) {
4459 gen_io_start();
4460 }
4461 gen_helper_tick_set_limit(r_tickptr,
4462 cpu_stick_cmpr);
4463 tcg_temp_free_ptr(r_tickptr);
4464 /* End TB to handle timer interrupt */
4465 dc->base.is_jmp = DISAS_EXIT;
4466 }
4467 break;
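/* The tick cases above program timer hardware: under icount the
   helper call is preceded by gen_io_start(), and the TB ends via
   DISAS_EXIT so a newly raised timer interrupt is taken promptly. */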
4468
4469 case 0x10: /* Performance Control */
4470 case 0x11: /* Performance Instrumentation
4471 Counter */
4472 case 0x12: /* Dispatch Control */
4473 #endif
4474 default:
4475 goto illegal_insn;
4476 }
4477 }
4478 break;
4479 #if !defined(CONFIG_USER_ONLY)
4480 case 0x31: /* wrpsr, V9 saved, restored */
4481 {
4482 if (!supervisor(dc))
4483 goto priv_insn;
4484 #ifdef TARGET_SPARC64
4485 switch (rd) {
4486 case 0:
4487 gen_helper_saved(cpu_env);
4488 break;
4489 case 1:
4490 gen_helper_restored(cpu_env);
4491 break;
4492 case 2: /* UA2005 allclean */
4493 case 3: /* UA2005 otherw */
4494 case 4: /* UA2005 normalw */
4495 case 5: /* UA2005 invalw */
4496 // XXX
4497 default:
4498 goto illegal_insn;
4499 }
4500 #else
4501 cpu_tmp0 = get_temp_tl(dc);
4502 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4503 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4504 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4505 dc->cc_op = CC_OP_FLAGS;
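/* wrpsr can change CWP, PIL and the trap-enable bit, any of which
   may invalidate state baked into this TB, so end it here. */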
4506 save_state(dc);
4507 gen_op_next_insn();
4508 tcg_gen_exit_tb(NULL, 0);
4509 dc->base.is_jmp = DISAS_NORETURN;
4510 #endif
4511 }
4512 break;
4513 case 0x32: /* wrwim, V9 wrpr */
4514 {
4515 if (!supervisor(dc))
4516 goto priv_insn;
4517 cpu_tmp0 = get_temp_tl(dc);
4518 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4519 #ifdef TARGET_SPARC64
4520 switch (rd) {
4521 case 0: // tpc
4522 {
4523 TCGv_ptr r_tsptr;
4524
4525 r_tsptr = tcg_temp_new_ptr();
4526 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4527 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4528 offsetof(trap_state, tpc));
4529 tcg_temp_free_ptr(r_tsptr);
4530 }
4531 break;
4532 case 1: // tnpc
4533 {
4534 TCGv_ptr r_tsptr;
4535
4536 r_tsptr = tcg_temp_new_ptr();
4537 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4538 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4539 offsetof(trap_state, tnpc));
4540 tcg_temp_free_ptr(r_tsptr);
4541 }
4542 break;
4543 case 2: // tstate
4544 {
4545 TCGv_ptr r_tsptr;
4546
4547 r_tsptr = tcg_temp_new_ptr();
4548 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4549 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4550 offsetof(trap_state,
4551 tstate));
4552 tcg_temp_free_ptr(r_tsptr);
4553 }
4554 break;
4555 case 3: // tt
4556 {
4557 TCGv_ptr r_tsptr;
4558
4559 r_tsptr = tcg_temp_new_ptr();
4560 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4561 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4562 offsetof(trap_state, tt));
4563 tcg_temp_free_ptr(r_tsptr);
4564 }
4565 break;
4566 case 4: // tick
4567 {
4568 TCGv_ptr r_tickptr;
4569
4570 r_tickptr = tcg_temp_new_ptr();
4571 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4572 offsetof(CPUSPARCState, tick));
4573 if (tb_cflags(dc->base.tb) &
4574 CF_USE_ICOUNT) {
4575 gen_io_start();
4576 }
4577 gen_helper_tick_set_count(r_tickptr,
4578 cpu_tmp0);
4579 tcg_temp_free_ptr(r_tickptr);
4580 /* End TB to handle timer interrupt */
4581 dc->base.is_jmp = DISAS_EXIT;
4582 }
4583 break;
4584 case 5: // tba
4585 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4586 break;
4587 case 6: // pstate
4588 save_state(dc);
4589 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
4590 gen_io_start();
4591 }
4592 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4593 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
4594 gen_io_end();
4595 }
4596 dc->npc = DYNAMIC_PC;
4597 break;
4598 case 7: // tl
4599 save_state(dc);
4600 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4601 offsetof(CPUSPARCState, tl));
4602 dc->npc = DYNAMIC_PC;
4603 break;
4604 case 8: // pil
4605 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
4606 gen_io_start();
4607 }
4608 gen_helper_wrpil(cpu_env, cpu_tmp0);
4609 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
4610 gen_io_end();
4611 }
4612 break;
4613 case 9: // cwp
4614 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4615 break;
4616 case 10: // cansave
4617 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4618 offsetof(CPUSPARCState,
4619 cansave));
4620 break;
4621 case 11: // canrestore
4622 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4623 offsetof(CPUSPARCState,
4624 canrestore));
4625 break;
4626 case 12: // cleanwin
4627 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4628 offsetof(CPUSPARCState,
4629 cleanwin));
4630 break;
4631 case 13: // otherwin
4632 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4633 offsetof(CPUSPARCState,
4634 otherwin));
4635 break;
4636 case 14: // wstate
4637 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4638 offsetof(CPUSPARCState,
4639 wstate));
4640 break;
4641 case 16: // UA2005 gl
4642 CHECK_IU_FEATURE(dc, GL);
4643 gen_helper_wrgl(cpu_env, cpu_tmp0);
4644 break;
4645 case 26: // UA2005 strand status
4646 CHECK_IU_FEATURE(dc, HYPV);
4647 if (!hypervisor(dc))
4648 goto priv_insn;
4649 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4650 break;
4651 default:
4652 goto illegal_insn;
4653 }
4654 #else
4655 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4656 if (dc->def->nwindows != 32) {
4657 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4658 (1 << dc->def->nwindows) - 1);
4659 }
4660 #endif
4661 }
4662 break;
4663 case 0x33: /* wrtbr, UA2005 wrhpr */
4664 {
4665 #ifndef TARGET_SPARC64
4666 if (!supervisor(dc))
4667 goto priv_insn;
4668 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4669 #else
4670 CHECK_IU_FEATURE(dc, HYPV);
4671 if (!hypervisor(dc))
4672 goto priv_insn;
4673 cpu_tmp0 = get_temp_tl(dc);
4674 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4675 switch (rd) {
4676 case 0: // hpstate
4677 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4678 offsetof(CPUSPARCState,
4679 hpstate));
4680 save_state(dc);
4681 gen_op_next_insn();
4682 tcg_gen_exit_tb(NULL, 0);
4683 dc->base.is_jmp = DISAS_NORETURN;
4684 break;
4685 case 1: // htstate
4686 // XXX gen_op_wrhtstate();
4687 break;
4688 case 3: // hintp
4689 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4690 break;
4691 case 5: // htba
4692 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4693 break;
4694 case 31: // hstick_cmpr
4695 {
4696 TCGv_ptr r_tickptr;
4697
4698 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4699 r_tickptr = tcg_temp_new_ptr();
4700 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4701 offsetof(CPUSPARCState, hstick));
4702 if (tb_cflags(dc->base.tb) &
4703 CF_USE_ICOUNT) {
4704 gen_io_start();
4705 }
4706 gen_helper_tick_set_limit(r_tickptr,
4707 cpu_hstick_cmpr);
4708 tcg_temp_free_ptr(r_tickptr);
4709 if (tb_cflags(dc->base.tb) &
4710 CF_USE_ICOUNT) {
4711 gen_io_end();
4712 }
4713 /* End TB to handle timer interrupt */
4714 dc->base.is_jmp = DISAS_EXIT;
4715 }
4716 break;
4717 case 6: // hver readonly
4718 default:
4719 goto illegal_insn;
4720 }
4721 #endif
4722 }
4723 break;
4724 #endif
4725 #ifdef TARGET_SPARC64
4726 case 0x2c: /* V9 movcc */
4727 {
4728 int cc = GET_FIELD_SP(insn, 11, 12);
4729 int cond = GET_FIELD_SP(insn, 14, 17);
4730 DisasCompare cmp;
4731 TCGv dst;
4732
4733 if (insn & (1 << 18)) {
4734 if (cc == 0) {
4735 gen_compare(&cmp, 0, cond, dc);
4736 } else if (cc == 2) {
4737 gen_compare(&cmp, 1, cond, dc);
4738 } else {
4739 goto illegal_insn;
4740 }
4741 } else {
4742 gen_fcompare(&cmp, cc, cond);
4743 }
4744
4745 /* The get_src2 above loaded the normal 13-bit
4746 immediate field, not the 11-bit field we have
4747 in movcc. But it did handle the reg case. */
4748 if (IS_IMM) {
4749 simm = GET_FIELD_SPs(insn, 0, 10);
4750 tcg_gen_movi_tl(cpu_src2, simm);
4751 }
4752
4753 dst = gen_load_gpr(dc, rd);
4754 tcg_gen_movcond_tl(cmp.cond, dst,
4755 cmp.c1, cmp.c2,
4756 cpu_src2, dst);
4757 free_compare(&cmp);
4758 gen_store_gpr(dc, rd, dst);
4759 break;
4760 }
4761 case 0x2d: /* V9 sdivx */
4762 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4763 gen_store_gpr(dc, rd, cpu_dst);
4764 break;
4765 case 0x2e: /* V9 popc */
4766 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4767 gen_store_gpr(dc, rd, cpu_dst);
4768 break;
4769 case 0x2f: /* V9 movr */
4770 {
4771 int cond = GET_FIELD_SP(insn, 10, 12);
4772 DisasCompare cmp;
4773 TCGv dst;
4774
4775 gen_compare_reg(&cmp, cond, cpu_src1);
4776
4777 /* The get_src2 above loaded the normal 13-bit
4778 immediate field, not the 10-bit field we have
4779 in movr. But it did handle the reg case. */
4780 if (IS_IMM) {
4781 simm = GET_FIELD_SPs(insn, 0, 9);
4782 tcg_gen_movi_tl(cpu_src2, simm);
4783 }
4784
4785 dst = gen_load_gpr(dc, rd);
4786 tcg_gen_movcond_tl(cmp.cond, dst,
4787 cmp.c1, cmp.c2,
4788 cpu_src2, dst);
4789 free_compare(&cmp);
4790 gen_store_gpr(dc, rd, dst);
4791 break;
4792 }
4793 #endif
4794 default:
4795 goto illegal_insn;
4796 }
4797 }
4798 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4799 #ifdef TARGET_SPARC64
4800 int opf = GET_FIELD_SP(insn, 5, 13);
4801 rs1 = GET_FIELD(insn, 13, 17);
4802 rs2 = GET_FIELD(insn, 27, 31);
4803 if (gen_trap_ifnofpu(dc)) {
4804 goto jmp_insn;
4805 }
4806
4807 switch (opf) {
4808 case 0x000: /* VIS I edge8cc */
4809 CHECK_FPU_FEATURE(dc, VIS1);
4810 cpu_src1 = gen_load_gpr(dc, rs1);
4811 cpu_src2 = gen_load_gpr(dc, rs2);
4812 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4813 gen_store_gpr(dc, rd, cpu_dst);
4814 break;
4815 case 0x001: /* VIS II edge8n */
4816 CHECK_FPU_FEATURE(dc, VIS2);
4817 cpu_src1 = gen_load_gpr(dc, rs1);
4818 cpu_src2 = gen_load_gpr(dc, rs2);
4819 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4820 gen_store_gpr(dc, rd, cpu_dst);
4821 break;
4822 case 0x002: /* VIS I edge8lcc */
4823 CHECK_FPU_FEATURE(dc, VIS1);
4824 cpu_src1 = gen_load_gpr(dc, rs1);
4825 cpu_src2 = gen_load_gpr(dc, rs2);
4826 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4827 gen_store_gpr(dc, rd, cpu_dst);
4828 break;
4829 case 0x003: /* VIS II edge8ln */
4830 CHECK_FPU_FEATURE(dc, VIS2);
4831 cpu_src1 = gen_load_gpr(dc, rs1);
4832 cpu_src2 = gen_load_gpr(dc, rs2);
4833 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4834 gen_store_gpr(dc, rd, cpu_dst);
4835 break;
4836 case 0x004: /* VIS I edge16cc */
4837 CHECK_FPU_FEATURE(dc, VIS1);
4838 cpu_src1 = gen_load_gpr(dc, rs1);
4839 cpu_src2 = gen_load_gpr(dc, rs2);
4840 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4841 gen_store_gpr(dc, rd, cpu_dst);
4842 break;
4843 case 0x005: /* VIS II edge16n */
4844 CHECK_FPU_FEATURE(dc, VIS2);
4845 cpu_src1 = gen_load_gpr(dc, rs1);
4846 cpu_src2 = gen_load_gpr(dc, rs2);
4847 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4848 gen_store_gpr(dc, rd, cpu_dst);
4849 break;
4850 case 0x006: /* VIS I edge16lcc */
4851 CHECK_FPU_FEATURE(dc, VIS1);
4852 cpu_src1 = gen_load_gpr(dc, rs1);
4853 cpu_src2 = gen_load_gpr(dc, rs2);
4854 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4855 gen_store_gpr(dc, rd, cpu_dst);
4856 break;
4857 case 0x007: /* VIS II edge16ln */
4858 CHECK_FPU_FEATURE(dc, VIS2);
4859 cpu_src1 = gen_load_gpr(dc, rs1);
4860 cpu_src2 = gen_load_gpr(dc, rs2);
4861 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4862 gen_store_gpr(dc, rd, cpu_dst);
4863 break;
4864 case 0x008: /* VIS I edge32cc */
4865 CHECK_FPU_FEATURE(dc, VIS1);
4866 cpu_src1 = gen_load_gpr(dc, rs1);
4867 cpu_src2 = gen_load_gpr(dc, rs2);
4868 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4869 gen_store_gpr(dc, rd, cpu_dst);
4870 break;
4871 case 0x009: /* VIS II edge32n */
4872 CHECK_FPU_FEATURE(dc, VIS2);
4873 cpu_src1 = gen_load_gpr(dc, rs1);
4874 cpu_src2 = gen_load_gpr(dc, rs2);
4875 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4876 gen_store_gpr(dc, rd, cpu_dst);
4877 break;
4878 case 0x00a: /* VIS I edge32lcc */
4879 CHECK_FPU_FEATURE(dc, VIS1);
4880 cpu_src1 = gen_load_gpr(dc, rs1);
4881 cpu_src2 = gen_load_gpr(dc, rs2);
4882 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4883 gen_store_gpr(dc, rd, cpu_dst);
4884 break;
4885 case 0x00b: /* VIS II edge32ln */
4886 CHECK_FPU_FEATURE(dc, VIS2);
4887 cpu_src1 = gen_load_gpr(dc, rs1);
4888 cpu_src2 = gen_load_gpr(dc, rs2);
4889 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4890 gen_store_gpr(dc, rd, cpu_dst);
4891 break;
4892 case 0x010: /* VIS I array8 */
4893 CHECK_FPU_FEATURE(dc, VIS1);
4894 cpu_src1 = gen_load_gpr(dc, rs1);
4895 cpu_src2 = gen_load_gpr(dc, rs2);
4896 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4897 gen_store_gpr(dc, rd, cpu_dst);
4898 break;
4899 case 0x012: /* VIS I array16 */
4900 CHECK_FPU_FEATURE(dc, VIS1);
4901 cpu_src1 = gen_load_gpr(dc, rs1);
4902 cpu_src2 = gen_load_gpr(dc, rs2);
4903 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4904 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4905 gen_store_gpr(dc, rd, cpu_dst);
4906 break;
4907 case 0x014: /* VIS I array32 */
4908 CHECK_FPU_FEATURE(dc, VIS1);
4909 cpu_src1 = gen_load_gpr(dc, rs1);
4910 cpu_src2 = gen_load_gpr(dc, rs2);
4911 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4912 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4913 gen_store_gpr(dc, rd, cpu_dst);
4914 break;
4915 case 0x018: /* VIS I alignaddr */
4916 CHECK_FPU_FEATURE(dc, VIS1);
4917 cpu_src1 = gen_load_gpr(dc, rs1);
4918 cpu_src2 = gen_load_gpr(dc, rs2);
4919 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4920 gen_store_gpr(dc, rd, cpu_dst);
4921 break;
4922 case 0x01a: /* VIS I alignaddrl */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 cpu_src1 = gen_load_gpr(dc, rs1);
4925 cpu_src2 = gen_load_gpr(dc, rs2);
4926 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4927 gen_store_gpr(dc, rd, cpu_dst);
4928 break;
4929 case 0x019: /* VIS II bmask */
4930 CHECK_FPU_FEATURE(dc, VIS2);
4931 cpu_src1 = gen_load_gpr(dc, rs1);
4932 cpu_src2 = gen_load_gpr(dc, rs2);
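/* bmask: rd = rs1 + rs2; the same value also goes to GSR.mask
   (bits 63:32 of GSR), which a later bshuffle consumes. */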
4933 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4934 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4935 gen_store_gpr(dc, rd, cpu_dst);
4936 break;
4937 case 0x020: /* VIS I fcmple16 */
4938 CHECK_FPU_FEATURE(dc, VIS1);
4939 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4940 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4941 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4942 gen_store_gpr(dc, rd, cpu_dst);
4943 break;
4944 case 0x022: /* VIS I fcmpne16 */
4945 CHECK_FPU_FEATURE(dc, VIS1);
4946 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4947 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4948 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4949 gen_store_gpr(dc, rd, cpu_dst);
4950 break;
4951 case 0x024: /* VIS I fcmple32 */
4952 CHECK_FPU_FEATURE(dc, VIS1);
4953 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4954 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4955 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4956 gen_store_gpr(dc, rd, cpu_dst);
4957 break;
4958 case 0x026: /* VIS I fcmpne32 */
4959 CHECK_FPU_FEATURE(dc, VIS1);
4960 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4961 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4962 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4963 gen_store_gpr(dc, rd, cpu_dst);
4964 break;
4965 case 0x028: /* VIS I fcmpgt16 */
4966 CHECK_FPU_FEATURE(dc, VIS1);
4967 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4968 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4969 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4970 gen_store_gpr(dc, rd, cpu_dst);
4971 break;
4972 case 0x02a: /* VIS I fcmpeq16 */
4973 CHECK_FPU_FEATURE(dc, VIS1);
4974 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4975 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4976 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4977 gen_store_gpr(dc, rd, cpu_dst);
4978 break;
4979 case 0x02c: /* VIS I fcmpgt32 */
4980 CHECK_FPU_FEATURE(dc, VIS1);
4981 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4982 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4983 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4984 gen_store_gpr(dc, rd, cpu_dst);
4985 break;
4986 case 0x02e: /* VIS I fcmpeq32 */
4987 CHECK_FPU_FEATURE(dc, VIS1);
4988 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4989 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4990 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4991 gen_store_gpr(dc, rd, cpu_dst);
4992 break;
4993 case 0x031: /* VIS I fmul8x16 */
4994 CHECK_FPU_FEATURE(dc, VIS1);
4995 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4996 break;
4997 case 0x033: /* VIS I fmul8x16au */
4998 CHECK_FPU_FEATURE(dc, VIS1);
4999 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
5000 break;
5001 case 0x035: /* VIS I fmul8x16al */
5002 CHECK_FPU_FEATURE(dc, VIS1);
5003 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
5004 break;
5005 case 0x036: /* VIS I fmul8sux16 */
5006 CHECK_FPU_FEATURE(dc, VIS1);
5007 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
5008 break;
5009 case 0x037: /* VIS I fmul8ulx16 */
5010 CHECK_FPU_FEATURE(dc, VIS1);
5011 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
5012 break;
5013 case 0x038: /* VIS I fmuld8sux16 */
5014 CHECK_FPU_FEATURE(dc, VIS1);
5015 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
5016 break;
5017 case 0x039: /* VIS I fmuld8ulx16 */
5018 CHECK_FPU_FEATURE(dc, VIS1);
5019 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
5020 break;
5021 case 0x03a: /* VIS I fpack32 */
5022 CHECK_FPU_FEATURE(dc, VIS1);
5023 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
5024 break;
5025 case 0x03b: /* VIS I fpack16 */
5026 CHECK_FPU_FEATURE(dc, VIS1);
5027 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5028 cpu_dst_32 = gen_dest_fpr_F(dc);
5029 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5030 gen_store_fpr_F(dc, rd, cpu_dst_32);
5031 break;
5032 case 0x03d: /* VIS I fpackfix */
5033 CHECK_FPU_FEATURE(dc, VIS1);
5034 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5035 cpu_dst_32 = gen_dest_fpr_F(dc);
5036 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5037 gen_store_fpr_F(dc, rd, cpu_dst_32);
5038 break;
5039 case 0x03e: /* VIS I pdist */
5040 CHECK_FPU_FEATURE(dc, VIS1);
5041 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
5042 break;
5043 case 0x048: /* VIS I faligndata */
5044 CHECK_FPU_FEATURE(dc, VIS1);
5045 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
5046 break;
5047 case 0x04b: /* VIS I fpmerge */
5048 CHECK_FPU_FEATURE(dc, VIS1);
5049 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
5050 break;
5051 case 0x04c: /* VIS II bshuffle */
5052 CHECK_FPU_FEATURE(dc, VIS2);
5053 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
5054 break;
5055 case 0x04d: /* VIS I fexpand */
5056 CHECK_FPU_FEATURE(dc, VIS1);
5057 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5058 break;
5059 case 0x050: /* VIS I fpadd16 */
5060 CHECK_FPU_FEATURE(dc, VIS1);
5061 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5062 break;
5063 case 0x051: /* VIS I fpadd16s */
5064 CHECK_FPU_FEATURE(dc, VIS1);
5065 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5066 break;
5067 case 0x052: /* VIS I fpadd32 */
5068 CHECK_FPU_FEATURE(dc, VIS1);
5069 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5070 break;
5071 case 0x053: /* VIS I fpadd32s */
5072 CHECK_FPU_FEATURE(dc, VIS1);
5073 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5074 break;
5075 case 0x054: /* VIS I fpsub16 */
5076 CHECK_FPU_FEATURE(dc, VIS1);
5077 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5078 break;
5079 case 0x055: /* VIS I fpsub16s */
5080 CHECK_FPU_FEATURE(dc, VIS1);
5081 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5082 break;
5083 case 0x056: /* VIS I fpsub32 */
5084 CHECK_FPU_FEATURE(dc, VIS1);
5085 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5086 break;
5087 case 0x057: /* VIS I fpsub32s */
5088 CHECK_FPU_FEATURE(dc, VIS1);
5089 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5090 break;
5091 case 0x060: /* VIS I fzero */
5092 CHECK_FPU_FEATURE(dc, VIS1);
5093 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5094 tcg_gen_movi_i64(cpu_dst_64, 0);
5095 gen_store_fpr_D(dc, rd, cpu_dst_64);
5096 break;
5097 case 0x061: /* VIS I fzeros */
5098 CHECK_FPU_FEATURE(dc, VIS1);
5099 cpu_dst_32 = gen_dest_fpr_F(dc);
5100 tcg_gen_movi_i32(cpu_dst_32, 0);
5101 gen_store_fpr_F(dc, rd, cpu_dst_32);
5102 break;
5103 case 0x062: /* VIS I fnor */
5104 CHECK_FPU_FEATURE(dc, VIS1);
5105 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5106 break;
5107 case 0x063: /* VIS I fnors */
5108 CHECK_FPU_FEATURE(dc, VIS1);
5109 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5110 break;
5111 case 0x064: /* VIS I fandnot2 */
5112 CHECK_FPU_FEATURE(dc, VIS1);
5113 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5114 break;
5115 case 0x065: /* VIS I fandnot2s */
5116 CHECK_FPU_FEATURE(dc, VIS1);
5117 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5118 break;
5119 case 0x066: /* VIS I fnot2 */
5120 CHECK_FPU_FEATURE(dc, VIS1);
5121 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5122 break;
5123 case 0x067: /* VIS I fnot2s */
5124 CHECK_FPU_FEATURE(dc, VIS1);
5125 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5126 break;
5127 case 0x068: /* VIS I fandnot1 */
5128 CHECK_FPU_FEATURE(dc, VIS1);
5129 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5130 break;
5131 case 0x069: /* VIS I fandnot1s */
5132 CHECK_FPU_FEATURE(dc, VIS1);
5133 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5134 break;
5135 case 0x06a: /* VIS I fnot1 */
5136 CHECK_FPU_FEATURE(dc, VIS1);
5137 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5138 break;
5139 case 0x06b: /* VIS I fnot1s */
5140 CHECK_FPU_FEATURE(dc, VIS1);
5141 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5142 break;
5143 case 0x06c: /* VIS I fxor */
5144 CHECK_FPU_FEATURE(dc, VIS1);
5145 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5146 break;
5147 case 0x06d: /* VIS I fxors */
5148 CHECK_FPU_FEATURE(dc, VIS1);
5149 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5150 break;
5151 case 0x06e: /* VIS I fnand */
5152 CHECK_FPU_FEATURE(dc, VIS1);
5153 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5154 break;
5155 case 0x06f: /* VIS I fnands */
5156 CHECK_FPU_FEATURE(dc, VIS1);
5157 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5158 break;
5159 case 0x070: /* VIS I fand */
5160 CHECK_FPU_FEATURE(dc, VIS1);
5161 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5162 break;
5163 case 0x071: /* VIS I fands */
5164 CHECK_FPU_FEATURE(dc, VIS1);
5165 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5166 break;
5167 case 0x072: /* VIS I fxnor */
5168 CHECK_FPU_FEATURE(dc, VIS1);
5169 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5170 break;
5171 case 0x073: /* VIS I fxnors */
5172 CHECK_FPU_FEATURE(dc, VIS1);
5173 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5174 break;
5175 case 0x074: /* VIS I fsrc1 */
5176 CHECK_FPU_FEATURE(dc, VIS1);
5177 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5178 gen_store_fpr_D(dc, rd, cpu_src1_64);
5179 break;
5180 case 0x075: /* VIS I fsrc1s */
5181 CHECK_FPU_FEATURE(dc, VIS1);
5182 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5183 gen_store_fpr_F(dc, rd, cpu_src1_32);
5184 break;
5185 case 0x076: /* VIS I fornot2 */
5186 CHECK_FPU_FEATURE(dc, VIS1);
5187 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5188 break;
5189 case 0x077: /* VIS I fornot2s */
5190 CHECK_FPU_FEATURE(dc, VIS1);
5191 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5192 break;
5193 case 0x078: /* VIS I fsrc2 */
5194 CHECK_FPU_FEATURE(dc, VIS1);
5195 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5196 gen_store_fpr_D(dc, rd, cpu_src1_64);
5197 break;
5198 case 0x079: /* VIS I fsrc2s */
5199 CHECK_FPU_FEATURE(dc, VIS1);
5200 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5201 gen_store_fpr_F(dc, rd, cpu_src1_32);
5202 break;
5203 case 0x07a: /* VIS I fornot1 */
5204 CHECK_FPU_FEATURE(dc, VIS1);
5205 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5206 break;
5207 case 0x07b: /* VIS I fornot1s */
5208 CHECK_FPU_FEATURE(dc, VIS1);
5209 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5210 break;
5211 case 0x07c: /* VIS I for */
5212 CHECK_FPU_FEATURE(dc, VIS1);
5213 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5214 break;
5215 case 0x07d: /* VIS I fors */
5216 CHECK_FPU_FEATURE(dc, VIS1);
5217 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5218 break;
5219 case 0x07e: /* VIS I fone */
5220 CHECK_FPU_FEATURE(dc, VIS1);
5221 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5222 tcg_gen_movi_i64(cpu_dst_64, -1);
5223 gen_store_fpr_D(dc, rd, cpu_dst_64);
5224 break;
5225 case 0x07f: /* VIS I fones */
5226 CHECK_FPU_FEATURE(dc, VIS1);
5227 cpu_dst_32 = gen_dest_fpr_F(dc);
5228 tcg_gen_movi_i32(cpu_dst_32, -1);
5229 gen_store_fpr_F(dc, rd, cpu_dst_32);
5230 break;
5231 case 0x080: /* VIS I shutdown */
5232 case 0x081: /* VIS II siam */
5233 // XXX
5234 goto illegal_insn;
5235 default:
5236 goto illegal_insn;
5237 }
5238 #else
5239 goto ncp_insn;
5240 #endif
5241 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5242 #ifdef TARGET_SPARC64
5243 goto illegal_insn;
5244 #else
5245 goto ncp_insn;
5246 #endif
5247 #ifdef TARGET_SPARC64
5248 } else if (xop == 0x39) { /* V9 return */
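/* V9 return behaves like jmpl with an implicit restore: compute
   the target, pop the register window, then set npc dynamically. */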
5249 save_state(dc);
5250 cpu_src1 = get_src1(dc, insn);
5251 cpu_tmp0 = get_temp_tl(dc);
5252 if (IS_IMM) { /* immediate */
5253 simm = GET_FIELDs(insn, 19, 31);
5254 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5255 } else { /* register */
5256 rs2 = GET_FIELD(insn, 27, 31);
5257 if (rs2) {
5258 cpu_src2 = gen_load_gpr(dc, rs2);
5259 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5260 } else {
5261 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5262 }
5263 }
5264 gen_helper_restore(cpu_env);
5265 gen_mov_pc_npc(dc);
5266 gen_check_align(cpu_tmp0, 3);
5267 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5268 dc->npc = DYNAMIC_PC;
5269 goto jmp_insn;
5270 #endif
5271 } else {
5272 cpu_src1 = get_src1(dc, insn);
5273 cpu_tmp0 = get_temp_tl(dc);
5274 if (IS_IMM) { /* immediate */
5275 simm = GET_FIELDs(insn, 19, 31);
5276 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5277 } else { /* register */
5278 rs2 = GET_FIELD(insn, 27, 31);
5279 if (rs2) {
5280 cpu_src2 = gen_load_gpr(dc, rs2);
5281 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5282 } else {
5283 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5284 }
5285 }
5286 switch (xop) {
5287 case 0x38: /* jmpl */
5288 {
5289 TCGv t = gen_dest_gpr(dc, rd);
5290 tcg_gen_movi_tl(t, dc->pc);
5291 gen_store_gpr(dc, rd, t);
5292
5293 gen_mov_pc_npc(dc);
5294 gen_check_align(cpu_tmp0, 3);
5295 gen_address_mask(dc, cpu_tmp0);
5296 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5297 dc->npc = DYNAMIC_PC;
5298 }
5299 goto jmp_insn;
5300 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5301 case 0x39: /* rett, V9 return */
5302 {
5303 if (!supervisor(dc))
5304 goto priv_insn;
5305 gen_mov_pc_npc(dc);
5306 gen_check_align(cpu_tmp0, 3);
5307 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5308 dc->npc = DYNAMIC_PC;
5309 gen_helper_rett(cpu_env);
5310 }
5311 goto jmp_insn;
5312 #endif
5313 case 0x3b: /* flush */
5314 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5315 goto unimp_flush;
5316 /* nop */
5317 break;
5318 case 0x3c: /* save */
5319 gen_helper_save(cpu_env);
5320 gen_store_gpr(dc, rd, cpu_tmp0);
5321 break;
5322 case 0x3d: /* restore */
5323 gen_helper_restore(cpu_env);
5324 gen_store_gpr(dc, rd, cpu_tmp0);
5325 break;
5326 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5327 case 0x3e: /* V9 done/retry */
5328 {
5329 switch (rd) {
5330 case 0:
5331 if (!supervisor(dc))
5332 goto priv_insn;
5333 dc->npc = DYNAMIC_PC;
5334 dc->pc = DYNAMIC_PC;
5335 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
5336 gen_io_start();
5337 }
5338 gen_helper_done(cpu_env);
5339 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
5340 gen_io_end();
5341 }
5342 goto jmp_insn;
5343 case 1:
5344 if (!supervisor(dc))
5345 goto priv_insn;
5346 dc->npc = DYNAMIC_PC;
5347 dc->pc = DYNAMIC_PC;
5348 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
5349 gen_io_start();
5350 }
5351 gen_helper_retry(cpu_env);
5352 if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
5353 gen_io_end();
5354 }
5355 goto jmp_insn;
5356 default:
5357 goto illegal_insn;
5358 }
5359 }
5360 break;
5361 #endif
5362 default:
5363 goto illegal_insn;
5364 }
5365 }
5366 break;
5367 }
5368 break;
5369 case 3: /* load/store instructions */
5370 {
5371 unsigned int xop = GET_FIELD(insn, 7, 12);
5372 /* ??? gen_address_mask prevents us from using a source
5373 register directly. Always generate a temporary. */
5374 TCGv cpu_addr = get_temp_tl(dc);
5375
5376 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5377 if (xop == 0x3c || xop == 0x3e) {
5378 /* V9 casa/casxa : no offset */
5379 } else if (IS_IMM) { /* immediate */
5380 simm = GET_FIELDs(insn, 19, 31);
5381 if (simm != 0) {
5382 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5383 }
5384 } else { /* register */
5385 rs2 = GET_FIELD(insn, 27, 31);
5386 if (rs2 != 0) {
5387 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5388 }
5389 }
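/* Dispatch on xop: the ranges below select the integer-load group;
   FP loads, integer stores and FP stores are handled afterwards. */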
5390 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5391 (xop > 0x17 && xop <= 0x1d ) ||
5392 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5393 TCGv cpu_val = gen_dest_gpr(dc, rd);
5394
5395 switch (xop) {
5396 case 0x0: /* ld, V9 lduw, load unsigned word */
5397 gen_address_mask(dc, cpu_addr);
5398 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5399 break;
5400 case 0x1: /* ldub, load unsigned byte */
5401 gen_address_mask(dc, cpu_addr);
5402 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5403 break;
5404 case 0x2: /* lduh, load unsigned halfword */
5405 gen_address_mask(dc, cpu_addr);
5406 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5407 break;
5408 case 0x3: /* ldd, load double word */
5409 if (rd & 1)
5410 goto illegal_insn;
5411 else {
5412 TCGv_i64 t64;
5413
5414 gen_address_mask(dc, cpu_addr);
5415 t64 = tcg_temp_new_i64();
5416 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
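/* ldd fills an even/odd register pair: the high word of the
   64-bit value goes to rd, the low word to rd + 1. */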
5417 tcg_gen_trunc_i64_tl(cpu_val, t64);
5418 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5419 gen_store_gpr(dc, rd + 1, cpu_val);
5420 tcg_gen_shri_i64(t64, t64, 32);
5421 tcg_gen_trunc_i64_tl(cpu_val, t64);
5422 tcg_temp_free_i64(t64);
5423 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5424 }
5425 break;
5426 case 0x9: /* ldsb, load signed byte */
5427 gen_address_mask(dc, cpu_addr);
5428 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5429 break;
5430 case 0xa: /* ldsh, load signed halfword */
5431 gen_address_mask(dc, cpu_addr);
5432 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5433 break;
5434 case 0xd: /* ldstub */
5435 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5436 break;
5437 case 0x0f:
5438 /* swap: atomically exchange a register with memory */
5439 CHECK_IU_FEATURE(dc, SWAP);
5440 cpu_src1 = gen_load_gpr(dc, rd);
5441 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5442 dc->mem_idx, MO_TEUL);
5443 break;
5444 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5445 case 0x10: /* lda, V9 lduwa, load word alternate */
5446 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5447 break;
5448 case 0x11: /* lduba, load unsigned byte alternate */
5449 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5450 break;
5451 case 0x12: /* lduha, load unsigned halfword alternate */
5452 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5453 break;
5454 case 0x13: /* ldda, load double word alternate */
5455 if (rd & 1) {
5456 goto illegal_insn;
5457 }
5458 gen_ldda_asi(dc, cpu_addr, insn, rd);
5459 goto skip_move;
5460 case 0x19: /* ldsba, load signed byte alternate */
5461 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5462 break;
5463 case 0x1a: /* ldsha, load signed halfword alternate */
5464 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5465 break;
5466 case 0x1d: /* ldstuba -- XXX: should be atomic */
5467 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5468 break;
5469 case 0x1f: /* swapa: atomically swap a register with
5470 alternate-space memory */
5471 CHECK_IU_FEATURE(dc, SWAP);
5472 cpu_src1 = gen_load_gpr(dc, rd);
5473 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5474 break;
5475
5476 #ifndef TARGET_SPARC64
5477 case 0x30: /* ldc */
5478 case 0x31: /* ldcsr */
5479 case 0x33: /* lddc */
5480 goto ncp_insn;
5481 #endif
5482 #endif
5483 #ifdef TARGET_SPARC64
5484 case 0x08: /* V9 ldsw */
5485 gen_address_mask(dc, cpu_addr);
5486 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5487 break;
5488 case 0x0b: /* V9 ldx */
5489 gen_address_mask(dc, cpu_addr);
5490 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5491 break;
5492 case 0x18: /* V9 ldswa */
5493 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5494 break;
5495 case 0x1b: /* V9 ldxa */
5496 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5497 break;
5498 case 0x2d: /* V9 prefetch, no effect */
5499 goto skip_move;
5500 case 0x30: /* V9 ldfa */
5501 if (gen_trap_ifnofpu(dc)) {
5502 goto jmp_insn;
5503 }
5504 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5505 gen_update_fprs_dirty(dc, rd);
5506 goto skip_move;
5507 case 0x33: /* V9 lddfa */
5508 if (gen_trap_ifnofpu(dc)) {
5509 goto jmp_insn;
5510 }
5511 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5512 gen_update_fprs_dirty(dc, DFPREG(rd));
5513 goto skip_move;
5514 case 0x3d: /* V9 prefetcha, no effect */
5515 goto skip_move;
5516 case 0x32: /* V9 ldqfa */
5517 CHECK_FPU_FEATURE(dc, FLOAT128);
5518 if (gen_trap_ifnofpu(dc)) {
5519 goto jmp_insn;
5520 }
5521 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5522 gen_update_fprs_dirty(dc, QFPREG(rd));
5523 goto skip_move;
5524 #endif
5525 default:
5526 goto illegal_insn;
5527 }
5528 gen_store_gpr(dc, rd, cpu_val);
5529 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5530 skip_move: ;
5531 #endif
5532 } else if (xop >= 0x20 && xop < 0x24) {
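/* xop 0x20-0x23: FP loads (ldf, ldfsr/ldxfsr, ldqf, lddf). */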
5533 if (gen_trap_ifnofpu(dc)) {
5534 goto jmp_insn;
5535 }
5536 switch (xop) {
5537 case 0x20: /* ldf, load fpreg */
5538 gen_address_mask(dc, cpu_addr);
5539 cpu_dst_32 = gen_dest_fpr_F(dc);
5540 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5541 dc->mem_idx, MO_TEUL);
5542 gen_store_fpr_F(dc, rd, cpu_dst_32);
5543 break;
5544 case 0x21: /* ldfsr, V9 ldxfsr */
5545 #ifdef TARGET_SPARC64
5546 gen_address_mask(dc, cpu_addr);
5547 if (rd == 1) {
5548 TCGv_i64 t64 = tcg_temp_new_i64();
5549 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5550 dc->mem_idx, MO_TEQ);
5551 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5552 tcg_temp_free_i64(t64);
5553 break;
5554 }
5555 #endif
5556 cpu_dst_32 = get_temp_i32(dc);
5557 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5558 dc->mem_idx, MO_TEUL);
5559 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5560 break;
5561 case 0x22: /* ldqf, load quad fpreg */
5562 CHECK_FPU_FEATURE(dc, FLOAT128);
5563 gen_address_mask(dc, cpu_addr);
5564 cpu_src1_64 = tcg_temp_new_i64();
5565 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5566 MO_TEQ | MO_ALIGN_4);
5567 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5568 cpu_src2_64 = tcg_temp_new_i64();
5569 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5570 MO_TEQ | MO_ALIGN_4);
5571 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5572 tcg_temp_free_i64(cpu_src1_64);
5573 tcg_temp_free_i64(cpu_src2_64);
5574 break;
5575 case 0x23: /* lddf, load double fpreg */
5576 gen_address_mask(dc, cpu_addr);
5577 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5578 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5579 MO_TEQ | MO_ALIGN_4);
5580 gen_store_fpr_D(dc, rd, cpu_dst_64);
5581 break;
5582 default:
5583 goto illegal_insn;
5584 }
5585 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5586 xop == 0xe || xop == 0x1e) {
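/* Integer stores: st/stb/sth/std, their alternate-space forms,
   and V9 stx/stxa. */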
5587 TCGv cpu_val = gen_load_gpr(dc, rd);
5588
5589 switch (xop) {
5590 case 0x4: /* st, store word */
5591 gen_address_mask(dc, cpu_addr);
5592 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5593 break;
5594 case 0x5: /* stb, store byte */
5595 gen_address_mask(dc, cpu_addr);
5596 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5597 break;
5598 case 0x6: /* sth, store halfword */
5599 gen_address_mask(dc, cpu_addr);
5600 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5601 break;
5602 case 0x7: /* std, store double word */
5603 if (rd & 1)
5604 goto illegal_insn;
5605 else {
5606 TCGv_i64 t64;
5607 TCGv lo;
5608
5609 gen_address_mask(dc, cpu_addr);
5610 lo = gen_load_gpr(dc, rd + 1);
5611 t64 = tcg_temp_new_i64();
5612 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5613 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5614 tcg_temp_free_i64(t64);
5615 }
5616 break;
5617 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5618 case 0x14: /* sta, V9 stwa, store word alternate */
5619 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5620 break;
5621 case 0x15: /* stba, store byte alternate */
5622 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5623 break;
5624 case 0x16: /* stha, store halfword alternate */
5625 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5626 break;
5627 case 0x17: /* stda, store double word alternate */
5628 if (rd & 1) {
5629 goto illegal_insn;
5630 }
5631 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5632 break;
5633 #endif
5634 #ifdef TARGET_SPARC64
5635 case 0x0e: /* V9 stx */
5636 gen_address_mask(dc, cpu_addr);
5637 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5638 break;
5639 case 0x1e: /* V9 stxa */
5640 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5641 break;
5642 #endif
5643 default:
5644 goto illegal_insn;
5645 }
5646 } else if (xop > 0x23 && xop < 0x28) {
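/* xop 0x24-0x27: FP stores (stf, stfsr/stxfsr, stqf or stdfq,
   stdf). */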
5647 if (gen_trap_ifnofpu(dc)) {
5648 goto jmp_insn;
5649 }
5650 switch (xop) {
5651 case 0x24: /* stf, store fpreg */
5652 gen_address_mask(dc, cpu_addr);
5653 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5654 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5655 dc->mem_idx, MO_TEUL);
5656 break;
5657 case 0x25: /* stfsr, V9 stxfsr */
5658 {
5659 #ifdef TARGET_SPARC64
5660 gen_address_mask(dc, cpu_addr);
5661 if (rd == 1) {
5662 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5663 break;
5664 }
5665 #endif
5666 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5667 }
5668 break;
5669 case 0x26:
5670 #ifdef TARGET_SPARC64
5671 /* V9 stqf, store quad fpreg */
5672 CHECK_FPU_FEATURE(dc, FLOAT128);
5673 gen_address_mask(dc, cpu_addr);
5674 /* ??? While stqf only requires 4-byte alignment, it is
5675 legal for the cpu to signal the unaligned exception.
5676 The OS trap handler is then required to fix it up.
5677 For qemu, this avoids having to probe the second page
5678 before performing the first write. */
5679 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5680 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5681 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5682 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5683 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5684 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5685 dc->mem_idx, MO_TEQ);
5686 break;
5687 #else /* !TARGET_SPARC64 */
5688 /* stdfq, store floating point queue */
5689 #if defined(CONFIG_USER_ONLY)
5690 goto illegal_insn;
5691 #else
5692 if (!supervisor(dc))
5693 goto priv_insn;
5694 if (gen_trap_ifnofpu(dc)) {
5695 goto jmp_insn;
5696 }
5697 goto nfq_insn;
5698 #endif
5699 #endif
5700 case 0x27: /* stdf, store double fpreg */
5701 gen_address_mask(dc, cpu_addr);
5702 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5703 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5704 MO_TEQ | MO_ALIGN_4);
5705 break;
5706 default:
5707 goto illegal_insn;
5708 }
5709 } else if (xop > 0x33 && xop < 0x3f) {
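/* Remaining stores: FP alternate-space stores and the casa/casxa
   compare-and-swap forms (coprocessor stores on sparc32). */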
5710 switch (xop) {
5711 #ifdef TARGET_SPARC64
5712 case 0x34: /* V9 stfa */
5713 if (gen_trap_ifnofpu(dc)) {
5714 goto jmp_insn;
5715 }
5716 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5717 break;
5718 case 0x36: /* V9 stqfa */
5719 {
5720 CHECK_FPU_FEATURE(dc, FLOAT128);
5721 if (gen_trap_ifnofpu(dc)) {
5722 goto jmp_insn;
5723 }
5724 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5725 }
5726 break;
5727 case 0x37: /* V9 stdfa */
5728 if (gen_trap_ifnofpu(dc)) {
5729 goto jmp_insn;
5730 }
5731 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5732 break;
5733 case 0x3e: /* V9 casxa */
5734 rs2 = GET_FIELD(insn, 27, 31);
5735 cpu_src2 = gen_load_gpr(dc, rs2);
5736 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5737 break;
5738 #else
5739 case 0x34: /* stc */
5740 case 0x35: /* stcsr */
5741 case 0x36: /* stdcq */
5742 case 0x37: /* stdc */
5743 goto ncp_insn;
5744 #endif
5745 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5746 case 0x3c: /* V9 or LEON3 casa */
5747 #ifndef TARGET_SPARC64
5748 CHECK_IU_FEATURE(dc, CASA);
5749 #endif
5750 rs2 = GET_FIELD(insn, 27, 31);
5751 cpu_src2 = gen_load_gpr(dc, rs2);
5752 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5753 break;
5754 #endif
5755 default:
5756 goto illegal_insn;
5757 }
5758 } else {
5759 goto illegal_insn;
5760 }
5761 }
5762 break;
5763 }
5764 /* default case for non-jump instructions */
5765 if (dc->npc == DYNAMIC_PC) {
5766 dc->pc = DYNAMIC_PC;
5767 gen_op_next_insn();
5768 } else if (dc->npc == JUMP_PC) {
5769 /* we can do a static jump */
5770 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5771 dc->base.is_jmp = DISAS_NORETURN;
5772 } else {
5773 dc->pc = dc->npc;
5774 dc->npc = dc->npc + 4;
5775 }
5776 jmp_insn:
5777 goto egress;
5778 illegal_insn:
5779 gen_exception(dc, TT_ILL_INSN);
5780 goto egress;
5781 unimp_flush:
5782 gen_exception(dc, TT_UNIMP_FLUSH);
5783 goto egress;
5784 #if !defined(CONFIG_USER_ONLY)
5785 priv_insn:
5786 gen_exception(dc, TT_PRIV_INSN);
5787 goto egress;
5788 #endif
5789 nfpu_insn:
5790 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5791 goto egress;
5792 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5793 nfq_insn:
5794 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5795 goto egress;
5796 #endif
5797 #ifndef TARGET_SPARC64
5798 ncp_insn:
5799 gen_exception(dc, TT_NCP_INSN);
5800 goto egress;
5801 #endif
5802 egress:
5803 if (dc->n_t32 != 0) {
5804 int i;
5805 for (i = dc->n_t32 - 1; i >= 0; --i) {
5806 tcg_temp_free_i32(dc->t32[i]);
5807 }
5808 dc->n_t32 = 0;
5809 }
5810 if (dc->n_ttl != 0) {
5811 int i;
5812 for (i = dc->n_ttl - 1; i >= 0; --i) {
5813 tcg_temp_free(dc->ttl[i]);
5814 }
5815 dc->n_ttl = 0;
5816 }
5817 }
5818
5819 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5820 {
5821 DisasContext *dc = container_of(dcbase, DisasContext, base);
5822 CPUSPARCState *env = cs->env_ptr;
5823 int bound;
5824
5825 dc->pc = dc->base.pc_first;
5826 dc->npc = (target_ulong)dc->base.tb->cs_base;
5827 dc->cc_op = CC_OP_DYNAMIC;
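/* CC_OP_DYNAMIC: the flag state at TB entry is unknown, so cc_op
   must be fetched from env until an instruction sets it. */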
5828 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5829 dc->def = &env->def;
5830 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5831 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5832 #ifndef CONFIG_USER_ONLY
5833 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5834 #endif
5835 #ifdef TARGET_SPARC64
5836 dc->fprs_dirty = 0;
5837 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5838 #ifndef CONFIG_USER_ONLY
5839 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5840 #endif
5841 #endif
5842 /*
5843 * if we reach a page boundary, we stop generation so that the
5844 * PC of a TT_TFAULT exception is always in the right page
5845 */
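/* pc_first | TARGET_PAGE_MASK sets every bit above the page
   offset, so its negation is the byte count left in the page;
   dividing by 4 converts that to an instruction count. */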
5846 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5847 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5848 }
5849
5850 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5851 {
5852 }
5853
5854 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5855 {
5856 DisasContext *dc = container_of(dcbase, DisasContext, base);
5857
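/* With a conditional delay slot pending (JUMP_PC), record the
   taken target with the JUMP_PC bit set; insn addresses are
   4-aligned, so restore_state_to_opc() can strip the flag and
   pick the right target. */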
5858 if (dc->npc & JUMP_PC) {
5859 assert(dc->jump_pc[1] == dc->pc + 4);
5860 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5861 } else {
5862 tcg_gen_insn_start(dc->pc, dc->npc);
5863 }
5864 }
5865
5866 static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
5867 const CPUBreakpoint *bp)
5868 {
5869 DisasContext *dc = container_of(dcbase, DisasContext, base);
5870
5871 if (dc->pc != dc->base.pc_first) {
5872 save_state(dc);
5873 }
5874 gen_helper_debug(cpu_env);
5875 tcg_gen_exit_tb(NULL, 0);
5876 dc->base.is_jmp = DISAS_NORETURN;
5877 /* update pc_next so that the current instruction is included in tb->size */
5878 dc->base.pc_next += 4;
5879 return true;
5880 }
5881
5882 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5883 {
5884 DisasContext *dc = container_of(dcbase, DisasContext, base);
5885 CPUSPARCState *env = cs->env_ptr;
5886 unsigned int insn;
5887
5888 insn = translator_ldl(env, dc->pc);
5889 dc->base.pc_next += 4;
5890 disas_sparc_insn(dc, insn);
5891
5892 if (dc->base.is_jmp == DISAS_NORETURN) {
5893 return;
5894 }
5895 if (dc->pc != dc->base.pc_next) {
5896 dc->base.is_jmp = DISAS_TOO_MANY;
5897 }
5898 }
5899
5900 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5901 {
5902 DisasContext *dc = container_of(dcbase, DisasContext, base);
5903
5904 switch (dc->base.is_jmp) {
5905 case DISAS_NEXT:
5906 case DISAS_TOO_MANY:
5907 if (dc->pc != DYNAMIC_PC &&
5908 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5909 /* static PC and NPC: we can use direct chaining */
5910 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5911 } else {
5912 if (dc->pc != DYNAMIC_PC) {
5913 tcg_gen_movi_tl(cpu_pc, dc->pc);
5914 }
5915 save_npc(dc);
5916 tcg_gen_exit_tb(NULL, 0);
5917 }
5918 break;
5919
5920 case DISAS_NORETURN:
5921 break;
5922
5923 case DISAS_EXIT:
5924 /* Exit TB */
5925 save_state(dc);
5926 tcg_gen_exit_tb(NULL, 0);
5927 break;
5928
5929 default:
5930 g_assert_not_reached();
5931 }
5932 }
5933
5934 static void sparc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
5935 {
5936 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
5937 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
5938 }
5939
5940 static const TranslatorOps sparc_tr_ops = {
5941 .init_disas_context = sparc_tr_init_disas_context,
5942 .tb_start = sparc_tr_tb_start,
5943 .insn_start = sparc_tr_insn_start,
5944 .breakpoint_check = sparc_tr_breakpoint_check,
5945 .translate_insn = sparc_tr_translate_insn,
5946 .tb_stop = sparc_tr_tb_stop,
5947 .disas_log = sparc_tr_disas_log,
5948 };
5949
5950 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
5951 {
5952 DisasContext dc = {};
5953
5954 translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
5955 }
5956
5957 void sparc_tcg_init(void)
5958 {
5959 static const char gregnames[32][4] = {
5960 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5961 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5962 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5963 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5964 };
5965 static const char fregnames[32][4] = {
5966 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5967 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5968 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5969 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5970 };
5971
5972 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5973 #ifdef TARGET_SPARC64
5974 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5975 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5976 #else
5977 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5978 #endif
5979 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5980 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5981 };
5982
5983 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5984 #ifdef TARGET_SPARC64
5985 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5986 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5987 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5988 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5989 "hstick_cmpr" },
5990 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5991 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5992 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5993 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5994 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5995 #endif
5996 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5997 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5998 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5999 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
6000 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
6001 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
6002 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
6003 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
6004 #ifndef CONFIG_USER_ONLY
6005 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
6006 #endif
6007 };
6008
6009 unsigned int i;
6010
6011 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
6012 offsetof(CPUSPARCState, regwptr),
6013 "regwptr");
6014
6015 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
6016 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
6017 }
6018
6019 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
6020 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
6021 }
6022
6023 cpu_regs[0] = NULL;
6024 for (i = 1; i < 8; ++i) {
6025 cpu_regs[i] = tcg_global_mem_new(cpu_env,
6026 offsetof(CPUSPARCState, gregs[i]),
6027 gregnames[i]);
6028 }
6029
6030 for (i = 8; i < 32; ++i) {
6031 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
6032 (i - 8) * sizeof(target_ulong),
6033 gregnames[i]);
6034 }
6035
6036 for (i = 0; i < TARGET_DPREGS; i++) {
6037 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
6038 offsetof(CPUSPARCState, fpr[i]),
6039 fregnames[i]);
6040 }
6041 }
6042
6043 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
6044 target_ulong *data)
6045 {
6046 target_ulong pc = data[0];
6047 target_ulong npc = data[1];
6048
6049 env->pc = pc;
6050 if (npc == DYNAMIC_PC) {
6051 /* dynamic NPC: already stored */
6052 } else if (npc & JUMP_PC) {
6053 /* jump PC: use 'cond' and the jump targets of the translation */
6054 if (env->cond) {
6055 env->npc = npc & ~3;
6056 } else {
6057 env->npc = pc + 4;
6058 }
6059 } else {
6060 env->npc = npc;
6061 }
6062 }