/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"


#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values,
                         chosen at run time from jump_pc[0]/jump_pc[1] */

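/* Editor's note (illustrative, not in the original source): dc->pc and
   dc->npc usually hold known instruction addresses.  The sentinels above
   stand in when the value is only known at run time: DYNAMIC_PC means the
   address lives only in cpu_pc/cpu_npc, while JUMP_PC means npc is one of
   jump_pc[0]/jump_pc[1], selected by cpu_cond.  For example, after a
   non-annulled conditional branch, gen_branch_n() sets npc = JUMP_PC with
   jump_pc[0] = branch target and jump_pc[1] = fall-through (npc + 4). */
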
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

static target_ulong gen_opc_npc[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit numbering: bit 0 is the MSB (2^31)
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0 (the LSB)
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
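
/* Worked example (illustrative, not part of the original source): in a Bicc
   instruction the condition field occupies machine bits 28..25; in the
   MSB-first numbering of GET_FIELD those are bits 3..6, so
       cond = GET_FIELD(insn, 3, 6) == (insn >> 25) & 0xf
   exactly as used by do_branch() below. */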

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

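/* Worked example (illustrative, not part of the original source):
   GET_FIELDs() feeds sign_extend() to materialize simm13 immediates, e.g.
   in get_src2() below.  sign_extend(0x1fff, 13) computes
   (0x1fff << 19) >> 19: the left shift gives 0xfff80000 and the arithmetic
   right shift yields -1, as expected for an all-ones 13-bit immediate. */
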
#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}

/* floating point register moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}

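/* Editor's note (illustrative, not in the original source): only the global
   registers are TCG globals (cpu_gregs[]); %g0 always reads as zero and
   stores to it are discarded by gen_store_gpr(), while the windowed
   registers 8..31 are loaded/stored through cpu_regwptr.  A typical decode
   sequence under these assumptions is:

       TCGv src = gen_load_gpr(dc, rs);   // reads 0 for %g0
       TCGv dst = gen_dest_gpr(dc, rd);   // scratch temp for %g0
       ...compute into dst...
       gen_store_gpr(dc, rd, dst);        // no-op for %g0
*/
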
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}

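/* Editor's note (illustrative, not in the original source): exiting with
   (uintptr_t)tb + tb_num returns the current TB pointer with the chaining
   slot index (0 or 1) encoded in its low bits; the execution loop uses it
   to patch the slot emitted by tcg_gen_goto_tb() so the two TBs are linked
   directly.  Exiting with 0 requests a normal TB lookup instead. */
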
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the sub that
               generated the borrow in the first place. */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

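/* Editor's note (illustrative, not in the original source): MULScc is one
   step of the V8 software shift-and-add multiply.  The addend src2 is
   forced to zero when the low bit of %y (the multiplier) is 0, the low bit
   of the partial sum drops into %y from the top, and N ^ V of the previous
   condition codes is shifted into bit 31 of the partial sum.  Roughly 32
   MULScc steps plus a final adjustment yield the 64-bit product split
   across the result register and %y. */
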
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
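/* Editor's note (illustrative, not in the original source): the fcc_offset
   passed to the helpers below selects the condition-code set inside the
   FSR.  fcc0 sits at FSR bits 11:10 (FSR_FCC0_SHIFT/FSR_FCC1_SHIFT), and on
   V9 fcc1, fcc2 and fcc3 sit at bits 33:32, 35:34 and 37:36, hence the
   offsets 32 - 10, 34 - 10 and 36 - 10 computed in gen_fcompare(). */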
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* Call this function before using the condition register, as it may still
   be encoding a pending conditional jump (npc == JUMP_PC).  */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended. */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result. */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

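/* Editor's note (illustrative, not in the original source): gen_compare()
   either maps the condition directly onto the cpu_cc_* globals (g1/g2 =
   true, so free_compare() must not free them) or synthesizes fresh
   temporaries holding a boolean (is_bool = true, to be tested with
   TCG_COND_NE against c2 == 0). */
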
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst. */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

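/* Editor's note (illustrative, not in the original source): for alternate
   space accesses the i bit (IS_IMM) selects between the %asi register
   (i = 1) and the immediate asi field in bits 12..5 of the instruction
   (i = 0), which GET_FIELD(insn, 19, 26) extracts. */
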
static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif

static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}

#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

2251     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2252        or fold the comparison down to 32 bits and use movcond_i32. Choose
2253        the latter. */
2254 c32 = tcg_temp_new_i32();
2255 if (cmp->is_bool) {
2256 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2257 } else {
2258 TCGv_i64 c64 = tcg_temp_new_i64();
2259 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2260 tcg_gen_extrl_i64_i32(c32, c64);
2261 tcg_temp_free_i64(c64);
2262 }
2263
2264 s1 = gen_load_fpr_F(dc, rs);
2265 s2 = gen_load_fpr_F(dc, rd);
2266 dst = gen_dest_fpr_F(dc);
2267 zero = tcg_const_i32(0);
2268
2269 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2270
2271 tcg_temp_free_i32(c32);
2272 tcg_temp_free_i32(zero);
2273 gen_store_fpr_F(dc, rd, dst);
2274 }
2275
2276 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2277 {
2278 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2279 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2280 gen_load_fpr_D(dc, rs),
2281 gen_load_fpr_D(dc, rd));
2282 gen_store_fpr_D(dc, rd, dst);
2283 }
2284
2285 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2286 {
2287 int qd = QFPREG(rd);
2288 int qs = QFPREG(rs);
2289
2290 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2291 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2292 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2293 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2294
2295 gen_update_fprs_dirty(qd);
2296 }
2297
2298 #ifndef CONFIG_USER_ONLY
2299 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2300 {
2301 TCGv_i32 r_tl = tcg_temp_new_i32();
2302
2303 /* load env->tl into r_tl */
2304 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2305
2306     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2307 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2308
2309 /* calculate offset to current trap state from env->ts, reuse r_tl */
2310 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2311 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2312
2313 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2314 {
2315 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2316 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2317 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2318 tcg_temp_free_ptr(r_tl_tmp);
2319 }
2320
2321 tcg_temp_free_i32(r_tl);
2322 }
2323 #endif
2324
2325 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2326 int width, bool cc, bool left)
2327 {
2328 TCGv lo1, lo2, t1, t2;
2329 uint64_t amask, tabl, tabr;
2330 int shift, imask, omask;
2331
2332 if (cc) {
2333 tcg_gen_mov_tl(cpu_cc_src, s1);
2334 tcg_gen_mov_tl(cpu_cc_src2, s2);
2335 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2336 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2337 dc->cc_op = CC_OP_SUB;
2338 }
2339
2340 /* Theory of operation: there are two tables, left and right (not to
2341 be confused with the left and right versions of the opcode). These
2342 are indexed by the low 3 bits of the inputs. To make things "easy",
2343 these tables are loaded into two constants, TABL and TABR below.
2344 The operation index = (input & imask) << shift calculates the index
2345 into the constant, while val = (table >> index) & omask calculates
2346 the value we're looking for. */
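    /* Worked example: for edge8 (left), an address with low bits
       (s1 & 7) == 1 gives index = 1 << 3 = 8, and the mask is
       (0x80c0e0f0f8fcfeff >> 8) & 0xff == 0xfe. */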
2347 switch (width) {
2348 case 8:
2349 imask = 0x7;
2350 shift = 3;
2351 omask = 0xff;
2352 if (left) {
2353 tabl = 0x80c0e0f0f8fcfeffULL;
2354 tabr = 0xff7f3f1f0f070301ULL;
2355 } else {
2356 tabl = 0x0103070f1f3f7fffULL;
2357 tabr = 0xfffefcf8f0e0c080ULL;
2358 }
2359 break;
2360 case 16:
2361 imask = 0x6;
2362 shift = 1;
2363 omask = 0xf;
2364 if (left) {
2365 tabl = 0x8cef;
2366 tabr = 0xf731;
2367 } else {
2368 tabl = 0x137f;
2369 tabr = 0xfec8;
2370 }
2371 break;
2372 case 32:
2373 imask = 0x4;
2374 shift = 0;
2375 omask = 0x3;
2376 if (left) {
2377 tabl = (2 << 2) | 3;
2378 tabr = (3 << 2) | 1;
2379 } else {
2380 tabl = (1 << 2) | 3;
2381 tabr = (3 << 2) | 2;
2382 }
2383 break;
2384 default:
2385 abort();
2386 }
2387
2388 lo1 = tcg_temp_new();
2389 lo2 = tcg_temp_new();
2390 tcg_gen_andi_tl(lo1, s1, imask);
2391 tcg_gen_andi_tl(lo2, s2, imask);
2392 tcg_gen_shli_tl(lo1, lo1, shift);
2393 tcg_gen_shli_tl(lo2, lo2, shift);
2394
2395 t1 = tcg_const_tl(tabl);
2396 t2 = tcg_const_tl(tabr);
2397 tcg_gen_shr_tl(lo1, t1, lo1);
2398 tcg_gen_shr_tl(lo2, t2, lo2);
2399 tcg_gen_andi_tl(dst, lo1, omask);
2400 tcg_gen_andi_tl(lo2, lo2, omask);
2401
2402 amask = -8;
2403 if (AM_CHECK(dc)) {
2404 amask &= 0xffffffffULL;
2405 }
2406 tcg_gen_andi_tl(s1, s1, amask);
2407 tcg_gen_andi_tl(s2, s2, amask);
2408
2409 /* We want to compute
2410 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2411 We've already done dst = lo1, so this reduces to
2412 dst &= (s1 == s2 ? -1 : lo2)
2413        which we perform by
2414 lo2 |= -(s1 == s2)
2415 dst &= lo2
2416 */
2417 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2418 tcg_gen_neg_tl(t1, t1);
2419 tcg_gen_or_tl(lo2, lo2, t1);
2420 tcg_gen_and_tl(dst, dst, lo2);
2421
2422 tcg_temp_free(lo1);
2423 tcg_temp_free(lo2);
2424 tcg_temp_free(t1);
2425 tcg_temp_free(t2);
2426 }
2427
2428 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2429 {
2430 TCGv tmp = tcg_temp_new();
2431
2432 tcg_gen_add_tl(tmp, s1, s2);
2433 tcg_gen_andi_tl(dst, tmp, -8);
2434 if (left) {
2435 tcg_gen_neg_tl(tmp, tmp);
2436 }
2437 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2438
2439 tcg_temp_free(tmp);
2440 }
2441
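/* faligndata consumes the byte offset that alignaddr left in GSR.align
   (the low three bits of GSR) to funnel-shift two source doublewords. */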
2442 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2443 {
2444 TCGv t1, t2, shift;
2445
2446 t1 = tcg_temp_new();
2447 t2 = tcg_temp_new();
2448 shift = tcg_temp_new();
2449
2450 tcg_gen_andi_tl(shift, gsr, 7);
2451 tcg_gen_shli_tl(shift, shift, 3);
2452 tcg_gen_shl_tl(t1, s1, shift);
2453
2454 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2455 shift of (up to 63) followed by a constant shift of 1. */
2456 tcg_gen_xori_tl(shift, shift, 63);
2457 tcg_gen_shr_tl(t2, s2, shift);
2458 tcg_gen_shri_tl(t2, t2, 1);
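    /* Net effect: t2 = (shift == 0 ? 0 : s2 >> (64 - shift)), because
       (63 ^ shift) + 1 == 64 - shift for any shift in [0, 63]. */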
2459
2460 tcg_gen_or_tl(dst, t1, t2);
2461
2462 tcg_temp_free(t1);
2463 tcg_temp_free(t2);
2464 tcg_temp_free(shift);
2465 }
2466 #endif
2467
2468 #define CHECK_IU_FEATURE(dc, FEATURE) \
2469 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2470 goto illegal_insn;
2471 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2472 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2473 goto nfpu_insn;
2474
2475 /* before an instruction, dc->pc must be static */
2476 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2477 {
2478 unsigned int opc, rs1, rs2, rd;
2479 TCGv cpu_src1, cpu_src2;
2480 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2481 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2482 target_long simm;
2483
2484 opc = GET_FIELD(insn, 0, 1);
2485 rd = GET_FIELD(insn, 2, 6);
2486
2487 switch (opc) {
2488 case 0: /* branches/sethi */
2489 {
2490 unsigned int xop = GET_FIELD(insn, 7, 9);
2491 int32_t target;
2492 switch (xop) {
2493 #ifdef TARGET_SPARC64
2494 case 0x1: /* V9 BPcc */
2495 {
2496 int cc;
2497
2498 target = GET_FIELD_SP(insn, 0, 18);
2499 target = sign_extend(target, 19);
2500 target <<= 2;
2501 cc = GET_FIELD_SP(insn, 20, 21);
2502 if (cc == 0)
2503 do_branch(dc, target, insn, 0);
2504 else if (cc == 2)
2505 do_branch(dc, target, insn, 1);
2506 else
2507 goto illegal_insn;
2508 goto jmp_insn;
2509 }
2510 case 0x3: /* V9 BPr */
2511 {
2512 target = GET_FIELD_SP(insn, 0, 13) |
2513 (GET_FIELD_SP(insn, 20, 21) << 14);
2514 target = sign_extend(target, 16);
2515 target <<= 2;
2516 cpu_src1 = get_src1(dc, insn);
2517 do_branch_reg(dc, target, insn, cpu_src1);
2518 goto jmp_insn;
2519 }
2520 case 0x5: /* V9 FBPcc */
2521 {
2522 int cc = GET_FIELD_SP(insn, 20, 21);
2523 if (gen_trap_ifnofpu(dc)) {
2524 goto jmp_insn;
2525 }
2526 target = GET_FIELD_SP(insn, 0, 18);
2527 target = sign_extend(target, 19);
2528 target <<= 2;
2529 do_fbranch(dc, target, insn, cc);
2530 goto jmp_insn;
2531 }
2532 #else
2533 case 0x7: /* CBN+x */
2534 {
2535 goto ncp_insn;
2536 }
2537 #endif
2538 case 0x2: /* BN+x */
2539 {
2540 target = GET_FIELD(insn, 10, 31);
2541 target = sign_extend(target, 22);
2542 target <<= 2;
2543 do_branch(dc, target, insn, 0);
2544 goto jmp_insn;
2545 }
2546 case 0x6: /* FBN+x */
2547 {
2548 if (gen_trap_ifnofpu(dc)) {
2549 goto jmp_insn;
2550 }
2551 target = GET_FIELD(insn, 10, 31);
2552 target = sign_extend(target, 22);
2553 target <<= 2;
2554 do_fbranch(dc, target, insn, 0);
2555 goto jmp_insn;
2556 }
2557 case 0x4: /* SETHI */
2558 /* Special-case %g0 because that's the canonical nop. */
2559 if (rd) {
2560 uint32_t value = GET_FIELD(insn, 10, 31);
2561 TCGv t = gen_dest_gpr(dc, rd);
2562 tcg_gen_movi_tl(t, value << 10);
2563 gen_store_gpr(dc, rd, t);
2564 }
2565 break;
2566 case 0x0: /* UNIMPL */
2567 default:
2568 goto illegal_insn;
2569 }
2570 break;
2571 }
2572 break;
2573 case 1: /*CALL*/
2574 {
2575 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2576 TCGv o7 = gen_dest_gpr(dc, 15);
2577
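        /* CALL: %o7 receives the address of the CALL itself, and the
           target is PC-relative with a 30-bit word displacement. */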
2578 tcg_gen_movi_tl(o7, dc->pc);
2579 gen_store_gpr(dc, 15, o7);
2580 target += dc->pc;
2581 gen_mov_pc_npc(dc);
2582 #ifdef TARGET_SPARC64
2583 if (unlikely(AM_CHECK(dc))) {
2584 target &= 0xffffffffULL;
2585 }
2586 #endif
2587 dc->npc = target;
2588 }
2589 goto jmp_insn;
2590 case 2: /* FPU & Logical Operations */
2591 {
2592 unsigned int xop = GET_FIELD(insn, 7, 12);
2593 TCGv cpu_dst = get_temp_tl(dc);
2594 TCGv cpu_tmp0;
2595
2596 if (xop == 0x3a) { /* generate trap */
2597 int cond = GET_FIELD(insn, 3, 6);
2598 TCGv_i32 trap;
2599 TCGLabel *l1 = NULL;
2600 int mask;
2601
2602 if (cond == 0) {
2603 /* Trap never. */
2604 break;
2605 }
2606
2607 save_state(dc);
2608
2609 if (cond != 8) {
2610 /* Conditional trap. */
2611 DisasCompare cmp;
2612 #ifdef TARGET_SPARC64
2613 /* V9 icc/xcc */
2614 int cc = GET_FIELD_SP(insn, 11, 12);
2615 if (cc == 0) {
2616 gen_compare(&cmp, 0, cond, dc);
2617 } else if (cc == 2) {
2618 gen_compare(&cmp, 1, cond, dc);
2619 } else {
2620 goto illegal_insn;
2621 }
2622 #else
2623 gen_compare(&cmp, 0, cond, dc);
2624 #endif
2625 l1 = gen_new_label();
2626 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2627 cmp.c1, cmp.c2, l1);
2628 free_compare(&cmp);
2629 }
2630
2631 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2632 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
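            /* The trap number will be ((rs1 + rs2_or_imm) & mask) + TT_TRAP;
               mask is zeroed below once the value is known to be constant. */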
2633
2634 /* Don't use the normal temporaries, as they may well have
2635 gone out of scope with the branch above. While we're
2636 doing that we might as well pre-truncate to 32-bit. */
2637 trap = tcg_temp_new_i32();
2638
2639 rs1 = GET_FIELD_SP(insn, 14, 18);
2640 if (IS_IMM) {
2641 rs2 = GET_FIELD_SP(insn, 0, 6);
2642 if (rs1 == 0) {
2643 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2644 /* Signal that the trap value is fully constant. */
2645 mask = 0;
2646 } else {
2647 TCGv t1 = gen_load_gpr(dc, rs1);
2648 tcg_gen_trunc_tl_i32(trap, t1);
2649 tcg_gen_addi_i32(trap, trap, rs2);
2650 }
2651 } else {
2652 TCGv t1, t2;
2653 rs2 = GET_FIELD_SP(insn, 0, 4);
2654 t1 = gen_load_gpr(dc, rs1);
2655 t2 = gen_load_gpr(dc, rs2);
2656 tcg_gen_add_tl(t1, t1, t2);
2657 tcg_gen_trunc_tl_i32(trap, t1);
2658 }
2659 if (mask != 0) {
2660 tcg_gen_andi_i32(trap, trap, mask);
2661 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2662 }
2663
2664 gen_helper_raise_exception(cpu_env, trap);
2665 tcg_temp_free_i32(trap);
2666
2667 if (cond == 8) {
2668 /* An unconditional trap ends the TB. */
2669 dc->is_br = 1;
2670 goto jmp_insn;
2671 } else {
2672 /* A conditional trap falls through to the next insn. */
2673 gen_set_label(l1);
2674 break;
2675 }
2676 } else if (xop == 0x28) {
2677 rs1 = GET_FIELD(insn, 13, 17);
2678 switch(rs1) {
2679 case 0: /* rdy */
2680 #ifndef TARGET_SPARC64
2681 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2682 manual, rdy on the microSPARC
2683 II */
2684 case 0x0f: /* stbar in the SPARCv8 manual,
2685 rdy on the microSPARC II */
2686 case 0x10 ... 0x1f: /* implementation-dependent in the
2687 SPARCv8 manual, rdy on the
2688 microSPARC II */
2689 /* Read Asr17 */
2690 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2691 TCGv t = gen_dest_gpr(dc, rd);
2692 /* Read Asr17 for a Leon3 monoprocessor */
2693 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2694 gen_store_gpr(dc, rd, t);
2695 break;
2696 }
2697 #endif
2698 gen_store_gpr(dc, rd, cpu_y);
2699 break;
2700 #ifdef TARGET_SPARC64
2701 case 0x2: /* V9 rdccr */
2702 update_psr(dc);
2703 gen_helper_rdccr(cpu_dst, cpu_env);
2704 gen_store_gpr(dc, rd, cpu_dst);
2705 break;
2706 case 0x3: /* V9 rdasi */
2707 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2708 gen_store_gpr(dc, rd, cpu_dst);
2709 break;
2710 case 0x4: /* V9 rdtick */
2711 {
2712 TCGv_ptr r_tickptr;
2713
2714 r_tickptr = tcg_temp_new_ptr();
2715 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2716 offsetof(CPUSPARCState, tick));
2717 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2718 tcg_temp_free_ptr(r_tickptr);
2719 gen_store_gpr(dc, rd, cpu_dst);
2720 }
2721 break;
2722 case 0x5: /* V9 rdpc */
2723 {
2724 TCGv t = gen_dest_gpr(dc, rd);
2725 if (unlikely(AM_CHECK(dc))) {
2726 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2727 } else {
2728 tcg_gen_movi_tl(t, dc->pc);
2729 }
2730 gen_store_gpr(dc, rd, t);
2731 }
2732 break;
2733 case 0x6: /* V9 rdfprs */
2734 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2735 gen_store_gpr(dc, rd, cpu_dst);
2736 break;
2737 case 0xf: /* V9 membar */
2738 break; /* no effect */
2739 case 0x13: /* Graphics Status */
2740 if (gen_trap_ifnofpu(dc)) {
2741 goto jmp_insn;
2742 }
2743 gen_store_gpr(dc, rd, cpu_gsr);
2744 break;
2745 case 0x16: /* Softint */
2746 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2747 gen_store_gpr(dc, rd, cpu_dst);
2748 break;
2749 case 0x17: /* Tick compare */
2750 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2751 break;
2752 case 0x18: /* System tick */
2753 {
2754 TCGv_ptr r_tickptr;
2755
2756 r_tickptr = tcg_temp_new_ptr();
2757 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2758 offsetof(CPUSPARCState, stick));
2759 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2760 tcg_temp_free_ptr(r_tickptr);
2761 gen_store_gpr(dc, rd, cpu_dst);
2762 }
2763 break;
2764 case 0x19: /* System tick compare */
2765 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2766 break;
2767 case 0x10: /* Performance Control */
2768 case 0x11: /* Performance Instrumentation Counter */
2769 case 0x12: /* Dispatch Control */
2770 case 0x14: /* Softint set, WO */
2771 case 0x15: /* Softint clear, WO */
2772 #endif
2773 default:
2774 goto illegal_insn;
2775 }
2776 #if !defined(CONFIG_USER_ONLY)
2777 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2778 #ifndef TARGET_SPARC64
2779 if (!supervisor(dc)) {
2780 goto priv_insn;
2781 }
2782 update_psr(dc);
2783 gen_helper_rdpsr(cpu_dst, cpu_env);
2784 #else
2785 CHECK_IU_FEATURE(dc, HYPV);
2786 if (!hypervisor(dc))
2787 goto priv_insn;
2788 rs1 = GET_FIELD(insn, 13, 17);
2789 switch (rs1) {
2790 case 0: // hpstate
2791 // gen_op_rdhpstate();
2792 break;
2793 case 1: // htstate
2794 // gen_op_rdhtstate();
2795 break;
2796 case 3: // hintp
2797 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2798 break;
2799 case 5: // htba
2800 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2801 break;
2802 case 6: // hver
2803 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2804 break;
2805 case 31: // hstick_cmpr
2806 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2807 break;
2808 default:
2809 goto illegal_insn;
2810 }
2811 #endif
2812 gen_store_gpr(dc, rd, cpu_dst);
2813 break;
2814 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2815 if (!supervisor(dc)) {
2816 goto priv_insn;
2817 }
2818 cpu_tmp0 = get_temp_tl(dc);
2819 #ifdef TARGET_SPARC64
2820 rs1 = GET_FIELD(insn, 13, 17);
2821 switch (rs1) {
2822 case 0: // tpc
2823 {
2824 TCGv_ptr r_tsptr;
2825
2826 r_tsptr = tcg_temp_new_ptr();
2827 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2828 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2829 offsetof(trap_state, tpc));
2830 tcg_temp_free_ptr(r_tsptr);
2831 }
2832 break;
2833 case 1: // tnpc
2834 {
2835 TCGv_ptr r_tsptr;
2836
2837 r_tsptr = tcg_temp_new_ptr();
2838 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2839 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2840 offsetof(trap_state, tnpc));
2841 tcg_temp_free_ptr(r_tsptr);
2842 }
2843 break;
2844 case 2: // tstate
2845 {
2846 TCGv_ptr r_tsptr;
2847
2848 r_tsptr = tcg_temp_new_ptr();
2849 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2850 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2851 offsetof(trap_state, tstate));
2852 tcg_temp_free_ptr(r_tsptr);
2853 }
2854 break;
2855 case 3: // tt
2856 {
2857 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2858
2859 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2860 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2861 offsetof(trap_state, tt));
2862 tcg_temp_free_ptr(r_tsptr);
2863 }
2864 break;
2865 case 4: // tick
2866 {
2867 TCGv_ptr r_tickptr;
2868
2869 r_tickptr = tcg_temp_new_ptr();
2870 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2871 offsetof(CPUSPARCState, tick));
2872 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2873 tcg_temp_free_ptr(r_tickptr);
2874 }
2875 break;
2876 case 5: // tba
2877 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2878 break;
2879 case 6: // pstate
2880 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2881 offsetof(CPUSPARCState, pstate));
2882 break;
2883 case 7: // tl
2884 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2885 offsetof(CPUSPARCState, tl));
2886 break;
2887 case 8: // pil
2888 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2889 offsetof(CPUSPARCState, psrpil));
2890 break;
2891 case 9: // cwp
2892 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2893 break;
2894 case 10: // cansave
2895 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2896 offsetof(CPUSPARCState, cansave));
2897 break;
2898 case 11: // canrestore
2899 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2900 offsetof(CPUSPARCState, canrestore));
2901 break;
2902 case 12: // cleanwin
2903 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2904 offsetof(CPUSPARCState, cleanwin));
2905 break;
2906 case 13: // otherwin
2907 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2908 offsetof(CPUSPARCState, otherwin));
2909 break;
2910 case 14: // wstate
2911 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2912 offsetof(CPUSPARCState, wstate));
2913 break;
2914 case 16: // UA2005 gl
2915 CHECK_IU_FEATURE(dc, GL);
2916 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2917 offsetof(CPUSPARCState, gl));
2918 break;
2919 case 26: // UA2005 strand status
2920 CHECK_IU_FEATURE(dc, HYPV);
2921 if (!hypervisor(dc))
2922 goto priv_insn;
2923 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2924 break;
2925 case 31: // ver
2926 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2927 break;
2928 case 15: // fq
2929 default:
2930 goto illegal_insn;
2931 }
2932 #else
2933 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2934 #endif
2935 gen_store_gpr(dc, rd, cpu_tmp0);
2936 break;
2937 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2938 #ifdef TARGET_SPARC64
2939 save_state(dc);
2940 gen_helper_flushw(cpu_env);
2941 #else
2942 if (!supervisor(dc))
2943 goto priv_insn;
2944 gen_store_gpr(dc, rd, cpu_tbr);
2945 #endif
2946 break;
2947 #endif
2948 } else if (xop == 0x34) { /* FPU Operations */
2949 if (gen_trap_ifnofpu(dc)) {
2950 goto jmp_insn;
2951 }
2952 gen_op_clear_ieee_excp_and_FTT();
2953 rs1 = GET_FIELD(insn, 13, 17);
2954 rs2 = GET_FIELD(insn, 27, 31);
2955 xop = GET_FIELD(insn, 18, 26);
2956 save_state(dc);
2957 switch (xop) {
2958 case 0x1: /* fmovs */
2959 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2960 gen_store_fpr_F(dc, rd, cpu_src1_32);
2961 break;
2962 case 0x5: /* fnegs */
2963 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2964 break;
2965 case 0x9: /* fabss */
2966 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2967 break;
2968 case 0x29: /* fsqrts */
2969 CHECK_FPU_FEATURE(dc, FSQRT);
2970 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2971 break;
2972 case 0x2a: /* fsqrtd */
2973 CHECK_FPU_FEATURE(dc, FSQRT);
2974 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2975 break;
2976 case 0x2b: /* fsqrtq */
2977 CHECK_FPU_FEATURE(dc, FLOAT128);
2978 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2979 break;
2980 case 0x41: /* fadds */
2981 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2982 break;
2983 case 0x42: /* faddd */
2984 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2985 break;
2986 case 0x43: /* faddq */
2987 CHECK_FPU_FEATURE(dc, FLOAT128);
2988 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2989 break;
2990 case 0x45: /* fsubs */
2991 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2992 break;
2993 case 0x46: /* fsubd */
2994 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
2995 break;
2996 case 0x47: /* fsubq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
2999 break;
3000 case 0x49: /* fmuls */
3001 CHECK_FPU_FEATURE(dc, FMUL);
3002 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3003 break;
3004 case 0x4a: /* fmuld */
3005 CHECK_FPU_FEATURE(dc, FMUL);
3006 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3007 break;
3008 case 0x4b: /* fmulq */
3009 CHECK_FPU_FEATURE(dc, FLOAT128);
3010 CHECK_FPU_FEATURE(dc, FMUL);
3011 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3012 break;
3013 case 0x4d: /* fdivs */
3014 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3015 break;
3016 case 0x4e: /* fdivd */
3017 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3018 break;
3019 case 0x4f: /* fdivq */
3020 CHECK_FPU_FEATURE(dc, FLOAT128);
3021 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3022 break;
3023 case 0x69: /* fsmuld */
3024 CHECK_FPU_FEATURE(dc, FSMULD);
3025 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3026 break;
3027 case 0x6e: /* fdmulq */
3028 CHECK_FPU_FEATURE(dc, FLOAT128);
3029 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3030 break;
3031 case 0xc4: /* fitos */
3032 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3033 break;
3034 case 0xc6: /* fdtos */
3035 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3036 break;
3037 case 0xc7: /* fqtos */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3040 break;
3041 case 0xc8: /* fitod */
3042 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3043 break;
3044 case 0xc9: /* fstod */
3045 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3046 break;
3047 case 0xcb: /* fqtod */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3050 break;
3051 case 0xcc: /* fitoq */
3052 CHECK_FPU_FEATURE(dc, FLOAT128);
3053 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3054 break;
3055 case 0xcd: /* fstoq */
3056 CHECK_FPU_FEATURE(dc, FLOAT128);
3057 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3058 break;
3059 case 0xce: /* fdtoq */
3060 CHECK_FPU_FEATURE(dc, FLOAT128);
3061 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3062 break;
3063 case 0xd1: /* fstoi */
3064 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3065 break;
3066 case 0xd2: /* fdtoi */
3067 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3068 break;
3069 case 0xd3: /* fqtoi */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3072 break;
3073 #ifdef TARGET_SPARC64
3074 case 0x2: /* V9 fmovd */
3075 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3076 gen_store_fpr_D(dc, rd, cpu_src1_64);
3077 break;
3078 case 0x3: /* V9 fmovq */
3079 CHECK_FPU_FEATURE(dc, FLOAT128);
3080 gen_move_Q(rd, rs2);
3081 break;
3082 case 0x6: /* V9 fnegd */
3083 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3084 break;
3085 case 0x7: /* V9 fnegq */
3086 CHECK_FPU_FEATURE(dc, FLOAT128);
3087 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3088 break;
3089 case 0xa: /* V9 fabsd */
3090 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3091 break;
3092 case 0xb: /* V9 fabsq */
3093 CHECK_FPU_FEATURE(dc, FLOAT128);
3094 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3095 break;
3096 case 0x81: /* V9 fstox */
3097 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3098 break;
3099 case 0x82: /* V9 fdtox */
3100 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3101 break;
3102 case 0x83: /* V9 fqtox */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3105 break;
3106 case 0x84: /* V9 fxtos */
3107 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3108 break;
3109 case 0x88: /* V9 fxtod */
3110 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3111 break;
3112 case 0x8c: /* V9 fxtoq */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3115 break;
3116 #endif
3117 default:
3118 goto illegal_insn;
3119 }
3120 } else if (xop == 0x35) { /* FPU Operations */
3121 #ifdef TARGET_SPARC64
3122 int cond;
3123 #endif
3124 if (gen_trap_ifnofpu(dc)) {
3125 goto jmp_insn;
3126 }
3127 gen_op_clear_ieee_excp_and_FTT();
3128 rs1 = GET_FIELD(insn, 13, 17);
3129 rs2 = GET_FIELD(insn, 27, 31);
3130 xop = GET_FIELD(insn, 18, 26);
3131 save_state(dc);
3132
3133 #ifdef TARGET_SPARC64
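/* FMOVR: conditional fp move keyed on the contents of integer register rs1,
   as opposed to the FMOVcc forms below which test %fccN/%icc/%xcc. */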
3134 #define FMOVR(sz) \
3135 do { \
3136 DisasCompare cmp; \
3137 cond = GET_FIELD_SP(insn, 10, 12); \
3138 cpu_src1 = get_src1(dc, insn); \
3139 gen_compare_reg(&cmp, cond, cpu_src1); \
3140 gen_fmov##sz(dc, &cmp, rd, rs2); \
3141 free_compare(&cmp); \
3142 } while (0)
3143
3144 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3145 FMOVR(s);
3146 break;
3147 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3148 FMOVR(d);
3149 break;
3150 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3151 CHECK_FPU_FEATURE(dc, FLOAT128);
3152 FMOVR(q);
3153 break;
3154 }
3155 #undef FMOVR
3156 #endif
3157 switch (xop) {
3158 #ifdef TARGET_SPARC64
3159 #define FMOVCC(fcc, sz) \
3160 do { \
3161 DisasCompare cmp; \
3162 cond = GET_FIELD_SP(insn, 14, 17); \
3163 gen_fcompare(&cmp, fcc, cond); \
3164 gen_fmov##sz(dc, &cmp, rd, rs2); \
3165 free_compare(&cmp); \
3166 } while (0)
3167
3168 case 0x001: /* V9 fmovscc %fcc0 */
3169 FMOVCC(0, s);
3170 break;
3171 case 0x002: /* V9 fmovdcc %fcc0 */
3172 FMOVCC(0, d);
3173 break;
3174 case 0x003: /* V9 fmovqcc %fcc0 */
3175 CHECK_FPU_FEATURE(dc, FLOAT128);
3176 FMOVCC(0, q);
3177 break;
3178 case 0x041: /* V9 fmovscc %fcc1 */
3179 FMOVCC(1, s);
3180 break;
3181 case 0x042: /* V9 fmovdcc %fcc1 */
3182 FMOVCC(1, d);
3183 break;
3184 case 0x043: /* V9 fmovqcc %fcc1 */
3185 CHECK_FPU_FEATURE(dc, FLOAT128);
3186 FMOVCC(1, q);
3187 break;
3188 case 0x081: /* V9 fmovscc %fcc2 */
3189 FMOVCC(2, s);
3190 break;
3191 case 0x082: /* V9 fmovdcc %fcc2 */
3192 FMOVCC(2, d);
3193 break;
3194 case 0x083: /* V9 fmovqcc %fcc2 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3196 FMOVCC(2, q);
3197 break;
3198 case 0x0c1: /* V9 fmovscc %fcc3 */
3199 FMOVCC(3, s);
3200 break;
3201 case 0x0c2: /* V9 fmovdcc %fcc3 */
3202 FMOVCC(3, d);
3203 break;
3204 case 0x0c3: /* V9 fmovqcc %fcc3 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3206 FMOVCC(3, q);
3207 break;
3208 #undef FMOVCC
3209 #define FMOVCC(xcc, sz) \
3210 do { \
3211 DisasCompare cmp; \
3212 cond = GET_FIELD_SP(insn, 14, 17); \
3213 gen_compare(&cmp, xcc, cond, dc); \
3214 gen_fmov##sz(dc, &cmp, rd, rs2); \
3215 free_compare(&cmp); \
3216 } while (0)
3217
3218 case 0x101: /* V9 fmovscc %icc */
3219 FMOVCC(0, s);
3220 break;
3221 case 0x102: /* V9 fmovdcc %icc */
3222 FMOVCC(0, d);
3223 break;
3224 case 0x103: /* V9 fmovqcc %icc */
3225 CHECK_FPU_FEATURE(dc, FLOAT128);
3226 FMOVCC(0, q);
3227 break;
3228 case 0x181: /* V9 fmovscc %xcc */
3229 FMOVCC(1, s);
3230 break;
3231 case 0x182: /* V9 fmovdcc %xcc */
3232 FMOVCC(1, d);
3233 break;
3234 case 0x183: /* V9 fmovqcc %xcc */
3235 CHECK_FPU_FEATURE(dc, FLOAT128);
3236 FMOVCC(1, q);
3237 break;
3238 #undef FMOVCC
3239 #endif
3240 case 0x51: /* fcmps, V9 %fcc */
3241 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3242 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3243 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3244 break;
3245 case 0x52: /* fcmpd, V9 %fcc */
3246 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3247 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3248 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3249 break;
3250 case 0x53: /* fcmpq, V9 %fcc */
3251 CHECK_FPU_FEATURE(dc, FLOAT128);
3252 gen_op_load_fpr_QT0(QFPREG(rs1));
3253 gen_op_load_fpr_QT1(QFPREG(rs2));
3254 gen_op_fcmpq(rd & 3);
3255 break;
3256 case 0x55: /* fcmpes, V9 %fcc */
3257 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3258 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3259 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3260 break;
3261 case 0x56: /* fcmped, V9 %fcc */
3262 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3263 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3264 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3265 break;
3266 case 0x57: /* fcmpeq, V9 %fcc */
3267 CHECK_FPU_FEATURE(dc, FLOAT128);
3268 gen_op_load_fpr_QT0(QFPREG(rs1));
3269 gen_op_load_fpr_QT1(QFPREG(rs2));
3270 gen_op_fcmpeq(rd & 3);
3271 break;
3272 default:
3273 goto illegal_insn;
3274 }
3275 } else if (xop == 0x2) {
3276 TCGv dst = gen_dest_gpr(dc, rd);
3277 rs1 = GET_FIELD(insn, 13, 17);
3278 if (rs1 == 0) {
3279                     /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3280 if (IS_IMM) { /* immediate */
3281 simm = GET_FIELDs(insn, 19, 31);
3282 tcg_gen_movi_tl(dst, simm);
3283 gen_store_gpr(dc, rd, dst);
3284 } else { /* register */
3285 rs2 = GET_FIELD(insn, 27, 31);
3286 if (rs2 == 0) {
3287 tcg_gen_movi_tl(dst, 0);
3288 gen_store_gpr(dc, rd, dst);
3289 } else {
3290 cpu_src2 = gen_load_gpr(dc, rs2);
3291 gen_store_gpr(dc, rd, cpu_src2);
3292 }
3293 }
3294 } else {
3295 cpu_src1 = get_src1(dc, insn);
3296 if (IS_IMM) { /* immediate */
3297 simm = GET_FIELDs(insn, 19, 31);
3298 tcg_gen_ori_tl(dst, cpu_src1, simm);
3299 gen_store_gpr(dc, rd, dst);
3300 } else { /* register */
3301 rs2 = GET_FIELD(insn, 27, 31);
3302 if (rs2 == 0) {
3303 /* mov shortcut: or x, %g0, y -> mov x, y */
3304 gen_store_gpr(dc, rd, cpu_src1);
3305 } else {
3306 cpu_src2 = gen_load_gpr(dc, rs2);
3307 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3308 gen_store_gpr(dc, rd, dst);
3309 }
3310 }
3311 }
3312 #ifdef TARGET_SPARC64
3313 } else if (xop == 0x25) { /* sll, V9 sllx */
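            /* Insn bit 12 is the V9 "x" bit: the shift count is masked to
               6 bits for the 64-bit sllx (likewise srlx/srax below) and to
               5 bits for the 32-bit forms. */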
3314 cpu_src1 = get_src1(dc, insn);
3315 if (IS_IMM) { /* immediate */
3316 simm = GET_FIELDs(insn, 20, 31);
3317 if (insn & (1 << 12)) {
3318 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3319 } else {
3320 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3321 }
3322 } else { /* register */
3323 rs2 = GET_FIELD(insn, 27, 31);
3324 cpu_src2 = gen_load_gpr(dc, rs2);
3325 cpu_tmp0 = get_temp_tl(dc);
3326 if (insn & (1 << 12)) {
3327 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3328 } else {
3329 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3330 }
3331 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3332 }
3333 gen_store_gpr(dc, rd, cpu_dst);
3334 } else if (xop == 0x26) { /* srl, V9 srlx */
3335 cpu_src1 = get_src1(dc, insn);
3336 if (IS_IMM) { /* immediate */
3337 simm = GET_FIELDs(insn, 20, 31);
3338 if (insn & (1 << 12)) {
3339 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3340 } else {
3341 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3342 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3343 }
3344 } else { /* register */
3345 rs2 = GET_FIELD(insn, 27, 31);
3346 cpu_src2 = gen_load_gpr(dc, rs2);
3347 cpu_tmp0 = get_temp_tl(dc);
3348 if (insn & (1 << 12)) {
3349 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3350 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3351 } else {
3352 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3353 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3354 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3355 }
3356 }
3357 gen_store_gpr(dc, rd, cpu_dst);
3358 } else if (xop == 0x27) { /* sra, V9 srax */
3359 cpu_src1 = get_src1(dc, insn);
3360 if (IS_IMM) { /* immediate */
3361 simm = GET_FIELDs(insn, 20, 31);
3362 if (insn & (1 << 12)) {
3363 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3364 } else {
3365 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3366 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3367 }
3368 } else { /* register */
3369 rs2 = GET_FIELD(insn, 27, 31);
3370 cpu_src2 = gen_load_gpr(dc, rs2);
3371 cpu_tmp0 = get_temp_tl(dc);
3372 if (insn & (1 << 12)) {
3373 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3374 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3375 } else {
3376 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3377 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3378 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3379 }
3380 }
3381 gen_store_gpr(dc, rd, cpu_dst);
3382 #endif
3383 } else if (xop < 0x36) {
3384 if (xop < 0x20) {
3385 cpu_src1 = get_src1(dc, insn);
3386 cpu_src2 = get_src2(dc, insn);
3387 switch (xop & ~0x10) {
3388 case 0x0: /* add */
3389 if (xop & 0x10) {
3390 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3391 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3392 dc->cc_op = CC_OP_ADD;
3393 } else {
3394 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3395 }
3396 break;
3397 case 0x1: /* and */
3398 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3399 if (xop & 0x10) {
3400 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3401 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3402 dc->cc_op = CC_OP_LOGIC;
3403 }
3404 break;
3405 case 0x2: /* or */
3406 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3407 if (xop & 0x10) {
3408 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3409 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3410 dc->cc_op = CC_OP_LOGIC;
3411 }
3412 break;
3413 case 0x3: /* xor */
3414 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3415 if (xop & 0x10) {
3416 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3418 dc->cc_op = CC_OP_LOGIC;
3419 }
3420 break;
3421 case 0x4: /* sub */
3422 if (xop & 0x10) {
3423 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3424 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3425 dc->cc_op = CC_OP_SUB;
3426 } else {
3427 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3428 }
3429 break;
3430 case 0x5: /* andn */
3431 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3432 if (xop & 0x10) {
3433 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3435 dc->cc_op = CC_OP_LOGIC;
3436 }
3437 break;
3438 case 0x6: /* orn */
3439 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3440 if (xop & 0x10) {
3441 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3442 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3443 dc->cc_op = CC_OP_LOGIC;
3444 }
3445 break;
3446 case 0x7: /* xorn */
3447 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3448 if (xop & 0x10) {
3449 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3450 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3451 dc->cc_op = CC_OP_LOGIC;
3452 }
3453 break;
3454 case 0x8: /* addx, V9 addc */
3455 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3456 (xop & 0x10));
3457 break;
3458 #ifdef TARGET_SPARC64
3459 case 0x9: /* V9 mulx */
3460 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3461 break;
3462 #endif
3463 case 0xa: /* umul */
3464 CHECK_IU_FEATURE(dc, MUL);
3465 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3466 if (xop & 0x10) {
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3468 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3469 dc->cc_op = CC_OP_LOGIC;
3470 }
3471 break;
3472 case 0xb: /* smul */
3473 CHECK_IU_FEATURE(dc, MUL);
3474 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3475 if (xop & 0x10) {
3476 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3477 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3478 dc->cc_op = CC_OP_LOGIC;
3479 }
3480 break;
3481 case 0xc: /* subx, V9 subc */
3482 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3483 (xop & 0x10));
3484 break;
3485 #ifdef TARGET_SPARC64
3486 case 0xd: /* V9 udivx */
3487 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3488 break;
3489 #endif
3490 case 0xe: /* udiv */
3491 CHECK_IU_FEATURE(dc, DIV);
3492 if (xop & 0x10) {
3493 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3494 cpu_src2);
3495 dc->cc_op = CC_OP_DIV;
3496 } else {
3497 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3498 cpu_src2);
3499 }
3500 break;
3501 case 0xf: /* sdiv */
3502 CHECK_IU_FEATURE(dc, DIV);
3503 if (xop & 0x10) {
3504 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3505 cpu_src2);
3506 dc->cc_op = CC_OP_DIV;
3507 } else {
3508 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3509 cpu_src2);
3510 }
3511 break;
3512 default:
3513 goto illegal_insn;
3514 }
3515 gen_store_gpr(dc, rd, cpu_dst);
3516 } else {
3517 cpu_src1 = get_src1(dc, insn);
3518 cpu_src2 = get_src2(dc, insn);
3519 switch (xop) {
3520 case 0x20: /* taddcc */
3521 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3522 gen_store_gpr(dc, rd, cpu_dst);
3523 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3524 dc->cc_op = CC_OP_TADD;
3525 break;
3526 case 0x21: /* tsubcc */
3527 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3528 gen_store_gpr(dc, rd, cpu_dst);
3529 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3530 dc->cc_op = CC_OP_TSUB;
3531 break;
3532 case 0x22: /* taddcctv */
3533 gen_helper_taddcctv(cpu_dst, cpu_env,
3534 cpu_src1, cpu_src2);
3535 gen_store_gpr(dc, rd, cpu_dst);
3536 dc->cc_op = CC_OP_TADDTV;
3537 break;
3538 case 0x23: /* tsubcctv */
3539 gen_helper_tsubcctv(cpu_dst, cpu_env,
3540 cpu_src1, cpu_src2);
3541 gen_store_gpr(dc, rd, cpu_dst);
3542 dc->cc_op = CC_OP_TSUBTV;
3543 break;
3544 case 0x24: /* mulscc */
3545 update_psr(dc);
3546 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3547 gen_store_gpr(dc, rd, cpu_dst);
3548 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3549 dc->cc_op = CC_OP_ADD;
3550 break;
3551 #ifndef TARGET_SPARC64
3552 case 0x25: /* sll */
3553 if (IS_IMM) { /* immediate */
3554 simm = GET_FIELDs(insn, 20, 31);
3555 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3556 } else { /* register */
3557 cpu_tmp0 = get_temp_tl(dc);
3558 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3559 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3560 }
3561 gen_store_gpr(dc, rd, cpu_dst);
3562 break;
3563 case 0x26: /* srl */
3564 if (IS_IMM) { /* immediate */
3565 simm = GET_FIELDs(insn, 20, 31);
3566 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3567 } else { /* register */
3568 cpu_tmp0 = get_temp_tl(dc);
3569 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3570 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3571 }
3572 gen_store_gpr(dc, rd, cpu_dst);
3573 break;
3574 case 0x27: /* sra */
3575 if (IS_IMM) { /* immediate */
3576 simm = GET_FIELDs(insn, 20, 31);
3577 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3578 } else { /* register */
3579 cpu_tmp0 = get_temp_tl(dc);
3580 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3581 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3582 }
3583 gen_store_gpr(dc, rd, cpu_dst);
3584 break;
3585 #endif
3586 case 0x30:
3587 {
3588 cpu_tmp0 = get_temp_tl(dc);
3589 switch(rd) {
3590 case 0: /* wry */
3591 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3592 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3593 break;
3594 #ifndef TARGET_SPARC64
3595 case 0x01 ... 0x0f: /* undefined in the
3596 SPARCv8 manual, nop
3597 on the microSPARC
3598 II */
3599 case 0x10 ... 0x1f: /* implementation-dependent
3600 in the SPARCv8
3601 manual, nop on the
3602 microSPARC II */
3603 if ((rd == 0x13) && (dc->def->features &
3604 CPU_FEATURE_POWERDOWN)) {
3605 /* LEON3 power-down */
3606 save_state(dc);
3607 gen_helper_power_down(cpu_env);
3608 }
3609 break;
3610 #else
3611 case 0x2: /* V9 wrccr */
3612 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3613 gen_helper_wrccr(cpu_env, cpu_tmp0);
3614 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3615 dc->cc_op = CC_OP_FLAGS;
3616 break;
3617 case 0x3: /* V9 wrasi */
3618 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3619 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3620 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3621 break;
3622 case 0x6: /* V9 wrfprs */
3623 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3624 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3625 save_state(dc);
3626 gen_op_next_insn();
3627 tcg_gen_exit_tb(0);
3628 dc->is_br = 1;
3629 break;
3630 case 0xf: /* V9 sir, nop if user */
3631 #if !defined(CONFIG_USER_ONLY)
3632 if (supervisor(dc)) {
3633 ; // XXX
3634 }
3635 #endif
3636 break;
3637 case 0x13: /* Graphics Status */
3638 if (gen_trap_ifnofpu(dc)) {
3639 goto jmp_insn;
3640 }
3641 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3642 break;
3643 case 0x14: /* Softint set */
3644 if (!supervisor(dc))
3645 goto illegal_insn;
3646 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3647 gen_helper_set_softint(cpu_env, cpu_tmp0);
3648 break;
3649 case 0x15: /* Softint clear */
3650 if (!supervisor(dc))
3651 goto illegal_insn;
3652 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3653 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3654 break;
3655 case 0x16: /* Softint write */
3656 if (!supervisor(dc))
3657 goto illegal_insn;
3658 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3659 gen_helper_write_softint(cpu_env, cpu_tmp0);
3660 break;
3661 case 0x17: /* Tick compare */
3662 #if !defined(CONFIG_USER_ONLY)
3663 if (!supervisor(dc))
3664 goto illegal_insn;
3665 #endif
3666 {
3667 TCGv_ptr r_tickptr;
3668
3669 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3670 cpu_src2);
3671 r_tickptr = tcg_temp_new_ptr();
3672 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3673 offsetof(CPUSPARCState, tick));
3674 gen_helper_tick_set_limit(r_tickptr,
3675 cpu_tick_cmpr);
3676 tcg_temp_free_ptr(r_tickptr);
3677 }
3678 break;
3679 case 0x18: /* System tick */
3680 #if !defined(CONFIG_USER_ONLY)
3681 if (!supervisor(dc))
3682 goto illegal_insn;
3683 #endif
3684 {
3685 TCGv_ptr r_tickptr;
3686
3687 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3688 cpu_src2);
3689 r_tickptr = tcg_temp_new_ptr();
3690 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3691 offsetof(CPUSPARCState, stick));
3692 gen_helper_tick_set_count(r_tickptr,
3693 cpu_tmp0);
3694 tcg_temp_free_ptr(r_tickptr);
3695 }
3696 break;
3697 case 0x19: /* System tick compare */
3698 #if !defined(CONFIG_USER_ONLY)
3699 if (!supervisor(dc))
3700 goto illegal_insn;
3701 #endif
3702 {
3703 TCGv_ptr r_tickptr;
3704
3705 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3706 cpu_src2);
3707 r_tickptr = tcg_temp_new_ptr();
3708 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3709 offsetof(CPUSPARCState, stick));
3710 gen_helper_tick_set_limit(r_tickptr,
3711 cpu_stick_cmpr);
3712 tcg_temp_free_ptr(r_tickptr);
3713 }
3714 break;
3715
3716 case 0x10: /* Performance Control */
3717 case 0x11: /* Performance Instrumentation
3718 Counter */
3719 case 0x12: /* Dispatch Control */
3720 #endif
3721 default:
3722 goto illegal_insn;
3723 }
3724 }
3725 break;
3726 #if !defined(CONFIG_USER_ONLY)
3727 case 0x31: /* wrpsr, V9 saved, restored */
3728 {
3729 if (!supervisor(dc))
3730 goto priv_insn;
3731 #ifdef TARGET_SPARC64
3732 switch (rd) {
3733 case 0:
3734 gen_helper_saved(cpu_env);
3735 break;
3736 case 1:
3737 gen_helper_restored(cpu_env);
3738 break;
3739 case 2: /* UA2005 allclean */
3740 case 3: /* UA2005 otherw */
3741 case 4: /* UA2005 normalw */
3742 case 5: /* UA2005 invalw */
3743 // XXX
3744 default:
3745 goto illegal_insn;
3746 }
3747 #else
3748 cpu_tmp0 = get_temp_tl(dc);
3749 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3750 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3751 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3752 dc->cc_op = CC_OP_FLAGS;
3753 save_state(dc);
3754 gen_op_next_insn();
3755 tcg_gen_exit_tb(0);
3756 dc->is_br = 1;
3757 #endif
3758 }
3759 break;
3760 case 0x32: /* wrwim, V9 wrpr */
3761 {
3762 if (!supervisor(dc))
3763 goto priv_insn;
3764 cpu_tmp0 = get_temp_tl(dc);
3765 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3766 #ifdef TARGET_SPARC64
3767 switch (rd) {
3768 case 0: // tpc
3769 {
3770 TCGv_ptr r_tsptr;
3771
3772 r_tsptr = tcg_temp_new_ptr();
3773 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3774 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3775 offsetof(trap_state, tpc));
3776 tcg_temp_free_ptr(r_tsptr);
3777 }
3778 break;
3779 case 1: // tnpc
3780 {
3781 TCGv_ptr r_tsptr;
3782
3783 r_tsptr = tcg_temp_new_ptr();
3784 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3785 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3786 offsetof(trap_state, tnpc));
3787 tcg_temp_free_ptr(r_tsptr);
3788 }
3789 break;
3790 case 2: // tstate
3791 {
3792 TCGv_ptr r_tsptr;
3793
3794 r_tsptr = tcg_temp_new_ptr();
3795 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3796 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3797 offsetof(trap_state,
3798 tstate));
3799 tcg_temp_free_ptr(r_tsptr);
3800 }
3801 break;
3802 case 3: // tt
3803 {
3804 TCGv_ptr r_tsptr;
3805
3806 r_tsptr = tcg_temp_new_ptr();
3807 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3808 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3809 offsetof(trap_state, tt));
3810 tcg_temp_free_ptr(r_tsptr);
3811 }
3812 break;
3813 case 4: // tick
3814 {
3815 TCGv_ptr r_tickptr;
3816
3817 r_tickptr = tcg_temp_new_ptr();
3818 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3819 offsetof(CPUSPARCState, tick));
3820 gen_helper_tick_set_count(r_tickptr,
3821 cpu_tmp0);
3822 tcg_temp_free_ptr(r_tickptr);
3823 }
3824 break;
3825 case 5: // tba
3826 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3827 break;
3828 case 6: // pstate
3829 save_state(dc);
3830 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3831 dc->npc = DYNAMIC_PC;
3832 break;
3833 case 7: // tl
3834 save_state(dc);
3835 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3836 offsetof(CPUSPARCState, tl));
3837 dc->npc = DYNAMIC_PC;
3838 break;
3839 case 8: // pil
3840 gen_helper_wrpil(cpu_env, cpu_tmp0);
3841 break;
3842 case 9: // cwp
3843 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3844 break;
3845 case 10: // cansave
3846 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3847 offsetof(CPUSPARCState,
3848 cansave));
3849 break;
3850 case 11: // canrestore
3851 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3852 offsetof(CPUSPARCState,
3853 canrestore));
3854 break;
3855 case 12: // cleanwin
3856 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3857 offsetof(CPUSPARCState,
3858 cleanwin));
3859 break;
3860 case 13: // otherwin
3861 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3862 offsetof(CPUSPARCState,
3863 otherwin));
3864 break;
3865 case 14: // wstate
3866 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3867 offsetof(CPUSPARCState,
3868 wstate));
3869 break;
3870 case 16: // UA2005 gl
3871 CHECK_IU_FEATURE(dc, GL);
3872 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3873 offsetof(CPUSPARCState, gl));
3874 break;
3875 case 26: // UA2005 strand status
3876 CHECK_IU_FEATURE(dc, HYPV);
3877 if (!hypervisor(dc))
3878 goto priv_insn;
3879 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3880 break;
3881 default:
3882 goto illegal_insn;
3883 }
3884 #else
3885 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3886 if (dc->def->nwindows != 32) {
3887 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3888 (1 << dc->def->nwindows) - 1);
3889 }
3890 #endif
3891 }
3892 break;
3893 case 0x33: /* wrtbr, UA2005 wrhpr */
3894 {
3895 #ifndef TARGET_SPARC64
3896 if (!supervisor(dc))
3897 goto priv_insn;
3898 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3899 #else
3900 CHECK_IU_FEATURE(dc, HYPV);
3901 if (!hypervisor(dc))
3902 goto priv_insn;
3903 cpu_tmp0 = get_temp_tl(dc);
3904 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3905 switch (rd) {
3906 case 0: // hpstate
3907 // XXX gen_op_wrhpstate();
3908 save_state(dc);
3909 gen_op_next_insn();
3910 tcg_gen_exit_tb(0);
3911 dc->is_br = 1;
3912 break;
3913 case 1: // htstate
3914 // XXX gen_op_wrhtstate();
3915 break;
3916 case 3: // hintp
3917 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3918 break;
3919 case 5: // htba
3920 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3921 break;
3922 case 31: // hstick_cmpr
3923 {
3924 TCGv_ptr r_tickptr;
3925
3926 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3927 r_tickptr = tcg_temp_new_ptr();
3928 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3929 offsetof(CPUSPARCState, hstick));
3930 gen_helper_tick_set_limit(r_tickptr,
3931 cpu_hstick_cmpr);
3932 tcg_temp_free_ptr(r_tickptr);
3933 }
3934 break;
3935 case 6: // hver readonly
3936 default:
3937 goto illegal_insn;
3938 }
3939 #endif
3940 }
3941 break;
3942 #endif
3943 #ifdef TARGET_SPARC64
3944 case 0x2c: /* V9 movcc */
3945 {
3946 int cc = GET_FIELD_SP(insn, 11, 12);
3947 int cond = GET_FIELD_SP(insn, 14, 17);
3948 DisasCompare cmp;
3949 TCGv dst;
3950
3951 if (insn & (1 << 18)) {
3952 if (cc == 0) {
3953 gen_compare(&cmp, 0, cond, dc);
3954 } else if (cc == 2) {
3955 gen_compare(&cmp, 1, cond, dc);
3956 } else {
3957 goto illegal_insn;
3958 }
3959 } else {
3960 gen_fcompare(&cmp, cc, cond);
3961 }
3962
3963 /* The get_src2 above loaded the normal 13-bit
3964 immediate field, not the 11-bit field we have
3965 in movcc. But it did handle the reg case. */
3966 if (IS_IMM) {
3967 simm = GET_FIELD_SPs(insn, 0, 10);
3968 tcg_gen_movi_tl(cpu_src2, simm);
3969 }
3970
3971 dst = gen_load_gpr(dc, rd);
3972 tcg_gen_movcond_tl(cmp.cond, dst,
3973 cmp.c1, cmp.c2,
3974 cpu_src2, dst);
3975 free_compare(&cmp);
3976 gen_store_gpr(dc, rd, dst);
3977 break;
3978 }
3979 case 0x2d: /* V9 sdivx */
3980 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3981 gen_store_gpr(dc, rd, cpu_dst);
3982 break;
3983 case 0x2e: /* V9 popc */
3984 gen_helper_popc(cpu_dst, cpu_src2);
3985 gen_store_gpr(dc, rd, cpu_dst);
3986 break;
3987 case 0x2f: /* V9 movr */
3988 {
3989 int cond = GET_FIELD_SP(insn, 10, 12);
3990 DisasCompare cmp;
3991 TCGv dst;
3992
3993 gen_compare_reg(&cmp, cond, cpu_src1);
3994
3995 /* The get_src2 above loaded the normal 13-bit
3996 immediate field, not the 10-bit field we have
3997 in movr. But it did handle the reg case. */
3998 if (IS_IMM) {
3999 simm = GET_FIELD_SPs(insn, 0, 9);
4000 tcg_gen_movi_tl(cpu_src2, simm);
4001 }
4002
4003 dst = gen_load_gpr(dc, rd);
4004 tcg_gen_movcond_tl(cmp.cond, dst,
4005 cmp.c1, cmp.c2,
4006 cpu_src2, dst);
4007 free_compare(&cmp);
4008 gen_store_gpr(dc, rd, dst);
4009 break;
4010 }
4011 #endif
4012 default:
4013 goto illegal_insn;
4014 }
4015 }
4016 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4017 #ifdef TARGET_SPARC64
4018 int opf = GET_FIELD_SP(insn, 5, 13);
4019 rs1 = GET_FIELD(insn, 13, 17);
4020 rs2 = GET_FIELD(insn, 27, 31);
4021 if (gen_trap_ifnofpu(dc)) {
4022 goto jmp_insn;
4023 }
4024
4025 switch (opf) {
4026 case 0x000: /* VIS I edge8cc */
4027 CHECK_FPU_FEATURE(dc, VIS1);
4028 cpu_src1 = gen_load_gpr(dc, rs1);
4029 cpu_src2 = gen_load_gpr(dc, rs2);
4030 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4031 gen_store_gpr(dc, rd, cpu_dst);
4032 break;
4033 case 0x001: /* VIS II edge8n */
4034 CHECK_FPU_FEATURE(dc, VIS2);
4035 cpu_src1 = gen_load_gpr(dc, rs1);
4036 cpu_src2 = gen_load_gpr(dc, rs2);
4037 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4038 gen_store_gpr(dc, rd, cpu_dst);
4039 break;
4040 case 0x002: /* VIS I edge8lcc */
4041 CHECK_FPU_FEATURE(dc, VIS1);
4042 cpu_src1 = gen_load_gpr(dc, rs1);
4043 cpu_src2 = gen_load_gpr(dc, rs2);
4044 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4045 gen_store_gpr(dc, rd, cpu_dst);
4046 break;
4047 case 0x003: /* VIS II edge8ln */
4048 CHECK_FPU_FEATURE(dc, VIS2);
4049 cpu_src1 = gen_load_gpr(dc, rs1);
4050 cpu_src2 = gen_load_gpr(dc, rs2);
4051 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4052 gen_store_gpr(dc, rd, cpu_dst);
4053 break;
4054 case 0x004: /* VIS I edge16cc */
4055 CHECK_FPU_FEATURE(dc, VIS1);
4056 cpu_src1 = gen_load_gpr(dc, rs1);
4057 cpu_src2 = gen_load_gpr(dc, rs2);
4058 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4059 gen_store_gpr(dc, rd, cpu_dst);
4060 break;
4061 case 0x005: /* VIS II edge16n */
4062 CHECK_FPU_FEATURE(dc, VIS2);
4063 cpu_src1 = gen_load_gpr(dc, rs1);
4064 cpu_src2 = gen_load_gpr(dc, rs2);
4065 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4066 gen_store_gpr(dc, rd, cpu_dst);
4067 break;
4068 case 0x006: /* VIS I edge16lcc */
4069 CHECK_FPU_FEATURE(dc, VIS1);
4070 cpu_src1 = gen_load_gpr(dc, rs1);
4071 cpu_src2 = gen_load_gpr(dc, rs2);
4072 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4073 gen_store_gpr(dc, rd, cpu_dst);
4074 break;
4075 case 0x007: /* VIS II edge16ln */
4076 CHECK_FPU_FEATURE(dc, VIS2);
4077 cpu_src1 = gen_load_gpr(dc, rs1);
4078 cpu_src2 = gen_load_gpr(dc, rs2);
4079 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 break;
4082 case 0x008: /* VIS I edge32cc */
4083 CHECK_FPU_FEATURE(dc, VIS1);
4084 cpu_src1 = gen_load_gpr(dc, rs1);
4085 cpu_src2 = gen_load_gpr(dc, rs2);
4086 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4087 gen_store_gpr(dc, rd, cpu_dst);
4088 break;
4089 case 0x009: /* VIS II edge32n */
4090 CHECK_FPU_FEATURE(dc, VIS2);
4091 cpu_src1 = gen_load_gpr(dc, rs1);
4092 cpu_src2 = gen_load_gpr(dc, rs2);
4093 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4094 gen_store_gpr(dc, rd, cpu_dst);
4095 break;
4096 case 0x00a: /* VIS I edge32lcc */
4097 CHECK_FPU_FEATURE(dc, VIS1);
4098 cpu_src1 = gen_load_gpr(dc, rs1);
4099 cpu_src2 = gen_load_gpr(dc, rs2);
4100 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4101 gen_store_gpr(dc, rd, cpu_dst);
4102 break;
4103 case 0x00b: /* VIS II edge32ln */
4104 CHECK_FPU_FEATURE(dc, VIS2);
4105 cpu_src1 = gen_load_gpr(dc, rs1);
4106 cpu_src2 = gen_load_gpr(dc, rs2);
4107 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4108 gen_store_gpr(dc, rd, cpu_dst);
4109 break;
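            /* arrayN: the helper computes the blocked-array8 address; the
               16- and 32-bit forms merely rescale it by the element size. */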
4110 case 0x010: /* VIS I array8 */
4111 CHECK_FPU_FEATURE(dc, VIS1);
4112 cpu_src1 = gen_load_gpr(dc, rs1);
4113 cpu_src2 = gen_load_gpr(dc, rs2);
4114 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4115 gen_store_gpr(dc, rd, cpu_dst);
4116 break;
4117 case 0x012: /* VIS I array16 */
4118 CHECK_FPU_FEATURE(dc, VIS1);
4119 cpu_src1 = gen_load_gpr(dc, rs1);
4120 cpu_src2 = gen_load_gpr(dc, rs2);
4121 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4122 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4123 gen_store_gpr(dc, rd, cpu_dst);
4124 break;
4125 case 0x014: /* VIS I array32 */
4126 CHECK_FPU_FEATURE(dc, VIS1);
4127 cpu_src1 = gen_load_gpr(dc, rs1);
4128 cpu_src2 = gen_load_gpr(dc, rs2);
4129 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4130 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4131 gen_store_gpr(dc, rd, cpu_dst);
4132 break;
4133 case 0x018: /* VIS I alignaddr */
4134 CHECK_FPU_FEATURE(dc, VIS1);
4135 cpu_src1 = gen_load_gpr(dc, rs1);
4136 cpu_src2 = gen_load_gpr(dc, rs2);
4137 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4138 gen_store_gpr(dc, rd, cpu_dst);
4139 break;
4140 case 0x01a: /* VIS I alignaddrl */
4141 CHECK_FPU_FEATURE(dc, VIS1);
4142 cpu_src1 = gen_load_gpr(dc, rs1);
4143 cpu_src2 = gen_load_gpr(dc, rs2);
4144 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4145 gen_store_gpr(dc, rd, cpu_dst);
4146 break;
4147 case 0x019: /* VIS II bmask */
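                /* bmask: rd = rs1 + rs2, with the low 32 bits of the sum
                   also deposited into GSR<63:32> for use by bshuffle. */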
4148 CHECK_FPU_FEATURE(dc, VIS2);
4149 cpu_src1 = gen_load_gpr(dc, rs1);
4150 cpu_src2 = gen_load_gpr(dc, rs2);
4151 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4152 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4153 gen_store_gpr(dc, rd, cpu_dst);
4154 break;
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
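                /* fpack16/fpackfix below take their single input from
                   rs2 and, like fpack32 above, consult the GSR (the VIS
                   scale factor), hence cpu_gsr is passed to the helpers
                   explicitly.  */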
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
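                /* The remaining VIS I opcodes form the 16 two-operand
                   boolean functions.  fzero/fone materialize constants,
                   fsrc1/fsrc2 are plain moves, and the rest map
                   directly onto TCG logical ops; fandnot1/fornot1 are
                   obtained from fandnot2/fornot2 by swapping rs1 and
                   rs2.  */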
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    /* XXX: not implemented */
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                TCGv_i32 r_const;

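                /* V9 return behaves like a jmpl that also pops a
                   register window: compute the target, restore the
                   caller's window, check 4-byte alignment (mask 3),
                   then load npc with the target and force DYNAMIC_PC so
                   this TB ends here.  */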
                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_helper_restore(cpu_env);
                gen_mov_pc_npc(dc);
                r_const = tcg_const_i32(3);
                gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                tcg_temp_free_i32(r_const);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = get_temp_tl(dc);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
                case 0x38:      /* jmpl */
                    {
                        TCGv t;
                        TCGv_i32 r_const;

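                        /* jmpl: write the address of this insn (the
                           return address by the delay-slot convention)
                           to rd, advance pc to npc, verify the target
                           is 4-byte aligned, then set npc from the
                           computed address.  DYNAMIC_PC tells the
                           translator the next pc is only known at run
                           time.  */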
                        t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, dc->pc);
                        gen_store_gpr(dc, rd, t);
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:      /* rett, V9 return */
                    {
                        TCGv_i32 r_const;

                        if (!supervisor(dc)) {
                            goto priv_insn;
                        }
                        gen_mov_pc_npc(dc);
                        r_const = tcg_const_i32(3);
                        gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                        tcg_temp_free_i32(r_const);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(cpu_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    if (!((dc)->def->features & CPU_FEATURE_FLUSH)) {
                        goto unimp_flush;
                    }
                    /* nop */
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_helper_save(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_helper_restore(cpu_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc)) {
                                goto priv_insn;
                            }
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_done(cpu_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc)) {
                                goto priv_insn;
                            }
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_helper_retry(cpu_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {        /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1) {
                        goto illegal_insn;
                    } else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

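                        /* ldd: one 64-bit access, then split.  Per the
                           V8 spec the even register rd gets the word at
                           the lower address (the high half of this
                           big-endian load) and rd + 1 the word at
                           addr + 4 (the low half), both zero-extended.  */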
                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomic */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        /* ldstub loads an unsigned byte, so zero-extend */
                        tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory.
                       XXX: should be atomic */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomic */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory.
                                   XXX: should be atomic */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1) {
                        goto illegal_insn;
                    } else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

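                        /* std: mirror image of ldd above; rd supplies
                           the high word and rd + 1 the low word of a
                           single 64-bit store.  */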
                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 4);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 1);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 2);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    if (rd & 1) {
                        goto illegal_insn;
                    } else {
                        save_state(dc);
                        gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    }
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 8);
                    dc->npc = DYNAMIC_PC;
                    break;
#endif
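                /* Note that the word/byte/halfword/extended alternate
                   stores above force dc->npc = DYNAMIC_PC, presumably
                   because an ASI store can have side effects on CPU
                   state, so translation conservatively resumes from a
                   fresh TB afterwards.  */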
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    {
                        TCGv t = get_temp_tl(dc);
                        gen_address_mask(dc, cpu_addr);
                        cpu_src1_32 = gen_load_fpr_F(dc, rd);
                        tcg_gen_ext_i32_tl(t, cpu_src1_32);
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
                        TCGv t = get_temp_tl(dc);

                        tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rd));
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_stqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                save_state(dc);
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        r_const = tcg_const_i32(7);
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non-jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
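    /* Control-transfer cases above (jmpl, return, done/retry, ...)
       skip the block above and jump straight to jmp_insn, having
       already updated pc/npc themselves.  */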
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
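    /* Free the per-insn temporaries recorded in dc->t32/dc->ttl
       (filled by the get_temp_*() helpers earlier in this file); the
       arrays are refilled by the next instruction.  */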
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}

static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    gen_tb_start(tb);
    do {
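        /* In search-pc mode, record the pc/npc pair for each generated
           op so restore_state_to_opc() below can rebuild the CPU state
           at a fault.  A conditional-branch npc is encoded by or'ing
           JUMP_PC into the taken target.  */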
        if (spc) {
            qemu_log("Search PC...\n");
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
                tcg_ctx.gen_opc_pc[lj] = dc->pc;
                gen_opc_npc[lj] = dc->npc;
                if (dc->npc & JUMP_PC) {
                    assert(dc->jump_pc[1] == dc->pc + 4);
                    gen_opc_npc[lj] = dc->jump_pc[0] | JUMP_PC;
                }
                tcg_ctx.gen_opc_instr_start[lj] = 1;
                tcg_ctx.gen_opc_icount[lj] = num_insns;
            }
        }
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);

        if (dc->is_br) {
            break;
        }
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4)) {
            break;
        }
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) {
            break;
        }
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
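    /* Beyond the explicit breaks above, the loop condition keeps a
       32-byte margin below TARGET_PAGE_SIZE so a TB never crosses into
       another guest page, and bounds both the TCG op buffer and the
       per-TB instruction budget.  */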

 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);

    if (spc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
#if 0
        log_page_dump();
#endif
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}

void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, /* g0 not used */
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                           offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}

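/* Called on exception restart: rebuild pc/npc for the op at pc_pos from
   the side tables filled in during search-pc translation.  For JUMP_PC
   the taken target was stored in the npc slot (with the low bits used
   as the flag), the not-taken path is always pc + 4, and the choice is
   made here from the run-time value of env->cond.  */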
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong pc, npc;
    env->pc = pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}