4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
66 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
81 # define FSR_LDXFSR_MASK 0
82 # define FSR_LDXFSR_OLDMASK 0
86 /* Dynamic PC, must exit to main loop. */
88 /* Dynamic PC, one of two values according to jump_pc[T2]. */
90 /* Dynamic PC, may lookup next TB. */
91 #define DYNAMIC_PC_LOOKUP 3
93 #define DISAS_EXIT DISAS_TARGET_0
95 /* global register indexes */
96 static TCGv_ptr cpu_regwptr
;
97 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
98 static TCGv_i32 cpu_cc_op
;
99 static TCGv_i32 cpu_psr
;
100 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
101 static TCGv cpu_regs
[32];
104 static TCGv cpu_cond
;
105 #ifdef TARGET_SPARC64
106 static TCGv_i32 cpu_xcc
, cpu_fprs
;
109 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
110 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
112 /* Floating point registers */
113 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
115 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
116 #ifdef TARGET_SPARC64
117 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
118 # define env64_field_offsetof(X) env_field_offsetof(X)
120 # define env32_field_offsetof(X) env_field_offsetof(X)
121 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
124 typedef struct DisasDelayException
{
125 struct DisasDelayException
*next
;
128 /* Saved state at parent insn. */
131 } DisasDelayException
;
133 typedef struct DisasContext
{
134 DisasContextBase base
;
135 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
136 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
137 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
140 bool address_mask_32bit
;
141 #ifndef CONFIG_USER_ONLY
143 #ifdef TARGET_SPARC64
148 uint32_t cc_op
; /* current CC operation */
150 #ifdef TARGET_SPARC64
154 DisasDelayException
*delay_excp_list
;
163 // This function uses non-native bit order
164 #define GET_FIELD(X, FROM, TO) \
165 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
167 // This function uses the order in the manuals, i.e. bit 0 is 2^0
168 #define GET_FIELD_SP(X, FROM, TO) \
169 GET_FIELD(X, 31 - (TO), 31 - (FROM))
171 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
172 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
174 #ifdef TARGET_SPARC64
175 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
176 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
178 #define DFPREG(r) (r & 0x1e)
179 #define QFPREG(r) (r & 0x1c)
182 #define UA2005_HTRAP_MASK 0xff
183 #define V8_TRAP_MASK 0x7f
185 #define IS_IMM (insn & (1<<13))
187 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
189 #if defined(TARGET_SPARC64)
190 int bit
= (rd
< 32) ? 1 : 2;
191 /* If we know we've already set this bit within the TB,
192 we can avoid setting it again. */
193 if (!(dc
->fprs_dirty
& bit
)) {
194 dc
->fprs_dirty
|= bit
;
195 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
200 /* floating point registers moves */
201 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
203 TCGv_i32 ret
= tcg_temp_new_i32();
205 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
207 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
212 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
214 TCGv_i64 t
= tcg_temp_new_i64();
216 tcg_gen_extu_i32_i64(t
, v
);
217 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
218 (dst
& 1 ? 0 : 32), 32);
219 gen_update_fprs_dirty(dc
, dst
);
222 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
224 return tcg_temp_new_i32();
227 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
230 return cpu_fpr
[src
/ 2];
233 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
236 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
237 gen_update_fprs_dirty(dc
, dst
);
240 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
242 return cpu_fpr
[DFPREG(dst
) / 2];
245 static void gen_op_load_fpr_QT0(unsigned int src
)
247 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
248 offsetof(CPU_QuadU
, ll
.upper
));
249 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
250 offsetof(CPU_QuadU
, ll
.lower
));
253 static void gen_op_load_fpr_QT1(unsigned int src
)
255 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
256 offsetof(CPU_QuadU
, ll
.upper
));
257 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
258 offsetof(CPU_QuadU
, ll
.lower
));
261 static void gen_op_store_QT0_fpr(unsigned int dst
)
263 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
264 offsetof(CPU_QuadU
, ll
.upper
));
265 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
266 offsetof(CPU_QuadU
, ll
.lower
));
#ifdef TARGET_SPARC64
/* Move a quad-precision register: two 64-bit moves, then mark the
   destination half of the file dirty.  Register numbers are
   canonicalized through QFPREG (assumed — gap in extracted source). */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
282 #ifdef CONFIG_USER_ONLY
283 #define supervisor(dc) 0
284 #define hypervisor(dc) 0
286 #ifdef TARGET_SPARC64
287 #define hypervisor(dc) (dc->hypervisor)
288 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
290 #define supervisor(dc) (dc->supervisor)
291 #define hypervisor(dc) 0
295 #if !defined(TARGET_SPARC64)
296 # define AM_CHECK(dc) false
297 #elif defined(TARGET_ABI32)
298 # define AM_CHECK(dc) true
299 #elif defined(CONFIG_USER_ONLY)
300 # define AM_CHECK(dc) false
302 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
305 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
308 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
312 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
314 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
317 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
321 return cpu_regs
[reg
];
323 TCGv t
= tcg_temp_new();
324 tcg_gen_movi_tl(t
, 0);
329 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
333 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
337 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
341 return cpu_regs
[reg
];
343 return tcg_temp_new();
347 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
349 return translator_use_goto_tb(&s
->base
, pc
) &&
350 translator_use_goto_tb(&s
->base
, npc
);
353 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
354 target_ulong pc
, target_ulong npc
)
356 if (use_goto_tb(s
, pc
, npc
)) {
357 /* jump to same page: we can use a direct jump */
358 tcg_gen_goto_tb(tb_num
);
359 tcg_gen_movi_tl(cpu_pc
, pc
);
360 tcg_gen_movi_tl(cpu_npc
, npc
);
361 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
363 /* jump to another page: we can use an indirect jump */
364 tcg_gen_movi_tl(cpu_pc
, pc
);
365 tcg_gen_movi_tl(cpu_npc
, npc
);
366 tcg_gen_lookup_and_goto_ptr();
371 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
373 tcg_gen_extu_i32_tl(reg
, src
);
374 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
377 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
379 tcg_gen_extu_i32_tl(reg
, src
);
380 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
383 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
385 tcg_gen_extu_i32_tl(reg
, src
);
386 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
389 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
391 tcg_gen_extu_i32_tl(reg
, src
);
392 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
395 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
397 tcg_gen_mov_tl(cpu_cc_src
, src1
);
398 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
399 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
400 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
403 static TCGv_i32
gen_add32_carry32(void)
405 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
407 /* Carry is computed from a previous add: (dst < src) */
408 #if TARGET_LONG_BITS == 64
409 cc_src1_32
= tcg_temp_new_i32();
410 cc_src2_32
= tcg_temp_new_i32();
411 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
412 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
414 cc_src1_32
= cpu_cc_dst
;
415 cc_src2_32
= cpu_cc_src
;
418 carry_32
= tcg_temp_new_i32();
419 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
424 static TCGv_i32
gen_sub32_carry32(void)
426 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
428 /* Carry is computed from a previous borrow: (src1 < src2) */
429 #if TARGET_LONG_BITS == 64
430 cc_src1_32
= tcg_temp_new_i32();
431 cc_src2_32
= tcg_temp_new_i32();
432 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
433 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
435 cc_src1_32
= cpu_cc_src
;
436 cc_src2_32
= cpu_cc_src2
;
439 carry_32
= tcg_temp_new_i32();
440 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
445 static void gen_op_addc_int(TCGv dst
, TCGv src1
, TCGv src2
,
446 TCGv_i32 carry_32
, bool update_cc
)
448 tcg_gen_add_tl(dst
, src1
, src2
);
450 #ifdef TARGET_SPARC64
451 TCGv carry
= tcg_temp_new();
452 tcg_gen_extu_i32_tl(carry
, carry_32
);
453 tcg_gen_add_tl(dst
, dst
, carry
);
455 tcg_gen_add_i32(dst
, dst
, carry_32
);
459 tcg_debug_assert(dst
== cpu_cc_dst
);
460 tcg_gen_mov_tl(cpu_cc_src
, src1
);
461 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
465 static void gen_op_addc_int_add(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
469 if (TARGET_LONG_BITS
== 64) {
470 gen_op_addc_int(dst
, src1
, src2
, gen_add32_carry32(), update_cc
);
475 * We can re-use the host's hardware carry generation by using
476 * an ADD2 opcode. We discard the low part of the output.
477 * Ideally we'd combine this operation with the add that
478 * generated the carry in the first place.
480 discard
= tcg_temp_new();
481 tcg_gen_add2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
484 tcg_debug_assert(dst
== cpu_cc_dst
);
485 tcg_gen_mov_tl(cpu_cc_src
, src1
);
486 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
490 static void gen_op_addc_add(TCGv dst
, TCGv src1
, TCGv src2
)
492 gen_op_addc_int_add(dst
, src1
, src2
, false);
495 static void gen_op_addccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
497 gen_op_addc_int_add(dst
, src1
, src2
, true);
500 static void gen_op_addc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
502 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), false);
505 static void gen_op_addccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
507 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), true);
510 static void gen_op_addc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
513 TCGv_i32 carry_32
= tcg_temp_new_i32();
514 gen_helper_compute_C_icc(carry_32
, tcg_env
);
515 gen_op_addc_int(dst
, src1
, src2
, carry_32
, update_cc
);
518 static void gen_op_addc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
520 gen_op_addc_int_generic(dst
, src1
, src2
, false);
523 static void gen_op_addccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
525 gen_op_addc_int_generic(dst
, src1
, src2
, true);
528 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
530 tcg_gen_mov_tl(cpu_cc_src
, src1
);
531 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
532 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
533 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
536 static void gen_op_subc_int(TCGv dst
, TCGv src1
, TCGv src2
,
537 TCGv_i32 carry_32
, bool update_cc
)
541 #if TARGET_LONG_BITS == 64
542 carry
= tcg_temp_new();
543 tcg_gen_extu_i32_i64(carry
, carry_32
);
548 tcg_gen_sub_tl(dst
, src1
, src2
);
549 tcg_gen_sub_tl(dst
, dst
, carry
);
552 tcg_debug_assert(dst
== cpu_cc_dst
);
553 tcg_gen_mov_tl(cpu_cc_src
, src1
);
554 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
558 static void gen_op_subc_add(TCGv dst
, TCGv src1
, TCGv src2
)
560 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), false);
563 static void gen_op_subccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
565 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), true);
568 static void gen_op_subc_int_sub(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
572 if (TARGET_LONG_BITS
== 64) {
573 gen_op_subc_int(dst
, src1
, src2
, gen_sub32_carry32(), update_cc
);
578 * We can re-use the host's hardware carry generation by using
579 * a SUB2 opcode. We discard the low part of the output.
581 discard
= tcg_temp_new();
582 tcg_gen_sub2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
585 tcg_debug_assert(dst
== cpu_cc_dst
);
586 tcg_gen_mov_tl(cpu_cc_src
, src1
);
587 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
591 static void gen_op_subc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
593 gen_op_subc_int_sub(dst
, src1
, src2
, false);
596 static void gen_op_subccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
598 gen_op_subc_int_sub(dst
, src1
, src2
, true);
601 static void gen_op_subc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
604 TCGv_i32 carry_32
= tcg_temp_new_i32();
606 gen_helper_compute_C_icc(carry_32
, tcg_env
);
607 gen_op_subc_int(dst
, src1
, src2
, carry_32
, update_cc
);
610 static void gen_op_subc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
612 gen_op_subc_int_generic(dst
, src1
, src2
, false);
615 static void gen_op_subccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
617 gen_op_subc_int_generic(dst
, src1
, src2
, true);
620 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
622 TCGv r_temp
, zero
, t0
;
624 r_temp
= tcg_temp_new();
631 zero
= tcg_constant_tl(0);
632 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
633 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
634 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
635 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
639 // env->y = (b2 << 31) | (env->y >> 1);
640 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
641 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
644 gen_mov_reg_N(t0
, cpu_psr
);
645 gen_mov_reg_V(r_temp
, cpu_psr
);
646 tcg_gen_xor_tl(t0
, t0
, r_temp
);
648 // T0 = (b1 << 31) | (T0 >> 1);
650 tcg_gen_shli_tl(t0
, t0
, 31);
651 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
652 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
654 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
656 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
659 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
661 #if TARGET_LONG_BITS == 32
663 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
665 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
668 TCGv t0
= tcg_temp_new_i64();
669 TCGv t1
= tcg_temp_new_i64();
672 tcg_gen_ext32s_i64(t0
, src1
);
673 tcg_gen_ext32s_i64(t1
, src2
);
675 tcg_gen_ext32u_i64(t0
, src1
);
676 tcg_gen_ext32u_i64(t1
, src2
);
679 tcg_gen_mul_i64(dst
, t0
, t1
);
680 tcg_gen_shri_i64(cpu_y
, dst
, 32);
684 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
686 /* zero-extend truncated operands before multiplication */
687 gen_op_multiply(dst
, src1
, src2
, 0);
690 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
692 /* sign-extend truncated operands before multiplication */
693 gen_op_multiply(dst
, src1
, src2
, 1);
696 static void gen_op_udivx(TCGv dst
, TCGv src1
, TCGv src2
)
698 gen_helper_udivx(dst
, tcg_env
, src1
, src2
);
701 static void gen_op_sdivx(TCGv dst
, TCGv src1
, TCGv src2
)
703 gen_helper_sdivx(dst
, tcg_env
, src1
, src2
);
706 static void gen_op_udiv(TCGv dst
, TCGv src1
, TCGv src2
)
708 gen_helper_udiv(dst
, tcg_env
, src1
, src2
);
711 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
713 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
716 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
718 gen_helper_udiv_cc(dst
, tcg_env
, src1
, src2
);
721 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
723 gen_helper_sdiv_cc(dst
, tcg_env
, src1
, src2
);
726 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
728 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
731 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
733 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
736 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
738 tcg_gen_ctpop_tl(dst
, src2
);
741 #ifndef TARGET_SPARC64
742 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
744 g_assert_not_reached();
748 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
750 gen_helper_array8(dst
, src1
, src2
);
751 tcg_gen_shli_tl(dst
, dst
, 1);
754 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
756 gen_helper_array8(dst
, src1
, src2
);
757 tcg_gen_shli_tl(dst
, dst
, 2);
760 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
762 #ifdef TARGET_SPARC64
763 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
765 g_assert_not_reached();
769 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
771 #ifdef TARGET_SPARC64
776 shift
= tcg_temp_new();
778 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
779 tcg_gen_shli_tl(shift
, shift
, 3);
780 tcg_gen_shl_tl(t1
, s1
, shift
);
783 * A shift of 64 does not produce 0 in TCG. Divide this into a
784 * shift of (up to 63) followed by a constant shift of 1.
786 tcg_gen_xori_tl(shift
, shift
, 63);
787 tcg_gen_shr_tl(t2
, s2
, shift
);
788 tcg_gen_shri_tl(t2
, t2
, 1);
790 tcg_gen_or_tl(dst
, t1
, t2
);
792 g_assert_not_reached();
796 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
798 #ifdef TARGET_SPARC64
799 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
801 g_assert_not_reached();
806 static void gen_op_eval_ba(TCGv dst
)
808 tcg_gen_movi_tl(dst
, 1);
812 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
814 gen_mov_reg_Z(dst
, src
);
818 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
820 TCGv t0
= tcg_temp_new();
821 gen_mov_reg_N(t0
, src
);
822 gen_mov_reg_V(dst
, src
);
823 tcg_gen_xor_tl(dst
, dst
, t0
);
824 gen_mov_reg_Z(t0
, src
);
825 tcg_gen_or_tl(dst
, dst
, t0
);
829 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
831 TCGv t0
= tcg_temp_new();
832 gen_mov_reg_V(t0
, src
);
833 gen_mov_reg_N(dst
, src
);
834 tcg_gen_xor_tl(dst
, dst
, t0
);
838 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
840 TCGv t0
= tcg_temp_new();
841 gen_mov_reg_Z(t0
, src
);
842 gen_mov_reg_C(dst
, src
);
843 tcg_gen_or_tl(dst
, dst
, t0
);
847 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
849 gen_mov_reg_C(dst
, src
);
853 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
855 gen_mov_reg_V(dst
, src
);
859 static void gen_op_eval_bn(TCGv dst
)
861 tcg_gen_movi_tl(dst
, 0);
865 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
867 gen_mov_reg_N(dst
, src
);
871 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
873 gen_mov_reg_Z(dst
, src
);
874 tcg_gen_xori_tl(dst
, dst
, 0x1);
878 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
880 gen_op_eval_ble(dst
, src
);
881 tcg_gen_xori_tl(dst
, dst
, 0x1);
885 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
887 gen_op_eval_bl(dst
, src
);
888 tcg_gen_xori_tl(dst
, dst
, 0x1);
892 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
894 gen_op_eval_bleu(dst
, src
);
895 tcg_gen_xori_tl(dst
, dst
, 0x1);
899 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
901 gen_mov_reg_C(dst
, src
);
902 tcg_gen_xori_tl(dst
, dst
, 0x1);
906 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
908 gen_mov_reg_N(dst
, src
);
909 tcg_gen_xori_tl(dst
, dst
, 0x1);
913 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
915 gen_mov_reg_V(dst
, src
);
916 tcg_gen_xori_tl(dst
, dst
, 0x1);
920 FPSR bit field FCC1 | FCC0:
926 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
927 unsigned int fcc_offset
)
929 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
930 tcg_gen_andi_tl(reg
, reg
, 0x1);
933 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
935 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
936 tcg_gen_andi_tl(reg
, reg
, 0x1);
940 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
942 TCGv t0
= tcg_temp_new();
943 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
944 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
945 tcg_gen_or_tl(dst
, dst
, t0
);
948 // 1 or 2: FCC0 ^ FCC1
949 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
951 TCGv t0
= tcg_temp_new();
952 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
953 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
954 tcg_gen_xor_tl(dst
, dst
, t0
);
958 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
960 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
964 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
966 TCGv t0
= tcg_temp_new();
967 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
968 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
969 tcg_gen_andc_tl(dst
, dst
, t0
);
973 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
975 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
979 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
981 TCGv t0
= tcg_temp_new();
982 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
983 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
984 tcg_gen_andc_tl(dst
, t0
, dst
);
988 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
990 TCGv t0
= tcg_temp_new();
991 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
992 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
993 tcg_gen_and_tl(dst
, dst
, t0
);
997 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
999 TCGv t0
= tcg_temp_new();
1000 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1001 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1002 tcg_gen_or_tl(dst
, dst
, t0
);
1003 tcg_gen_xori_tl(dst
, dst
, 0x1);
1006 // 0 or 3: !(FCC0 ^ FCC1)
1007 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1009 TCGv t0
= tcg_temp_new();
1010 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1011 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1012 tcg_gen_xor_tl(dst
, dst
, t0
);
1013 tcg_gen_xori_tl(dst
, dst
, 0x1);
1017 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1019 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1020 tcg_gen_xori_tl(dst
, dst
, 0x1);
1023 // !1: !(FCC0 & !FCC1)
1024 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1026 TCGv t0
= tcg_temp_new();
1027 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1028 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1029 tcg_gen_andc_tl(dst
, dst
, t0
);
1030 tcg_gen_xori_tl(dst
, dst
, 0x1);
1034 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1036 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
1037 tcg_gen_xori_tl(dst
, dst
, 0x1);
1040 // !2: !(!FCC0 & FCC1)
1041 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1043 TCGv t0
= tcg_temp_new();
1044 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1045 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1046 tcg_gen_andc_tl(dst
, t0
, dst
);
1047 tcg_gen_xori_tl(dst
, dst
, 0x1);
1050 // !3: !(FCC0 & FCC1)
1051 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1053 TCGv t0
= tcg_temp_new();
1054 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1055 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1056 tcg_gen_and_tl(dst
, dst
, t0
);
1057 tcg_gen_xori_tl(dst
, dst
, 0x1);
1060 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
1061 target_ulong pc2
, TCGv r_cond
)
1063 TCGLabel
*l1
= gen_new_label();
1065 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
1067 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
1070 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
1073 static void gen_generic_branch(DisasContext
*dc
)
1075 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
1076 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
1077 TCGv zero
= tcg_constant_tl(0);
1079 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1082 /* call this function before using the condition register as it may
1083 have been set for a jump */
1084 static void flush_cond(DisasContext
*dc
)
1086 if (dc
->npc
== JUMP_PC
) {
1087 gen_generic_branch(dc
);
1088 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1092 static void save_npc(DisasContext
*dc
)
1097 gen_generic_branch(dc
);
1098 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1101 case DYNAMIC_PC_LOOKUP
:
1104 g_assert_not_reached();
1107 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1111 static void update_psr(DisasContext
*dc
)
1113 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1114 dc
->cc_op
= CC_OP_FLAGS
;
1115 gen_helper_compute_psr(tcg_env
);
1119 static void save_state(DisasContext
*dc
)
1121 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1125 static void gen_exception(DisasContext
*dc
, int which
)
1128 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
1129 dc
->base
.is_jmp
= DISAS_NORETURN
;
1132 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
1134 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
1136 e
->next
= dc
->delay_excp_list
;
1137 dc
->delay_excp_list
= e
;
1139 e
->lab
= gen_new_label();
1142 /* Caller must have used flush_cond before branch. */
1143 assert(e
->npc
!= JUMP_PC
);
1149 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
1151 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
1154 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
1156 TCGv t
= tcg_temp_new();
1159 tcg_gen_andi_tl(t
, addr
, mask
);
1162 lab
= delay_exception(dc
, TT_UNALIGNED
);
1163 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
1166 static void gen_mov_pc_npc(DisasContext
*dc
)
1171 gen_generic_branch(dc
);
1172 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1173 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1176 case DYNAMIC_PC_LOOKUP
:
1177 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1181 g_assert_not_reached();
1188 static void gen_op_next_insn(void)
1190 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1191 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1194 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1197 static int subcc_cond
[16] = {
1213 -1, /* no overflow */
1216 static int logic_cond
[16] = {
1218 TCG_COND_EQ
, /* eq: Z */
1219 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1220 TCG_COND_LT
, /* lt: N ^ V -> N */
1221 TCG_COND_EQ
, /* leu: C | Z -> Z */
1222 TCG_COND_NEVER
, /* ltu: C -> 0 */
1223 TCG_COND_LT
, /* neg: N */
1224 TCG_COND_NEVER
, /* vs: V -> 0 */
1226 TCG_COND_NE
, /* ne: !Z */
1227 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1228 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1229 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1230 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1231 TCG_COND_GE
, /* pos: !N */
1232 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1238 #ifdef TARGET_SPARC64
1248 switch (dc
->cc_op
) {
1250 cmp
->cond
= logic_cond
[cond
];
1252 cmp
->is_bool
= false;
1253 cmp
->c2
= tcg_constant_tl(0);
1254 #ifdef TARGET_SPARC64
1256 cmp
->c1
= tcg_temp_new();
1257 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1261 cmp
->c1
= cpu_cc_dst
;
1268 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1269 goto do_compare_dst_0
;
1271 case 7: /* overflow */
1272 case 15: /* !overflow */
1276 cmp
->cond
= subcc_cond
[cond
];
1277 cmp
->is_bool
= false;
1278 #ifdef TARGET_SPARC64
1280 /* Note that sign-extension works for unsigned compares as
1281 long as both operands are sign-extended. */
1282 cmp
->c1
= tcg_temp_new();
1283 cmp
->c2
= tcg_temp_new();
1284 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1285 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1289 cmp
->c1
= cpu_cc_src
;
1290 cmp
->c2
= cpu_cc_src2
;
1297 gen_helper_compute_psr(tcg_env
);
1298 dc
->cc_op
= CC_OP_FLAGS
;
1302 /* We're going to generate a boolean result. */
1303 cmp
->cond
= TCG_COND_NE
;
1304 cmp
->is_bool
= true;
1305 cmp
->c1
= r_dst
= tcg_temp_new();
1306 cmp
->c2
= tcg_constant_tl(0);
1310 gen_op_eval_bn(r_dst
);
1313 gen_op_eval_be(r_dst
, r_src
);
1316 gen_op_eval_ble(r_dst
, r_src
);
1319 gen_op_eval_bl(r_dst
, r_src
);
1322 gen_op_eval_bleu(r_dst
, r_src
);
1325 gen_op_eval_bcs(r_dst
, r_src
);
1328 gen_op_eval_bneg(r_dst
, r_src
);
1331 gen_op_eval_bvs(r_dst
, r_src
);
1334 gen_op_eval_ba(r_dst
);
1337 gen_op_eval_bne(r_dst
, r_src
);
1340 gen_op_eval_bg(r_dst
, r_src
);
1343 gen_op_eval_bge(r_dst
, r_src
);
1346 gen_op_eval_bgu(r_dst
, r_src
);
1349 gen_op_eval_bcc(r_dst
, r_src
);
1352 gen_op_eval_bpos(r_dst
, r_src
);
1355 gen_op_eval_bvc(r_dst
, r_src
);
1362 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1364 unsigned int offset
;
1367 /* For now we still generate a straight boolean result. */
1368 cmp
->cond
= TCG_COND_NE
;
1369 cmp
->is_bool
= true;
1370 cmp
->c1
= r_dst
= tcg_temp_new();
1371 cmp
->c2
= tcg_constant_tl(0);
1391 gen_op_eval_bn(r_dst
);
1394 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1397 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1400 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1403 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1406 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1409 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1412 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1415 gen_op_eval_ba(r_dst
);
1418 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1421 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1424 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1427 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1430 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1433 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1436 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1442 static const TCGCond gen_tcg_cond_reg
[8] = {
1443 TCG_COND_NEVER
, /* reserved */
1447 TCG_COND_NEVER
, /* reserved */
1453 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1455 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1456 cmp
->is_bool
= false;
1458 cmp
->c2
= tcg_constant_tl(0);
1461 static void gen_op_clear_ieee_excp_and_FTT(void)
1463 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1466 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1468 gen_op_clear_ieee_excp_and_FTT();
1469 tcg_gen_mov_i32(dst
, src
);
1472 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1474 gen_op_clear_ieee_excp_and_FTT();
1475 gen_helper_fnegs(dst
, src
);
1478 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1480 gen_op_clear_ieee_excp_and_FTT();
1481 gen_helper_fabss(dst
, src
);
1484 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1486 gen_op_clear_ieee_excp_and_FTT();
1487 tcg_gen_mov_i64(dst
, src
);
1490 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1492 gen_op_clear_ieee_excp_and_FTT();
1493 gen_helper_fnegd(dst
, src
);
1496 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1498 gen_op_clear_ieee_excp_and_FTT();
1499 gen_helper_fabsd(dst
, src
);
1502 #ifdef TARGET_SPARC64
1503 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1507 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1510 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1513 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1516 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1521 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1525 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1528 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1531 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1534 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1539 static void gen_op_fcmpq(int fccno
)
1543 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1546 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1549 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1552 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1557 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1561 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1564 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1567 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1570 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1575 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1579 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1582 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1585 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1588 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1593 static void gen_op_fcmpeq(int fccno
)
1597 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1600 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1603 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1606 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1613 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1615 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1618 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1620 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1623 static void gen_op_fcmpq(int fccno
)
1625 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1628 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1630 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1633 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1635 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1638 static void gen_op_fcmpeq(int fccno
)
1640 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1644 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1646 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1647 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1648 gen_exception(dc
, TT_FP_EXCP
);
1651 static int gen_trap_ifnofpu(DisasContext
*dc
)
1653 #if !defined(CONFIG_USER_ONLY)
1654 if (!dc
->fpu_enabled
) {
1655 gen_exception(dc
, TT_NFPU_INSN
);
1662 #ifdef TARGET_SPARC64
1663 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1664 void (*gen
)(TCGv_ptr
))
1666 gen_op_load_fpr_QT1(QFPREG(rs
));
1670 gen_op_store_QT0_fpr(QFPREG(rd
));
1671 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1675 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1676 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1680 src
= gen_load_fpr_D(dc
, rs
);
1684 gen_op_store_QT0_fpr(QFPREG(rd
));
1685 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1709 * For asi == -1, treat as non-asi.
1710 * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1712 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1714 ASIType type
= GET_ASI_HELPER
;
1715 int mem_idx
= dc
->mem_idx
;
1718 /* Artificial "non-asi" case. */
1719 type
= GET_ASI_DIRECT
;
1723 #ifndef TARGET_SPARC64
1724 /* Before v9, all asis are immediate and privileged. */
1726 gen_exception(dc
, TT_ILL_INSN
);
1727 type
= GET_ASI_EXCP
;
1728 } else if (supervisor(dc
)
1729 /* Note that LEON accepts ASI_USERDATA in user mode, for
1730 use with CASA. Also note that previous versions of
1731 QEMU allowed (and old versions of gcc emitted) ASI_P
1732 for LEON, which is incorrect. */
1733 || (asi
== ASI_USERDATA
1734 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1736 case ASI_USERDATA
: /* User data access */
1737 mem_idx
= MMU_USER_IDX
;
1738 type
= GET_ASI_DIRECT
;
1740 case ASI_KERNELDATA
: /* Supervisor data access */
1741 mem_idx
= MMU_KERNEL_IDX
;
1742 type
= GET_ASI_DIRECT
;
1744 case ASI_M_BYPASS
: /* MMU passthrough */
1745 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1746 mem_idx
= MMU_PHYS_IDX
;
1747 type
= GET_ASI_DIRECT
;
1749 case ASI_M_BCOPY
: /* Block copy, sta access */
1750 mem_idx
= MMU_KERNEL_IDX
;
1751 type
= GET_ASI_BCOPY
;
1753 case ASI_M_BFILL
: /* Block fill, stda access */
1754 mem_idx
= MMU_KERNEL_IDX
;
1755 type
= GET_ASI_BFILL
;
1759 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1760 * permissions check in get_physical_address(..).
1762 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1764 gen_exception(dc
, TT_PRIV_INSN
);
1765 type
= GET_ASI_EXCP
;
1771 /* With v9, all asis below 0x80 are privileged. */
1772 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1773 down that bit into DisasContext. For the moment that's ok,
1774 since the direct implementations below doesn't have any ASIs
1775 in the restricted [0x30, 0x7f] range, and the check will be
1776 done properly in the helper. */
1777 if (!supervisor(dc
) && asi
< 0x80) {
1778 gen_exception(dc
, TT_PRIV_ACT
);
1779 type
= GET_ASI_EXCP
;
1782 case ASI_REAL
: /* Bypass */
1783 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1784 case ASI_REAL_L
: /* Bypass LE */
1785 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1786 case ASI_TWINX_REAL
: /* Real address, twinx */
1787 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1788 case ASI_QUAD_LDD_PHYS
:
1789 case ASI_QUAD_LDD_PHYS_L
:
1790 mem_idx
= MMU_PHYS_IDX
;
1792 case ASI_N
: /* Nucleus */
1793 case ASI_NL
: /* Nucleus LE */
1796 case ASI_NUCLEUS_QUAD_LDD
:
1797 case ASI_NUCLEUS_QUAD_LDD_L
:
1798 if (hypervisor(dc
)) {
1799 mem_idx
= MMU_PHYS_IDX
;
1801 mem_idx
= MMU_NUCLEUS_IDX
;
1804 case ASI_AIUP
: /* As if user primary */
1805 case ASI_AIUPL
: /* As if user primary LE */
1806 case ASI_TWINX_AIUP
:
1807 case ASI_TWINX_AIUP_L
:
1808 case ASI_BLK_AIUP_4V
:
1809 case ASI_BLK_AIUP_L_4V
:
1812 mem_idx
= MMU_USER_IDX
;
1814 case ASI_AIUS
: /* As if user secondary */
1815 case ASI_AIUSL
: /* As if user secondary LE */
1816 case ASI_TWINX_AIUS
:
1817 case ASI_TWINX_AIUS_L
:
1818 case ASI_BLK_AIUS_4V
:
1819 case ASI_BLK_AIUS_L_4V
:
1822 mem_idx
= MMU_USER_SECONDARY_IDX
;
1824 case ASI_S
: /* Secondary */
1825 case ASI_SL
: /* Secondary LE */
1828 case ASI_BLK_COMMIT_S
:
1835 if (mem_idx
== MMU_USER_IDX
) {
1836 mem_idx
= MMU_USER_SECONDARY_IDX
;
1837 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1838 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1841 case ASI_P
: /* Primary */
1842 case ASI_PL
: /* Primary LE */
1845 case ASI_BLK_COMMIT_P
:
1869 type
= GET_ASI_DIRECT
;
1871 case ASI_TWINX_REAL
:
1872 case ASI_TWINX_REAL_L
:
1875 case ASI_TWINX_AIUP
:
1876 case ASI_TWINX_AIUP_L
:
1877 case ASI_TWINX_AIUS
:
1878 case ASI_TWINX_AIUS_L
:
1883 case ASI_QUAD_LDD_PHYS
:
1884 case ASI_QUAD_LDD_PHYS_L
:
1885 case ASI_NUCLEUS_QUAD_LDD
:
1886 case ASI_NUCLEUS_QUAD_LDD_L
:
1887 type
= GET_ASI_DTWINX
;
1889 case ASI_BLK_COMMIT_P
:
1890 case ASI_BLK_COMMIT_S
:
1891 case ASI_BLK_AIUP_4V
:
1892 case ASI_BLK_AIUP_L_4V
:
1895 case ASI_BLK_AIUS_4V
:
1896 case ASI_BLK_AIUS_L_4V
:
1903 type
= GET_ASI_BLOCK
;
1910 type
= GET_ASI_SHORT
;
1917 type
= GET_ASI_SHORT
;
1920 /* The little-endian asis all have bit 3 set. */
1928 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
1931 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1932 static void gen_helper_ld_asi(TCGv_i64 r
, TCGv_env e
, TCGv a
,
1933 TCGv_i32 asi
, TCGv_i32 mop
)
1935 g_assert_not_reached();
1938 static void gen_helper_st_asi(TCGv_env e
, TCGv a
, TCGv_i64 r
,
1939 TCGv_i32 asi
, TCGv_i32 mop
)
1941 g_assert_not_reached();
1945 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1950 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1951 gen_exception(dc
, TT_ILL_INSN
);
1953 case GET_ASI_DIRECT
:
1954 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1958 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1959 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1962 #ifdef TARGET_SPARC64
1963 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1966 TCGv_i64 t64
= tcg_temp_new_i64();
1967 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1968 tcg_gen_trunc_i64_tl(dst
, t64
);
1976 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1982 case GET_ASI_DTWINX
: /* Reserved for stda. */
1983 if (TARGET_LONG_BITS
== 32) {
1984 gen_exception(dc
, TT_ILL_INSN
);
1986 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1987 /* Pre OpenSPARC CPUs don't have these */
1988 gen_exception(dc
, TT_ILL_INSN
);
1991 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1994 case GET_ASI_DIRECT
:
1995 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1999 assert(TARGET_LONG_BITS
== 32);
2000 /* Copy 32 bytes from the address in SRC to ADDR. */
2001 /* ??? The original qemu code suggests 4-byte alignment, dropping
2002 the low bits, but the only place I can see this used is in the
2003 Linux kernel with 32 byte alignment, which would make more sense
2004 as a cacheline-style operation. */
2006 TCGv saddr
= tcg_temp_new();
2007 TCGv daddr
= tcg_temp_new();
2008 TCGv four
= tcg_constant_tl(4);
2009 TCGv_i32 tmp
= tcg_temp_new_i32();
2012 tcg_gen_andi_tl(saddr
, src
, -4);
2013 tcg_gen_andi_tl(daddr
, addr
, -4);
2014 for (i
= 0; i
< 32; i
+= 4) {
2015 /* Since the loads and stores are paired, allow the
2016 copy to happen in the host endianness. */
2017 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
->mem_idx
, MO_UL
);
2018 tcg_gen_qemu_st_i32(tmp
, daddr
, da
->mem_idx
, MO_UL
);
2019 tcg_gen_add_tl(saddr
, saddr
, four
);
2020 tcg_gen_add_tl(daddr
, daddr
, four
);
2027 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2028 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
2031 #ifdef TARGET_SPARC64
2032 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
2035 TCGv_i64 t64
= tcg_temp_new_i64();
2036 tcg_gen_extu_tl_i64(t64
, src
);
2037 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2041 /* A write to a TLB register may alter page maps. End the TB. */
2042 dc
->npc
= DYNAMIC_PC
;
2048 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
2049 TCGv dst
, TCGv src
, TCGv addr
)
2054 case GET_ASI_DIRECT
:
2055 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
2056 da
->mem_idx
, da
->memop
| MO_ALIGN
);
2059 /* ??? Should be DAE_invalid_asi. */
2060 gen_exception(dc
, TT_DATA_ACCESS
);
2065 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
2066 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
2071 case GET_ASI_DIRECT
:
2072 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
2073 da
->mem_idx
, da
->memop
| MO_ALIGN
);
2076 /* ??? Should be DAE_invalid_asi. */
2077 gen_exception(dc
, TT_DATA_ACCESS
);
2082 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
2087 case GET_ASI_DIRECT
:
2088 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
2089 da
->mem_idx
, MO_UB
);
2092 /* ??? In theory, this should be raise DAE_invalid_asi.
2093 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2094 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2095 gen_helper_exit_atomic(tcg_env
);
2097 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2098 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2102 t64
= tcg_temp_new_i64();
2103 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2105 s64
= tcg_constant_i64(0xff);
2106 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2108 tcg_gen_trunc_i64_tl(dst
, t64
);
2111 dc
->npc
= DYNAMIC_PC
;
2117 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
2120 MemOp memop
= da
->memop
;
2121 MemOp size
= memop
& MO_SIZE
;
2126 /* TODO: Use 128-bit load/store below. */
2127 if (size
== MO_128
) {
2128 memop
= (memop
& ~MO_SIZE
) | MO_64
;
2135 case GET_ASI_DIRECT
:
2136 memop
|= MO_ALIGN_4
;
2139 d32
= gen_dest_fpr_F(dc
);
2140 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
2141 gen_store_fpr_F(dc
, rd
, d32
);
2145 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
2149 d64
= tcg_temp_new_i64();
2150 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
2151 addr_tmp
= tcg_temp_new();
2152 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2153 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2154 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2157 g_assert_not_reached();
2162 /* Valid for lddfa on aligned registers only. */
2163 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2164 /* The first operation checks required alignment. */
2165 addr_tmp
= tcg_temp_new();
2166 for (int i
= 0; ; ++i
) {
2167 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2168 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2172 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2176 gen_exception(dc
, TT_ILL_INSN
);
2181 /* Valid for lddfa only. */
2182 if (orig_size
== MO_64
) {
2183 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2186 gen_exception(dc
, TT_ILL_INSN
);
2192 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2193 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2196 /* According to the table in the UA2011 manual, the only
2197 other asis that are valid for ldfa/lddfa/ldqfa are
2198 the NO_FAULT asis. We still need a helper for these,
2199 but we can just use the integer asi helper for them. */
2202 d64
= tcg_temp_new_i64();
2203 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2204 d32
= gen_dest_fpr_F(dc
);
2205 tcg_gen_extrl_i64_i32(d32
, d64
);
2206 gen_store_fpr_F(dc
, rd
, d32
);
2209 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
2213 d64
= tcg_temp_new_i64();
2214 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2215 addr_tmp
= tcg_temp_new();
2216 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2217 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
2219 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2222 g_assert_not_reached();
2229 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
2232 MemOp memop
= da
->memop
;
2233 MemOp size
= memop
& MO_SIZE
;
2237 /* TODO: Use 128-bit load/store below. */
2238 if (size
== MO_128
) {
2239 memop
= (memop
& ~MO_SIZE
) | MO_64
;
2246 case GET_ASI_DIRECT
:
2247 memop
|= MO_ALIGN_4
;
2250 d32
= gen_load_fpr_F(dc
, rd
);
2251 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
2254 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2255 memop
| MO_ALIGN_4
);
2258 /* Only 4-byte alignment required. However, it is legal for the
2259 cpu to signal the alignment fault, and the OS trap handler is
2260 required to fix it up. Requiring 16-byte alignment here avoids
2261 having to probe the second page before performing the first
2263 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2264 memop
| MO_ALIGN_16
);
2265 addr_tmp
= tcg_temp_new();
2266 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2267 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2270 g_assert_not_reached();
2275 /* Valid for stdfa on aligned registers only. */
2276 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2277 /* The first operation checks required alignment. */
2278 addr_tmp
= tcg_temp_new();
2279 for (int i
= 0; ; ++i
) {
2280 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2281 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2285 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2289 gen_exception(dc
, TT_ILL_INSN
);
2294 /* Valid for stdfa only. */
2295 if (orig_size
== MO_64
) {
2296 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2299 gen_exception(dc
, TT_ILL_INSN
);
2304 /* According to the table in the UA2011 manual, the only
2305 other asis that are valid for ldfa/lddfa/ldqfa are
2306 the PST* asis, which aren't currently handled. */
2307 gen_exception(dc
, TT_ILL_INSN
);
2312 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2314 TCGv hi
= gen_dest_gpr(dc
, rd
);
2315 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
2321 case GET_ASI_DTWINX
:
2322 #ifdef TARGET_SPARC64
2324 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2325 TCGv_i128 t
= tcg_temp_new_i128();
2327 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
2329 * Note that LE twinx acts as if each 64-bit register result is
2330 * byte swapped. We perform one 128-bit LE load, so must swap
2331 * the order of the writebacks.
2333 if ((mop
& MO_BSWAP
) == MO_TE
) {
2334 tcg_gen_extr_i128_i64(lo
, hi
, t
);
2336 tcg_gen_extr_i128_i64(hi
, lo
, t
);
2341 g_assert_not_reached();
2344 case GET_ASI_DIRECT
:
2346 TCGv_i64 tmp
= tcg_temp_new_i64();
2348 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2350 /* Note that LE ldda acts as if each 32-bit register
2351 result is byte swapped. Having just performed one
2352 64-bit bswap, we need now to swap the writebacks. */
2353 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2354 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2356 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2362 /* ??? In theory we've handled all of the ASIs that are valid
2363 for ldda, and this should raise DAE_invalid_asi. However,
2364 real hardware allows others. This can be seen with e.g.
2365 FreeBSD 10.3 wrt ASI_IC_TAG. */
2367 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2368 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2369 TCGv_i64 tmp
= tcg_temp_new_i64();
2372 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2375 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2376 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2378 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2384 gen_store_gpr(dc
, rd
, hi
);
2385 gen_store_gpr(dc
, rd
+ 1, lo
);
2388 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2390 TCGv hi
= gen_load_gpr(dc
, rd
);
2391 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2397 case GET_ASI_DTWINX
:
2398 #ifdef TARGET_SPARC64
2400 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2401 TCGv_i128 t
= tcg_temp_new_i128();
2404 * Note that LE twinx acts as if each 64-bit register result is
2405 * byte swapped. We perform one 128-bit LE store, so must swap
2406 * the order of the construction.
2408 if ((mop
& MO_BSWAP
) == MO_TE
) {
2409 tcg_gen_concat_i64_i128(t
, lo
, hi
);
2411 tcg_gen_concat_i64_i128(t
, hi
, lo
);
2413 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
2417 g_assert_not_reached();
2420 case GET_ASI_DIRECT
:
2422 TCGv_i64 t64
= tcg_temp_new_i64();
2424 /* Note that LE stda acts as if each 32-bit register result is
2425 byte swapped. We will perform one 64-bit LE store, so now
2426 we must swap the order of the construction. */
2427 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2428 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2430 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2432 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2437 assert(TARGET_LONG_BITS
== 32);
2438 /* Store 32 bytes of T64 to ADDR. */
2439 /* ??? The original qemu code suggests 8-byte alignment, dropping
2440 the low bits, but the only place I can see this used is in the
2441 Linux kernel with 32 byte alignment, which would make more sense
2442 as a cacheline-style operation. */
2444 TCGv_i64 t64
= tcg_temp_new_i64();
2445 TCGv d_addr
= tcg_temp_new();
2446 TCGv eight
= tcg_constant_tl(8);
2449 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2450 tcg_gen_andi_tl(d_addr
, addr
, -8);
2451 for (i
= 0; i
< 32; i
+= 8) {
2452 tcg_gen_qemu_st_i64(t64
, d_addr
, da
->mem_idx
, da
->memop
);
2453 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2459 /* ??? In theory we've handled all of the ASIs that are valid
2460 for stda, and this should raise DAE_invalid_asi. */
2462 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2463 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2464 TCGv_i64 t64
= tcg_temp_new_i64();
2467 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2468 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2470 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2474 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2480 #ifdef TARGET_SPARC64
2481 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2483 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2484 return gen_load_gpr(dc
, rs1
);
2487 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2489 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2491 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2492 or fold the comparison down to 32 bits and use movcond_i32. Choose
2494 c32
= tcg_temp_new_i32();
2496 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2498 TCGv_i64 c64
= tcg_temp_new_i64();
2499 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2500 tcg_gen_extrl_i64_i32(c32
, c64
);
2503 s1
= gen_load_fpr_F(dc
, rs
);
2504 s2
= gen_load_fpr_F(dc
, rd
);
2505 dst
= gen_dest_fpr_F(dc
);
2506 zero
= tcg_constant_i32(0);
2508 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2510 gen_store_fpr_F(dc
, rd
, dst
);
2513 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2515 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2516 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2517 gen_load_fpr_D(dc
, rs
),
2518 gen_load_fpr_D(dc
, rd
));
2519 gen_store_fpr_D(dc
, rd
, dst
);
2522 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2524 int qd
= QFPREG(rd
);
2525 int qs
= QFPREG(rs
);
2527 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2528 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2529 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2530 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2532 gen_update_fprs_dirty(dc
, qd
);
2535 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2537 TCGv_i32 r_tl
= tcg_temp_new_i32();
2539 /* load env->tl into r_tl */
2540 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2542 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2543 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2545 /* calculate offset to current trap state from env->ts, reuse r_tl */
2546 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2547 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2549 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2551 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2552 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2553 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2558 static int extract_dfpreg(DisasContext
*dc
, int x
)
2563 static int extract_qfpreg(DisasContext
*dc
, int x
)
2568 /* Include the auto-generated decoder. */
2569 #include "decode-insns.c.inc"
2571 #define TRANS(NAME, AVAIL, FUNC, ...) \
2572 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2573 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2575 #define avail_ALL(C) true
2576 #ifdef TARGET_SPARC64
2577 # define avail_32(C) false
2578 # define avail_ASR17(C) false
2579 # define avail_CASA(C) true
2580 # define avail_DIV(C) true
2581 # define avail_MUL(C) true
2582 # define avail_POWERDOWN(C) false
2583 # define avail_64(C) true
2584 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2585 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2586 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2587 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2589 # define avail_32(C) true
2590 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2591 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2592 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2593 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2594 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2595 # define avail_64(C) false
2596 # define avail_GL(C) false
2597 # define avail_HYPV(C) false
2598 # define avail_VIS1(C) false
2599 # define avail_VIS2(C) false
2602 /* Default case for non jump instructions. */
2603 static bool advance_pc(DisasContext
*dc
)
2608 case DYNAMIC_PC_LOOKUP
:
2613 /* we can do a static jump */
2614 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
2615 dc
->base
.is_jmp
= DISAS_NORETURN
;
2618 g_assert_not_reached();
2622 dc
->npc
= dc
->npc
+ 4;
2628 * Major opcodes 00 and 01 -- branches, call, and sethi
2631 static bool advance_jump_uncond_never(DisasContext
*dc
, bool annul
)
2634 dc
->pc
= dc
->npc
+ 4;
2635 dc
->npc
= dc
->pc
+ 4;
2638 dc
->npc
= dc
->pc
+ 4;
2643 static bool advance_jump_uncond_always(DisasContext
*dc
, bool annul
,
2652 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2657 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2658 bool annul
, target_ulong dest
)
2660 target_ulong npc
= dc
->npc
;
2663 TCGLabel
*l1
= gen_new_label();
2665 tcg_gen_brcond_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2666 gen_goto_tb(dc
, 0, npc
, dest
);
2668 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2670 dc
->base
.is_jmp
= DISAS_NORETURN
;
2675 case DYNAMIC_PC_LOOKUP
:
2676 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2677 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2678 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2680 tcg_constant_tl(dest
), cpu_npc
);
2684 g_assert_not_reached();
2688 dc
->jump_pc
[0] = dest
;
2689 dc
->jump_pc
[1] = npc
+ 4;
2692 tcg_gen_mov_tl(cpu_cond
, cmp
->c1
);
2694 tcg_gen_setcond_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2701 static bool raise_priv(DisasContext
*dc
)
2703 gen_exception(dc
, TT_PRIV_INSN
);
2707 static bool raise_unimpfpop(DisasContext
*dc
)
2709 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2713 static bool gen_trap_float128(DisasContext
*dc
)
2715 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2718 return raise_unimpfpop(dc
);
2721 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2723 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2728 return advance_jump_uncond_never(dc
, a
->a
);
2730 return advance_jump_uncond_always(dc
, a
->a
, target
);
2734 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2735 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2739 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2740 TRANS(BPcc
, 64, do_bpcc
, a
)
2742 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2744 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2747 if (gen_trap_ifnofpu(dc
)) {
2752 return advance_jump_uncond_never(dc
, a
->a
);
2754 return advance_jump_uncond_always(dc
, a
->a
, target
);
2758 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2759 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2763 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2764 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2766 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2768 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2771 if (!avail_64(dc
)) {
2774 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
2779 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
2780 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2783 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2785 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2787 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2793 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2796 * For sparc32, always generate the no-coprocessor exception.
2797 * For sparc64, always generate illegal instruction.
2799 #ifdef TARGET_SPARC64
2802 gen_exception(dc
, TT_NCP_INSN
);
2807 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2809 /* Special-case %g0 because that's the canonical nop. */
2811 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2813 return advance_pc(dc
);
2817 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2820 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2821 int rs1
, bool imm
, int rs2_or_imm
)
2823 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2824 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2831 return advance_pc(dc
);
2835 * Immediate traps are the most common case. Since this value is
2836 * live across the branch, it really pays to evaluate the constant.
2838 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2839 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2841 trap
= tcg_temp_new_i32();
2842 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2844 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2846 TCGv_i32 t2
= tcg_temp_new_i32();
2847 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2848 tcg_gen_add_i32(trap
, trap
, t2
);
2850 tcg_gen_andi_i32(trap
, trap
, mask
);
2851 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2857 gen_helper_raise_exception(tcg_env
, trap
);
2858 dc
->base
.is_jmp
= DISAS_NORETURN
;
2862 /* Conditional trap. */
2864 lab
= delay_exceptionv(dc
, trap
);
2865 gen_compare(&cmp
, cc
, cond
, dc
);
2866 tcg_gen_brcond_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2868 return advance_pc(dc
);
2871 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2873 if (avail_32(dc
) && a
->cc
) {
2876 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2879 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2884 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2887 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2892 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2895 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2897 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2898 return advance_pc(dc
);
2901 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2907 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2908 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2911 /* For #Sync, etc, end the TB to recognize interrupts. */
2912 dc
->base
.is_jmp
= DISAS_EXIT
;
2914 return advance_pc(dc
);
2917 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2918 TCGv (*func
)(DisasContext
*, TCGv
))
2921 return raise_priv(dc
);
2923 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2924 return advance_pc(dc
);
2927 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2932 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2935 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2936 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2937 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2939 if (avail_64(dc
) && a
->rs1
!= 0) {
2942 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2945 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2950 * TODO: There are many more fields to be filled,
2951 * some of which are writable.
2953 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
2954 val
|= 1 << 8; /* [8] V8 */
2956 return tcg_constant_tl(val
);
2959 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2961 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2964 gen_helper_rdccr(dst
, tcg_env
);
2968 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2970 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2972 #ifdef TARGET_SPARC64
2973 return tcg_constant_tl(dc
->asi
);
2975 qemu_build_not_reached();
2979 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2981 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2983 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2985 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2986 if (translator_io_start(&dc
->base
)) {
2987 dc
->base
.is_jmp
= DISAS_EXIT
;
2989 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2990 tcg_constant_i32(dc
->mem_idx
));
2994 /* TODO: non-priv access only allowed when enabled. */
2995 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2997 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2999 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
3002 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
3004 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
3006 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
3010 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
3012 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
3014 gen_trap_ifnofpu(dc
);
3018 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
3020 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
3022 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
3026 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
3028 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
3030 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3034 /* TODO: non-priv access only allowed when enabled. */
3035 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
3037 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
3039 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3041 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3042 if (translator_io_start(&dc
->base
)) {
3043 dc
->base
.is_jmp
= DISAS_EXIT
;
3045 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3046 tcg_constant_i32(dc
->mem_idx
));
3050 /* TODO: non-priv access only allowed when enabled. */
3051 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
3053 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
3055 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3059 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3060 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
3063 * UltraSPARC-T1 Strand status.
3064 * HYPV check maybe not enough, UA2005 & UA2007 describe
3065 * this ASR as impl. dep
3067 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
3069 return tcg_constant_tl(1);
3072 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
3074 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
3077 gen_helper_rdpsr(dst
, tcg_env
);
3081 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
3083 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
3085 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
3089 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
3091 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
3093 TCGv_i32 tl
= tcg_temp_new_i32();
3094 TCGv_ptr tp
= tcg_temp_new_ptr();
3096 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3097 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3098 tcg_gen_shli_i32(tl
, tl
, 3);
3099 tcg_gen_ext_i32_ptr(tp
, tl
);
3100 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3102 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
3106 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
3108 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
3110 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
3114 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
3116 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
3118 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
3122 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
3124 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
3126 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
3130 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
3132 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
3134 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3138 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
3141 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
3143 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
3147 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
3149 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
3151 #ifdef TARGET_SPARC64
3152 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3154 gen_load_trap_state_at_tl(r_tsptr
);
3155 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
3158 qemu_build_not_reached();
3162 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
3164 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
3166 #ifdef TARGET_SPARC64
3167 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3169 gen_load_trap_state_at_tl(r_tsptr
);
3170 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
3173 qemu_build_not_reached();
3177 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
3179 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
3181 #ifdef TARGET_SPARC64
3182 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3184 gen_load_trap_state_at_tl(r_tsptr
);
3185 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
3188 qemu_build_not_reached();
3192 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
3194 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
3196 #ifdef TARGET_SPARC64
3197 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3199 gen_load_trap_state_at_tl(r_tsptr
);
3200 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
3203 qemu_build_not_reached();
3207 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
3208 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
3210 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
3215 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3216 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3218 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
3220 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
3224 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
3226 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
3228 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
3232 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
3234 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
3236 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
3240 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
3242 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
3244 gen_helper_rdcwp(dst
, tcg_env
);
3248 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
3250 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
3252 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
3256 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
3258 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
3260 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
3264 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
3267 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3269 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3273 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3275 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3277 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3281 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3283 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3285 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3289 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3291 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3293 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3297 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3299 /* UA2005 strand status */
3300 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3302 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3306 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3308 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3310 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3314 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3316 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3319 gen_helper_flushw(tcg_env
);
3320 return advance_pc(dc
);
3325 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3326 void (*func
)(DisasContext
*, TCGv
))
3330 /* For simplicity, we under-decoded the rs2 form. */
3331 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3335 return raise_priv(dc
);
3338 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3339 src
= tcg_constant_tl(a
->rs2_or_imm
);
3341 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3342 if (a
->rs2_or_imm
== 0) {
3345 src
= tcg_temp_new();
3347 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3349 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3354 return advance_pc(dc
);
3357 static void do_wry(DisasContext
*dc
, TCGv src
)
3359 tcg_gen_ext32u_tl(cpu_y
, src
);
3362 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3364 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3366 gen_helper_wrccr(tcg_env
, src
);
3369 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3371 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3373 TCGv tmp
= tcg_temp_new();
3375 tcg_gen_ext8u_tl(tmp
, src
);
3376 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3377 /* End TB to notice changed ASI. */
3378 dc
->base
.is_jmp
= DISAS_EXIT
;
3381 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3383 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3385 #ifdef TARGET_SPARC64
3386 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3388 dc
->base
.is_jmp
= DISAS_EXIT
;
3390 qemu_build_not_reached();
3394 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3396 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3398 gen_trap_ifnofpu(dc
);
3399 tcg_gen_mov_tl(cpu_gsr
, src
);
3402 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3404 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3406 gen_helper_set_softint(tcg_env
, src
);
3409 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3411 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3413 gen_helper_clear_softint(tcg_env
, src
);
3416 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3418 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3420 gen_helper_write_softint(tcg_env
, src
);
3423 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3425 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3427 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3429 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3430 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3431 translator_io_start(&dc
->base
);
3432 gen_helper_tick_set_limit(r_tickptr
, src
);
3433 /* End TB to handle timer interrupt */
3434 dc
->base
.is_jmp
= DISAS_EXIT
;
3437 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3439 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3441 #ifdef TARGET_SPARC64
3442 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3444 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3445 translator_io_start(&dc
->base
);
3446 gen_helper_tick_set_count(r_tickptr
, src
);
3447 /* End TB to handle timer interrupt */
3448 dc
->base
.is_jmp
= DISAS_EXIT
;
3450 qemu_build_not_reached();
3454 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3456 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3458 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3460 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3461 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3462 translator_io_start(&dc
->base
);
3463 gen_helper_tick_set_limit(r_tickptr
, src
);
3464 /* End TB to handle timer interrupt */
3465 dc
->base
.is_jmp
= DISAS_EXIT
;
3468 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3470 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3473 gen_helper_power_down(tcg_env
);
3476 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3478 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3480 gen_helper_wrpsr(tcg_env
, src
);
3481 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3482 dc
->cc_op
= CC_OP_FLAGS
;
3483 dc
->base
.is_jmp
= DISAS_EXIT
;
3486 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3488 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3490 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3491 TCGv tmp
= tcg_temp_new();
3493 tcg_gen_andi_tl(tmp
, src
, mask
);
3494 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3497 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3499 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3501 #ifdef TARGET_SPARC64
3502 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3504 gen_load_trap_state_at_tl(r_tsptr
);
3505 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3507 qemu_build_not_reached();
3511 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3513 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3515 #ifdef TARGET_SPARC64
3516 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3518 gen_load_trap_state_at_tl(r_tsptr
);
3519 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3521 qemu_build_not_reached();
3525 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3527 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3529 #ifdef TARGET_SPARC64
3530 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3532 gen_load_trap_state_at_tl(r_tsptr
);
3533 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3535 qemu_build_not_reached();
3539 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3541 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3543 #ifdef TARGET_SPARC64
3544 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3546 gen_load_trap_state_at_tl(r_tsptr
);
3547 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3549 qemu_build_not_reached();
3553 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3555 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3557 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3559 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3560 translator_io_start(&dc
->base
);
3561 gen_helper_tick_set_count(r_tickptr
, src
);
3562 /* End TB to handle timer interrupt */
3563 dc
->base
.is_jmp
= DISAS_EXIT
;
3566 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3568 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3570 tcg_gen_mov_tl(cpu_tbr
, src
);
3573 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3575 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3578 if (translator_io_start(&dc
->base
)) {
3579 dc
->base
.is_jmp
= DISAS_EXIT
;
3581 gen_helper_wrpstate(tcg_env
, src
);
3582 dc
->npc
= DYNAMIC_PC
;
3585 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3587 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3590 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3591 dc
->npc
= DYNAMIC_PC
;
3594 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3596 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3598 if (translator_io_start(&dc
->base
)) {
3599 dc
->base
.is_jmp
= DISAS_EXIT
;
3601 gen_helper_wrpil(tcg_env
, src
);
3604 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3606 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3608 gen_helper_wrcwp(tcg_env
, src
);
3611 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3613 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3615 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3618 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3620 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3622 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3625 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3627 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3629 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3632 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3634 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3636 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3639 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3641 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3643 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3646 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3648 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3650 gen_helper_wrgl(tcg_env
, src
);
3653 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3655 /* UA2005 strand status */
3656 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3658 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3661 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3663 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3665 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3667 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3668 dc
->base
.is_jmp
= DISAS_EXIT
;
3671 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3673 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3675 TCGv_i32 tl
= tcg_temp_new_i32();
3676 TCGv_ptr tp
= tcg_temp_new_ptr();
3678 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3679 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3680 tcg_gen_shli_i32(tl
, tl
, 3);
3681 tcg_gen_ext_i32_ptr(tp
, tl
);
3682 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3684 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3687 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3689 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3691 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3694 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3696 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3698 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3701 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3703 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3705 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3707 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3708 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3709 translator_io_start(&dc
->base
);
3710 gen_helper_tick_set_limit(r_tickptr
, src
);
3711 /* End TB to handle timer interrupt */
3712 dc
->base
.is_jmp
= DISAS_EXIT
;
3715 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
3718 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
3720 if (!supervisor(dc
)) {
3721 return raise_priv(dc
);
3724 gen_helper_saved(tcg_env
);
3726 gen_helper_restored(tcg_env
);
3728 return advance_pc(dc
);
3731 TRANS(SAVED
, 64, do_saved_restored
, true)
3732 TRANS(RESTORED
, 64, do_saved_restored
, false)
3734 static bool trans_NOP(DisasContext
*dc
, arg_NOP
*a
)
3736 return advance_pc(dc
);
3740 * TODO: Need a feature bit for sparcv8.
3741 * In the meantime, treat all 32-bit cpus like sparcv7.
3743 TRANS(NOP_v7
, 32, trans_NOP
, a
)
3744 TRANS(NOP_v9
, 64, trans_NOP
, a
)
3746 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
3747 void (*func
)(TCGv
, TCGv
, TCGv
),
3748 void (*funci
)(TCGv
, TCGv
, target_long
))
3752 /* For simplicity, we under-decoded the rs2 form. */
3753 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3760 dst
= gen_dest_gpr(dc
, a
->rd
);
3762 src1
= gen_load_gpr(dc
, a
->rs1
);
3764 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3766 funci(dst
, src1
, a
->rs2_or_imm
);
3768 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
3771 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3773 gen_store_gpr(dc
, a
->rd
, dst
);
3776 tcg_gen_movi_i32(cpu_cc_op
, cc_op
);
3779 return advance_pc(dc
);
3782 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
3783 void (*func
)(TCGv
, TCGv
, TCGv
),
3784 void (*funci
)(TCGv
, TCGv
, target_long
),
3785 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
3789 return do_arith_int(dc
, a
, cc_op
, func_cc
, NULL
);
3791 return do_arith_int(dc
, a
, cc_op
, func
, funci
);
3794 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3795 void (*func
)(TCGv
, TCGv
, TCGv
),
3796 void (*funci
)(TCGv
, TCGv
, target_long
))
3798 return do_arith_int(dc
, a
, CC_OP_LOGIC
, func
, funci
);
3801 TRANS(ADD
, ALL
, do_arith
, a
, CC_OP_ADD
,
3802 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
)
3803 TRANS(SUB
, ALL
, do_arith
, a
, CC_OP_SUB
,
3804 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
)
3806 TRANS(TADDcc
, ALL
, do_arith
, a
, CC_OP_TADD
, NULL
, NULL
, gen_op_add_cc
)
3807 TRANS(TSUBcc
, ALL
, do_arith
, a
, CC_OP_TSUB
, NULL
, NULL
, gen_op_sub_cc
)
3808 TRANS(TADDccTV
, ALL
, do_arith
, a
, CC_OP_TADDTV
, NULL
, NULL
, gen_op_taddcctv
)
3809 TRANS(TSUBccTV
, ALL
, do_arith
, a
, CC_OP_TSUBTV
, NULL
, NULL
, gen_op_tsubcctv
)
3811 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
3812 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
3813 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
3814 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
3815 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
3817 TRANS(MULX
, 64, do_arith
, a
, -1, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
3818 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
3819 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
3821 TRANS(UDIVX
, 64, do_arith
, a
, -1, gen_op_udivx
, NULL
, NULL
)
3822 TRANS(SDIVX
, 64, do_arith
, a
, -1, gen_op_sdivx
, NULL
, NULL
)
3823 TRANS(UDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_udiv
, NULL
, gen_op_udivcc
)
3824 TRANS(SDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
3826 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3827 TRANS(POPC
, 64, do_arith
, a
, -1, gen_op_popc
, NULL
, NULL
)
3829 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3831 /* OR with %g0 is the canonical alias for MOV. */
3832 if (!a
->cc
&& a
->rs1
== 0) {
3833 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3834 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
3835 } else if (a
->rs2_or_imm
& ~0x1f) {
3836 /* For simplicity, we under-decoded the rs2 form. */
3839 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
3841 return advance_pc(dc
);
3843 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
3846 static bool trans_ADDC(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3848 switch (dc
->cc_op
) {
3851 /* Carry is known to be zero. Fall back to plain ADD. */
3852 return do_arith(dc
, a
, CC_OP_ADD
,
3853 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
);
3857 return do_arith(dc
, a
, CC_OP_ADDX
,
3858 gen_op_addc_add
, NULL
, gen_op_addccc_add
);
3862 return do_arith(dc
, a
, CC_OP_ADDX
,
3863 gen_op_addc_sub
, NULL
, gen_op_addccc_sub
);
3865 return do_arith(dc
, a
, CC_OP_ADDX
,
3866 gen_op_addc_generic
, NULL
, gen_op_addccc_generic
);
3870 static bool trans_SUBC(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3872 switch (dc
->cc_op
) {
3875 /* Carry is known to be zero. Fall back to plain SUB. */
3876 return do_arith(dc
, a
, CC_OP_SUB
,
3877 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
);
3881 return do_arith(dc
, a
, CC_OP_SUBX
,
3882 gen_op_subc_add
, NULL
, gen_op_subccc_add
);
3886 return do_arith(dc
, a
, CC_OP_SUBX
,
3887 gen_op_subc_sub
, NULL
, gen_op_subccc_sub
);
3889 return do_arith(dc
, a
, CC_OP_SUBX
,
3890 gen_op_subc_generic
, NULL
, gen_op_subccc_generic
);
3894 static bool trans_MULScc(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3897 return do_arith(dc
, a
, CC_OP_ADD
, NULL
, NULL
, gen_op_mulscc
);
3900 static bool gen_edge(DisasContext
*dc
, arg_r_r_r
*a
,
3901 int width
, bool cc
, bool left
)
3903 TCGv dst
, s1
, s2
, lo1
, lo2
;
3904 uint64_t amask
, tabl
, tabr
;
3905 int shift
, imask
, omask
;
3907 dst
= gen_dest_gpr(dc
, a
->rd
);
3908 s1
= gen_load_gpr(dc
, a
->rs1
);
3909 s2
= gen_load_gpr(dc
, a
->rs2
);
3912 tcg_gen_mov_tl(cpu_cc_src
, s1
);
3913 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
3914 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
3915 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3916 dc
->cc_op
= CC_OP_SUB
;
3920 * Theory of operation: there are two tables, left and right (not to
3921 * be confused with the left and right versions of the opcode). These
3922 * are indexed by the low 3 bits of the inputs. To make things "easy",
3923 * these tables are loaded into two constants, TABL and TABR below.
3924 * The operation index = (input & imask) << shift calculates the index
3925 * into the constant, while val = (table >> index) & omask calculates
3926 * the value we're looking for.
3934 tabl
= 0x80c0e0f0f8fcfeffULL
;
3935 tabr
= 0xff7f3f1f0f070301ULL
;
3937 tabl
= 0x0103070f1f3f7fffULL
;
3938 tabr
= 0xfffefcf8f0e0c080ULL
;
3958 tabl
= (2 << 2) | 3;
3959 tabr
= (3 << 2) | 1;
3961 tabl
= (1 << 2) | 3;
3962 tabr
= (3 << 2) | 2;
3969 lo1
= tcg_temp_new();
3970 lo2
= tcg_temp_new();
3971 tcg_gen_andi_tl(lo1
, s1
, imask
);
3972 tcg_gen_andi_tl(lo2
, s2
, imask
);
3973 tcg_gen_shli_tl(lo1
, lo1
, shift
);
3974 tcg_gen_shli_tl(lo2
, lo2
, shift
);
3976 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
3977 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
3978 tcg_gen_andi_tl(lo1
, lo1
, omask
);
3979 tcg_gen_andi_tl(lo2
, lo2
, omask
);
3981 amask
= address_mask_i(dc
, -8);
3982 tcg_gen_andi_tl(s1
, s1
, amask
);
3983 tcg_gen_andi_tl(s2
, s2
, amask
);
3985 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3986 tcg_gen_and_tl(lo2
, lo2
, lo1
);
3987 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
3989 gen_store_gpr(dc
, a
->rd
, dst
);
3990 return advance_pc(dc
);
3993 TRANS(EDGE8cc
, VIS1
, gen_edge
, a
, 8, 1, 0)
3994 TRANS(EDGE8Lcc
, VIS1
, gen_edge
, a
, 8, 1, 1)
3995 TRANS(EDGE16cc
, VIS1
, gen_edge
, a
, 16, 1, 0)
3996 TRANS(EDGE16Lcc
, VIS1
, gen_edge
, a
, 16, 1, 1)
3997 TRANS(EDGE32cc
, VIS1
, gen_edge
, a
, 32, 1, 0)
3998 TRANS(EDGE32Lcc
, VIS1
, gen_edge
, a
, 32, 1, 1)
4000 TRANS(EDGE8N
, VIS2
, gen_edge
, a
, 8, 0, 0)
4001 TRANS(EDGE8LN
, VIS2
, gen_edge
, a
, 8, 0, 1)
4002 TRANS(EDGE16N
, VIS2
, gen_edge
, a
, 16, 0, 0)
4003 TRANS(EDGE16LN
, VIS2
, gen_edge
, a
, 16, 0, 1)
4004 TRANS(EDGE32N
, VIS2
, gen_edge
, a
, 32, 0, 0)
4005 TRANS(EDGE32LN
, VIS2
, gen_edge
, a
, 32, 0, 1)
4007 static bool do_rrr(DisasContext
*dc
, arg_r_r_r
*a
,
4008 void (*func
)(TCGv
, TCGv
, TCGv
))
4010 TCGv dst
= gen_dest_gpr(dc
, a
->rd
);
4011 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
4012 TCGv src2
= gen_load_gpr(dc
, a
->rs2
);
4014 func(dst
, src1
, src2
);
4015 gen_store_gpr(dc
, a
->rd
, dst
);
4016 return advance_pc(dc
);
4019 TRANS(ARRAY8
, VIS1
, do_rrr
, a
, gen_helper_array8
)
4020 TRANS(ARRAY16
, VIS1
, do_rrr
, a
, gen_op_array16
)
4021 TRANS(ARRAY32
, VIS1
, do_rrr
, a
, gen_op_array32
)
4023 static void gen_op_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
)
4025 #ifdef TARGET_SPARC64
4026 TCGv tmp
= tcg_temp_new();
4028 tcg_gen_add_tl(tmp
, s1
, s2
);
4029 tcg_gen_andi_tl(dst
, tmp
, -8);
4030 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
4032 g_assert_not_reached();
4036 static void gen_op_alignaddrl(TCGv dst
, TCGv s1
, TCGv s2
)
4038 #ifdef TARGET_SPARC64
4039 TCGv tmp
= tcg_temp_new();
4041 tcg_gen_add_tl(tmp
, s1
, s2
);
4042 tcg_gen_andi_tl(dst
, tmp
, -8);
4043 tcg_gen_neg_tl(tmp
, tmp
);
4044 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
4046 g_assert_not_reached();
4050 TRANS(ALIGNADDR
, VIS1
, do_rrr
, a
, gen_op_alignaddr
)
4051 TRANS(ALIGNADDRL
, VIS1
, do_rrr
, a
, gen_op_alignaddrl
)
4053 static void gen_op_bmask(TCGv dst
, TCGv s1
, TCGv s2
)
4055 #ifdef TARGET_SPARC64
4056 tcg_gen_add_tl(dst
, s1
, s2
);
4057 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, dst
, 32, 32);
4059 g_assert_not_reached();
4063 TRANS(BMASK
, VIS2
, do_rrr
, a
, gen_op_bmask
)
4065 static bool do_shift_r(DisasContext
*dc
, arg_shiftr
*a
, bool l
, bool u
)
4067 TCGv dst
, src1
, src2
;
4069 /* Reject 64-bit shifts for sparc32. */
4070 if (avail_32(dc
) && a
->x
) {
4074 src2
= tcg_temp_new();
4075 tcg_gen_andi_tl(src2
, gen_load_gpr(dc
, a
->rs2
), a
->x
? 63 : 31);
4076 src1
= gen_load_gpr(dc
, a
->rs1
);
4077 dst
= gen_dest_gpr(dc
, a
->rd
);
4080 tcg_gen_shl_tl(dst
, src1
, src2
);
4082 tcg_gen_ext32u_tl(dst
, dst
);
4086 tcg_gen_ext32u_tl(dst
, src1
);
4089 tcg_gen_shr_tl(dst
, src1
, src2
);
4092 tcg_gen_ext32s_tl(dst
, src1
);
4095 tcg_gen_sar_tl(dst
, src1
, src2
);
4097 gen_store_gpr(dc
, a
->rd
, dst
);
4098 return advance_pc(dc
);
4101 TRANS(SLL_r
, ALL
, do_shift_r
, a
, true, true)
4102 TRANS(SRL_r
, ALL
, do_shift_r
, a
, false, true)
4103 TRANS(SRA_r
, ALL
, do_shift_r
, a
, false, false)
4105 static bool do_shift_i(DisasContext
*dc
, arg_shifti
*a
, bool l
, bool u
)
4109 /* Reject 64-bit shifts for sparc32. */
4110 if (avail_32(dc
) && (a
->x
|| a
->i
>= 32)) {
4114 src1
= gen_load_gpr(dc
, a
->rs1
);
4115 dst
= gen_dest_gpr(dc
, a
->rd
);
4117 if (avail_32(dc
) || a
->x
) {
4119 tcg_gen_shli_tl(dst
, src1
, a
->i
);
4121 tcg_gen_shri_tl(dst
, src1
, a
->i
);
4123 tcg_gen_sari_tl(dst
, src1
, a
->i
);
4127 tcg_gen_deposit_z_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4129 tcg_gen_extract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4131 tcg_gen_sextract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4134 gen_store_gpr(dc
, a
->rd
, dst
);
4135 return advance_pc(dc
);
4138 TRANS(SLL_i
, ALL
, do_shift_i
, a
, true, true)
4139 TRANS(SRL_i
, ALL
, do_shift_i
, a
, false, true)
4140 TRANS(SRA_i
, ALL
, do_shift_i
, a
, false, false)
4142 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
4144 /* For simplicity, we under-decoded the rs2 form. */
4145 if (!imm
&& rs2_or_imm
& ~0x1f) {
4148 if (imm
|| rs2_or_imm
== 0) {
4149 return tcg_constant_tl(rs2_or_imm
);
4151 return cpu_regs
[rs2_or_imm
];
4155 static bool do_mov_cond(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, TCGv src2
)
4157 TCGv dst
= gen_load_gpr(dc
, rd
);
4159 tcg_gen_movcond_tl(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
, src2
, dst
);
4160 gen_store_gpr(dc
, rd
, dst
);
4161 return advance_pc(dc
);
4164 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
4166 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4172 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4173 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4176 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
4178 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4184 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4185 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4188 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
4190 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4196 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
4197 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4200 static bool do_add_special(DisasContext
*dc
, arg_r_r_ri
*a
,
4201 bool (*func
)(DisasContext
*dc
, int rd
, TCGv src
))
4205 /* For simplicity, we under-decoded the rs2 form. */
4206 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4211 * Always load the sum into a new temporary.
4212 * This is required to capture the value across a window change,
4213 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4215 sum
= tcg_temp_new();
4216 src1
= gen_load_gpr(dc
, a
->rs1
);
4217 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4218 tcg_gen_addi_tl(sum
, src1
, a
->rs2_or_imm
);
4220 tcg_gen_add_tl(sum
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4222 return func(dc
, a
->rd
, sum
);
4225 static bool do_jmpl(DisasContext
*dc
, int rd
, TCGv src
)
4228 * Preserve pc across advance, so that we can delay
4229 * the writeback to rd until after src is consumed.
4231 target_ulong cur_pc
= dc
->pc
;
4233 gen_check_align(dc
, src
, 3);
4236 tcg_gen_mov_tl(cpu_npc
, src
);
4237 gen_address_mask(dc
, cpu_npc
);
4238 gen_store_gpr(dc
, rd
, tcg_constant_tl(cur_pc
));
4240 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4244 TRANS(JMPL
, ALL
, do_add_special
, a
, do_jmpl
)
4246 static bool do_rett(DisasContext
*dc
, int rd
, TCGv src
)
4248 if (!supervisor(dc
)) {
4249 return raise_priv(dc
);
4252 gen_check_align(dc
, src
, 3);
4255 tcg_gen_mov_tl(cpu_npc
, src
);
4256 gen_helper_rett(tcg_env
);
4258 dc
->npc
= DYNAMIC_PC
;
4262 TRANS(RETT
, 32, do_add_special
, a
, do_rett
)
4264 static bool do_return(DisasContext
*dc
, int rd
, TCGv src
)
4266 gen_check_align(dc
, src
, 3);
4269 tcg_gen_mov_tl(cpu_npc
, src
);
4270 gen_address_mask(dc
, cpu_npc
);
4272 gen_helper_restore(tcg_env
);
4273 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4277 TRANS(RETURN
, 64, do_add_special
, a
, do_return
)
4279 static bool do_save(DisasContext
*dc
, int rd
, TCGv src
)
4281 gen_helper_save(tcg_env
);
4282 gen_store_gpr(dc
, rd
, src
);
4283 return advance_pc(dc
);
4286 TRANS(SAVE
, ALL
, do_add_special
, a
, do_save
)
4288 static bool do_restore(DisasContext
*dc
, int rd
, TCGv src
)
4290 gen_helper_restore(tcg_env
);
4291 gen_store_gpr(dc
, rd
, src
);
4292 return advance_pc(dc
);
4295 TRANS(RESTORE
, ALL
, do_add_special
, a
, do_restore
)
4297 static bool do_done_retry(DisasContext
*dc
, bool done
)
4299 if (!supervisor(dc
)) {
4300 return raise_priv(dc
);
4302 dc
->npc
= DYNAMIC_PC
;
4303 dc
->pc
= DYNAMIC_PC
;
4304 translator_io_start(&dc
->base
);
4306 gen_helper_done(tcg_env
);
4308 gen_helper_retry(tcg_env
);
4313 TRANS(DONE
, 64, do_done_retry
, true)
4314 TRANS(RETRY
, 64, do_done_retry
, false)
/*
 * Major opcode 11 -- load and store instructions
 */
4320 static TCGv
gen_ldst_addr(DisasContext
*dc
, int rs1
, bool imm
, int rs2_or_imm
)
4322 TCGv addr
, tmp
= NULL
;
4324 /* For simplicity, we under-decoded the rs2 form. */
4325 if (!imm
&& rs2_or_imm
& ~0x1f) {
4329 addr
= gen_load_gpr(dc
, rs1
);
4331 tmp
= tcg_temp_new();
4333 tcg_gen_addi_tl(tmp
, addr
, rs2_or_imm
);
4335 tcg_gen_add_tl(tmp
, addr
, cpu_regs
[rs2_or_imm
]);
4341 tmp
= tcg_temp_new();
4343 tcg_gen_ext32u_tl(tmp
, addr
);
4349 static bool do_ld_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4351 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4357 da
= resolve_asi(dc
, a
->asi
, mop
);
4359 reg
= gen_dest_gpr(dc
, a
->rd
);
4360 gen_ld_asi(dc
, &da
, reg
, addr
);
4361 gen_store_gpr(dc
, a
->rd
, reg
);
4362 return advance_pc(dc
);
4365 TRANS(LDUW
, ALL
, do_ld_gpr
, a
, MO_TEUL
)
4366 TRANS(LDUB
, ALL
, do_ld_gpr
, a
, MO_UB
)
4367 TRANS(LDUH
, ALL
, do_ld_gpr
, a
, MO_TEUW
)
4368 TRANS(LDSB
, ALL
, do_ld_gpr
, a
, MO_SB
)
4369 TRANS(LDSH
, ALL
, do_ld_gpr
, a
, MO_TESW
)
4370 TRANS(LDSW
, 64, do_ld_gpr
, a
, MO_TESL
)
4371 TRANS(LDX
, 64, do_ld_gpr
, a
, MO_TEUQ
)
4373 static bool do_st_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4375 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4381 da
= resolve_asi(dc
, a
->asi
, mop
);
4383 reg
= gen_load_gpr(dc
, a
->rd
);
4384 gen_st_asi(dc
, &da
, reg
, addr
);
4385 return advance_pc(dc
);
4388 TRANS(STW
, ALL
, do_st_gpr
, a
, MO_TEUL
)
4389 TRANS(STB
, ALL
, do_st_gpr
, a
, MO_UB
)
4390 TRANS(STH
, ALL
, do_st_gpr
, a
, MO_TEUW
)
4391 TRANS(STX
, 64, do_st_gpr
, a
, MO_TEUQ
)
4393 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4401 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4405 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4406 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
4407 return advance_pc(dc
);
4410 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4418 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4422 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4423 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
4424 return advance_pc(dc
);
4427 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4432 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4436 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4438 reg
= gen_dest_gpr(dc
, a
->rd
);
4439 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4440 gen_store_gpr(dc
, a
->rd
, reg
);
4441 return advance_pc(dc
);
4444 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4446 TCGv addr
, dst
, src
;
4449 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4453 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4455 dst
= gen_dest_gpr(dc
, a
->rd
);
4456 src
= gen_load_gpr(dc
, a
->rd
);
4457 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4458 gen_store_gpr(dc
, a
->rd
, dst
);
4459 return advance_pc(dc
);
4462 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4467 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4471 da
= resolve_asi(dc
, a
->asi
, mop
);
4473 o
= gen_dest_gpr(dc
, a
->rd
);
4474 n
= gen_load_gpr(dc
, a
->rd
);
4475 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4476 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4477 gen_store_gpr(dc
, a
->rd
, o
);
4478 return advance_pc(dc
);
4481 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4482 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4484 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4486 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4492 if (gen_trap_ifnofpu(dc
)) {
4495 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4498 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4499 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4500 gen_update_fprs_dirty(dc
, a
->rd
);
4501 return advance_pc(dc
);
4504 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4505 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4506 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4508 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4509 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4510 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4512 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4514 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4520 if (gen_trap_ifnofpu(dc
)) {
4523 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4526 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4527 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4528 return advance_pc(dc
);
4531 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4532 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4533 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4535 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4536 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4537 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4539 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4541 if (!avail_32(dc
)) {
4544 if (!supervisor(dc
)) {
4545 return raise_priv(dc
);
4547 if (gen_trap_ifnofpu(dc
)) {
4550 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4554 static bool do_ldfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
,
4555 target_ulong new_mask
, target_ulong old_mask
)
4557 TCGv tmp
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4561 if (gen_trap_ifnofpu(dc
)) {
4564 tmp
= tcg_temp_new();
4565 tcg_gen_qemu_ld_tl(tmp
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4566 tcg_gen_andi_tl(tmp
, tmp
, new_mask
);
4567 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, old_mask
);
4568 tcg_gen_or_tl(cpu_fsr
, cpu_fsr
, tmp
);
4569 gen_helper_set_fsr(tcg_env
, cpu_fsr
);
4570 return advance_pc(dc
);
4573 TRANS(LDFSR
, ALL
, do_ldfsr
, a
, MO_TEUL
, FSR_LDFSR_MASK
, FSR_LDFSR_OLDMASK
)
4574 TRANS(LDXFSR
, 64, do_ldfsr
, a
, MO_TEUQ
, FSR_LDXFSR_MASK
, FSR_LDXFSR_OLDMASK
)
4576 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4578 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4582 if (gen_trap_ifnofpu(dc
)) {
4585 tcg_gen_qemu_st_tl(cpu_fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4586 return advance_pc(dc
);
4589 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4590 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4592 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4593 void (*func
)(TCGv_i32
, TCGv_i32
))
4597 if (gen_trap_ifnofpu(dc
)) {
4601 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4603 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4604 return advance_pc(dc
);
4607 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4608 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4609 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4610 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4611 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4613 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4614 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4618 if (gen_trap_ifnofpu(dc
)) {
4622 gen_op_clear_ieee_excp_and_FTT();
4623 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4624 func(tmp
, tcg_env
, tmp
);
4625 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4626 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4627 return advance_pc(dc
);
4630 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4631 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4632 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4634 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4635 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4640 if (gen_trap_ifnofpu(dc
)) {
4644 gen_op_clear_ieee_excp_and_FTT();
4645 dst
= gen_dest_fpr_F(dc
);
4646 src
= gen_load_fpr_D(dc
, a
->rs
);
4647 func(dst
, tcg_env
, src
);
4648 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4649 gen_store_fpr_F(dc
, a
->rd
, dst
);
4650 return advance_pc(dc
);
4653 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4654 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4655 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4657 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4658 void (*func
)(TCGv_i64
, TCGv_i64
))
4662 if (gen_trap_ifnofpu(dc
)) {
4666 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4667 src
= gen_load_fpr_D(dc
, a
->rs
);
4669 gen_store_fpr_D(dc
, a
->rd
, dst
);
4670 return advance_pc(dc
);
4673 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4674 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4675 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4676 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4677 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4679 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4680 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4684 if (gen_trap_ifnofpu(dc
)) {
4688 gen_op_clear_ieee_excp_and_FTT();
4689 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4690 src
= gen_load_fpr_D(dc
, a
->rs
);
4691 func(dst
, tcg_env
, src
);
4692 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4693 gen_store_fpr_D(dc
, a
->rd
, dst
);
4694 return advance_pc(dc
);
4697 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4698 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4699 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4701 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4702 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4707 if (gen_trap_ifnofpu(dc
)) {
4711 gen_op_clear_ieee_excp_and_FTT();
4712 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4713 src
= gen_load_fpr_F(dc
, a
->rs
);
4714 func(dst
, tcg_env
, src
);
4715 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4716 gen_store_fpr_D(dc
, a
->rd
, dst
);
4717 return advance_pc(dc
);
4720 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4721 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4722 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4724 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4725 void (*func
)(TCGv_env
))
4727 if (gen_trap_ifnofpu(dc
)) {
4730 if (gen_trap_float128(dc
)) {
4734 gen_op_clear_ieee_excp_and_FTT();
4735 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4737 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4738 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4739 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4740 return advance_pc(dc
);
4743 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4745 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4746 void (*func
)(TCGv_i32
, TCGv_env
))
4750 if (gen_trap_ifnofpu(dc
)) {
4753 if (gen_trap_float128(dc
)) {
4757 gen_op_clear_ieee_excp_and_FTT();
4758 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4759 dst
= gen_dest_fpr_F(dc
);
4761 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4762 gen_store_fpr_F(dc
, a
->rd
, dst
);
4763 return advance_pc(dc
);
4766 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4767 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4769 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4770 void (*func
)(TCGv_i64
, TCGv_env
))
4774 if (gen_trap_ifnofpu(dc
)) {
4777 if (gen_trap_float128(dc
)) {
4781 gen_op_clear_ieee_excp_and_FTT();
4782 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4783 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4785 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4786 gen_store_fpr_D(dc
, a
->rd
, dst
);
4787 return advance_pc(dc
);
4790 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4791 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4793 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4794 void (*func
)(TCGv_env
, TCGv_i32
))
4798 if (gen_trap_ifnofpu(dc
)) {
4801 if (gen_trap_float128(dc
)) {
4805 gen_op_clear_ieee_excp_and_FTT();
4806 src
= gen_load_fpr_F(dc
, a
->rs
);
4808 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4809 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4810 return advance_pc(dc
);
4813 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4814 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4816 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4817 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4819 TCGv_i32 src1
, src2
;
4821 if (gen_trap_ifnofpu(dc
)) {
4825 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4826 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4827 func(src1
, src1
, src2
);
4828 gen_store_fpr_F(dc
, a
->rd
, src1
);
4829 return advance_pc(dc
);
4832 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4833 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4834 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4835 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4836 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4837 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4838 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4839 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4840 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4841 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4842 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4843 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4845 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4846 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4848 TCGv_i32 src1
, src2
;
4850 if (gen_trap_ifnofpu(dc
)) {
4854 gen_op_clear_ieee_excp_and_FTT();
4855 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4856 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4857 func(src1
, tcg_env
, src1
, src2
);
4858 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4859 gen_store_fpr_F(dc
, a
->rd
, src1
);
4860 return advance_pc(dc
);
4863 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4864 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4865 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4866 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4868 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4869 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4871 TCGv_i64 dst
, src1
, src2
;
4873 if (gen_trap_ifnofpu(dc
)) {
4877 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4878 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4879 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4880 func(dst
, src1
, src2
);
4881 gen_store_fpr_D(dc
, a
->rd
, dst
);
4882 return advance_pc(dc
);
4885 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4886 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4887 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4888 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4889 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4890 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4891 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4892 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4893 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4895 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4896 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4897 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4898 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4899 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4900 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4901 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4902 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4903 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4904 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4905 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4906 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4908 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4909 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4910 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4912 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4913 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4915 TCGv_i64 dst
, src1
, src2
;
4917 if (gen_trap_ifnofpu(dc
)) {
4921 gen_op_clear_ieee_excp_and_FTT();
4922 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4923 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4924 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4925 func(dst
, tcg_env
, src1
, src2
);
4926 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4927 gen_store_fpr_D(dc
, a
->rd
, dst
);
4928 return advance_pc(dc
);
4931 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4932 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4933 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4934 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4936 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4939 TCGv_i32 src1
, src2
;
4941 if (gen_trap_ifnofpu(dc
)) {
4944 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4945 return raise_unimpfpop(dc
);
4948 gen_op_clear_ieee_excp_and_FTT();
4949 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4950 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4951 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4952 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4953 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4954 gen_store_fpr_D(dc
, a
->rd
, dst
);
4955 return advance_pc(dc
);
4958 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4959 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4961 TCGv_i64 dst
, src0
, src1
, src2
;
4963 if (gen_trap_ifnofpu(dc
)) {
4967 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4968 src0
= gen_load_fpr_D(dc
, a
->rd
);
4969 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4970 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4971 func(dst
, src0
, src1
, src2
);
4972 gen_store_fpr_D(dc
, a
->rd
, dst
);
4973 return advance_pc(dc
);
4976 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
4978 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
4979 void (*func
)(TCGv_env
))
4981 if (gen_trap_ifnofpu(dc
)) {
4984 if (gen_trap_float128(dc
)) {
4988 gen_op_clear_ieee_excp_and_FTT();
4989 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
4990 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
4992 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4993 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4994 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4995 return advance_pc(dc
);
4998 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
4999 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
5000 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
5001 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
5003 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
5005 TCGv_i64 src1
, src2
;
5007 if (gen_trap_ifnofpu(dc
)) {
5010 if (gen_trap_float128(dc
)) {
5014 gen_op_clear_ieee_excp_and_FTT();
5015 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5016 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5017 gen_helper_fdmulq(tcg_env
, src1
, src2
);
5018 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
5019 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
5020 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
5021 return advance_pc(dc
);
/* Legacy-decoder feature gates: branch to the error labels in
   disas_sparc_legacy when the CPU model lacks FEATURE. */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
5031 /* before an instruction, dc->pc must be static */
5032 static void disas_sparc_legacy(DisasContext
*dc
, unsigned int insn
)
5034 unsigned int opc
, rs1
, rs2
, rd
;
5035 TCGv cpu_src1
__attribute__((unused
));
5036 TCGv_i32 cpu_src1_32
, cpu_src2_32
;
5037 TCGv_i64 cpu_src1_64
, cpu_src2_64
;
5038 TCGv_i32 cpu_dst_32
__attribute__((unused
));
5039 TCGv_i64 cpu_dst_64
__attribute__((unused
));
5041 opc
= GET_FIELD(insn
, 0, 1);
5042 rd
= GET_FIELD(insn
, 2, 6);
5046 goto illegal_insn
; /* in decodetree */
5048 g_assert_not_reached(); /* in decodetree */
5049 case 2: /* FPU & Logical Operations */
5051 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5052 TCGv cpu_dst
__attribute__((unused
)) = tcg_temp_new();
5054 if (xop
== 0x34) { /* FPU Operations */
5055 if (gen_trap_ifnofpu(dc
)) {
5058 gen_op_clear_ieee_excp_and_FTT();
5059 rs1
= GET_FIELD(insn
, 13, 17);
5060 rs2
= GET_FIELD(insn
, 27, 31);
5061 xop
= GET_FIELD(insn
, 18, 26);
5064 case 0x1: /* fmovs */
5065 case 0x5: /* fnegs */
5066 case 0x9: /* fabss */
5067 case 0x2: /* V9 fmovd */
5068 case 0x6: /* V9 fnegd */
5069 case 0xa: /* V9 fabsd */
5070 case 0x29: /* fsqrts */
5071 case 0xc4: /* fitos */
5072 case 0xd1: /* fstoi */
5073 case 0x2a: /* fsqrtd */
5074 case 0x82: /* V9 fdtox */
5075 case 0x88: /* V9 fxtod */
5076 case 0x2b: /* fsqrtq */
5077 case 0x41: /* fadds */
5078 case 0x45: /* fsubs */
5079 case 0x49: /* fmuls */
5080 case 0x4d: /* fdivs */
5081 case 0x42: /* faddd */
5082 case 0x46: /* fsubd */
5083 case 0x4a: /* fmuld */
5084 case 0x4e: /* fdivd */
5085 case 0x43: /* faddq */
5086 case 0x47: /* fsubq */
5087 case 0x4b: /* fmulq */
5088 case 0x4f: /* fdivq */
5089 case 0x69: /* fsmuld */
5090 case 0x6e: /* fdmulq */
5091 case 0xc6: /* fdtos */
5092 case 0xd2: /* fdtoi */
5093 case 0x84: /* V9 fxtos */
5094 case 0xc8: /* fitod */
5095 case 0xc9: /* fstod */
5096 case 0x81: /* V9 fstox */
5097 case 0xc7: /* fqtos */
5098 case 0xd3: /* fqtoi */
5099 case 0xcb: /* fqtod */
5100 case 0x83: /* V9 fqtox */
5101 case 0xcc: /* fitoq */
5102 case 0xcd: /* fstoq */
5103 g_assert_not_reached(); /* in decodetree */
5104 case 0xce: /* fdtoq */
5105 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5106 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
5108 #ifdef TARGET_SPARC64
5109 case 0x3: /* V9 fmovq */
5110 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5111 gen_move_Q(dc
, rd
, rs2
);
5113 case 0x7: /* V9 fnegq */
5114 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5115 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
5117 case 0xb: /* V9 fabsq */
5118 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5119 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
5121 case 0x8c: /* V9 fxtoq */
5122 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5123 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
5129 } else if (xop
== 0x35) { /* FPU Operations */
5130 #ifdef TARGET_SPARC64
5133 if (gen_trap_ifnofpu(dc
)) {
5136 gen_op_clear_ieee_excp_and_FTT();
5137 rs1
= GET_FIELD(insn
, 13, 17);
5138 rs2
= GET_FIELD(insn
, 27, 31);
5139 xop
= GET_FIELD(insn
, 18, 26);
5141 #ifdef TARGET_SPARC64
5145 cond = GET_FIELD_SP(insn, 10, 12); \
5146 cpu_src1 = get_src1(dc, insn); \
5147 gen_compare_reg(&cmp, cond, cpu_src1); \
5148 gen_fmov##sz(dc, &cmp, rd, rs2); \
5151 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
5154 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
5157 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
5158 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5165 #ifdef TARGET_SPARC64
5166 #define FMOVCC(fcc, sz) \
5169 cond = GET_FIELD_SP(insn, 14, 17); \
5170 gen_fcompare(&cmp, fcc, cond); \
5171 gen_fmov##sz(dc, &cmp, rd, rs2); \
5174 case 0x001: /* V9 fmovscc %fcc0 */
5177 case 0x002: /* V9 fmovdcc %fcc0 */
5180 case 0x003: /* V9 fmovqcc %fcc0 */
5181 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5184 case 0x041: /* V9 fmovscc %fcc1 */
5187 case 0x042: /* V9 fmovdcc %fcc1 */
5190 case 0x043: /* V9 fmovqcc %fcc1 */
5191 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5194 case 0x081: /* V9 fmovscc %fcc2 */
5197 case 0x082: /* V9 fmovdcc %fcc2 */
5200 case 0x083: /* V9 fmovqcc %fcc2 */
5201 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5204 case 0x0c1: /* V9 fmovscc %fcc3 */
5207 case 0x0c2: /* V9 fmovdcc %fcc3 */
5210 case 0x0c3: /* V9 fmovqcc %fcc3 */
5211 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5215 #define FMOVCC(xcc, sz) \
5218 cond = GET_FIELD_SP(insn, 14, 17); \
5219 gen_compare(&cmp, xcc, cond, dc); \
5220 gen_fmov##sz(dc, &cmp, rd, rs2); \
5223 case 0x101: /* V9 fmovscc %icc */
5226 case 0x102: /* V9 fmovdcc %icc */
5229 case 0x103: /* V9 fmovqcc %icc */
5230 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5233 case 0x181: /* V9 fmovscc %xcc */
5236 case 0x182: /* V9 fmovdcc %xcc */
5239 case 0x183: /* V9 fmovqcc %xcc */
5240 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5245 case 0x51: /* fcmps, V9 %fcc */
5246 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
5247 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
5248 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
5250 case 0x52: /* fcmpd, V9 %fcc */
5251 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5252 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5253 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
5255 case 0x53: /* fcmpq, V9 %fcc */
5256 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5257 gen_op_load_fpr_QT0(QFPREG(rs1
));
5258 gen_op_load_fpr_QT1(QFPREG(rs2
));
5259 gen_op_fcmpq(rd
& 3);
5261 case 0x55: /* fcmpes, V9 %fcc */
5262 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
5263 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
5264 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
5266 case 0x56: /* fcmped, V9 %fcc */
5267 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5268 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5269 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
5271 case 0x57: /* fcmpeq, V9 %fcc */
5272 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5273 gen_op_load_fpr_QT0(QFPREG(rs1
));
5274 gen_op_load_fpr_QT1(QFPREG(rs2
));
5275 gen_op_fcmpeq(rd
& 3);
5280 } else if (xop
== 0x36) {
5281 #ifdef TARGET_SPARC64
5283 int opf
= GET_FIELD_SP(insn
, 5, 13);
5284 rs1
= GET_FIELD(insn
, 13, 17);
5285 rs2
= GET_FIELD(insn
, 27, 31);
5286 if (gen_trap_ifnofpu(dc
)) {
5291 case 0x000: /* VIS I edge8cc */
5292 case 0x001: /* VIS II edge8n */
5293 case 0x002: /* VIS I edge8lcc */
5294 case 0x003: /* VIS II edge8ln */
5295 case 0x004: /* VIS I edge16cc */
5296 case 0x005: /* VIS II edge16n */
5297 case 0x006: /* VIS I edge16lcc */
5298 case 0x007: /* VIS II edge16ln */
5299 case 0x008: /* VIS I edge32cc */
5300 case 0x009: /* VIS II edge32n */
5301 case 0x00a: /* VIS I edge32lcc */
5302 case 0x00b: /* VIS II edge32ln */
5303 case 0x010: /* VIS I array8 */
5304 case 0x012: /* VIS I array16 */
5305 case 0x014: /* VIS I array32 */
5306 case 0x018: /* VIS I alignaddr */
5307 case 0x01a: /* VIS I alignaddrl */
5308 case 0x019: /* VIS II bmask */
5309 case 0x067: /* VIS I fnot2s */
5310 case 0x06b: /* VIS I fnot1s */
5311 case 0x075: /* VIS I fsrc1s */
5312 case 0x079: /* VIS I fsrc2s */
5313 case 0x066: /* VIS I fnot2 */
5314 case 0x06a: /* VIS I fnot1 */
5315 case 0x074: /* VIS I fsrc1 */
5316 case 0x078: /* VIS I fsrc2 */
5317 case 0x051: /* VIS I fpadd16s */
5318 case 0x053: /* VIS I fpadd32s */
5319 case 0x055: /* VIS I fpsub16s */
5320 case 0x057: /* VIS I fpsub32s */
5321 case 0x063: /* VIS I fnors */
5322 case 0x065: /* VIS I fandnot2s */
5323 case 0x069: /* VIS I fandnot1s */
5324 case 0x06d: /* VIS I fxors */
5325 case 0x06f: /* VIS I fnands */
5326 case 0x071: /* VIS I fands */
5327 case 0x073: /* VIS I fxnors */
5328 case 0x077: /* VIS I fornot2s */
5329 case 0x07b: /* VIS I fornot1s */
5330 case 0x07d: /* VIS I fors */
5331 case 0x050: /* VIS I fpadd16 */
5332 case 0x052: /* VIS I fpadd32 */
5333 case 0x054: /* VIS I fpsub16 */
5334 case 0x056: /* VIS I fpsub32 */
5335 case 0x062: /* VIS I fnor */
5336 case 0x064: /* VIS I fandnot2 */
5337 case 0x068: /* VIS I fandnot1 */
5338 case 0x06c: /* VIS I fxor */
5339 case 0x06e: /* VIS I fnand */
5340 case 0x070: /* VIS I fand */
5341 case 0x072: /* VIS I fxnor */
5342 case 0x076: /* VIS I fornot2 */
5343 case 0x07a: /* VIS I fornot1 */
5344 case 0x07c: /* VIS I for */
5345 case 0x031: /* VIS I fmul8x16 */
5346 case 0x033: /* VIS I fmul8x16au */
5347 case 0x035: /* VIS I fmul8x16al */
5348 case 0x036: /* VIS I fmul8sux16 */
5349 case 0x037: /* VIS I fmul8ulx16 */
5350 case 0x038: /* VIS I fmuld8sux16 */
5351 case 0x039: /* VIS I fmuld8ulx16 */
5352 case 0x04b: /* VIS I fpmerge */
5353 case 0x04d: /* VIS I fexpand */
5354 case 0x03e: /* VIS I pdist */
5355 case 0x03a: /* VIS I fpack32 */
5356 case 0x048: /* VIS I faligndata */
5357 case 0x04c: /* VIS II bshuffle */
5358 g_assert_not_reached(); /* in decodetree */
5359 case 0x020: /* VIS I fcmple16 */
5360 CHECK_FPU_FEATURE(dc
, VIS1
);
5361 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5362 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5363 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5364 gen_store_gpr(dc
, rd
, cpu_dst
);
5366 case 0x022: /* VIS I fcmpne16 */
5367 CHECK_FPU_FEATURE(dc
, VIS1
);
5368 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5369 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5370 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5371 gen_store_gpr(dc
, rd
, cpu_dst
);
5373 case 0x024: /* VIS I fcmple32 */
5374 CHECK_FPU_FEATURE(dc
, VIS1
);
5375 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5376 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5377 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5378 gen_store_gpr(dc
, rd
, cpu_dst
);
5380 case 0x026: /* VIS I fcmpne32 */
5381 CHECK_FPU_FEATURE(dc
, VIS1
);
5382 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5383 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5384 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5385 gen_store_gpr(dc
, rd
, cpu_dst
);
5387 case 0x028: /* VIS I fcmpgt16 */
5388 CHECK_FPU_FEATURE(dc
, VIS1
);
5389 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5390 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5391 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5392 gen_store_gpr(dc
, rd
, cpu_dst
);
5394 case 0x02a: /* VIS I fcmpeq16 */
5395 CHECK_FPU_FEATURE(dc
, VIS1
);
5396 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5397 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5398 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5399 gen_store_gpr(dc
, rd
, cpu_dst
);
5401 case 0x02c: /* VIS I fcmpgt32 */
5402 CHECK_FPU_FEATURE(dc
, VIS1
);
5403 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5404 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5405 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5406 gen_store_gpr(dc
, rd
, cpu_dst
);
5408 case 0x02e: /* VIS I fcmpeq32 */
5409 CHECK_FPU_FEATURE(dc
, VIS1
);
5410 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5411 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5412 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5413 gen_store_gpr(dc
, rd
, cpu_dst
);
5415 case 0x03b: /* VIS I fpack16 */
5416 CHECK_FPU_FEATURE(dc
, VIS1
);
5417 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5418 cpu_dst_32
= gen_dest_fpr_F(dc
);
5419 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5420 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5422 case 0x03d: /* VIS I fpackfix */
5423 CHECK_FPU_FEATURE(dc
, VIS1
);
5424 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5425 cpu_dst_32
= gen_dest_fpr_F(dc
);
5426 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5427 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5429 case 0x060: /* VIS I fzero */
5430 CHECK_FPU_FEATURE(dc
, VIS1
);
5431 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5432 tcg_gen_movi_i64(cpu_dst_64
, 0);
5433 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5435 case 0x061: /* VIS I fzeros */
5436 CHECK_FPU_FEATURE(dc
, VIS1
);
5437 cpu_dst_32
= gen_dest_fpr_F(dc
);
5438 tcg_gen_movi_i32(cpu_dst_32
, 0);
5439 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5441 case 0x07e: /* VIS I fone */
5442 CHECK_FPU_FEATURE(dc
, VIS1
);
5443 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5444 tcg_gen_movi_i64(cpu_dst_64
, -1);
5445 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5447 case 0x07f: /* VIS I fones */
5448 CHECK_FPU_FEATURE(dc
, VIS1
);
5449 cpu_dst_32
= gen_dest_fpr_F(dc
);
5450 tcg_gen_movi_i32(cpu_dst_32
, -1);
5451 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5453 case 0x080: /* VIS I shutdown */
5454 case 0x081: /* VIS II siam */
5462 goto illegal_insn
; /* in decodetree */
5466 case 3: /* load/store instructions */
5467 goto illegal_insn
; /* in decodetree */
5473 gen_exception(dc
, TT_ILL_INSN
);
5476 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
/*
 * Translator hook: initialise the per-TB DisasContext from the
 * TranslationBlock's flags and cs_base before any insn is decoded.
 * NOTE(review): this extract is lossy (several original lines, including
 * braces and #endif markers, are missing); comments only, code untouched.
 */
5480 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5482 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5483 CPUSPARCState
*env
= cpu_env(cs
);
/* First PC comes from the TB; the NPC was stashed in tb->cs_base. */
5486 dc
->pc
= dc
->base
.pc_first
;
5487 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
/* Condition codes start in the "compute dynamically" state. */
5488 dc
->cc_op
= CC_OP_DYNAMIC
;
/* MMU index is carried in the low bits of tb->flags. */
5489 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5490 dc
->def
= &env
->def
;
/* FPU-enabled and 32-bit address-mask state are decoded from tb->flags. */
5491 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5492 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5493 #ifndef CONFIG_USER_ONLY
5494 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5496 #ifdef TARGET_SPARC64
/* Default ASI for alternate-space accesses, taken from tb->flags bits. */
5498 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5499 #ifndef CONFIG_USER_ONLY
5500 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5504 * if we reach a page boundary, we stop generation so that the
5505 * PC of a TT_TFAULT exception is always in the right page
/* Cap max_insns to the number of 4-byte insns left before the end of
   the current page so a TB never crosses a page boundary. */
5507 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5508 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
/*
 * Translator hook invoked at TB start; sparc needs no per-TB setup here.
 * NOTE(review): function body not visible in this extract — confirm it
 * is empty against the full file.
 */
5511 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
/*
 * Translator hook run before each insn: record (pc, npc) in the TCG op
 * stream so state can be restored on a fault.  If the previous insn was
 * a conditional branch whose outcome is still unknown, npc is encoded
 * as jump_pc[0] | JUMP_PC so both candidate targets are recoverable.
 */
5515 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5517 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5518 target_ulong npc
= dc
->npc
;
/* Invariant: jump_pc[1] holds the fall-through address (pc + 4). */
5523 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
/* Tag the recorded npc so the restore path knows it is a jump pair. */
5524 npc
= dc
->jump_pc
[0] | JUMP_PC
;
/* Dynamic npc values already live in cpu_npc; nothing extra to encode. */
5527 case DYNAMIC_PC_LOOKUP
:
5531 g_assert_not_reached();
5534 tcg_gen_insn_start(dc
->pc
, npc
);
/*
 * Translator hook: fetch one 32-bit instruction, try the decodetree
 * decoder first and fall back to the legacy hand-written decoder; stop
 * the TB (DISAS_TOO_MANY) once control flow leaves the sequential path.
 */
5537 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5539 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5540 CPUSPARCState
*env
= cpu_env(cs
);
/* SPARC instructions are fixed-width 32 bits. */
5543 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5544 dc
->base
.pc_next
+= 4;
/* decodetree first; anything it rejects goes to the legacy decoder. */
5546 if (!decode(dc
, insn
)) {
5547 disas_sparc_legacy(dc
, insn
);
/* The insn already terminated the TB (exception or no-return branch). */
5550 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
/* pc diverged from pc_next: a (delayed) branch was taken, so end the
   TB instead of continuing linearly. */
5553 if (dc
->pc
!= dc
->base
.pc_next
) {
5554 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
/*
 * Translator hook at TB end: emit the exit sequence matching how the TB
 * finished (direct goto_tb chaining, TB lookup, or plain exit), then
 * materialise the deferred delay-slot exception stubs whose labels were
 * created during translation.
 * NOTE(review): extract is lossy (some case labels / breaks missing);
 * comments only, code untouched.
 */
5558 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5560 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5561 DisasDelayException
*e
, *e_next
;
5564 switch (dc
->base
.is_jmp
) {
5566 case DISAS_TOO_MANY
:
/* Low bits clear on both pc and npc means neither carries a
   DYNAMIC_PC/JUMP_PC marker, i.e. both are static addresses. */
5567 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5568 /* static PC and NPC: we can use direct chaining */
5569 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5576 case DYNAMIC_PC_LOOKUP
:
5582 g_assert_not_reached();
/* Dynamic exit: write the current pc back to the CPU state. */
5585 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
/* Resolve a pending conditional-branch npc pair into cpu_npc. */
5591 gen_generic_branch(dc
);
5596 case DYNAMIC_PC_LOOKUP
:
5599 g_assert_not_reached();
5602 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
/* pc/npc now valid in the CPU state: try a TB lookup before exiting. */
5605 tcg_gen_lookup_and_goto_ptr();
5607 tcg_gen_exit_tb(NULL
, 0);
5611 case DISAS_NORETURN
:
5617 tcg_gen_exit_tb(NULL
, 0);
5621 g_assert_not_reached();
/* Emit each out-of-line delay-slot exception: set pc (and npc when it
   is a static address), then raise the recorded exception. */
5624 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5625 gen_set_label(e
->lab
);
5627 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
/* npc % 4 != 0 marks a dynamic npc already held in cpu_npc. */
5628 if (e
->npc
% 4 == 0) {
5629 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5631 gen_helper_raise_exception(tcg_env
, e
->excp
);
/*
 * Translator hook for "-d in_asm" logging: print the nearest symbol for
 * the TB's start address, then disassemble the whole TB.
 */
5638 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5639 CPUState
*cpu
, FILE *logfile
)
5641 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5642 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/* Hook table handed to translator_loop() by gen_intermediate_code(). */
5645 static const TranslatorOps sparc_tr_ops
= {
5646 .init_disas_context
= sparc_tr_init_disas_context
,
5647 .tb_start
= sparc_tr_tb_start
,
5648 .insn_start
= sparc_tr_insn_start
,
5649 .translate_insn
= sparc_tr_translate_insn
,
5650 .tb_stop
= sparc_tr_tb_stop
,
5651 .disas_log
= sparc_tr_disas_log
,
/*
 * Target entry point for TB translation: run the generic translator
 * loop with the sparc hook table and a zero-initialised DisasContext.
 */
5654 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5655 target_ulong pc
, void *host_pc
)
5657 DisasContext dc
= {};
5659 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
/*
 * One-time TCG setup: create the global TCGv handles for every CPU
 * register field the translator touches, with debugger-friendly names.
 * NOTE(review): extract is lossy (trailing arguments of several calls
 * are missing lines); comments only, code untouched.
 */
5662 void sparc_tcg_init(void)
/* %g/%o/%l/%i integer register names, indexed 0..31. */
5664 static const char gregnames
[32][4] = {
5665 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5666 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5667 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5668 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
/* FP registers are modelled as 32 x 64-bit even-numbered pairs. */
5670 static const char fregnames
[32][4] = {
5671 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5672 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5673 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5674 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
/* Table-driven creation of 32-bit globals: {handle, env offset, name}. */
5677 static const struct { TCGv_i32
*ptr
; int off
; const char *name
; } r32
[] = {
5678 #ifdef TARGET_SPARC64
5679 { &cpu_xcc
, offsetof(CPUSPARCState
, xcc
), "xcc" },
5680 { &cpu_fprs
, offsetof(CPUSPARCState
, fprs
), "fprs" },
5682 { &cpu_cc_op
, offsetof(CPUSPARCState
, cc_op
), "cc_op" },
5683 { &cpu_psr
, offsetof(CPUSPARCState
, psr
), "psr" },
/* Same, for target_ulong-sized globals. */
5686 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5687 #ifdef TARGET_SPARC64
5688 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5690 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5691 { &cpu_cc_src
, offsetof(CPUSPARCState
, cc_src
), "cc_src" },
5692 { &cpu_cc_src2
, offsetof(CPUSPARCState
, cc_src2
), "cc_src2" },
5693 { &cpu_cc_dst
, offsetof(CPUSPARCState
, cc_dst
), "cc_dst" },
5694 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5695 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5696 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5697 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5698 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
/* regwptr points at the current register window inside env. */
5703 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5704 offsetof(CPUSPARCState
, regwptr
),
/* Instantiate the table-driven globals declared above. */
5707 for (i
= 0; i
< ARRAY_SIZE(r32
); ++i
) {
5708 *r32
[i
].ptr
= tcg_global_mem_new_i32(tcg_env
, r32
[i
].off
, r32
[i
].name
);
5711 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5712 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
/* %g1..%g7 live directly in env->gregs[]; %g0 is hardwired to zero and
   gets no global (loop starts at 1). */
5716 for (i
= 1; i
< 8; ++i
) {
5717 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5718 offsetof(CPUSPARCState
, gregs
[i
]),
/* Windowed %o/%l/%i registers are addressed relative to regwptr. */
5722 for (i
= 8; i
< 32; ++i
) {
5723 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5724 (i
- 8) * sizeof(target_ulong
),
/* Double-precision FP register file. */
5728 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5729 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5730 offsetof(CPUSPARCState
, fpr
[i
]),
5735 void sparc_restore_state_to_opc(CPUState
*cs
,
5736 const TranslationBlock
*tb
,
5737 const uint64_t *data
)
5739 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5740 CPUSPARCState
*env
= &cpu
->env
;
5741 target_ulong pc
= data
[0];
5742 target_ulong npc
= data
[1];
5745 if (npc
== DYNAMIC_PC
) {
5746 /* dynamic NPC: already stored */
5747 } else if (npc
& JUMP_PC
) {
5748 /* jump PC: use 'cond' and the jump targets of the translation */
5750 env
->npc
= npc
& ~3;