/*
   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
66 # define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
84 # define FSR_LDXFSR_MASK 0
85 # define FSR_LDXFSR_OLDMASK 0
89 /* Dynamic PC, must exit to main loop. */
91 /* Dynamic PC, one of two values according to jump_pc[T2]. */
93 /* Dynamic PC, may lookup next TB. */
94 #define DYNAMIC_PC_LOOKUP 3
96 #define DISAS_EXIT DISAS_TARGET_0
98 /* global register indexes */
99 static TCGv_ptr cpu_regwptr
;
100 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
101 static TCGv_i32 cpu_cc_op
;
102 static TCGv_i32 cpu_psr
;
103 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
104 static TCGv cpu_regs
[32];
107 static TCGv cpu_cond
;
108 #ifdef TARGET_SPARC64
109 static TCGv_i32 cpu_xcc
, cpu_fprs
;
112 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
113 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
115 /* Floating point registers */
116 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
/* Offset of field X within CPUSPARCState, for tcg_gen_ld/st. */
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
/* 32-bit-only fields must never be referenced in a 64-bit build. */
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif
127 typedef struct DisasDelayException
{
128 struct DisasDelayException
*next
;
131 /* Saved state at parent insn. */
134 } DisasDelayException
;
136 typedef struct DisasContext
{
137 DisasContextBase base
;
138 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
139 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
140 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
143 bool address_mask_32bit
;
144 #ifndef CONFIG_USER_ONLY
146 #ifdef TARGET_SPARC64
151 uint32_t cc_op
; /* current CC operation */
153 #ifdef TARGET_SPARC64
157 DisasDelayException
*delay_excp_list
;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
/* V9: bit 0 of a double/quad register number selects the upper bank. */
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))
190 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
192 #if defined(TARGET_SPARC64)
193 int bit
= (rd
< 32) ? 1 : 2;
194 /* If we know we've already set this bit within the TB,
195 we can avoid setting it again. */
196 if (!(dc
->fprs_dirty
& bit
)) {
197 dc
->fprs_dirty
|= bit
;
198 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
203 /* floating point registers moves */
204 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
206 TCGv_i32 ret
= tcg_temp_new_i32();
208 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
210 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
215 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
217 TCGv_i64 t
= tcg_temp_new_i64();
219 tcg_gen_extu_i32_i64(t
, v
);
220 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
221 (dst
& 1 ? 0 : 32), 32);
222 gen_update_fprs_dirty(dc
, dst
);
225 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
227 return tcg_temp_new_i32();
230 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
233 return cpu_fpr
[src
/ 2];
236 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
239 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
240 gen_update_fprs_dirty(dc
, dst
);
243 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
245 return cpu_fpr
[DFPREG(dst
) / 2];
248 static void gen_op_load_fpr_QT0(unsigned int src
)
250 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
251 offsetof(CPU_QuadU
, ll
.upper
));
252 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
253 offsetof(CPU_QuadU
, ll
.lower
));
256 static void gen_op_load_fpr_QT1(unsigned int src
)
258 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
259 offsetof(CPU_QuadU
, ll
.upper
));
260 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
261 offsetof(CPU_QuadU
, ll
.lower
));
264 static void gen_op_store_QT0_fpr(unsigned int dst
)
266 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
267 offsetof(CPU_QuadU
, ll
.upper
));
268 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
269 offsetof(CPU_QuadU
, ll
.lower
));
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/* True when 64-bit addresses must be truncated to 32 bits (PSTATE.AM). */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
296 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
299 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
303 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
305 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
308 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
312 return cpu_regs
[reg
];
314 TCGv t
= tcg_temp_new();
315 tcg_gen_movi_tl(t
, 0);
320 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
324 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
328 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
332 return cpu_regs
[reg
];
334 return tcg_temp_new();
338 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
340 return translator_use_goto_tb(&s
->base
, pc
) &&
341 translator_use_goto_tb(&s
->base
, npc
);
344 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
345 target_ulong pc
, target_ulong npc
)
347 if (use_goto_tb(s
, pc
, npc
)) {
348 /* jump to same page: we can use a direct jump */
349 tcg_gen_goto_tb(tb_num
);
350 tcg_gen_movi_tl(cpu_pc
, pc
);
351 tcg_gen_movi_tl(cpu_npc
, npc
);
352 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
354 /* jump to another page: we can use an indirect jump */
355 tcg_gen_movi_tl(cpu_pc
, pc
);
356 tcg_gen_movi_tl(cpu_npc
, npc
);
357 tcg_gen_lookup_and_goto_ptr();
362 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
364 tcg_gen_extu_i32_tl(reg
, src
);
365 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
368 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
370 tcg_gen_extu_i32_tl(reg
, src
);
371 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
374 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
376 tcg_gen_extu_i32_tl(reg
, src
);
377 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
380 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
382 tcg_gen_extu_i32_tl(reg
, src
);
383 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
386 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
388 tcg_gen_mov_tl(cpu_cc_src
, src1
);
389 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
390 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
391 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
394 static TCGv_i32
gen_add32_carry32(void)
396 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
398 /* Carry is computed from a previous add: (dst < src) */
399 #if TARGET_LONG_BITS == 64
400 cc_src1_32
= tcg_temp_new_i32();
401 cc_src2_32
= tcg_temp_new_i32();
402 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
403 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
405 cc_src1_32
= cpu_cc_dst
;
406 cc_src2_32
= cpu_cc_src
;
409 carry_32
= tcg_temp_new_i32();
410 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
415 static TCGv_i32
gen_sub32_carry32(void)
417 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
419 /* Carry is computed from a previous borrow: (src1 < src2) */
420 #if TARGET_LONG_BITS == 64
421 cc_src1_32
= tcg_temp_new_i32();
422 cc_src2_32
= tcg_temp_new_i32();
423 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
424 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
426 cc_src1_32
= cpu_cc_src
;
427 cc_src2_32
= cpu_cc_src2
;
430 carry_32
= tcg_temp_new_i32();
431 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
436 static void gen_op_addc_int(TCGv dst
, TCGv src1
, TCGv src2
,
437 TCGv_i32 carry_32
, bool update_cc
)
439 tcg_gen_add_tl(dst
, src1
, src2
);
441 #ifdef TARGET_SPARC64
442 TCGv carry
= tcg_temp_new();
443 tcg_gen_extu_i32_tl(carry
, carry_32
);
444 tcg_gen_add_tl(dst
, dst
, carry
);
446 tcg_gen_add_i32(dst
, dst
, carry_32
);
450 tcg_debug_assert(dst
== cpu_cc_dst
);
451 tcg_gen_mov_tl(cpu_cc_src
, src1
);
452 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
456 static void gen_op_addc_int_add(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
460 if (TARGET_LONG_BITS
== 64) {
461 gen_op_addc_int(dst
, src1
, src2
, gen_add32_carry32(), update_cc
);
466 * We can re-use the host's hardware carry generation by using
467 * an ADD2 opcode. We discard the low part of the output.
468 * Ideally we'd combine this operation with the add that
469 * generated the carry in the first place.
471 discard
= tcg_temp_new();
472 tcg_gen_add2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
475 tcg_debug_assert(dst
== cpu_cc_dst
);
476 tcg_gen_mov_tl(cpu_cc_src
, src1
);
477 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
481 static void gen_op_addc_add(TCGv dst
, TCGv src1
, TCGv src2
)
483 gen_op_addc_int_add(dst
, src1
, src2
, false);
486 static void gen_op_addccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
488 gen_op_addc_int_add(dst
, src1
, src2
, true);
491 static void gen_op_addc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
493 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), false);
496 static void gen_op_addccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
498 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), true);
501 static void gen_op_addc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
504 TCGv_i32 carry_32
= tcg_temp_new_i32();
505 gen_helper_compute_C_icc(carry_32
, tcg_env
);
506 gen_op_addc_int(dst
, src1
, src2
, carry_32
, update_cc
);
509 static void gen_op_addc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
511 gen_op_addc_int_generic(dst
, src1
, src2
, false);
514 static void gen_op_addccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
516 gen_op_addc_int_generic(dst
, src1
, src2
, true);
519 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
521 tcg_gen_mov_tl(cpu_cc_src
, src1
);
522 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
523 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
524 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
527 static void gen_op_subc_int(TCGv dst
, TCGv src1
, TCGv src2
,
528 TCGv_i32 carry_32
, bool update_cc
)
532 #if TARGET_LONG_BITS == 64
533 carry
= tcg_temp_new();
534 tcg_gen_extu_i32_i64(carry
, carry_32
);
539 tcg_gen_sub_tl(dst
, src1
, src2
);
540 tcg_gen_sub_tl(dst
, dst
, carry
);
543 tcg_debug_assert(dst
== cpu_cc_dst
);
544 tcg_gen_mov_tl(cpu_cc_src
, src1
);
545 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
549 static void gen_op_subc_add(TCGv dst
, TCGv src1
, TCGv src2
)
551 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), false);
554 static void gen_op_subccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
556 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), true);
559 static void gen_op_subc_int_sub(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
563 if (TARGET_LONG_BITS
== 64) {
564 gen_op_subc_int(dst
, src1
, src2
, gen_sub32_carry32(), update_cc
);
569 * We can re-use the host's hardware carry generation by using
570 * a SUB2 opcode. We discard the low part of the output.
572 discard
= tcg_temp_new();
573 tcg_gen_sub2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
576 tcg_debug_assert(dst
== cpu_cc_dst
);
577 tcg_gen_mov_tl(cpu_cc_src
, src1
);
578 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
582 static void gen_op_subc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
584 gen_op_subc_int_sub(dst
, src1
, src2
, false);
587 static void gen_op_subccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
589 gen_op_subc_int_sub(dst
, src1
, src2
, true);
592 static void gen_op_subc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
595 TCGv_i32 carry_32
= tcg_temp_new_i32();
597 gen_helper_compute_C_icc(carry_32
, tcg_env
);
598 gen_op_subc_int(dst
, src1
, src2
, carry_32
, update_cc
);
601 static void gen_op_subc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
603 gen_op_subc_int_generic(dst
, src1
, src2
, false);
606 static void gen_op_subccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
608 gen_op_subc_int_generic(dst
, src1
, src2
, true);
611 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
613 TCGv r_temp
, zero
, t0
;
615 r_temp
= tcg_temp_new();
622 zero
= tcg_constant_tl(0);
623 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
624 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
625 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
626 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
630 // env->y = (b2 << 31) | (env->y >> 1);
631 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
632 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
635 gen_mov_reg_N(t0
, cpu_psr
);
636 gen_mov_reg_V(r_temp
, cpu_psr
);
637 tcg_gen_xor_tl(t0
, t0
, r_temp
);
639 // T0 = (b1 << 31) | (T0 >> 1);
641 tcg_gen_shli_tl(t0
, t0
, 31);
642 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
643 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
645 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
647 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
650 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
652 #if TARGET_LONG_BITS == 32
654 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
656 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
659 TCGv t0
= tcg_temp_new_i64();
660 TCGv t1
= tcg_temp_new_i64();
663 tcg_gen_ext32s_i64(t0
, src1
);
664 tcg_gen_ext32s_i64(t1
, src2
);
666 tcg_gen_ext32u_i64(t0
, src1
);
667 tcg_gen_ext32u_i64(t1
, src2
);
670 tcg_gen_mul_i64(dst
, t0
, t1
);
671 tcg_gen_shri_i64(cpu_y
, dst
, 32);
675 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
677 /* zero-extend truncated operands before multiplication */
678 gen_op_multiply(dst
, src1
, src2
, 0);
681 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
683 /* sign-extend truncated operands before multiplication */
684 gen_op_multiply(dst
, src1
, src2
, 1);
687 static void gen_op_udivx(TCGv dst
, TCGv src1
, TCGv src2
)
689 gen_helper_udivx(dst
, tcg_env
, src1
, src2
);
692 static void gen_op_sdivx(TCGv dst
, TCGv src1
, TCGv src2
)
694 gen_helper_sdivx(dst
, tcg_env
, src1
, src2
);
697 static void gen_op_udiv(TCGv dst
, TCGv src1
, TCGv src2
)
699 gen_helper_udiv(dst
, tcg_env
, src1
, src2
);
702 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
704 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
707 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
709 gen_helper_udiv_cc(dst
, tcg_env
, src1
, src2
);
712 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
714 gen_helper_sdiv_cc(dst
, tcg_env
, src1
, src2
);
717 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
719 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
722 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
724 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
727 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
729 tcg_gen_ctpop_tl(dst
, src2
);
732 #ifndef TARGET_SPARC64
733 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
735 g_assert_not_reached();
739 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
741 gen_helper_array8(dst
, src1
, src2
);
742 tcg_gen_shli_tl(dst
, dst
, 1);
745 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
747 gen_helper_array8(dst
, src1
, src2
);
748 tcg_gen_shli_tl(dst
, dst
, 2);
751 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
753 #ifdef TARGET_SPARC64
754 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
756 g_assert_not_reached();
760 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
762 #ifdef TARGET_SPARC64
767 shift
= tcg_temp_new();
769 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
770 tcg_gen_shli_tl(shift
, shift
, 3);
771 tcg_gen_shl_tl(t1
, s1
, shift
);
774 * A shift of 64 does not produce 0 in TCG. Divide this into a
775 * shift of (up to 63) followed by a constant shift of 1.
777 tcg_gen_xori_tl(shift
, shift
, 63);
778 tcg_gen_shr_tl(t2
, s2
, shift
);
779 tcg_gen_shri_tl(t2
, t2
, 1);
781 tcg_gen_or_tl(dst
, t1
, t2
);
783 g_assert_not_reached();
787 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
789 #ifdef TARGET_SPARC64
790 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
792 g_assert_not_reached();
797 static void gen_op_eval_ba(TCGv dst
)
799 tcg_gen_movi_tl(dst
, 1);
803 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
805 gen_mov_reg_Z(dst
, src
);
809 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
811 TCGv t0
= tcg_temp_new();
812 gen_mov_reg_N(t0
, src
);
813 gen_mov_reg_V(dst
, src
);
814 tcg_gen_xor_tl(dst
, dst
, t0
);
815 gen_mov_reg_Z(t0
, src
);
816 tcg_gen_or_tl(dst
, dst
, t0
);
820 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
822 TCGv t0
= tcg_temp_new();
823 gen_mov_reg_V(t0
, src
);
824 gen_mov_reg_N(dst
, src
);
825 tcg_gen_xor_tl(dst
, dst
, t0
);
829 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
831 TCGv t0
= tcg_temp_new();
832 gen_mov_reg_Z(t0
, src
);
833 gen_mov_reg_C(dst
, src
);
834 tcg_gen_or_tl(dst
, dst
, t0
);
838 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
840 gen_mov_reg_C(dst
, src
);
844 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
846 gen_mov_reg_V(dst
, src
);
850 static void gen_op_eval_bn(TCGv dst
)
852 tcg_gen_movi_tl(dst
, 0);
856 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
858 gen_mov_reg_N(dst
, src
);
862 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
864 gen_mov_reg_Z(dst
, src
);
865 tcg_gen_xori_tl(dst
, dst
, 0x1);
869 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
871 gen_op_eval_ble(dst
, src
);
872 tcg_gen_xori_tl(dst
, dst
, 0x1);
876 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
878 gen_op_eval_bl(dst
, src
);
879 tcg_gen_xori_tl(dst
, dst
, 0x1);
883 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
885 gen_op_eval_bleu(dst
, src
);
886 tcg_gen_xori_tl(dst
, dst
, 0x1);
890 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
892 gen_mov_reg_C(dst
, src
);
893 tcg_gen_xori_tl(dst
, dst
, 0x1);
897 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
899 gen_mov_reg_N(dst
, src
);
900 tcg_gen_xori_tl(dst
, dst
, 0x1);
904 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
906 gen_mov_reg_V(dst
, src
);
907 tcg_gen_xori_tl(dst
, dst
, 0x1);
/* FPSR bit field FCC1 | FCC0: two-bit condition code per FCC unit. */
917 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
918 unsigned int fcc_offset
)
920 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
921 tcg_gen_andi_tl(reg
, reg
, 0x1);
924 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
926 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
927 tcg_gen_andi_tl(reg
, reg
, 0x1);
931 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
933 TCGv t0
= tcg_temp_new();
934 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
935 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
936 tcg_gen_or_tl(dst
, dst
, t0
);
939 // 1 or 2: FCC0 ^ FCC1
940 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
942 TCGv t0
= tcg_temp_new();
943 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
944 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
945 tcg_gen_xor_tl(dst
, dst
, t0
);
949 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
951 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
955 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
957 TCGv t0
= tcg_temp_new();
958 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
959 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
960 tcg_gen_andc_tl(dst
, dst
, t0
);
964 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
966 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
970 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
972 TCGv t0
= tcg_temp_new();
973 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
974 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
975 tcg_gen_andc_tl(dst
, t0
, dst
);
979 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
981 TCGv t0
= tcg_temp_new();
982 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
983 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
984 tcg_gen_and_tl(dst
, dst
, t0
);
988 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
990 TCGv t0
= tcg_temp_new();
991 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
992 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
993 tcg_gen_or_tl(dst
, dst
, t0
);
994 tcg_gen_xori_tl(dst
, dst
, 0x1);
997 // 0 or 3: !(FCC0 ^ FCC1)
998 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1000 TCGv t0
= tcg_temp_new();
1001 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1002 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1003 tcg_gen_xor_tl(dst
, dst
, t0
);
1004 tcg_gen_xori_tl(dst
, dst
, 0x1);
1008 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1010 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1011 tcg_gen_xori_tl(dst
, dst
, 0x1);
1014 // !1: !(FCC0 & !FCC1)
1015 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1017 TCGv t0
= tcg_temp_new();
1018 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1019 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1020 tcg_gen_andc_tl(dst
, dst
, t0
);
1021 tcg_gen_xori_tl(dst
, dst
, 0x1);
1025 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1027 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
1028 tcg_gen_xori_tl(dst
, dst
, 0x1);
1031 // !2: !(!FCC0 & FCC1)
1032 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1034 TCGv t0
= tcg_temp_new();
1035 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1036 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1037 tcg_gen_andc_tl(dst
, t0
, dst
);
1038 tcg_gen_xori_tl(dst
, dst
, 0x1);
1041 // !3: !(FCC0 & FCC1)
1042 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
1044 TCGv t0
= tcg_temp_new();
1045 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
1046 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
1047 tcg_gen_and_tl(dst
, dst
, t0
);
1048 tcg_gen_xori_tl(dst
, dst
, 0x1);
1051 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
1052 target_ulong pc2
, TCGv r_cond
)
1054 TCGLabel
*l1
= gen_new_label();
1056 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
1058 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
1061 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
1064 static void gen_generic_branch(DisasContext
*dc
)
1066 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
1067 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
1068 TCGv zero
= tcg_constant_tl(0);
1070 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1073 /* call this function before using the condition register as it may
1074 have been set for a jump */
1075 static void flush_cond(DisasContext
*dc
)
1077 if (dc
->npc
== JUMP_PC
) {
1078 gen_generic_branch(dc
);
1079 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1083 static void save_npc(DisasContext
*dc
)
1088 gen_generic_branch(dc
);
1089 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1092 case DYNAMIC_PC_LOOKUP
:
1095 g_assert_not_reached();
1098 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1102 static void update_psr(DisasContext
*dc
)
1104 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1105 dc
->cc_op
= CC_OP_FLAGS
;
1106 gen_helper_compute_psr(tcg_env
);
1110 static void save_state(DisasContext
*dc
)
1112 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1116 static void gen_exception(DisasContext
*dc
, int which
)
1119 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
1120 dc
->base
.is_jmp
= DISAS_NORETURN
;
1123 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
1125 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
1127 e
->next
= dc
->delay_excp_list
;
1128 dc
->delay_excp_list
= e
;
1130 e
->lab
= gen_new_label();
1133 /* Caller must have used flush_cond before branch. */
1134 assert(e
->npc
!= JUMP_PC
);
1140 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
1142 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
1145 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
1147 TCGv t
= tcg_temp_new();
1150 tcg_gen_andi_tl(t
, addr
, mask
);
1153 lab
= delay_exception(dc
, TT_UNALIGNED
);
1154 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
1157 static void gen_mov_pc_npc(DisasContext
*dc
)
1162 gen_generic_branch(dc
);
1163 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1164 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1167 case DYNAMIC_PC_LOOKUP
:
1168 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1172 g_assert_not_reached();
1179 static void gen_op_next_insn(void)
1181 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1182 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1185 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1188 static int subcc_cond
[16] = {
1204 -1, /* no overflow */
1207 static int logic_cond
[16] = {
1209 TCG_COND_EQ
, /* eq: Z */
1210 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1211 TCG_COND_LT
, /* lt: N ^ V -> N */
1212 TCG_COND_EQ
, /* leu: C | Z -> Z */
1213 TCG_COND_NEVER
, /* ltu: C -> 0 */
1214 TCG_COND_LT
, /* neg: N */
1215 TCG_COND_NEVER
, /* vs: V -> 0 */
1217 TCG_COND_NE
, /* ne: !Z */
1218 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1219 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1220 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1221 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1222 TCG_COND_GE
, /* pos: !N */
1223 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1229 #ifdef TARGET_SPARC64
1239 switch (dc
->cc_op
) {
1241 cmp
->cond
= logic_cond
[cond
];
1243 cmp
->is_bool
= false;
1244 cmp
->c2
= tcg_constant_tl(0);
1245 #ifdef TARGET_SPARC64
1247 cmp
->c1
= tcg_temp_new();
1248 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1252 cmp
->c1
= cpu_cc_dst
;
1259 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1260 goto do_compare_dst_0
;
1262 case 7: /* overflow */
1263 case 15: /* !overflow */
1267 cmp
->cond
= subcc_cond
[cond
];
1268 cmp
->is_bool
= false;
1269 #ifdef TARGET_SPARC64
1271 /* Note that sign-extension works for unsigned compares as
1272 long as both operands are sign-extended. */
1273 cmp
->c1
= tcg_temp_new();
1274 cmp
->c2
= tcg_temp_new();
1275 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1276 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1280 cmp
->c1
= cpu_cc_src
;
1281 cmp
->c2
= cpu_cc_src2
;
1288 gen_helper_compute_psr(tcg_env
);
1289 dc
->cc_op
= CC_OP_FLAGS
;
1293 /* We're going to generate a boolean result. */
1294 cmp
->cond
= TCG_COND_NE
;
1295 cmp
->is_bool
= true;
1296 cmp
->c1
= r_dst
= tcg_temp_new();
1297 cmp
->c2
= tcg_constant_tl(0);
1301 gen_op_eval_bn(r_dst
);
1304 gen_op_eval_be(r_dst
, r_src
);
1307 gen_op_eval_ble(r_dst
, r_src
);
1310 gen_op_eval_bl(r_dst
, r_src
);
1313 gen_op_eval_bleu(r_dst
, r_src
);
1316 gen_op_eval_bcs(r_dst
, r_src
);
1319 gen_op_eval_bneg(r_dst
, r_src
);
1322 gen_op_eval_bvs(r_dst
, r_src
);
1325 gen_op_eval_ba(r_dst
);
1328 gen_op_eval_bne(r_dst
, r_src
);
1331 gen_op_eval_bg(r_dst
, r_src
);
1334 gen_op_eval_bge(r_dst
, r_src
);
1337 gen_op_eval_bgu(r_dst
, r_src
);
1340 gen_op_eval_bcc(r_dst
, r_src
);
1343 gen_op_eval_bpos(r_dst
, r_src
);
1346 gen_op_eval_bvc(r_dst
, r_src
);
1353 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1355 unsigned int offset
;
1358 /* For now we still generate a straight boolean result. */
1359 cmp
->cond
= TCG_COND_NE
;
1360 cmp
->is_bool
= true;
1361 cmp
->c1
= r_dst
= tcg_temp_new();
1362 cmp
->c2
= tcg_constant_tl(0);
1382 gen_op_eval_bn(r_dst
);
1385 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1388 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1391 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1394 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1397 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1400 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1403 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1406 gen_op_eval_ba(r_dst
);
1409 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1412 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1415 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1418 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1421 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1424 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1427 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1433 static const TCGCond gen_tcg_cond_reg
[8] = {
1434 TCG_COND_NEVER
, /* reserved */
1438 TCG_COND_NEVER
, /* reserved */
1444 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1446 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1447 cmp
->is_bool
= false;
1449 cmp
->c2
= tcg_constant_tl(0);
1452 static void gen_op_clear_ieee_excp_and_FTT(void)
1454 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1457 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1459 gen_op_clear_ieee_excp_and_FTT();
1460 tcg_gen_mov_i32(dst
, src
);
1463 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1465 gen_op_clear_ieee_excp_and_FTT();
1466 gen_helper_fnegs(dst
, src
);
1469 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1471 gen_op_clear_ieee_excp_and_FTT();
1472 gen_helper_fabss(dst
, src
);
1475 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1477 gen_op_clear_ieee_excp_and_FTT();
1478 tcg_gen_mov_i64(dst
, src
);
1481 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1483 gen_op_clear_ieee_excp_and_FTT();
1484 gen_helper_fnegd(dst
, src
);
1487 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1489 gen_op_clear_ieee_excp_and_FTT();
1490 gen_helper_fabsd(dst
, src
);
1493 #ifdef TARGET_SPARC64
1494 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1498 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1501 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1504 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1507 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1512 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1516 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1519 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1522 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1525 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1530 static void gen_op_fcmpq(int fccno
)
1534 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1537 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1540 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1543 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1548 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1552 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1555 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1558 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1561 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1566 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1570 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1573 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1576 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1579 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1584 static void gen_op_fcmpeq(int fccno
)
1588 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1591 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1594 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1597 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1604 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1606 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1609 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1611 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1614 static void gen_op_fcmpq(int fccno
)
1616 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1619 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1621 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1624 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1626 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1629 static void gen_op_fcmpeq(int fccno
)
1631 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1635 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1637 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1638 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1639 gen_exception(dc
, TT_FP_EXCP
);
1642 static int gen_trap_ifnofpu(DisasContext
*dc
)
1644 #if !defined(CONFIG_USER_ONLY)
1645 if (!dc
->fpu_enabled
) {
1646 gen_exception(dc
, TT_NFPU_INSN
);
1674 * For asi == -1, treat as non-asi.
1675 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1677 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1679 ASIType type
= GET_ASI_HELPER
;
1680 int mem_idx
= dc
->mem_idx
;
1683 /* Artificial "non-asi" case. */
1684 type
= GET_ASI_DIRECT
;
1688 #ifndef TARGET_SPARC64
1689 /* Before v9, all asis are immediate and privileged. */
1691 gen_exception(dc
, TT_ILL_INSN
);
1692 type
= GET_ASI_EXCP
;
1693 } else if (supervisor(dc
)
1694 /* Note that LEON accepts ASI_USERDATA in user mode, for
1695 use with CASA. Also note that previous versions of
1696 QEMU allowed (and old versions of gcc emitted) ASI_P
1697 for LEON, which is incorrect. */
1698 || (asi
== ASI_USERDATA
1699 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1701 case ASI_USERDATA
: /* User data access */
1702 mem_idx
= MMU_USER_IDX
;
1703 type
= GET_ASI_DIRECT
;
1705 case ASI_KERNELDATA
: /* Supervisor data access */
1706 mem_idx
= MMU_KERNEL_IDX
;
1707 type
= GET_ASI_DIRECT
;
1709 case ASI_M_BYPASS
: /* MMU passthrough */
1710 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1711 mem_idx
= MMU_PHYS_IDX
;
1712 type
= GET_ASI_DIRECT
;
1714 case ASI_M_BCOPY
: /* Block copy, sta access */
1715 mem_idx
= MMU_KERNEL_IDX
;
1716 type
= GET_ASI_BCOPY
;
1718 case ASI_M_BFILL
: /* Block fill, stda access */
1719 mem_idx
= MMU_KERNEL_IDX
;
1720 type
= GET_ASI_BFILL
;
1724 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1725 * permissions check in get_physical_address(..).
1727 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1729 gen_exception(dc
, TT_PRIV_INSN
);
1730 type
= GET_ASI_EXCP
;
1736 /* With v9, all asis below 0x80 are privileged. */
1737 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1738 down that bit into DisasContext. For the moment that's ok,
1739 since the direct implementations below doesn't have any ASIs
1740 in the restricted [0x30, 0x7f] range, and the check will be
1741 done properly in the helper. */
1742 if (!supervisor(dc
) && asi
< 0x80) {
1743 gen_exception(dc
, TT_PRIV_ACT
);
1744 type
= GET_ASI_EXCP
;
1747 case ASI_REAL
: /* Bypass */
1748 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1749 case ASI_REAL_L
: /* Bypass LE */
1750 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1751 case ASI_TWINX_REAL
: /* Real address, twinx */
1752 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1753 case ASI_QUAD_LDD_PHYS
:
1754 case ASI_QUAD_LDD_PHYS_L
:
1755 mem_idx
= MMU_PHYS_IDX
;
1757 case ASI_N
: /* Nucleus */
1758 case ASI_NL
: /* Nucleus LE */
1761 case ASI_NUCLEUS_QUAD_LDD
:
1762 case ASI_NUCLEUS_QUAD_LDD_L
:
1763 if (hypervisor(dc
)) {
1764 mem_idx
= MMU_PHYS_IDX
;
1766 mem_idx
= MMU_NUCLEUS_IDX
;
1769 case ASI_AIUP
: /* As if user primary */
1770 case ASI_AIUPL
: /* As if user primary LE */
1771 case ASI_TWINX_AIUP
:
1772 case ASI_TWINX_AIUP_L
:
1773 case ASI_BLK_AIUP_4V
:
1774 case ASI_BLK_AIUP_L_4V
:
1777 mem_idx
= MMU_USER_IDX
;
1779 case ASI_AIUS
: /* As if user secondary */
1780 case ASI_AIUSL
: /* As if user secondary LE */
1781 case ASI_TWINX_AIUS
:
1782 case ASI_TWINX_AIUS_L
:
1783 case ASI_BLK_AIUS_4V
:
1784 case ASI_BLK_AIUS_L_4V
:
1787 mem_idx
= MMU_USER_SECONDARY_IDX
;
1789 case ASI_S
: /* Secondary */
1790 case ASI_SL
: /* Secondary LE */
1793 case ASI_BLK_COMMIT_S
:
1800 if (mem_idx
== MMU_USER_IDX
) {
1801 mem_idx
= MMU_USER_SECONDARY_IDX
;
1802 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1803 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1806 case ASI_P
: /* Primary */
1807 case ASI_PL
: /* Primary LE */
1810 case ASI_BLK_COMMIT_P
:
1834 type
= GET_ASI_DIRECT
;
1836 case ASI_TWINX_REAL
:
1837 case ASI_TWINX_REAL_L
:
1840 case ASI_TWINX_AIUP
:
1841 case ASI_TWINX_AIUP_L
:
1842 case ASI_TWINX_AIUS
:
1843 case ASI_TWINX_AIUS_L
:
1848 case ASI_QUAD_LDD_PHYS
:
1849 case ASI_QUAD_LDD_PHYS_L
:
1850 case ASI_NUCLEUS_QUAD_LDD
:
1851 case ASI_NUCLEUS_QUAD_LDD_L
:
1852 type
= GET_ASI_DTWINX
;
1854 case ASI_BLK_COMMIT_P
:
1855 case ASI_BLK_COMMIT_S
:
1856 case ASI_BLK_AIUP_4V
:
1857 case ASI_BLK_AIUP_L_4V
:
1860 case ASI_BLK_AIUS_4V
:
1861 case ASI_BLK_AIUS_L_4V
:
1868 type
= GET_ASI_BLOCK
;
1875 type
= GET_ASI_SHORT
;
1882 type
= GET_ASI_SHORT
;
1885 /* The little-endian asis all have bit 3 set. */
1893 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * The out-of-line ASI helpers do not exist for 32-bit user-only builds;
 * provide unreachable stubs so the call sites below still compile.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1910 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1915 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1916 gen_exception(dc
, TT_ILL_INSN
);
1918 case GET_ASI_DIRECT
:
1919 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1923 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1924 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1927 #ifdef TARGET_SPARC64
1928 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1931 TCGv_i64 t64
= tcg_temp_new_i64();
1932 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1933 tcg_gen_trunc_i64_tl(dst
, t64
);
1941 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1947 case GET_ASI_DTWINX
: /* Reserved for stda. */
1948 if (TARGET_LONG_BITS
== 32) {
1949 gen_exception(dc
, TT_ILL_INSN
);
1951 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1952 /* Pre OpenSPARC CPUs don't have these */
1953 gen_exception(dc
, TT_ILL_INSN
);
1956 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1959 case GET_ASI_DIRECT
:
1960 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1964 assert(TARGET_LONG_BITS
== 32);
1965 /* Copy 32 bytes from the address in SRC to ADDR. */
1966 /* ??? The original qemu code suggests 4-byte alignment, dropping
1967 the low bits, but the only place I can see this used is in the
1968 Linux kernel with 32 byte alignment, which would make more sense
1969 as a cacheline-style operation. */
1971 TCGv saddr
= tcg_temp_new();
1972 TCGv daddr
= tcg_temp_new();
1973 TCGv four
= tcg_constant_tl(4);
1974 TCGv_i32 tmp
= tcg_temp_new_i32();
1977 tcg_gen_andi_tl(saddr
, src
, -4);
1978 tcg_gen_andi_tl(daddr
, addr
, -4);
1979 for (i
= 0; i
< 32; i
+= 4) {
1980 /* Since the loads and stores are paired, allow the
1981 copy to happen in the host endianness. */
1982 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
->mem_idx
, MO_UL
);
1983 tcg_gen_qemu_st_i32(tmp
, daddr
, da
->mem_idx
, MO_UL
);
1984 tcg_gen_add_tl(saddr
, saddr
, four
);
1985 tcg_gen_add_tl(daddr
, daddr
, four
);
1992 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1993 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1996 #ifdef TARGET_SPARC64
1997 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
2000 TCGv_i64 t64
= tcg_temp_new_i64();
2001 tcg_gen_extu_tl_i64(t64
, src
);
2002 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2006 /* A write to a TLB register may alter page maps. End the TB. */
2007 dc
->npc
= DYNAMIC_PC
;
2013 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
2014 TCGv dst
, TCGv src
, TCGv addr
)
2019 case GET_ASI_DIRECT
:
2020 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
2021 da
->mem_idx
, da
->memop
| MO_ALIGN
);
2024 /* ??? Should be DAE_invalid_asi. */
2025 gen_exception(dc
, TT_DATA_ACCESS
);
2030 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
2031 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
2036 case GET_ASI_DIRECT
:
2037 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
2038 da
->mem_idx
, da
->memop
| MO_ALIGN
);
2041 /* ??? Should be DAE_invalid_asi. */
2042 gen_exception(dc
, TT_DATA_ACCESS
);
2047 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
2052 case GET_ASI_DIRECT
:
2053 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
2054 da
->mem_idx
, MO_UB
);
2057 /* ??? In theory, this should be raise DAE_invalid_asi.
2058 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2059 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2060 gen_helper_exit_atomic(tcg_env
);
2062 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2063 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2067 t64
= tcg_temp_new_i64();
2068 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2070 s64
= tcg_constant_i64(0xff);
2071 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2073 tcg_gen_trunc_i64_tl(dst
, t64
);
2076 dc
->npc
= DYNAMIC_PC
;
2082 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
2085 MemOp memop
= da
->memop
;
2086 MemOp size
= memop
& MO_SIZE
;
2091 /* TODO: Use 128-bit load/store below. */
2092 if (size
== MO_128
) {
2093 memop
= (memop
& ~MO_SIZE
) | MO_64
;
2100 case GET_ASI_DIRECT
:
2101 memop
|= MO_ALIGN_4
;
2104 d32
= gen_dest_fpr_F(dc
);
2105 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
2106 gen_store_fpr_F(dc
, rd
, d32
);
2110 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
2114 d64
= tcg_temp_new_i64();
2115 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
2116 addr_tmp
= tcg_temp_new();
2117 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2118 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2119 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2122 g_assert_not_reached();
2127 /* Valid for lddfa on aligned registers only. */
2128 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2129 /* The first operation checks required alignment. */
2130 addr_tmp
= tcg_temp_new();
2131 for (int i
= 0; ; ++i
) {
2132 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2133 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2137 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2141 gen_exception(dc
, TT_ILL_INSN
);
2146 /* Valid for lddfa only. */
2147 if (orig_size
== MO_64
) {
2148 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2151 gen_exception(dc
, TT_ILL_INSN
);
2157 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2158 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2161 /* According to the table in the UA2011 manual, the only
2162 other asis that are valid for ldfa/lddfa/ldqfa are
2163 the NO_FAULT asis. We still need a helper for these,
2164 but we can just use the integer asi helper for them. */
2167 d64
= tcg_temp_new_i64();
2168 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2169 d32
= gen_dest_fpr_F(dc
);
2170 tcg_gen_extrl_i64_i32(d32
, d64
);
2171 gen_store_fpr_F(dc
, rd
, d32
);
2174 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
2178 d64
= tcg_temp_new_i64();
2179 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2180 addr_tmp
= tcg_temp_new();
2181 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2182 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
2184 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2187 g_assert_not_reached();
2194 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
2197 MemOp memop
= da
->memop
;
2198 MemOp size
= memop
& MO_SIZE
;
2202 /* TODO: Use 128-bit load/store below. */
2203 if (size
== MO_128
) {
2204 memop
= (memop
& ~MO_SIZE
) | MO_64
;
2211 case GET_ASI_DIRECT
:
2212 memop
|= MO_ALIGN_4
;
2215 d32
= gen_load_fpr_F(dc
, rd
);
2216 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
2219 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2220 memop
| MO_ALIGN_4
);
2223 /* Only 4-byte alignment required. However, it is legal for the
2224 cpu to signal the alignment fault, and the OS trap handler is
2225 required to fix it up. Requiring 16-byte alignment here avoids
2226 having to probe the second page before performing the first
2228 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2229 memop
| MO_ALIGN_16
);
2230 addr_tmp
= tcg_temp_new();
2231 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2232 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2235 g_assert_not_reached();
2240 /* Valid for stdfa on aligned registers only. */
2241 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2242 /* The first operation checks required alignment. */
2243 addr_tmp
= tcg_temp_new();
2244 for (int i
= 0; ; ++i
) {
2245 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2246 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2250 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2254 gen_exception(dc
, TT_ILL_INSN
);
2259 /* Valid for stdfa only. */
2260 if (orig_size
== MO_64
) {
2261 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2264 gen_exception(dc
, TT_ILL_INSN
);
2269 /* According to the table in the UA2011 manual, the only
2270 other asis that are valid for ldfa/lddfa/ldqfa are
2271 the PST* asis, which aren't currently handled. */
2272 gen_exception(dc
, TT_ILL_INSN
);
2277 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2279 TCGv hi
= gen_dest_gpr(dc
, rd
);
2280 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
2286 case GET_ASI_DTWINX
:
2287 #ifdef TARGET_SPARC64
2289 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2290 TCGv_i128 t
= tcg_temp_new_i128();
2292 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
2294 * Note that LE twinx acts as if each 64-bit register result is
2295 * byte swapped. We perform one 128-bit LE load, so must swap
2296 * the order of the writebacks.
2298 if ((mop
& MO_BSWAP
) == MO_TE
) {
2299 tcg_gen_extr_i128_i64(lo
, hi
, t
);
2301 tcg_gen_extr_i128_i64(hi
, lo
, t
);
2306 g_assert_not_reached();
2309 case GET_ASI_DIRECT
:
2311 TCGv_i64 tmp
= tcg_temp_new_i64();
2313 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2315 /* Note that LE ldda acts as if each 32-bit register
2316 result is byte swapped. Having just performed one
2317 64-bit bswap, we need now to swap the writebacks. */
2318 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2319 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2321 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2327 /* ??? In theory we've handled all of the ASIs that are valid
2328 for ldda, and this should raise DAE_invalid_asi. However,
2329 real hardware allows others. This can be seen with e.g.
2330 FreeBSD 10.3 wrt ASI_IC_TAG. */
2332 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2333 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2334 TCGv_i64 tmp
= tcg_temp_new_i64();
2337 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2340 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2341 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2343 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2349 gen_store_gpr(dc
, rd
, hi
);
2350 gen_store_gpr(dc
, rd
+ 1, lo
);
2353 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2355 TCGv hi
= gen_load_gpr(dc
, rd
);
2356 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2362 case GET_ASI_DTWINX
:
2363 #ifdef TARGET_SPARC64
2365 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2366 TCGv_i128 t
= tcg_temp_new_i128();
2369 * Note that LE twinx acts as if each 64-bit register result is
2370 * byte swapped. We perform one 128-bit LE store, so must swap
2371 * the order of the construction.
2373 if ((mop
& MO_BSWAP
) == MO_TE
) {
2374 tcg_gen_concat_i64_i128(t
, lo
, hi
);
2376 tcg_gen_concat_i64_i128(t
, hi
, lo
);
2378 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
2382 g_assert_not_reached();
2385 case GET_ASI_DIRECT
:
2387 TCGv_i64 t64
= tcg_temp_new_i64();
2389 /* Note that LE stda acts as if each 32-bit register result is
2390 byte swapped. We will perform one 64-bit LE store, so now
2391 we must swap the order of the construction. */
2392 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2393 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2395 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2397 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2402 assert(TARGET_LONG_BITS
== 32);
2403 /* Store 32 bytes of T64 to ADDR. */
2404 /* ??? The original qemu code suggests 8-byte alignment, dropping
2405 the low bits, but the only place I can see this used is in the
2406 Linux kernel with 32 byte alignment, which would make more sense
2407 as a cacheline-style operation. */
2409 TCGv_i64 t64
= tcg_temp_new_i64();
2410 TCGv d_addr
= tcg_temp_new();
2411 TCGv eight
= tcg_constant_tl(8);
2414 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2415 tcg_gen_andi_tl(d_addr
, addr
, -8);
2416 for (i
= 0; i
< 32; i
+= 8) {
2417 tcg_gen_qemu_st_i64(t64
, d_addr
, da
->mem_idx
, da
->memop
);
2418 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2424 /* ??? In theory we've handled all of the ASIs that are valid
2425 for stda, and this should raise DAE_invalid_asi. */
2427 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2428 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2429 TCGv_i64 t64
= tcg_temp_new_i64();
2432 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2433 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2435 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2439 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2445 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2447 #ifdef TARGET_SPARC64
2448 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2450 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2451 or fold the comparison down to 32 bits and use movcond_i32. Choose
2453 c32
= tcg_temp_new_i32();
2455 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2457 TCGv_i64 c64
= tcg_temp_new_i64();
2458 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2459 tcg_gen_extrl_i64_i32(c32
, c64
);
2462 s1
= gen_load_fpr_F(dc
, rs
);
2463 s2
= gen_load_fpr_F(dc
, rd
);
2464 dst
= gen_dest_fpr_F(dc
);
2465 zero
= tcg_constant_i32(0);
2467 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2469 gen_store_fpr_F(dc
, rd
, dst
);
2471 qemu_build_not_reached();
2475 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2477 #ifdef TARGET_SPARC64
2478 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2479 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2480 gen_load_fpr_D(dc
, rs
),
2481 gen_load_fpr_D(dc
, rd
));
2482 gen_store_fpr_D(dc
, rd
, dst
);
2484 qemu_build_not_reached();
2488 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2490 #ifdef TARGET_SPARC64
2491 int qd
= QFPREG(rd
);
2492 int qs
= QFPREG(rs
);
2494 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2495 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2496 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2497 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2499 gen_update_fprs_dirty(dc
, qd
);
2501 qemu_build_not_reached();
2505 #ifdef TARGET_SPARC64
2506 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2508 TCGv_i32 r_tl
= tcg_temp_new_i32();
2510 /* load env->tl into r_tl */
2511 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2513 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2514 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2516 /* calculate offset to current trap state from env->ts, reuse r_tl */
2517 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2518 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2520 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2522 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2523 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2524 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2529 static int extract_dfpreg(DisasContext
*dc
, int x
)
2534 static int extract_qfpreg(DisasContext
*dc
, int x
)
2539 /* Include the auto-generated decoder. */
2540 #include "decode-insns.c.inc"
2542 #define TRANS(NAME, AVAIL, FUNC, ...) \
2543 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2544 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2546 #define avail_ALL(C) true
2547 #ifdef TARGET_SPARC64
2548 # define avail_32(C) false
2549 # define avail_ASR17(C) false
2550 # define avail_CASA(C) true
2551 # define avail_DIV(C) true
2552 # define avail_MUL(C) true
2553 # define avail_POWERDOWN(C) false
2554 # define avail_64(C) true
2555 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2556 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2557 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2558 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2560 # define avail_32(C) true
2561 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2562 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2563 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2564 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2565 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2566 # define avail_64(C) false
2567 # define avail_GL(C) false
2568 # define avail_HYPV(C) false
2569 # define avail_VIS1(C) false
2570 # define avail_VIS2(C) false
2573 /* Default case for non jump instructions. */
2574 static bool advance_pc(DisasContext
*dc
)
2579 case DYNAMIC_PC_LOOKUP
:
2584 /* we can do a static jump */
2585 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
2586 dc
->base
.is_jmp
= DISAS_NORETURN
;
2589 g_assert_not_reached();
2593 dc
->npc
= dc
->npc
+ 4;
2599 * Major opcodes 00 and 01 -- branches, call, and sethi
2602 static bool advance_jump_uncond_never(DisasContext
*dc
, bool annul
)
2605 dc
->pc
= dc
->npc
+ 4;
2606 dc
->npc
= dc
->pc
+ 4;
2609 dc
->npc
= dc
->pc
+ 4;
2614 static bool advance_jump_uncond_always(DisasContext
*dc
, bool annul
,
2623 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2628 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2629 bool annul
, target_ulong dest
)
2631 target_ulong npc
= dc
->npc
;
2634 TCGLabel
*l1
= gen_new_label();
2636 tcg_gen_brcond_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2637 gen_goto_tb(dc
, 0, npc
, dest
);
2639 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2641 dc
->base
.is_jmp
= DISAS_NORETURN
;
2646 case DYNAMIC_PC_LOOKUP
:
2647 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2648 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2649 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2651 tcg_constant_tl(dest
), cpu_npc
);
2655 g_assert_not_reached();
2659 dc
->jump_pc
[0] = dest
;
2660 dc
->jump_pc
[1] = npc
+ 4;
2663 tcg_gen_mov_tl(cpu_cond
, cmp
->c1
);
2665 tcg_gen_setcond_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2672 static bool raise_priv(DisasContext
*dc
)
2674 gen_exception(dc
, TT_PRIV_INSN
);
2678 static bool raise_unimpfpop(DisasContext
*dc
)
2680 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2684 static bool gen_trap_float128(DisasContext
*dc
)
2686 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2689 return raise_unimpfpop(dc
);
2692 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2694 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2699 return advance_jump_uncond_never(dc
, a
->a
);
2701 return advance_jump_uncond_always(dc
, a
->a
, target
);
2705 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2706 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2710 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2711 TRANS(BPcc
, 64, do_bpcc
, a
)
2713 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2715 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2718 if (gen_trap_ifnofpu(dc
)) {
2723 return advance_jump_uncond_never(dc
, a
->a
);
2725 return advance_jump_uncond_always(dc
, a
->a
, target
);
2729 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2730 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2734 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2735 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2737 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2739 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2742 if (!avail_64(dc
)) {
2745 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
2750 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
2751 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
2754 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2756 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2758 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2764 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2767 * For sparc32, always generate the no-coprocessor exception.
2768 * For sparc64, always generate illegal instruction.
2770 #ifdef TARGET_SPARC64
2773 gen_exception(dc
, TT_NCP_INSN
);
2778 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2780 /* Special-case %g0 because that's the canonical nop. */
2782 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2784 return advance_pc(dc
);
2788 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2791 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2792 int rs1
, bool imm
, int rs2_or_imm
)
2794 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2795 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2802 return advance_pc(dc
);
2806 * Immediate traps are the most common case. Since this value is
2807 * live across the branch, it really pays to evaluate the constant.
2809 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2810 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2812 trap
= tcg_temp_new_i32();
2813 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2815 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2817 TCGv_i32 t2
= tcg_temp_new_i32();
2818 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2819 tcg_gen_add_i32(trap
, trap
, t2
);
2821 tcg_gen_andi_i32(trap
, trap
, mask
);
2822 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2828 gen_helper_raise_exception(tcg_env
, trap
);
2829 dc
->base
.is_jmp
= DISAS_NORETURN
;
2833 /* Conditional trap. */
2835 lab
= delay_exceptionv(dc
, trap
);
2836 gen_compare(&cmp
, cc
, cond
, dc
);
2837 tcg_gen_brcond_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2839 return advance_pc(dc
);
2842 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2844 if (avail_32(dc
) && a
->cc
) {
2847 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2850 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2855 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2858 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2863 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2866 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2868 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2869 return advance_pc(dc
);
2872 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2878 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2879 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2882 /* For #Sync, etc, end the TB to recognize interrupts. */
2883 dc
->base
.is_jmp
= DISAS_EXIT
;
2885 return advance_pc(dc
);
2888 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2889 TCGv (*func
)(DisasContext
*, TCGv
))
2892 return raise_priv(dc
);
2894 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2895 return advance_pc(dc
);
2898 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2903 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2906 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2907 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2908 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2910 if (avail_64(dc
) && a
->rs1
!= 0) {
2913 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2916 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2921 * TODO: There are many more fields to be filled,
2922 * some of which are writable.
2924 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
2925 val
|= 1 << 8; /* [8] V8 */
2927 return tcg_constant_tl(val
);
2930 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2932 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2935 gen_helper_rdccr(dst
, tcg_env
);
2939 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2941 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2943 #ifdef TARGET_SPARC64
2944 return tcg_constant_tl(dc
->asi
);
2946 qemu_build_not_reached();
2950 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2952 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2954 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2956 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2957 if (translator_io_start(&dc
->base
)) {
2958 dc
->base
.is_jmp
= DISAS_EXIT
;
2960 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2961 tcg_constant_i32(dc
->mem_idx
));
2965 /* TODO: non-priv access only allowed when enabled. */
2966 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2968 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2970 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
2973 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
2975 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
2977 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
2981 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
2983 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
2985 gen_trap_ifnofpu(dc
);
2989 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
2991 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
2993 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
2997 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
2999 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
3001 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3005 /* TODO: non-priv access only allowed when enabled. */
3006 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
3008 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
3010 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3012 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3013 if (translator_io_start(&dc
->base
)) {
3014 dc
->base
.is_jmp
= DISAS_EXIT
;
3016 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3017 tcg_constant_i32(dc
->mem_idx
));
3021 /* TODO: non-priv access only allowed when enabled. */
3022 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
3024 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
3026 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3030 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3031 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
3034 * UltraSPARC-T1 Strand status.
3035 * HYPV check maybe not enough, UA2005 & UA2007 describe
3036 * this ASR as impl. dep
3038 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
3040 return tcg_constant_tl(1);
3043 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
3045 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
3048 gen_helper_rdpsr(dst
, tcg_env
);
3052 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
3054 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
3056 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
3060 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
3062 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
3064 TCGv_i32 tl
= tcg_temp_new_i32();
3065 TCGv_ptr tp
= tcg_temp_new_ptr();
3067 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3068 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3069 tcg_gen_shli_i32(tl
, tl
, 3);
3070 tcg_gen_ext_i32_ptr(tp
, tl
);
3071 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3073 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
3077 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
3079 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
3081 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
3085 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
3087 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
3089 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
3093 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
3095 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
3097 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
3101 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
3103 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
3105 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3109 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
3112 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
3114 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
3118 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
3120 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
3122 #ifdef TARGET_SPARC64
3123 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3125 gen_load_trap_state_at_tl(r_tsptr
);
3126 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
3129 qemu_build_not_reached();
3133 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
3135 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
3137 #ifdef TARGET_SPARC64
3138 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3140 gen_load_trap_state_at_tl(r_tsptr
);
3141 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
3144 qemu_build_not_reached();
3148 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
3150 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
3152 #ifdef TARGET_SPARC64
3153 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3155 gen_load_trap_state_at_tl(r_tsptr
);
3156 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
3159 qemu_build_not_reached();
3163 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
3165 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
3167 #ifdef TARGET_SPARC64
3168 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3170 gen_load_trap_state_at_tl(r_tsptr
);
3171 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
3174 qemu_build_not_reached();
3178 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
3179 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
3181 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
3186 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3187 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3189 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
3191 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
3195 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
3197 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
3199 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
3203 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
3205 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
3207 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
3211 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
3213 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
3215 gen_helper_rdcwp(dst
, tcg_env
);
3219 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
3221 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
3223 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
3227 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
3229 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
3231 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
3235 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
3238 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3240 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3244 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3246 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3248 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3252 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3254 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3256 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3260 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3262 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3264 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3268 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3270 /* UA2005 strand status */
3271 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3273 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3277 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3279 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3281 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3285 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3287 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3290 gen_helper_flushw(tcg_env
);
3291 return advance_pc(dc
);
3296 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3297 void (*func
)(DisasContext
*, TCGv
))
3301 /* For simplicity, we under-decoded the rs2 form. */
3302 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3306 return raise_priv(dc
);
3309 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3310 src
= tcg_constant_tl(a
->rs2_or_imm
);
3312 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3313 if (a
->rs2_or_imm
== 0) {
3316 src
= tcg_temp_new();
3318 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3320 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3325 return advance_pc(dc
);
3328 static void do_wry(DisasContext
*dc
, TCGv src
)
3330 tcg_gen_ext32u_tl(cpu_y
, src
);
3333 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3335 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3337 gen_helper_wrccr(tcg_env
, src
);
3340 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3342 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3344 TCGv tmp
= tcg_temp_new();
3346 tcg_gen_ext8u_tl(tmp
, src
);
3347 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3348 /* End TB to notice changed ASI. */
3349 dc
->base
.is_jmp
= DISAS_EXIT
;
3352 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3354 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3356 #ifdef TARGET_SPARC64
3357 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3359 dc
->base
.is_jmp
= DISAS_EXIT
;
3361 qemu_build_not_reached();
3365 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3367 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3369 gen_trap_ifnofpu(dc
);
3370 tcg_gen_mov_tl(cpu_gsr
, src
);
3373 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3375 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3377 gen_helper_set_softint(tcg_env
, src
);
3380 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3382 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3384 gen_helper_clear_softint(tcg_env
, src
);
3387 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3389 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3391 gen_helper_write_softint(tcg_env
, src
);
3394 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3396 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3398 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3400 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3401 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3402 translator_io_start(&dc
->base
);
3403 gen_helper_tick_set_limit(r_tickptr
, src
);
3404 /* End TB to handle timer interrupt */
3405 dc
->base
.is_jmp
= DISAS_EXIT
;
3408 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3410 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3412 #ifdef TARGET_SPARC64
3413 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3415 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3416 translator_io_start(&dc
->base
);
3417 gen_helper_tick_set_count(r_tickptr
, src
);
3418 /* End TB to handle timer interrupt */
3419 dc
->base
.is_jmp
= DISAS_EXIT
;
3421 qemu_build_not_reached();
3425 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3427 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3429 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3431 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3432 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3433 translator_io_start(&dc
->base
);
3434 gen_helper_tick_set_limit(r_tickptr
, src
);
3435 /* End TB to handle timer interrupt */
3436 dc
->base
.is_jmp
= DISAS_EXIT
;
3439 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3441 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3444 gen_helper_power_down(tcg_env
);
3447 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3449 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3451 gen_helper_wrpsr(tcg_env
, src
);
3452 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3453 dc
->cc_op
= CC_OP_FLAGS
;
3454 dc
->base
.is_jmp
= DISAS_EXIT
;
3457 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3459 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3461 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3462 TCGv tmp
= tcg_temp_new();
3464 tcg_gen_andi_tl(tmp
, src
, mask
);
3465 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3468 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3470 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3472 #ifdef TARGET_SPARC64
3473 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3475 gen_load_trap_state_at_tl(r_tsptr
);
3476 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3478 qemu_build_not_reached();
3482 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3484 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3486 #ifdef TARGET_SPARC64
3487 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3489 gen_load_trap_state_at_tl(r_tsptr
);
3490 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3492 qemu_build_not_reached();
3496 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3498 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3500 #ifdef TARGET_SPARC64
3501 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3503 gen_load_trap_state_at_tl(r_tsptr
);
3504 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3506 qemu_build_not_reached();
3510 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3512 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3514 #ifdef TARGET_SPARC64
3515 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3517 gen_load_trap_state_at_tl(r_tsptr
);
3518 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3520 qemu_build_not_reached();
3524 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3526 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3528 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3530 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3531 translator_io_start(&dc
->base
);
3532 gen_helper_tick_set_count(r_tickptr
, src
);
3533 /* End TB to handle timer interrupt */
3534 dc
->base
.is_jmp
= DISAS_EXIT
;
3537 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3539 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3541 tcg_gen_mov_tl(cpu_tbr
, src
);
3544 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3546 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3549 if (translator_io_start(&dc
->base
)) {
3550 dc
->base
.is_jmp
= DISAS_EXIT
;
3552 gen_helper_wrpstate(tcg_env
, src
);
3553 dc
->npc
= DYNAMIC_PC
;
3556 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3558 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3561 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3562 dc
->npc
= DYNAMIC_PC
;
3565 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3567 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3569 if (translator_io_start(&dc
->base
)) {
3570 dc
->base
.is_jmp
= DISAS_EXIT
;
3572 gen_helper_wrpil(tcg_env
, src
);
3575 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3577 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3579 gen_helper_wrcwp(tcg_env
, src
);
3582 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3584 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3586 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3589 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3591 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3593 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3596 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3598 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3600 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3603 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3605 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3607 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3610 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3612 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3614 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3617 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3619 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3621 gen_helper_wrgl(tcg_env
, src
);
3624 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3626 /* UA2005 strand status */
3627 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3629 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3632 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3634 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3636 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3638 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3639 dc
->base
.is_jmp
= DISAS_EXIT
;
3642 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3644 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3646 TCGv_i32 tl
= tcg_temp_new_i32();
3647 TCGv_ptr tp
= tcg_temp_new_ptr();
3649 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3650 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3651 tcg_gen_shli_i32(tl
, tl
, 3);
3652 tcg_gen_ext_i32_ptr(tp
, tl
);
3653 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3655 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3658 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3660 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3662 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3665 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3667 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3669 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3672 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3674 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3676 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3678 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3679 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3680 translator_io_start(&dc
->base
);
3681 gen_helper_tick_set_limit(r_tickptr
, src
);
3682 /* End TB to handle timer interrupt */
3683 dc
->base
.is_jmp
= DISAS_EXIT
;
3686 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
3689 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
3691 if (!supervisor(dc
)) {
3692 return raise_priv(dc
);
3695 gen_helper_saved(tcg_env
);
3697 gen_helper_restored(tcg_env
);
3699 return advance_pc(dc
);
3702 TRANS(SAVED
, 64, do_saved_restored
, true)
3703 TRANS(RESTORED
, 64, do_saved_restored
, false)
3705 static bool trans_NOP(DisasContext
*dc
, arg_NOP
*a
)
3707 return advance_pc(dc
);
3711 * TODO: Need a feature bit for sparcv8.
3712 * In the meantime, treat all 32-bit cpus like sparcv7.
3714 TRANS(NOP_v7
, 32, trans_NOP
, a
)
3715 TRANS(NOP_v9
, 64, trans_NOP
, a
)
3717 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
3718 void (*func
)(TCGv
, TCGv
, TCGv
),
3719 void (*funci
)(TCGv
, TCGv
, target_long
))
3723 /* For simplicity, we under-decoded the rs2 form. */
3724 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3731 dst
= gen_dest_gpr(dc
, a
->rd
);
3733 src1
= gen_load_gpr(dc
, a
->rs1
);
3735 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3737 funci(dst
, src1
, a
->rs2_or_imm
);
3739 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
3742 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3744 gen_store_gpr(dc
, a
->rd
, dst
);
3747 tcg_gen_movi_i32(cpu_cc_op
, cc_op
);
3750 return advance_pc(dc
);
3753 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
3754 void (*func
)(TCGv
, TCGv
, TCGv
),
3755 void (*funci
)(TCGv
, TCGv
, target_long
),
3756 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
3760 return do_arith_int(dc
, a
, cc_op
, func_cc
, NULL
);
3762 return do_arith_int(dc
, a
, cc_op
, func
, funci
);
3765 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3766 void (*func
)(TCGv
, TCGv
, TCGv
),
3767 void (*funci
)(TCGv
, TCGv
, target_long
))
3769 return do_arith_int(dc
, a
, CC_OP_LOGIC
, func
, funci
);
3772 TRANS(ADD
, ALL
, do_arith
, a
, CC_OP_ADD
,
3773 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
)
3774 TRANS(SUB
, ALL
, do_arith
, a
, CC_OP_SUB
,
3775 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
)
3777 TRANS(TADDcc
, ALL
, do_arith
, a
, CC_OP_TADD
, NULL
, NULL
, gen_op_add_cc
)
3778 TRANS(TSUBcc
, ALL
, do_arith
, a
, CC_OP_TSUB
, NULL
, NULL
, gen_op_sub_cc
)
3779 TRANS(TADDccTV
, ALL
, do_arith
, a
, CC_OP_TADDTV
, NULL
, NULL
, gen_op_taddcctv
)
3780 TRANS(TSUBccTV
, ALL
, do_arith
, a
, CC_OP_TSUBTV
, NULL
, NULL
, gen_op_tsubcctv
)
3782 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
3783 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
3784 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
3785 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
3786 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
3788 TRANS(MULX
, 64, do_arith
, a
, -1, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
3789 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
3790 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
3792 TRANS(UDIVX
, 64, do_arith
, a
, -1, gen_op_udivx
, NULL
, NULL
)
3793 TRANS(SDIVX
, 64, do_arith
, a
, -1, gen_op_sdivx
, NULL
, NULL
)
3794 TRANS(UDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_udiv
, NULL
, gen_op_udivcc
)
3795 TRANS(SDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
3797 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3798 TRANS(POPC
, 64, do_arith
, a
, -1, gen_op_popc
, NULL
, NULL
)
3800 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3802 /* OR with %g0 is the canonical alias for MOV. */
3803 if (!a
->cc
&& a
->rs1
== 0) {
3804 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3805 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
3806 } else if (a
->rs2_or_imm
& ~0x1f) {
3807 /* For simplicity, we under-decoded the rs2 form. */
3810 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
3812 return advance_pc(dc
);
3814 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
3817 static bool trans_ADDC(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3819 switch (dc
->cc_op
) {
3822 /* Carry is known to be zero. Fall back to plain ADD. */
3823 return do_arith(dc
, a
, CC_OP_ADD
,
3824 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
);
3828 return do_arith(dc
, a
, CC_OP_ADDX
,
3829 gen_op_addc_add
, NULL
, gen_op_addccc_add
);
3833 return do_arith(dc
, a
, CC_OP_ADDX
,
3834 gen_op_addc_sub
, NULL
, gen_op_addccc_sub
);
3836 return do_arith(dc
, a
, CC_OP_ADDX
,
3837 gen_op_addc_generic
, NULL
, gen_op_addccc_generic
);
3841 static bool trans_SUBC(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3843 switch (dc
->cc_op
) {
3846 /* Carry is known to be zero. Fall back to plain SUB. */
3847 return do_arith(dc
, a
, CC_OP_SUB
,
3848 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
);
3852 return do_arith(dc
, a
, CC_OP_SUBX
,
3853 gen_op_subc_add
, NULL
, gen_op_subccc_add
);
3857 return do_arith(dc
, a
, CC_OP_SUBX
,
3858 gen_op_subc_sub
, NULL
, gen_op_subccc_sub
);
3860 return do_arith(dc
, a
, CC_OP_SUBX
,
3861 gen_op_subc_generic
, NULL
, gen_op_subccc_generic
);
3865 static bool trans_MULScc(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3868 return do_arith(dc
, a
, CC_OP_ADD
, NULL
, NULL
, gen_op_mulscc
);
3871 static bool gen_edge(DisasContext
*dc
, arg_r_r_r
*a
,
3872 int width
, bool cc
, bool left
)
3874 TCGv dst
, s1
, s2
, lo1
, lo2
;
3875 uint64_t amask
, tabl
, tabr
;
3876 int shift
, imask
, omask
;
3878 dst
= gen_dest_gpr(dc
, a
->rd
);
3879 s1
= gen_load_gpr(dc
, a
->rs1
);
3880 s2
= gen_load_gpr(dc
, a
->rs2
);
3883 tcg_gen_mov_tl(cpu_cc_src
, s1
);
3884 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
3885 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
3886 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3887 dc
->cc_op
= CC_OP_SUB
;
3891 * Theory of operation: there are two tables, left and right (not to
3892 * be confused with the left and right versions of the opcode). These
3893 * are indexed by the low 3 bits of the inputs. To make things "easy",
3894 * these tables are loaded into two constants, TABL and TABR below.
3895 * The operation index = (input & imask) << shift calculates the index
3896 * into the constant, while val = (table >> index) & omask calculates
3897 * the value we're looking for.
3905 tabl
= 0x80c0e0f0f8fcfeffULL
;
3906 tabr
= 0xff7f3f1f0f070301ULL
;
3908 tabl
= 0x0103070f1f3f7fffULL
;
3909 tabr
= 0xfffefcf8f0e0c080ULL
;
3929 tabl
= (2 << 2) | 3;
3930 tabr
= (3 << 2) | 1;
3932 tabl
= (1 << 2) | 3;
3933 tabr
= (3 << 2) | 2;
3940 lo1
= tcg_temp_new();
3941 lo2
= tcg_temp_new();
3942 tcg_gen_andi_tl(lo1
, s1
, imask
);
3943 tcg_gen_andi_tl(lo2
, s2
, imask
);
3944 tcg_gen_shli_tl(lo1
, lo1
, shift
);
3945 tcg_gen_shli_tl(lo2
, lo2
, shift
);
3947 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
3948 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
3949 tcg_gen_andi_tl(lo1
, lo1
, omask
);
3950 tcg_gen_andi_tl(lo2
, lo2
, omask
);
3952 amask
= address_mask_i(dc
, -8);
3953 tcg_gen_andi_tl(s1
, s1
, amask
);
3954 tcg_gen_andi_tl(s2
, s2
, amask
);
3956 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3957 tcg_gen_and_tl(lo2
, lo2
, lo1
);
3958 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
3960 gen_store_gpr(dc
, a
->rd
, dst
);
3961 return advance_pc(dc
);
3964 TRANS(EDGE8cc
, VIS1
, gen_edge
, a
, 8, 1, 0)
3965 TRANS(EDGE8Lcc
, VIS1
, gen_edge
, a
, 8, 1, 1)
3966 TRANS(EDGE16cc
, VIS1
, gen_edge
, a
, 16, 1, 0)
3967 TRANS(EDGE16Lcc
, VIS1
, gen_edge
, a
, 16, 1, 1)
3968 TRANS(EDGE32cc
, VIS1
, gen_edge
, a
, 32, 1, 0)
3969 TRANS(EDGE32Lcc
, VIS1
, gen_edge
, a
, 32, 1, 1)
3971 TRANS(EDGE8N
, VIS2
, gen_edge
, a
, 8, 0, 0)
3972 TRANS(EDGE8LN
, VIS2
, gen_edge
, a
, 8, 0, 1)
3973 TRANS(EDGE16N
, VIS2
, gen_edge
, a
, 16, 0, 0)
3974 TRANS(EDGE16LN
, VIS2
, gen_edge
, a
, 16, 0, 1)
3975 TRANS(EDGE32N
, VIS2
, gen_edge
, a
, 32, 0, 0)
3976 TRANS(EDGE32LN
, VIS2
, gen_edge
, a
, 32, 0, 1)
3978 static bool do_rrr(DisasContext
*dc
, arg_r_r_r
*a
,
3979 void (*func
)(TCGv
, TCGv
, TCGv
))
3981 TCGv dst
= gen_dest_gpr(dc
, a
->rd
);
3982 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3983 TCGv src2
= gen_load_gpr(dc
, a
->rs2
);
3985 func(dst
, src1
, src2
);
3986 gen_store_gpr(dc
, a
->rd
, dst
);
3987 return advance_pc(dc
);
3990 TRANS(ARRAY8
, VIS1
, do_rrr
, a
, gen_helper_array8
)
3991 TRANS(ARRAY16
, VIS1
, do_rrr
, a
, gen_op_array16
)
3992 TRANS(ARRAY32
, VIS1
, do_rrr
, a
, gen_op_array32
)
3994 static void gen_op_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
)
3996 #ifdef TARGET_SPARC64
3997 TCGv tmp
= tcg_temp_new();
3999 tcg_gen_add_tl(tmp
, s1
, s2
);
4000 tcg_gen_andi_tl(dst
, tmp
, -8);
4001 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
4003 g_assert_not_reached();
4007 static void gen_op_alignaddrl(TCGv dst
, TCGv s1
, TCGv s2
)
4009 #ifdef TARGET_SPARC64
4010 TCGv tmp
= tcg_temp_new();
4012 tcg_gen_add_tl(tmp
, s1
, s2
);
4013 tcg_gen_andi_tl(dst
, tmp
, -8);
4014 tcg_gen_neg_tl(tmp
, tmp
);
4015 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
4017 g_assert_not_reached();
4021 TRANS(ALIGNADDR
, VIS1
, do_rrr
, a
, gen_op_alignaddr
)
4022 TRANS(ALIGNADDRL
, VIS1
, do_rrr
, a
, gen_op_alignaddrl
)
4024 static void gen_op_bmask(TCGv dst
, TCGv s1
, TCGv s2
)
4026 #ifdef TARGET_SPARC64
4027 tcg_gen_add_tl(dst
, s1
, s2
);
4028 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, dst
, 32, 32);
4030 g_assert_not_reached();
4034 TRANS(BMASK
, VIS2
, do_rrr
, a
, gen_op_bmask
)
4036 static bool do_shift_r(DisasContext
*dc
, arg_shiftr
*a
, bool l
, bool u
)
4038 TCGv dst
, src1
, src2
;
4040 /* Reject 64-bit shifts for sparc32. */
4041 if (avail_32(dc
) && a
->x
) {
4045 src2
= tcg_temp_new();
4046 tcg_gen_andi_tl(src2
, gen_load_gpr(dc
, a
->rs2
), a
->x
? 63 : 31);
4047 src1
= gen_load_gpr(dc
, a
->rs1
);
4048 dst
= gen_dest_gpr(dc
, a
->rd
);
4051 tcg_gen_shl_tl(dst
, src1
, src2
);
4053 tcg_gen_ext32u_tl(dst
, dst
);
4057 tcg_gen_ext32u_tl(dst
, src1
);
4060 tcg_gen_shr_tl(dst
, src1
, src2
);
4063 tcg_gen_ext32s_tl(dst
, src1
);
4066 tcg_gen_sar_tl(dst
, src1
, src2
);
4068 gen_store_gpr(dc
, a
->rd
, dst
);
4069 return advance_pc(dc
);
4072 TRANS(SLL_r
, ALL
, do_shift_r
, a
, true, true)
4073 TRANS(SRL_r
, ALL
, do_shift_r
, a
, false, true)
4074 TRANS(SRA_r
, ALL
, do_shift_r
, a
, false, false)
4076 static bool do_shift_i(DisasContext
*dc
, arg_shifti
*a
, bool l
, bool u
)
4080 /* Reject 64-bit shifts for sparc32. */
4081 if (avail_32(dc
) && (a
->x
|| a
->i
>= 32)) {
4085 src1
= gen_load_gpr(dc
, a
->rs1
);
4086 dst
= gen_dest_gpr(dc
, a
->rd
);
4088 if (avail_32(dc
) || a
->x
) {
4090 tcg_gen_shli_tl(dst
, src1
, a
->i
);
4092 tcg_gen_shri_tl(dst
, src1
, a
->i
);
4094 tcg_gen_sari_tl(dst
, src1
, a
->i
);
4098 tcg_gen_deposit_z_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4100 tcg_gen_extract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4102 tcg_gen_sextract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
4105 gen_store_gpr(dc
, a
->rd
, dst
);
4106 return advance_pc(dc
);
4109 TRANS(SLL_i
, ALL
, do_shift_i
, a
, true, true)
4110 TRANS(SRL_i
, ALL
, do_shift_i
, a
, false, true)
4111 TRANS(SRA_i
, ALL
, do_shift_i
, a
, false, false)
4113 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
4115 /* For simplicity, we under-decoded the rs2 form. */
4116 if (!imm
&& rs2_or_imm
& ~0x1f) {
4119 if (imm
|| rs2_or_imm
== 0) {
4120 return tcg_constant_tl(rs2_or_imm
);
4122 return cpu_regs
[rs2_or_imm
];
4126 static bool do_mov_cond(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, TCGv src2
)
4128 TCGv dst
= gen_load_gpr(dc
, rd
);
4130 tcg_gen_movcond_tl(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
, src2
, dst
);
4131 gen_store_gpr(dc
, rd
, dst
);
4132 return advance_pc(dc
);
4135 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
4137 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4143 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4144 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4147 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
4149 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4155 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4156 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4159 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
4161 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4167 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
4168 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4171 static bool do_add_special(DisasContext
*dc
, arg_r_r_ri
*a
,
4172 bool (*func
)(DisasContext
*dc
, int rd
, TCGv src
))
4176 /* For simplicity, we under-decoded the rs2 form. */
4177 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4182 * Always load the sum into a new temporary.
4183 * This is required to capture the value across a window change,
4184 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4186 sum
= tcg_temp_new();
4187 src1
= gen_load_gpr(dc
, a
->rs1
);
4188 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4189 tcg_gen_addi_tl(sum
, src1
, a
->rs2_or_imm
);
4191 tcg_gen_add_tl(sum
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4193 return func(dc
, a
->rd
, sum
);
4196 static bool do_jmpl(DisasContext
*dc
, int rd
, TCGv src
)
4199 * Preserve pc across advance, so that we can delay
4200 * the writeback to rd until after src is consumed.
4202 target_ulong cur_pc
= dc
->pc
;
4204 gen_check_align(dc
, src
, 3);
4207 tcg_gen_mov_tl(cpu_npc
, src
);
4208 gen_address_mask(dc
, cpu_npc
);
4209 gen_store_gpr(dc
, rd
, tcg_constant_tl(cur_pc
));
4211 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4215 TRANS(JMPL
, ALL
, do_add_special
, a
, do_jmpl
)
4217 static bool do_rett(DisasContext
*dc
, int rd
, TCGv src
)
4219 if (!supervisor(dc
)) {
4220 return raise_priv(dc
);
4223 gen_check_align(dc
, src
, 3);
4226 tcg_gen_mov_tl(cpu_npc
, src
);
4227 gen_helper_rett(tcg_env
);
4229 dc
->npc
= DYNAMIC_PC
;
4233 TRANS(RETT
, 32, do_add_special
, a
, do_rett
)
4235 static bool do_return(DisasContext
*dc
, int rd
, TCGv src
)
4237 gen_check_align(dc
, src
, 3);
4240 tcg_gen_mov_tl(cpu_npc
, src
);
4241 gen_address_mask(dc
, cpu_npc
);
4243 gen_helper_restore(tcg_env
);
4244 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4248 TRANS(RETURN
, 64, do_add_special
, a
, do_return
)
4250 static bool do_save(DisasContext
*dc
, int rd
, TCGv src
)
4252 gen_helper_save(tcg_env
);
4253 gen_store_gpr(dc
, rd
, src
);
4254 return advance_pc(dc
);
4257 TRANS(SAVE
, ALL
, do_add_special
, a
, do_save
)
4259 static bool do_restore(DisasContext
*dc
, int rd
, TCGv src
)
4261 gen_helper_restore(tcg_env
);
4262 gen_store_gpr(dc
, rd
, src
);
4263 return advance_pc(dc
);
4266 TRANS(RESTORE
, ALL
, do_add_special
, a
, do_restore
)
4268 static bool do_done_retry(DisasContext
*dc
, bool done
)
4270 if (!supervisor(dc
)) {
4271 return raise_priv(dc
);
4273 dc
->npc
= DYNAMIC_PC
;
4274 dc
->pc
= DYNAMIC_PC
;
4275 translator_io_start(&dc
->base
);
4277 gen_helper_done(tcg_env
);
4279 gen_helper_retry(tcg_env
);
4284 TRANS(DONE
, 64, do_done_retry
, true)
4285 TRANS(RETRY
, 64, do_done_retry
, false)
4288 * Major opcode 11 -- load and store instructions
4291 static TCGv
gen_ldst_addr(DisasContext
*dc
, int rs1
, bool imm
, int rs2_or_imm
)
4293 TCGv addr
, tmp
= NULL
;
4295 /* For simplicity, we under-decoded the rs2 form. */
4296 if (!imm
&& rs2_or_imm
& ~0x1f) {
4300 addr
= gen_load_gpr(dc
, rs1
);
4302 tmp
= tcg_temp_new();
4304 tcg_gen_addi_tl(tmp
, addr
, rs2_or_imm
);
4306 tcg_gen_add_tl(tmp
, addr
, cpu_regs
[rs2_or_imm
]);
4312 tmp
= tcg_temp_new();
4314 tcg_gen_ext32u_tl(tmp
, addr
);
4320 static bool do_ld_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4322 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4328 da
= resolve_asi(dc
, a
->asi
, mop
);
4330 reg
= gen_dest_gpr(dc
, a
->rd
);
4331 gen_ld_asi(dc
, &da
, reg
, addr
);
4332 gen_store_gpr(dc
, a
->rd
, reg
);
4333 return advance_pc(dc
);
4336 TRANS(LDUW
, ALL
, do_ld_gpr
, a
, MO_TEUL
)
4337 TRANS(LDUB
, ALL
, do_ld_gpr
, a
, MO_UB
)
4338 TRANS(LDUH
, ALL
, do_ld_gpr
, a
, MO_TEUW
)
4339 TRANS(LDSB
, ALL
, do_ld_gpr
, a
, MO_SB
)
4340 TRANS(LDSH
, ALL
, do_ld_gpr
, a
, MO_TESW
)
4341 TRANS(LDSW
, 64, do_ld_gpr
, a
, MO_TESL
)
4342 TRANS(LDX
, 64, do_ld_gpr
, a
, MO_TEUQ
)
4344 static bool do_st_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4346 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4352 da
= resolve_asi(dc
, a
->asi
, mop
);
4354 reg
= gen_load_gpr(dc
, a
->rd
);
4355 gen_st_asi(dc
, &da
, reg
, addr
);
4356 return advance_pc(dc
);
4359 TRANS(STW
, ALL
, do_st_gpr
, a
, MO_TEUL
)
4360 TRANS(STB
, ALL
, do_st_gpr
, a
, MO_UB
)
4361 TRANS(STH
, ALL
, do_st_gpr
, a
, MO_TEUW
)
4362 TRANS(STX
, 64, do_st_gpr
, a
, MO_TEUQ
)
4364 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4372 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4376 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4377 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
4378 return advance_pc(dc
);
4381 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4389 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4393 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4394 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
4395 return advance_pc(dc
);
4398 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4403 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4407 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4409 reg
= gen_dest_gpr(dc
, a
->rd
);
4410 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4411 gen_store_gpr(dc
, a
->rd
, reg
);
4412 return advance_pc(dc
);
4415 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4417 TCGv addr
, dst
, src
;
4420 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4424 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4426 dst
= gen_dest_gpr(dc
, a
->rd
);
4427 src
= gen_load_gpr(dc
, a
->rd
);
4428 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4429 gen_store_gpr(dc
, a
->rd
, dst
);
4430 return advance_pc(dc
);
4433 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4438 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4442 da
= resolve_asi(dc
, a
->asi
, mop
);
4444 o
= gen_dest_gpr(dc
, a
->rd
);
4445 n
= gen_load_gpr(dc
, a
->rd
);
4446 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4447 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4448 gen_store_gpr(dc
, a
->rd
, o
);
4449 return advance_pc(dc
);
4452 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4453 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4455 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4457 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4463 if (gen_trap_ifnofpu(dc
)) {
4466 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4469 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4470 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4471 gen_update_fprs_dirty(dc
, a
->rd
);
4472 return advance_pc(dc
);
4475 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4476 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4477 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4479 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4480 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4481 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4483 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4485 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4491 if (gen_trap_ifnofpu(dc
)) {
4494 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4497 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4498 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4499 return advance_pc(dc
);
4502 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4503 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4504 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4506 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4507 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4508 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4510 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4512 if (!avail_32(dc
)) {
4515 if (!supervisor(dc
)) {
4516 return raise_priv(dc
);
4518 if (gen_trap_ifnofpu(dc
)) {
4521 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4525 static bool do_ldfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
,
4526 target_ulong new_mask
, target_ulong old_mask
)
4528 TCGv tmp
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4532 if (gen_trap_ifnofpu(dc
)) {
4535 tmp
= tcg_temp_new();
4536 tcg_gen_qemu_ld_tl(tmp
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4537 tcg_gen_andi_tl(tmp
, tmp
, new_mask
);
4538 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, old_mask
);
4539 tcg_gen_or_tl(cpu_fsr
, cpu_fsr
, tmp
);
4540 gen_helper_set_fsr(tcg_env
, cpu_fsr
);
4541 return advance_pc(dc
);
4544 TRANS(LDFSR
, ALL
, do_ldfsr
, a
, MO_TEUL
, FSR_LDFSR_MASK
, FSR_LDFSR_OLDMASK
)
4545 TRANS(LDXFSR
, 64, do_ldfsr
, a
, MO_TEUQ
, FSR_LDXFSR_MASK
, FSR_LDXFSR_OLDMASK
)
4547 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4549 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4553 if (gen_trap_ifnofpu(dc
)) {
4556 tcg_gen_qemu_st_tl(cpu_fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4557 return advance_pc(dc
);
4560 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4561 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4563 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4564 void (*func
)(TCGv_i32
, TCGv_i32
))
4568 if (gen_trap_ifnofpu(dc
)) {
4572 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4574 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4575 return advance_pc(dc
);
4578 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4579 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4580 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4581 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4582 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4584 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4585 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4589 if (gen_trap_ifnofpu(dc
)) {
4593 gen_op_clear_ieee_excp_and_FTT();
4594 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4595 func(tmp
, tcg_env
, tmp
);
4596 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4597 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4598 return advance_pc(dc
);
4601 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4602 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4603 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4605 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4606 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4611 if (gen_trap_ifnofpu(dc
)) {
4615 gen_op_clear_ieee_excp_and_FTT();
4616 dst
= gen_dest_fpr_F(dc
);
4617 src
= gen_load_fpr_D(dc
, a
->rs
);
4618 func(dst
, tcg_env
, src
);
4619 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4620 gen_store_fpr_F(dc
, a
->rd
, dst
);
4621 return advance_pc(dc
);
4624 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4625 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4626 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4628 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4629 void (*func
)(TCGv_i64
, TCGv_i64
))
4633 if (gen_trap_ifnofpu(dc
)) {
4637 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4638 src
= gen_load_fpr_D(dc
, a
->rs
);
4640 gen_store_fpr_D(dc
, a
->rd
, dst
);
4641 return advance_pc(dc
);
4644 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4645 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4646 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4647 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4648 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4650 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4651 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4655 if (gen_trap_ifnofpu(dc
)) {
4659 gen_op_clear_ieee_excp_and_FTT();
4660 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4661 src
= gen_load_fpr_D(dc
, a
->rs
);
4662 func(dst
, tcg_env
, src
);
4663 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4664 gen_store_fpr_D(dc
, a
->rd
, dst
);
4665 return advance_pc(dc
);
4668 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4669 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4670 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4672 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4673 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4678 if (gen_trap_ifnofpu(dc
)) {
4682 gen_op_clear_ieee_excp_and_FTT();
4683 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4684 src
= gen_load_fpr_F(dc
, a
->rs
);
4685 func(dst
, tcg_env
, src
);
4686 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4687 gen_store_fpr_D(dc
, a
->rd
, dst
);
4688 return advance_pc(dc
);
4691 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4692 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4693 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4695 static bool trans_FMOVq(DisasContext
*dc
, arg_FMOVq
*a
)
4699 if (!avail_64(dc
)) {
4702 if (gen_trap_ifnofpu(dc
)) {
4705 if (gen_trap_float128(dc
)) {
4709 gen_op_clear_ieee_excp_and_FTT();
4712 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
4713 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
4714 gen_update_fprs_dirty(dc
, rd
);
4715 return advance_pc(dc
);
4718 static bool do_qq(DisasContext
*dc
, arg_r_r
*a
,
4719 void (*func
)(TCGv_env
))
4721 if (gen_trap_ifnofpu(dc
)) {
4724 if (gen_trap_float128(dc
)) {
4728 gen_op_clear_ieee_excp_and_FTT();
4729 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4731 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4732 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4733 return advance_pc(dc
);
4736 TRANS(FNEGq
, 64, do_qq
, a
, gen_helper_fnegq
)
4737 TRANS(FABSq
, 64, do_qq
, a
, gen_helper_fabsq
)
4739 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4740 void (*func
)(TCGv_env
))
4742 if (gen_trap_ifnofpu(dc
)) {
4745 if (gen_trap_float128(dc
)) {
4749 gen_op_clear_ieee_excp_and_FTT();
4750 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4752 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4753 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4754 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4755 return advance_pc(dc
);
4758 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4760 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4761 void (*func
)(TCGv_i32
, TCGv_env
))
4765 if (gen_trap_ifnofpu(dc
)) {
4768 if (gen_trap_float128(dc
)) {
4772 gen_op_clear_ieee_excp_and_FTT();
4773 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4774 dst
= gen_dest_fpr_F(dc
);
4776 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4777 gen_store_fpr_F(dc
, a
->rd
, dst
);
4778 return advance_pc(dc
);
4781 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4782 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4784 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4785 void (*func
)(TCGv_i64
, TCGv_env
))
4789 if (gen_trap_ifnofpu(dc
)) {
4792 if (gen_trap_float128(dc
)) {
4796 gen_op_clear_ieee_excp_and_FTT();
4797 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4798 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4800 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4801 gen_store_fpr_D(dc
, a
->rd
, dst
);
4802 return advance_pc(dc
);
4805 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4806 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4808 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4809 void (*func
)(TCGv_env
, TCGv_i32
))
4813 if (gen_trap_ifnofpu(dc
)) {
4816 if (gen_trap_float128(dc
)) {
4820 gen_op_clear_ieee_excp_and_FTT();
4821 src
= gen_load_fpr_F(dc
, a
->rs
);
4823 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4824 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4825 return advance_pc(dc
);
4828 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4829 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4831 static bool do_env_qd(DisasContext
*dc
, arg_r_r
*a
,
4832 void (*func
)(TCGv_env
, TCGv_i64
))
4836 if (gen_trap_ifnofpu(dc
)) {
4839 if (gen_trap_float128(dc
)) {
4843 gen_op_clear_ieee_excp_and_FTT();
4844 src
= gen_load_fpr_D(dc
, a
->rs
);
4846 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4847 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4848 return advance_pc(dc
);
4851 TRANS(FdTOq
, ALL
, do_env_qd
, a
, gen_helper_fdtoq
)
4852 TRANS(FxTOq
, 64, do_env_qd
, a
, gen_helper_fxtoq
)
4854 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4855 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4857 TCGv_i32 src1
, src2
;
4859 if (gen_trap_ifnofpu(dc
)) {
4863 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4864 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4865 func(src1
, src1
, src2
);
4866 gen_store_fpr_F(dc
, a
->rd
, src1
);
4867 return advance_pc(dc
);
4870 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4871 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4872 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4873 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4874 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4875 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4876 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4877 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4878 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4879 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4880 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4881 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4883 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4884 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4886 TCGv_i32 src1
, src2
;
4888 if (gen_trap_ifnofpu(dc
)) {
4892 gen_op_clear_ieee_excp_and_FTT();
4893 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4894 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4895 func(src1
, tcg_env
, src1
, src2
);
4896 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4897 gen_store_fpr_F(dc
, a
->rd
, src1
);
4898 return advance_pc(dc
);
4901 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4902 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4903 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4904 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4906 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4907 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4909 TCGv_i64 dst
, src1
, src2
;
4911 if (gen_trap_ifnofpu(dc
)) {
4915 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4916 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4917 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4918 func(dst
, src1
, src2
);
4919 gen_store_fpr_D(dc
, a
->rd
, dst
);
4920 return advance_pc(dc
);
4923 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4924 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4925 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4926 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4927 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4928 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4929 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4930 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4931 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4933 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4934 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4935 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4936 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4937 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4938 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4939 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4940 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4941 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4942 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4943 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4944 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4946 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4947 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4948 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4950 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4951 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4953 TCGv_i64 dst
, src1
, src2
;
4955 if (gen_trap_ifnofpu(dc
)) {
4959 gen_op_clear_ieee_excp_and_FTT();
4960 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4961 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4962 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4963 func(dst
, tcg_env
, src1
, src2
);
4964 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4965 gen_store_fpr_D(dc
, a
->rd
, dst
);
4966 return advance_pc(dc
);
4969 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4970 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4971 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4972 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4974 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4977 TCGv_i32 src1
, src2
;
4979 if (gen_trap_ifnofpu(dc
)) {
4982 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4983 return raise_unimpfpop(dc
);
4986 gen_op_clear_ieee_excp_and_FTT();
4987 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4988 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4989 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4990 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4991 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4992 gen_store_fpr_D(dc
, a
->rd
, dst
);
4993 return advance_pc(dc
);
4996 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4997 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4999 TCGv_i64 dst
, src0
, src1
, src2
;
5001 if (gen_trap_ifnofpu(dc
)) {
5005 dst
= gen_dest_fpr_D(dc
, a
->rd
);
5006 src0
= gen_load_fpr_D(dc
, a
->rd
);
5007 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5008 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5009 func(dst
, src0
, src1
, src2
);
5010 gen_store_fpr_D(dc
, a
->rd
, dst
);
5011 return advance_pc(dc
);
5014 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
5016 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
5017 void (*func
)(TCGv_env
))
5019 if (gen_trap_ifnofpu(dc
)) {
5022 if (gen_trap_float128(dc
)) {
5026 gen_op_clear_ieee_excp_and_FTT();
5027 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
5028 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
5030 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
5031 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
5032 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
5033 return advance_pc(dc
);
5036 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
5037 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
5038 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
5039 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
5041 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
5043 TCGv_i64 src1
, src2
;
5045 if (gen_trap_ifnofpu(dc
)) {
5048 if (gen_trap_float128(dc
)) {
5052 gen_op_clear_ieee_excp_and_FTT();
5053 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5054 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5055 gen_helper_fdmulq(tcg_env
, src1
, src2
);
5056 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
5057 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
5058 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
5059 return advance_pc(dc
);
5062 static bool do_fmovr(DisasContext
*dc
, arg_FMOVRs
*a
, bool is_128
,
5063 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5067 if (gen_trap_ifnofpu(dc
)) {
5070 if (is_128
&& gen_trap_float128(dc
)) {
5074 gen_op_clear_ieee_excp_and_FTT();
5075 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
5076 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5077 return advance_pc(dc
);
5080 TRANS(FMOVRs
, 64, do_fmovr
, a
, false, gen_fmovs
)
5081 TRANS(FMOVRd
, 64, do_fmovr
, a
, false, gen_fmovd
)
5082 TRANS(FMOVRq
, 64, do_fmovr
, a
, true, gen_fmovq
)
5084 static bool do_fmovcc(DisasContext
*dc
, arg_FMOVscc
*a
, bool is_128
,
5085 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5089 if (gen_trap_ifnofpu(dc
)) {
5092 if (is_128
&& gen_trap_float128(dc
)) {
5096 gen_op_clear_ieee_excp_and_FTT();
5097 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
5098 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5099 return advance_pc(dc
);
5102 TRANS(FMOVscc
, 64, do_fmovcc
, a
, false, gen_fmovs
)
5103 TRANS(FMOVdcc
, 64, do_fmovcc
, a
, false, gen_fmovd
)
5104 TRANS(FMOVqcc
, 64, do_fmovcc
, a
, true, gen_fmovq
)
5106 static bool do_fmovfcc(DisasContext
*dc
, arg_FMOVsfcc
*a
, bool is_128
,
5107 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5111 if (gen_trap_ifnofpu(dc
)) {
5114 if (is_128
&& gen_trap_float128(dc
)) {
5118 gen_op_clear_ieee_excp_and_FTT();
5119 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
5120 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5121 return advance_pc(dc
);
5124 TRANS(FMOVsfcc
, 64, do_fmovfcc
, a
, false, gen_fmovs
)
5125 TRANS(FMOVdfcc
, 64, do_fmovfcc
, a
, false, gen_fmovd
)
5126 TRANS(FMOVqfcc
, 64, do_fmovfcc
, a
, true, gen_fmovq
)
5128 static bool do_fcmps(DisasContext
*dc
, arg_FCMPs
*a
, bool e
)
5130 TCGv_i32 src1
, src2
;
5132 if (avail_32(dc
) && a
->cc
!= 0) {
5135 if (gen_trap_ifnofpu(dc
)) {
5139 gen_op_clear_ieee_excp_and_FTT();
5140 src1
= gen_load_fpr_F(dc
, a
->rs1
);
5141 src2
= gen_load_fpr_F(dc
, a
->rs2
);
5143 gen_op_fcmpes(a
->cc
, src1
, src2
);
5145 gen_op_fcmps(a
->cc
, src1
, src2
);
5147 return advance_pc(dc
);
5150 TRANS(FCMPs
, ALL
, do_fcmps
, a
, false)
5151 TRANS(FCMPEs
, ALL
, do_fcmps
, a
, true)
5153 static bool do_fcmpd(DisasContext
*dc
, arg_FCMPd
*a
, bool e
)
5155 TCGv_i64 src1
, src2
;
5157 if (avail_32(dc
) && a
->cc
!= 0) {
5160 if (gen_trap_ifnofpu(dc
)) {
5164 gen_op_clear_ieee_excp_and_FTT();
5165 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5166 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5168 gen_op_fcmped(a
->cc
, src1
, src2
);
5170 gen_op_fcmpd(a
->cc
, src1
, src2
);
5172 return advance_pc(dc
);
5175 TRANS(FCMPd
, ALL
, do_fcmpd
, a
, false)
5176 TRANS(FCMPEd
, ALL
, do_fcmpd
, a
, true)
5178 static bool do_fcmpq(DisasContext
*dc
, arg_FCMPq
*a
, bool e
)
5180 if (avail_32(dc
) && a
->cc
!= 0) {
5183 if (gen_trap_ifnofpu(dc
)) {
5186 if (gen_trap_float128(dc
)) {
5190 gen_op_clear_ieee_excp_and_FTT();
5191 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
5192 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
5194 gen_op_fcmpeq(a
->cc
);
5196 gen_op_fcmpq(a
->cc
);
5198 return advance_pc(dc
);
5201 TRANS(FCMPq
, ALL
, do_fcmpq
, a
, false)
5202 TRANS(FCMPEq
, ALL
, do_fcmpq
, a
, true)
5204 #define CHECK_IU_FEATURE(dc, FEATURE) \
5205 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5207 #define CHECK_FPU_FEATURE(dc, FEATURE) \
5208 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5211 /* before an instruction, dc->pc must be static */
5212 static void disas_sparc_legacy(DisasContext
*dc
, unsigned int insn
)
5214 unsigned int opc
= GET_FIELD(insn
, 0, 1);
5218 goto illegal_insn
; /* in decodetree */
5220 g_assert_not_reached(); /* in decodetree */
5221 case 2: /* FPU & Logical Operations */
5223 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5225 if (xop
== 0x34) { /* FPU Operations */
5226 goto illegal_insn
; /* in decodetree */
5227 } else if (xop
== 0x35) { /* FPU Operations */
5228 goto illegal_insn
; /* in decodetree */
5229 } else if (xop
== 0x36) {
5230 #ifdef TARGET_SPARC64
5232 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
5233 TCGv_i32 cpu_dst_32
;
5234 TCGv cpu_dst
= tcg_temp_new();
5235 int opf
= GET_FIELD_SP(insn
, 5, 13);
5236 int rs1
= GET_FIELD(insn
, 13, 17);
5237 int rs2
= GET_FIELD(insn
, 27, 31);
5238 int rd
= GET_FIELD(insn
, 2, 6);
5240 if (gen_trap_ifnofpu(dc
)) {
5245 case 0x000: /* VIS I edge8cc */
5246 case 0x001: /* VIS II edge8n */
5247 case 0x002: /* VIS I edge8lcc */
5248 case 0x003: /* VIS II edge8ln */
5249 case 0x004: /* VIS I edge16cc */
5250 case 0x005: /* VIS II edge16n */
5251 case 0x006: /* VIS I edge16lcc */
5252 case 0x007: /* VIS II edge16ln */
5253 case 0x008: /* VIS I edge32cc */
5254 case 0x009: /* VIS II edge32n */
5255 case 0x00a: /* VIS I edge32lcc */
5256 case 0x00b: /* VIS II edge32ln */
5257 case 0x010: /* VIS I array8 */
5258 case 0x012: /* VIS I array16 */
5259 case 0x014: /* VIS I array32 */
5260 case 0x018: /* VIS I alignaddr */
5261 case 0x01a: /* VIS I alignaddrl */
5262 case 0x019: /* VIS II bmask */
5263 case 0x067: /* VIS I fnot2s */
5264 case 0x06b: /* VIS I fnot1s */
5265 case 0x075: /* VIS I fsrc1s */
5266 case 0x079: /* VIS I fsrc2s */
5267 case 0x066: /* VIS I fnot2 */
5268 case 0x06a: /* VIS I fnot1 */
5269 case 0x074: /* VIS I fsrc1 */
5270 case 0x078: /* VIS I fsrc2 */
5271 case 0x051: /* VIS I fpadd16s */
5272 case 0x053: /* VIS I fpadd32s */
5273 case 0x055: /* VIS I fpsub16s */
5274 case 0x057: /* VIS I fpsub32s */
5275 case 0x063: /* VIS I fnors */
5276 case 0x065: /* VIS I fandnot2s */
5277 case 0x069: /* VIS I fandnot1s */
5278 case 0x06d: /* VIS I fxors */
5279 case 0x06f: /* VIS I fnands */
5280 case 0x071: /* VIS I fands */
5281 case 0x073: /* VIS I fxnors */
5282 case 0x077: /* VIS I fornot2s */
5283 case 0x07b: /* VIS I fornot1s */
5284 case 0x07d: /* VIS I fors */
5285 case 0x050: /* VIS I fpadd16 */
5286 case 0x052: /* VIS I fpadd32 */
5287 case 0x054: /* VIS I fpsub16 */
5288 case 0x056: /* VIS I fpsub32 */
5289 case 0x062: /* VIS I fnor */
5290 case 0x064: /* VIS I fandnot2 */
5291 case 0x068: /* VIS I fandnot1 */
5292 case 0x06c: /* VIS I fxor */
5293 case 0x06e: /* VIS I fnand */
5294 case 0x070: /* VIS I fand */
5295 case 0x072: /* VIS I fxnor */
5296 case 0x076: /* VIS I fornot2 */
5297 case 0x07a: /* VIS I fornot1 */
5298 case 0x07c: /* VIS I for */
5299 case 0x031: /* VIS I fmul8x16 */
5300 case 0x033: /* VIS I fmul8x16au */
5301 case 0x035: /* VIS I fmul8x16al */
5302 case 0x036: /* VIS I fmul8sux16 */
5303 case 0x037: /* VIS I fmul8ulx16 */
5304 case 0x038: /* VIS I fmuld8sux16 */
5305 case 0x039: /* VIS I fmuld8ulx16 */
5306 case 0x04b: /* VIS I fpmerge */
5307 case 0x04d: /* VIS I fexpand */
5308 case 0x03e: /* VIS I pdist */
5309 case 0x03a: /* VIS I fpack32 */
5310 case 0x048: /* VIS I faligndata */
5311 case 0x04c: /* VIS II bshuffle */
5312 g_assert_not_reached(); /* in decodetree */
5313 case 0x020: /* VIS I fcmple16 */
5314 CHECK_FPU_FEATURE(dc
, VIS1
);
5315 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5316 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5317 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5318 gen_store_gpr(dc
, rd
, cpu_dst
);
5320 case 0x022: /* VIS I fcmpne16 */
5321 CHECK_FPU_FEATURE(dc
, VIS1
);
5322 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5323 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5324 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5325 gen_store_gpr(dc
, rd
, cpu_dst
);
5327 case 0x024: /* VIS I fcmple32 */
5328 CHECK_FPU_FEATURE(dc
, VIS1
);
5329 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5330 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5331 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5332 gen_store_gpr(dc
, rd
, cpu_dst
);
5334 case 0x026: /* VIS I fcmpne32 */
5335 CHECK_FPU_FEATURE(dc
, VIS1
);
5336 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5337 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5338 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5339 gen_store_gpr(dc
, rd
, cpu_dst
);
5341 case 0x028: /* VIS I fcmpgt16 */
5342 CHECK_FPU_FEATURE(dc
, VIS1
);
5343 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5344 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5345 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5346 gen_store_gpr(dc
, rd
, cpu_dst
);
5348 case 0x02a: /* VIS I fcmpeq16 */
5349 CHECK_FPU_FEATURE(dc
, VIS1
);
5350 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5351 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5352 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5353 gen_store_gpr(dc
, rd
, cpu_dst
);
5355 case 0x02c: /* VIS I fcmpgt32 */
5356 CHECK_FPU_FEATURE(dc
, VIS1
);
5357 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5358 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5359 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5360 gen_store_gpr(dc
, rd
, cpu_dst
);
5362 case 0x02e: /* VIS I fcmpeq32 */
5363 CHECK_FPU_FEATURE(dc
, VIS1
);
5364 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5365 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
5366 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
5367 gen_store_gpr(dc
, rd
, cpu_dst
);
5369 case 0x03b: /* VIS I fpack16 */
5370 CHECK_FPU_FEATURE(dc
, VIS1
);
5371 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5372 cpu_dst_32
= gen_dest_fpr_F(dc
);
5373 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5374 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5376 case 0x03d: /* VIS I fpackfix */
5377 CHECK_FPU_FEATURE(dc
, VIS1
);
5378 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5379 cpu_dst_32
= gen_dest_fpr_F(dc
);
5380 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5381 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5383 case 0x060: /* VIS I fzero */
5384 CHECK_FPU_FEATURE(dc
, VIS1
);
5385 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5386 tcg_gen_movi_i64(cpu_dst_64
, 0);
5387 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5389 case 0x061: /* VIS I fzeros */
5390 CHECK_FPU_FEATURE(dc
, VIS1
);
5391 cpu_dst_32
= gen_dest_fpr_F(dc
);
5392 tcg_gen_movi_i32(cpu_dst_32
, 0);
5393 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5395 case 0x07e: /* VIS I fone */
5396 CHECK_FPU_FEATURE(dc
, VIS1
);
5397 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5398 tcg_gen_movi_i64(cpu_dst_64
, -1);
5399 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5401 case 0x07f: /* VIS I fones */
5402 CHECK_FPU_FEATURE(dc
, VIS1
);
5403 cpu_dst_32
= gen_dest_fpr_F(dc
);
5404 tcg_gen_movi_i32(cpu_dst_32
, -1);
5405 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5407 case 0x080: /* VIS I shutdown */
5408 case 0x081: /* VIS II siam */
5416 goto illegal_insn
; /* in decodetree */
5420 case 3: /* load/store instructions */
5421 goto illegal_insn
; /* in decodetree */
5424 #ifdef TARGET_SPARC64
5429 gen_exception(dc
, TT_ILL_INSN
);
5431 #ifdef TARGET_SPARC64
5433 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
5438 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5440 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5441 CPUSPARCState
*env
= cpu_env(cs
);
5444 dc
->pc
= dc
->base
.pc_first
;
5445 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
5446 dc
->cc_op
= CC_OP_DYNAMIC
;
5447 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5448 dc
->def
= &env
->def
;
5449 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5450 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5451 #ifndef CONFIG_USER_ONLY
5452 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5454 #ifdef TARGET_SPARC64
5456 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5457 #ifndef CONFIG_USER_ONLY
5458 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5462 * if we reach a page boundary, we stop generation so that the
5463 * PC of a TT_TFAULT exception is always in the right page
5465 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5466 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
5469 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
5473 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5475 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5476 target_ulong npc
= dc
->npc
;
5481 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5482 npc
= dc
->jump_pc
[0] | JUMP_PC
;
5485 case DYNAMIC_PC_LOOKUP
:
5489 g_assert_not_reached();
5492 tcg_gen_insn_start(dc
->pc
, npc
);
5495 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5497 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5498 CPUSPARCState
*env
= cpu_env(cs
);
5501 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5502 dc
->base
.pc_next
+= 4;
5504 if (!decode(dc
, insn
)) {
5505 disas_sparc_legacy(dc
, insn
);
5508 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
5511 if (dc
->pc
!= dc
->base
.pc_next
) {
5512 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
5516 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5518 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5519 DisasDelayException
*e
, *e_next
;
5522 switch (dc
->base
.is_jmp
) {
5524 case DISAS_TOO_MANY
:
5525 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5526 /* static PC and NPC: we can use direct chaining */
5527 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5534 case DYNAMIC_PC_LOOKUP
:
5540 g_assert_not_reached();
5543 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5549 gen_generic_branch(dc
);
5554 case DYNAMIC_PC_LOOKUP
:
5557 g_assert_not_reached();
5560 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5563 tcg_gen_lookup_and_goto_ptr();
5565 tcg_gen_exit_tb(NULL
, 0);
5569 case DISAS_NORETURN
:
5575 tcg_gen_exit_tb(NULL
, 0);
5579 g_assert_not_reached();
5582 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5583 gen_set_label(e
->lab
);
5585 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5586 if (e
->npc
% 4 == 0) {
5587 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5589 gen_helper_raise_exception(tcg_env
, e
->excp
);
5596 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5597 CPUState
*cpu
, FILE *logfile
)
5599 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5600 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5603 static const TranslatorOps sparc_tr_ops
= {
5604 .init_disas_context
= sparc_tr_init_disas_context
,
5605 .tb_start
= sparc_tr_tb_start
,
5606 .insn_start
= sparc_tr_insn_start
,
5607 .translate_insn
= sparc_tr_translate_insn
,
5608 .tb_stop
= sparc_tr_tb_stop
,
5609 .disas_log
= sparc_tr_disas_log
,
5612 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5613 target_ulong pc
, void *host_pc
)
5615 DisasContext dc
= {};
5617 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5620 void sparc_tcg_init(void)
5622 static const char gregnames
[32][4] = {
5623 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5624 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5625 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5626 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5628 static const char fregnames
[32][4] = {
5629 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5630 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5631 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5632 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5635 static const struct { TCGv_i32
*ptr
; int off
; const char *name
; } r32
[] = {
5636 #ifdef TARGET_SPARC64
5637 { &cpu_xcc
, offsetof(CPUSPARCState
, xcc
), "xcc" },
5638 { &cpu_fprs
, offsetof(CPUSPARCState
, fprs
), "fprs" },
5640 { &cpu_cc_op
, offsetof(CPUSPARCState
, cc_op
), "cc_op" },
5641 { &cpu_psr
, offsetof(CPUSPARCState
, psr
), "psr" },
5644 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5645 #ifdef TARGET_SPARC64
5646 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5648 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5649 { &cpu_cc_src
, offsetof(CPUSPARCState
, cc_src
), "cc_src" },
5650 { &cpu_cc_src2
, offsetof(CPUSPARCState
, cc_src2
), "cc_src2" },
5651 { &cpu_cc_dst
, offsetof(CPUSPARCState
, cc_dst
), "cc_dst" },
5652 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5653 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5654 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5655 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5656 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5661 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5662 offsetof(CPUSPARCState
, regwptr
),
5665 for (i
= 0; i
< ARRAY_SIZE(r32
); ++i
) {
5666 *r32
[i
].ptr
= tcg_global_mem_new_i32(tcg_env
, r32
[i
].off
, r32
[i
].name
);
5669 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5670 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5674 for (i
= 1; i
< 8; ++i
) {
5675 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5676 offsetof(CPUSPARCState
, gregs
[i
]),
5680 for (i
= 8; i
< 32; ++i
) {
5681 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5682 (i
- 8) * sizeof(target_ulong
),
5686 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5687 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5688 offsetof(CPUSPARCState
, fpr
[i
]),
5693 void sparc_restore_state_to_opc(CPUState
*cs
,
5694 const TranslationBlock
*tb
,
5695 const uint64_t *data
)
5697 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5698 CPUSPARCState
*env
= &cpu
->env
;
5699 target_ulong pc
= data
[0];
5700 target_ulong npc
= data
[1];
5703 if (npc
== DYNAMIC_PC
) {
5704 /* dynamic NPC: already stored */
5705 } else if (npc
& JUMP_PC
) {
5706 /* jump PC: use 'cond' and the jump targets of the translation */
5708 env
->npc
= npc
& ~3;