4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
64 # define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
90 # define FSR_LDXFSR_MASK 0
91 # define FSR_LDXFSR_OLDMASK 0
95 /* Dynamic PC, must exit to main loop. */
97 /* Dynamic PC, one of two values according to jump_pc[T2]. */
99 /* Dynamic PC, may lookup next TB. */
100 #define DYNAMIC_PC_LOOKUP 3
102 #define DISAS_EXIT DISAS_TARGET_0
104 /* global register indexes */
105 static TCGv_ptr cpu_regwptr
;
106 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
107 static TCGv cpu_regs
[32];
110 static TCGv cpu_cond
;
111 static TCGv cpu_cc_N
;
112 static TCGv cpu_cc_V
;
113 static TCGv cpu_icc_Z
;
114 static TCGv cpu_icc_C
;
115 #ifdef TARGET_SPARC64
116 static TCGv cpu_xcc_Z
;
117 static TCGv cpu_xcc_C
;
118 static TCGv_i32 cpu_fprs
;
121 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
122 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
125 #ifdef TARGET_SPARC64
126 #define cpu_cc_Z cpu_xcc_Z
127 #define cpu_cc_C cpu_xcc_C
129 #define cpu_cc_Z cpu_icc_Z
130 #define cpu_cc_C cpu_icc_C
131 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
132 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
135 /* Floating point registers */
136 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
138 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
139 #ifdef TARGET_SPARC64
140 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
141 # define env64_field_offsetof(X) env_field_offsetof(X)
143 # define env32_field_offsetof(X) env_field_offsetof(X)
144 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
147 typedef struct DisasCompare
{
153 typedef struct DisasDelayException
{
154 struct DisasDelayException
*next
;
157 /* Saved state at parent insn. */
160 } DisasDelayException
;
162 typedef struct DisasContext
{
163 DisasContextBase base
;
164 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
165 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
167 /* Used when JUMP_PC value is used. */
169 target_ulong jump_pc
[2];
174 bool address_mask_32bit
;
175 #ifndef CONFIG_USER_ONLY
177 #ifdef TARGET_SPARC64
183 #ifdef TARGET_SPARC64
187 DisasDelayException
*delay_excp_list
;
190 // This function uses non-native bit order
191 #define GET_FIELD(X, FROM, TO) \
192 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
194 // This function uses the order in the manuals, i.e. bit 0 is 2^0
195 #define GET_FIELD_SP(X, FROM, TO) \
196 GET_FIELD(X, 31 - (TO), 31 - (FROM))
198 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
199 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
201 #ifdef TARGET_SPARC64
202 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
203 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
205 #define DFPREG(r) (r & 0x1e)
206 #define QFPREG(r) (r & 0x1c)
209 #define UA2005_HTRAP_MASK 0xff
210 #define V8_TRAP_MASK 0x7f
212 #define IS_IMM (insn & (1<<13))
214 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
216 #if defined(TARGET_SPARC64)
217 int bit
= (rd
< 32) ? 1 : 2;
218 /* If we know we've already set this bit within the TB,
219 we can avoid setting it again. */
220 if (!(dc
->fprs_dirty
& bit
)) {
221 dc
->fprs_dirty
|= bit
;
222 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
227 /* floating point registers moves */
228 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
230 TCGv_i32 ret
= tcg_temp_new_i32();
232 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
234 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
239 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
241 TCGv_i64 t
= tcg_temp_new_i64();
243 tcg_gen_extu_i32_i64(t
, v
);
244 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
245 (dst
& 1 ? 0 : 32), 32);
246 gen_update_fprs_dirty(dc
, dst
);
249 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
251 return tcg_temp_new_i32();
254 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
257 return cpu_fpr
[src
/ 2];
260 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
263 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
264 gen_update_fprs_dirty(dc
, dst
);
267 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
269 return cpu_fpr
[DFPREG(dst
) / 2];
272 static void gen_op_load_fpr_QT0(unsigned int src
)
274 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
275 offsetof(CPU_QuadU
, ll
.upper
));
276 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
277 offsetof(CPU_QuadU
, ll
.lower
));
280 static void gen_op_load_fpr_QT1(unsigned int src
)
282 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
283 offsetof(CPU_QuadU
, ll
.upper
));
284 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
285 offsetof(CPU_QuadU
, ll
.lower
));
288 static void gen_op_store_QT0_fpr(unsigned int dst
)
290 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
291 offsetof(CPU_QuadU
, ll
.upper
));
292 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
293 offsetof(CPU_QuadU
, ll
.lower
));
297 #ifdef CONFIG_USER_ONLY
298 #define supervisor(dc) 0
299 #define hypervisor(dc) 0
301 #ifdef TARGET_SPARC64
302 #define hypervisor(dc) (dc->hypervisor)
303 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
305 #define supervisor(dc) (dc->supervisor)
306 #define hypervisor(dc) 0
310 #if !defined(TARGET_SPARC64)
311 # define AM_CHECK(dc) false
312 #elif defined(TARGET_ABI32)
313 # define AM_CHECK(dc) true
314 #elif defined(CONFIG_USER_ONLY)
315 # define AM_CHECK(dc) false
317 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
320 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
323 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
327 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
329 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
332 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
336 return cpu_regs
[reg
];
338 TCGv t
= tcg_temp_new();
339 tcg_gen_movi_tl(t
, 0);
344 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
348 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
352 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
356 return cpu_regs
[reg
];
358 return tcg_temp_new();
362 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
364 return translator_use_goto_tb(&s
->base
, pc
) &&
365 translator_use_goto_tb(&s
->base
, npc
);
368 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
369 target_ulong pc
, target_ulong npc
)
371 if (use_goto_tb(s
, pc
, npc
)) {
372 /* jump to same page: we can use a direct jump */
373 tcg_gen_goto_tb(tb_num
);
374 tcg_gen_movi_tl(cpu_pc
, pc
);
375 tcg_gen_movi_tl(cpu_npc
, npc
);
376 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
378 /* jump to another page: we can use an indirect jump */
379 tcg_gen_movi_tl(cpu_pc
, pc
);
380 tcg_gen_movi_tl(cpu_npc
, npc
);
381 tcg_gen_lookup_and_goto_ptr();
385 static TCGv
gen_carry32(void)
387 if (TARGET_LONG_BITS
== 64) {
388 TCGv t
= tcg_temp_new();
389 tcg_gen_extract_tl(t
, cpu_icc_C
, 32, 1);
395 static void gen_op_addcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
397 TCGv z
= tcg_constant_tl(0);
400 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
401 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
403 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
405 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
406 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src2
);
407 tcg_gen_andc_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
408 if (TARGET_LONG_BITS
== 64) {
410 * Carry-in to bit 32 is result ^ src1 ^ src2.
411 * We already have the src xor term in Z, from computation of V.
413 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
414 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
416 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
417 tcg_gen_mov_tl(dst
, cpu_cc_N
);
420 static void gen_op_addcc(TCGv dst
, TCGv src1
, TCGv src2
)
422 gen_op_addcc_int(dst
, src1
, src2
, NULL
);
425 static void gen_op_taddcc(TCGv dst
, TCGv src1
, TCGv src2
)
427 TCGv t
= tcg_temp_new();
429 /* Save the tag bits around modification of dst. */
430 tcg_gen_or_tl(t
, src1
, src2
);
432 gen_op_addcc(dst
, src1
, src2
);
434 /* Incorprate tag bits into icc.V */
435 tcg_gen_andi_tl(t
, t
, 3);
436 tcg_gen_neg_tl(t
, t
);
437 tcg_gen_ext32u_tl(t
, t
);
438 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
441 static void gen_op_addc(TCGv dst
, TCGv src1
, TCGv src2
)
443 tcg_gen_add_tl(dst
, src1
, src2
);
444 tcg_gen_add_tl(dst
, dst
, gen_carry32());
447 static void gen_op_addccc(TCGv dst
, TCGv src1
, TCGv src2
)
449 gen_op_addcc_int(dst
, src1
, src2
, gen_carry32());
452 static void gen_op_subcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
454 TCGv z
= tcg_constant_tl(0);
457 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
458 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
460 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
462 tcg_gen_neg_tl(cpu_cc_C
, cpu_cc_C
);
463 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
464 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src1
);
465 tcg_gen_and_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
466 #ifdef TARGET_SPARC64
467 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
468 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
470 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
471 tcg_gen_mov_tl(dst
, cpu_cc_N
);
474 static void gen_op_subcc(TCGv dst
, TCGv src1
, TCGv src2
)
476 gen_op_subcc_int(dst
, src1
, src2
, NULL
);
479 static void gen_op_tsubcc(TCGv dst
, TCGv src1
, TCGv src2
)
481 TCGv t
= tcg_temp_new();
483 /* Save the tag bits around modification of dst. */
484 tcg_gen_or_tl(t
, src1
, src2
);
486 gen_op_subcc(dst
, src1
, src2
);
488 /* Incorprate tag bits into icc.V */
489 tcg_gen_andi_tl(t
, t
, 3);
490 tcg_gen_neg_tl(t
, t
);
491 tcg_gen_ext32u_tl(t
, t
);
492 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
495 static void gen_op_subc(TCGv dst
, TCGv src1
, TCGv src2
)
497 tcg_gen_sub_tl(dst
, src1
, src2
);
498 tcg_gen_sub_tl(dst
, dst
, gen_carry32());
501 static void gen_op_subccc(TCGv dst
, TCGv src1
, TCGv src2
)
503 gen_op_subcc_int(dst
, src1
, src2
, gen_carry32());
506 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
508 TCGv zero
= tcg_constant_tl(0);
509 TCGv t_src1
= tcg_temp_new();
510 TCGv t_src2
= tcg_temp_new();
511 TCGv t0
= tcg_temp_new();
513 tcg_gen_ext32u_tl(t_src1
, src1
);
514 tcg_gen_ext32u_tl(t_src2
, src2
);
520 tcg_gen_andi_tl(t0
, cpu_y
, 0x1);
521 tcg_gen_movcond_tl(TCG_COND_EQ
, t_src2
, t0
, zero
, zero
, t_src2
);
525 * y = (b2 << 31) | (y >> 1);
527 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
528 tcg_gen_deposit_tl(cpu_y
, t0
, src1
, 31, 1);
531 tcg_gen_xor_tl(t0
, cpu_cc_N
, cpu_cc_V
);
534 * src1 = (b1 << 31) | (src1 >> 1)
536 tcg_gen_andi_tl(t0
, t0
, 1u << 31);
537 tcg_gen_shri_tl(t_src1
, t_src1
, 1);
538 tcg_gen_or_tl(t_src1
, t_src1
, t0
);
540 gen_op_addcc(dst
, t_src1
, t_src2
);
543 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
545 #if TARGET_LONG_BITS == 32
547 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
549 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
552 TCGv t0
= tcg_temp_new_i64();
553 TCGv t1
= tcg_temp_new_i64();
556 tcg_gen_ext32s_i64(t0
, src1
);
557 tcg_gen_ext32s_i64(t1
, src2
);
559 tcg_gen_ext32u_i64(t0
, src1
);
560 tcg_gen_ext32u_i64(t1
, src2
);
563 tcg_gen_mul_i64(dst
, t0
, t1
);
564 tcg_gen_shri_i64(cpu_y
, dst
, 32);
568 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
570 /* zero-extend truncated operands before multiplication */
571 gen_op_multiply(dst
, src1
, src2
, 0);
574 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
576 /* sign-extend truncated operands before multiplication */
577 gen_op_multiply(dst
, src1
, src2
, 1);
580 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
582 #ifdef TARGET_SPARC64
583 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
584 tcg_gen_ext32s_tl(dst
, dst
);
586 TCGv_i64 t64
= tcg_temp_new_i64();
587 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
588 tcg_gen_trunc_i64_tl(dst
, t64
);
592 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
596 #ifdef TARGET_SPARC64
599 t64
= tcg_temp_new_i64();
602 gen_helper_udiv(t64
, tcg_env
, src1
, src2
);
604 #ifdef TARGET_SPARC64
605 tcg_gen_ext32u_tl(cpu_cc_N
, t64
);
606 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
607 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
608 tcg_gen_movi_tl(cpu_icc_C
, 0);
610 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
612 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
613 tcg_gen_movi_tl(cpu_cc_C
, 0);
614 tcg_gen_mov_tl(dst
, cpu_cc_N
);
617 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
621 #ifdef TARGET_SPARC64
624 t64
= tcg_temp_new_i64();
627 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
629 #ifdef TARGET_SPARC64
630 tcg_gen_ext32s_tl(cpu_cc_N
, t64
);
631 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
632 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
633 tcg_gen_movi_tl(cpu_icc_C
, 0);
635 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
637 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
638 tcg_gen_movi_tl(cpu_cc_C
, 0);
639 tcg_gen_mov_tl(dst
, cpu_cc_N
);
642 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
644 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
647 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
649 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
652 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
654 tcg_gen_ctpop_tl(dst
, src2
);
657 #ifndef TARGET_SPARC64
658 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
660 g_assert_not_reached();
664 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
666 gen_helper_array8(dst
, src1
, src2
);
667 tcg_gen_shli_tl(dst
, dst
, 1);
670 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
672 gen_helper_array8(dst
, src1
, src2
);
673 tcg_gen_shli_tl(dst
, dst
, 2);
676 static void gen_op_fpack16(TCGv_i32 dst
, TCGv_i64 src
)
678 #ifdef TARGET_SPARC64
679 gen_helper_fpack16(dst
, cpu_gsr
, src
);
681 g_assert_not_reached();
685 static void gen_op_fpackfix(TCGv_i32 dst
, TCGv_i64 src
)
687 #ifdef TARGET_SPARC64
688 gen_helper_fpackfix(dst
, cpu_gsr
, src
);
690 g_assert_not_reached();
694 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
696 #ifdef TARGET_SPARC64
697 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
699 g_assert_not_reached();
703 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
705 #ifdef TARGET_SPARC64
710 shift
= tcg_temp_new();
712 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
713 tcg_gen_shli_tl(shift
, shift
, 3);
714 tcg_gen_shl_tl(t1
, s1
, shift
);
717 * A shift of 64 does not produce 0 in TCG. Divide this into a
718 * shift of (up to 63) followed by a constant shift of 1.
720 tcg_gen_xori_tl(shift
, shift
, 63);
721 tcg_gen_shr_tl(t2
, s2
, shift
);
722 tcg_gen_shri_tl(t2
, t2
, 1);
724 tcg_gen_or_tl(dst
, t1
, t2
);
726 g_assert_not_reached();
730 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
732 #ifdef TARGET_SPARC64
733 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
735 g_assert_not_reached();
740 static void gen_op_eval_ba(TCGv dst
)
742 tcg_gen_movi_tl(dst
, 1);
746 static void gen_op_eval_bn(TCGv dst
)
748 tcg_gen_movi_tl(dst
, 0);
752 FPSR bit field FCC1 | FCC0:
758 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
759 unsigned int fcc_offset
)
761 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
762 tcg_gen_andi_tl(reg
, reg
, 0x1);
765 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
767 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
768 tcg_gen_andi_tl(reg
, reg
, 0x1);
772 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
774 TCGv t0
= tcg_temp_new();
775 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
776 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
777 tcg_gen_or_tl(dst
, dst
, t0
);
780 // 1 or 2: FCC0 ^ FCC1
781 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
783 TCGv t0
= tcg_temp_new();
784 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
785 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
786 tcg_gen_xor_tl(dst
, dst
, t0
);
790 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
792 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
796 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
798 TCGv t0
= tcg_temp_new();
799 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
800 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
801 tcg_gen_andc_tl(dst
, dst
, t0
);
805 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
807 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
811 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
813 TCGv t0
= tcg_temp_new();
814 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
815 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
816 tcg_gen_andc_tl(dst
, t0
, dst
);
820 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
822 TCGv t0
= tcg_temp_new();
823 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
824 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
825 tcg_gen_and_tl(dst
, dst
, t0
);
829 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
831 TCGv t0
= tcg_temp_new();
832 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
833 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
834 tcg_gen_or_tl(dst
, dst
, t0
);
835 tcg_gen_xori_tl(dst
, dst
, 0x1);
838 // 0 or 3: !(FCC0 ^ FCC1)
839 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
841 TCGv t0
= tcg_temp_new();
842 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
843 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
844 tcg_gen_xor_tl(dst
, dst
, t0
);
845 tcg_gen_xori_tl(dst
, dst
, 0x1);
849 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
851 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
852 tcg_gen_xori_tl(dst
, dst
, 0x1);
855 // !1: !(FCC0 & !FCC1)
856 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
858 TCGv t0
= tcg_temp_new();
859 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
860 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
861 tcg_gen_andc_tl(dst
, dst
, t0
);
862 tcg_gen_xori_tl(dst
, dst
, 0x1);
866 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
868 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
869 tcg_gen_xori_tl(dst
, dst
, 0x1);
872 // !2: !(!FCC0 & FCC1)
873 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
875 TCGv t0
= tcg_temp_new();
876 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
877 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
878 tcg_gen_andc_tl(dst
, t0
, dst
);
879 tcg_gen_xori_tl(dst
, dst
, 0x1);
882 // !3: !(FCC0 & FCC1)
883 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
885 TCGv t0
= tcg_temp_new();
886 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
887 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
888 tcg_gen_and_tl(dst
, dst
, t0
);
889 tcg_gen_xori_tl(dst
, dst
, 0x1);
892 static void finishing_insn(DisasContext
*dc
)
895 * From here, there is no future path through an unwinding exception.
896 * If the current insn cannot raise an exception, the computation of
897 * cpu_cond may be able to be elided.
899 if (dc
->cpu_cond_live
) {
900 tcg_gen_discard_tl(cpu_cond
);
901 dc
->cpu_cond_live
= false;
905 static void gen_generic_branch(DisasContext
*dc
)
907 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
908 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
909 TCGv c2
= tcg_constant_tl(dc
->jump
.c2
);
911 tcg_gen_movcond_tl(dc
->jump
.cond
, cpu_npc
, dc
->jump
.c1
, c2
, npc0
, npc1
);
914 /* call this function before using the condition register as it may
915 have been set for a jump */
916 static void flush_cond(DisasContext
*dc
)
918 if (dc
->npc
== JUMP_PC
) {
919 gen_generic_branch(dc
);
920 dc
->npc
= DYNAMIC_PC_LOOKUP
;
924 static void save_npc(DisasContext
*dc
)
929 gen_generic_branch(dc
);
930 dc
->npc
= DYNAMIC_PC_LOOKUP
;
933 case DYNAMIC_PC_LOOKUP
:
936 g_assert_not_reached();
939 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
943 static void save_state(DisasContext
*dc
)
945 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
949 static void gen_exception(DisasContext
*dc
, int which
)
953 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
954 dc
->base
.is_jmp
= DISAS_NORETURN
;
957 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
959 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
961 e
->next
= dc
->delay_excp_list
;
962 dc
->delay_excp_list
= e
;
964 e
->lab
= gen_new_label();
967 /* Caller must have used flush_cond before branch. */
968 assert(e
->npc
!= JUMP_PC
);
974 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
976 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
979 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
981 TCGv t
= tcg_temp_new();
984 tcg_gen_andi_tl(t
, addr
, mask
);
987 lab
= delay_exception(dc
, TT_UNALIGNED
);
988 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
991 static void gen_mov_pc_npc(DisasContext
*dc
)
998 gen_generic_branch(dc
);
999 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1000 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1003 case DYNAMIC_PC_LOOKUP
:
1004 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1008 g_assert_not_reached();
1015 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1020 cmp
->c1
= t1
= tcg_temp_new();
1024 case 0x0: /* never */
1025 cmp
->cond
= TCG_COND_NEVER
;
1026 cmp
->c1
= tcg_constant_tl(0);
1029 case 0x1: /* eq: Z */
1030 cmp
->cond
= TCG_COND_EQ
;
1031 if (TARGET_LONG_BITS
== 32 || xcc
) {
1032 tcg_gen_mov_tl(t1
, cpu_cc_Z
);
1034 tcg_gen_ext32u_tl(t1
, cpu_icc_Z
);
1038 case 0x2: /* le: Z | (N ^ V) */
1041 * cc_Z || (N ^ V) < 0 NE
1042 * cc_Z && !((N ^ V) < 0) EQ
1043 * cc_Z & ~((N ^ V) >> TLB) EQ
1045 cmp
->cond
= TCG_COND_EQ
;
1046 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1047 tcg_gen_sextract_tl(t1
, t1
, xcc
? 63 : 31, 1);
1048 tcg_gen_andc_tl(t1
, xcc
? cpu_cc_Z
: cpu_icc_Z
, t1
);
1049 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1050 tcg_gen_ext32u_tl(t1
, t1
);
1054 case 0x3: /* lt: N ^ V */
1055 cmp
->cond
= TCG_COND_LT
;
1056 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1057 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1058 tcg_gen_ext32s_tl(t1
, t1
);
1062 case 0x4: /* leu: Z | C */
1065 * cc_Z == 0 || cc_C != 0 NE
1066 * cc_Z != 0 && cc_C == 0 EQ
1067 * cc_Z & (cc_C ? 0 : -1) EQ
1068 * cc_Z & (cc_C - 1) EQ
1070 cmp
->cond
= TCG_COND_EQ
;
1071 if (TARGET_LONG_BITS
== 32 || xcc
) {
1072 tcg_gen_subi_tl(t1
, cpu_cc_C
, 1);
1073 tcg_gen_and_tl(t1
, t1
, cpu_cc_Z
);
1075 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1076 tcg_gen_subi_tl(t1
, t1
, 1);
1077 tcg_gen_and_tl(t1
, t1
, cpu_icc_Z
);
1078 tcg_gen_ext32u_tl(t1
, t1
);
1082 case 0x5: /* ltu: C */
1083 cmp
->cond
= TCG_COND_NE
;
1084 if (TARGET_LONG_BITS
== 32 || xcc
) {
1085 tcg_gen_mov_tl(t1
, cpu_cc_C
);
1087 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1091 case 0x6: /* neg: N */
1092 cmp
->cond
= TCG_COND_LT
;
1093 if (TARGET_LONG_BITS
== 32 || xcc
) {
1094 tcg_gen_mov_tl(t1
, cpu_cc_N
);
1096 tcg_gen_ext32s_tl(t1
, cpu_cc_N
);
1100 case 0x7: /* vs: V */
1101 cmp
->cond
= TCG_COND_LT
;
1102 if (TARGET_LONG_BITS
== 32 || xcc
) {
1103 tcg_gen_mov_tl(t1
, cpu_cc_V
);
1105 tcg_gen_ext32s_tl(t1
, cpu_cc_V
);
1110 cmp
->cond
= tcg_invert_cond(cmp
->cond
);
1114 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1116 unsigned int offset
;
1119 /* For now we still generate a straight boolean result. */
1120 cmp
->cond
= TCG_COND_NE
;
1121 cmp
->c1
= r_dst
= tcg_temp_new();
1142 gen_op_eval_bn(r_dst
);
1145 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1148 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1151 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1154 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1157 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1160 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1163 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1166 gen_op_eval_ba(r_dst
);
1169 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1172 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1175 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1178 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1181 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1184 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1187 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1193 static const TCGCond gen_tcg_cond_reg
[8] = {
1194 TCG_COND_NEVER
, /* reserved */
1198 TCG_COND_NEVER
, /* reserved */
1204 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1206 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1207 cmp
->c1
= tcg_temp_new();
1209 tcg_gen_mov_tl(cmp
->c1
, r_src
);
1212 static void gen_op_clear_ieee_excp_and_FTT(void)
1214 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1217 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1219 gen_op_clear_ieee_excp_and_FTT();
1220 tcg_gen_mov_i32(dst
, src
);
1223 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1225 gen_op_clear_ieee_excp_and_FTT();
1226 gen_helper_fnegs(dst
, src
);
1229 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1231 gen_op_clear_ieee_excp_and_FTT();
1232 gen_helper_fabss(dst
, src
);
1235 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1237 gen_op_clear_ieee_excp_and_FTT();
1238 tcg_gen_mov_i64(dst
, src
);
1241 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1243 gen_op_clear_ieee_excp_and_FTT();
1244 gen_helper_fnegd(dst
, src
);
1247 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1249 gen_op_clear_ieee_excp_and_FTT();
1250 gen_helper_fabsd(dst
, src
);
1253 #ifdef TARGET_SPARC64
1254 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1258 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1261 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1264 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1267 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1272 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1276 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1279 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1282 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1285 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1290 static void gen_op_fcmpq(int fccno
)
1294 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1297 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1300 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1303 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1308 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1312 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1315 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1318 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1321 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1326 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1330 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1333 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1336 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1339 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1344 static void gen_op_fcmpeq(int fccno
)
1348 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1351 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1354 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1357 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1364 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1366 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1369 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1371 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1374 static void gen_op_fcmpq(int fccno
)
1376 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1379 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1381 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1384 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1386 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1389 static void gen_op_fcmpeq(int fccno
)
1391 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1395 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1397 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1398 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1399 gen_exception(dc
, TT_FP_EXCP
);
1402 static int gen_trap_ifnofpu(DisasContext
*dc
)
1404 #if !defined(CONFIG_USER_ONLY)
1405 if (!dc
->fpu_enabled
) {
1406 gen_exception(dc
, TT_NFPU_INSN
);
1434  * For asi == -1, treat as non-asi.
1435  * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1437 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1439 ASIType type
= GET_ASI_HELPER
;
1440 int mem_idx
= dc
->mem_idx
;
1443 /* Artificial "non-asi" case. */
1444 type
= GET_ASI_DIRECT
;
1448 #ifndef TARGET_SPARC64
1449 /* Before v9, all asis are immediate and privileged. */
1451 gen_exception(dc
, TT_ILL_INSN
);
1452 type
= GET_ASI_EXCP
;
1453 } else if (supervisor(dc
)
1454 /* Note that LEON accepts ASI_USERDATA in user mode, for
1455 use with CASA. Also note that previous versions of
1456 QEMU allowed (and old versions of gcc emitted) ASI_P
1457 for LEON, which is incorrect. */
1458 || (asi
== ASI_USERDATA
1459 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1461 case ASI_USERDATA
: /* User data access */
1462 mem_idx
= MMU_USER_IDX
;
1463 type
= GET_ASI_DIRECT
;
1465 case ASI_KERNELDATA
: /* Supervisor data access */
1466 mem_idx
= MMU_KERNEL_IDX
;
1467 type
= GET_ASI_DIRECT
;
1469 case ASI_M_BYPASS
: /* MMU passthrough */
1470 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1471 mem_idx
= MMU_PHYS_IDX
;
1472 type
= GET_ASI_DIRECT
;
1474 case ASI_M_BCOPY
: /* Block copy, sta access */
1475 mem_idx
= MMU_KERNEL_IDX
;
1476 type
= GET_ASI_BCOPY
;
1478 case ASI_M_BFILL
: /* Block fill, stda access */
1479 mem_idx
= MMU_KERNEL_IDX
;
1480 type
= GET_ASI_BFILL
;
1484 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1485 * permissions check in get_physical_address(..).
1487 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1489 gen_exception(dc
, TT_PRIV_INSN
);
1490 type
= GET_ASI_EXCP
;
1496 /* With v9, all asis below 0x80 are privileged. */
1497 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1498 down that bit into DisasContext. For the moment that's ok,
1499 since the direct implementations below doesn't have any ASIs
1500 in the restricted [0x30, 0x7f] range, and the check will be
1501 done properly in the helper. */
1502 if (!supervisor(dc
) && asi
< 0x80) {
1503 gen_exception(dc
, TT_PRIV_ACT
);
1504 type
= GET_ASI_EXCP
;
1507 case ASI_REAL
: /* Bypass */
1508 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1509 case ASI_REAL_L
: /* Bypass LE */
1510 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1511 case ASI_TWINX_REAL
: /* Real address, twinx */
1512 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1513 case ASI_QUAD_LDD_PHYS
:
1514 case ASI_QUAD_LDD_PHYS_L
:
1515 mem_idx
= MMU_PHYS_IDX
;
1517 case ASI_N
: /* Nucleus */
1518 case ASI_NL
: /* Nucleus LE */
1521 case ASI_NUCLEUS_QUAD_LDD
:
1522 case ASI_NUCLEUS_QUAD_LDD_L
:
1523 if (hypervisor(dc
)) {
1524 mem_idx
= MMU_PHYS_IDX
;
1526 mem_idx
= MMU_NUCLEUS_IDX
;
1529 case ASI_AIUP
: /* As if user primary */
1530 case ASI_AIUPL
: /* As if user primary LE */
1531 case ASI_TWINX_AIUP
:
1532 case ASI_TWINX_AIUP_L
:
1533 case ASI_BLK_AIUP_4V
:
1534 case ASI_BLK_AIUP_L_4V
:
1537 mem_idx
= MMU_USER_IDX
;
1539 case ASI_AIUS
: /* As if user secondary */
1540 case ASI_AIUSL
: /* As if user secondary LE */
1541 case ASI_TWINX_AIUS
:
1542 case ASI_TWINX_AIUS_L
:
1543 case ASI_BLK_AIUS_4V
:
1544 case ASI_BLK_AIUS_L_4V
:
1547 mem_idx
= MMU_USER_SECONDARY_IDX
;
1549 case ASI_S
: /* Secondary */
1550 case ASI_SL
: /* Secondary LE */
1553 case ASI_BLK_COMMIT_S
:
1560 if (mem_idx
== MMU_USER_IDX
) {
1561 mem_idx
= MMU_USER_SECONDARY_IDX
;
1562 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1563 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1566 case ASI_P
: /* Primary */
1567 case ASI_PL
: /* Primary LE */
1570 case ASI_BLK_COMMIT_P
:
1594 type
= GET_ASI_DIRECT
;
1596 case ASI_TWINX_REAL
:
1597 case ASI_TWINX_REAL_L
:
1600 case ASI_TWINX_AIUP
:
1601 case ASI_TWINX_AIUP_L
:
1602 case ASI_TWINX_AIUS
:
1603 case ASI_TWINX_AIUS_L
:
1608 case ASI_QUAD_LDD_PHYS
:
1609 case ASI_QUAD_LDD_PHYS_L
:
1610 case ASI_NUCLEUS_QUAD_LDD
:
1611 case ASI_NUCLEUS_QUAD_LDD_L
:
1612 type
= GET_ASI_DTWINX
;
1614 case ASI_BLK_COMMIT_P
:
1615 case ASI_BLK_COMMIT_S
:
1616 case ASI_BLK_AIUP_4V
:
1617 case ASI_BLK_AIUP_L_4V
:
1620 case ASI_BLK_AIUS_4V
:
1621 case ASI_BLK_AIUS_L_4V
:
1628 type
= GET_ASI_BLOCK
;
1635 type
= GET_ASI_SHORT
;
1642 type
= GET_ASI_SHORT
;
1645 /* The little-endian asis all have bit 3 set. */
1653 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
1656 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1657 static void gen_helper_ld_asi(TCGv_i64 r
, TCGv_env e
, TCGv a
,
1658 TCGv_i32 asi
, TCGv_i32 mop
)
1660 g_assert_not_reached();
1663 static void gen_helper_st_asi(TCGv_env e
, TCGv a
, TCGv_i64 r
,
1664 TCGv_i32 asi
, TCGv_i32 mop
)
1666 g_assert_not_reached();
1670 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1675 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1676 gen_exception(dc
, TT_ILL_INSN
);
1678 case GET_ASI_DIRECT
:
1679 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1683 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1684 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1687 #ifdef TARGET_SPARC64
1688 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1691 TCGv_i64 t64
= tcg_temp_new_i64();
1692 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1693 tcg_gen_trunc_i64_tl(dst
, t64
);
1701 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1707 case GET_ASI_DTWINX
: /* Reserved for stda. */
1708 if (TARGET_LONG_BITS
== 32) {
1709 gen_exception(dc
, TT_ILL_INSN
);
1711 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1712 /* Pre OpenSPARC CPUs don't have these */
1713 gen_exception(dc
, TT_ILL_INSN
);
1716 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1719 case GET_ASI_DIRECT
:
1720 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1724 assert(TARGET_LONG_BITS
== 32);
1725 /* Copy 32 bytes from the address in SRC to ADDR. */
1726 /* ??? The original qemu code suggests 4-byte alignment, dropping
1727 the low bits, but the only place I can see this used is in the
1728 Linux kernel with 32 byte alignment, which would make more sense
1729 as a cacheline-style operation. */
1731 TCGv saddr
= tcg_temp_new();
1732 TCGv daddr
= tcg_temp_new();
1733 TCGv four
= tcg_constant_tl(4);
1734 TCGv_i32 tmp
= tcg_temp_new_i32();
1737 tcg_gen_andi_tl(saddr
, src
, -4);
1738 tcg_gen_andi_tl(daddr
, addr
, -4);
1739 for (i
= 0; i
< 32; i
+= 4) {
1740 /* Since the loads and stores are paired, allow the
1741 copy to happen in the host endianness. */
1742 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
->mem_idx
, MO_UL
);
1743 tcg_gen_qemu_st_i32(tmp
, daddr
, da
->mem_idx
, MO_UL
);
1744 tcg_gen_add_tl(saddr
, saddr
, four
);
1745 tcg_gen_add_tl(daddr
, daddr
, four
);
1752 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1753 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1756 #ifdef TARGET_SPARC64
1757 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
1760 TCGv_i64 t64
= tcg_temp_new_i64();
1761 tcg_gen_extu_tl_i64(t64
, src
);
1762 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
1766 /* A write to a TLB register may alter page maps. End the TB. */
1767 dc
->npc
= DYNAMIC_PC
;
1773 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
1774 TCGv dst
, TCGv src
, TCGv addr
)
1779 case GET_ASI_DIRECT
:
1780 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
1781 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1784 /* ??? Should be DAE_invalid_asi. */
1785 gen_exception(dc
, TT_DATA_ACCESS
);
1790 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
1791 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
1796 case GET_ASI_DIRECT
:
1797 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
1798 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1801 /* ??? Should be DAE_invalid_asi. */
1802 gen_exception(dc
, TT_DATA_ACCESS
);
1807 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1812 case GET_ASI_DIRECT
:
1813 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
1814 da
->mem_idx
, MO_UB
);
1817 /* ??? In theory, this should be raise DAE_invalid_asi.
1818 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1819 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
1820 gen_helper_exit_atomic(tcg_env
);
1822 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1823 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
1827 t64
= tcg_temp_new_i64();
1828 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1830 s64
= tcg_constant_i64(0xff);
1831 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
1833 tcg_gen_trunc_i64_tl(dst
, t64
);
1836 dc
->npc
= DYNAMIC_PC
;
1842 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1845 MemOp memop
= da
->memop
;
1846 MemOp size
= memop
& MO_SIZE
;
1851 /* TODO: Use 128-bit load/store below. */
1852 if (size
== MO_128
) {
1853 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1860 case GET_ASI_DIRECT
:
1861 memop
|= MO_ALIGN_4
;
1864 d32
= gen_dest_fpr_F(dc
);
1865 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
1866 gen_store_fpr_F(dc
, rd
, d32
);
1870 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
1874 d64
= tcg_temp_new_i64();
1875 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
1876 addr_tmp
= tcg_temp_new();
1877 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1878 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1879 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1882 g_assert_not_reached();
1887 /* Valid for lddfa on aligned registers only. */
1888 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
1889 /* The first operation checks required alignment. */
1890 addr_tmp
= tcg_temp_new();
1891 for (int i
= 0; ; ++i
) {
1892 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
1893 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
1897 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1901 gen_exception(dc
, TT_ILL_INSN
);
1906 /* Valid for lddfa only. */
1907 if (orig_size
== MO_64
) {
1908 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1911 gen_exception(dc
, TT_ILL_INSN
);
1917 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1918 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
1921 /* According to the table in the UA2011 manual, the only
1922 other asis that are valid for ldfa/lddfa/ldqfa are
1923 the NO_FAULT asis. We still need a helper for these,
1924 but we can just use the integer asi helper for them. */
1927 d64
= tcg_temp_new_i64();
1928 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1929 d32
= gen_dest_fpr_F(dc
);
1930 tcg_gen_extrl_i64_i32(d32
, d64
);
1931 gen_store_fpr_F(dc
, rd
, d32
);
1934 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
1938 d64
= tcg_temp_new_i64();
1939 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1940 addr_tmp
= tcg_temp_new();
1941 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1942 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
1944 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1947 g_assert_not_reached();
1954 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1957 MemOp memop
= da
->memop
;
1958 MemOp size
= memop
& MO_SIZE
;
1962 /* TODO: Use 128-bit load/store below. */
1963 if (size
== MO_128
) {
1964 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1971 case GET_ASI_DIRECT
:
1972 memop
|= MO_ALIGN_4
;
1975 d32
= gen_load_fpr_F(dc
, rd
);
1976 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
1979 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1980 memop
| MO_ALIGN_4
);
1983 /* Only 4-byte alignment required. However, it is legal for the
1984 cpu to signal the alignment fault, and the OS trap handler is
1985 required to fix it up. Requiring 16-byte alignment here avoids
1986 having to probe the second page before performing the first
1988 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1989 memop
| MO_ALIGN_16
);
1990 addr_tmp
= tcg_temp_new();
1991 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1992 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1995 g_assert_not_reached();
2000 /* Valid for stdfa on aligned registers only. */
2001 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2002 /* The first operation checks required alignment. */
2003 addr_tmp
= tcg_temp_new();
2004 for (int i
= 0; ; ++i
) {
2005 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2006 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2010 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2014 gen_exception(dc
, TT_ILL_INSN
);
2019 /* Valid for stdfa only. */
2020 if (orig_size
== MO_64
) {
2021 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2024 gen_exception(dc
, TT_ILL_INSN
);
2029 /* According to the table in the UA2011 manual, the only
2030 other asis that are valid for ldfa/lddfa/ldqfa are
2031 the PST* asis, which aren't currently handled. */
2032 gen_exception(dc
, TT_ILL_INSN
);
2037 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2039 TCGv hi
= gen_dest_gpr(dc
, rd
);
2040 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
2046 case GET_ASI_DTWINX
:
2047 #ifdef TARGET_SPARC64
2049 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2050 TCGv_i128 t
= tcg_temp_new_i128();
2052 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
2054 * Note that LE twinx acts as if each 64-bit register result is
2055 * byte swapped. We perform one 128-bit LE load, so must swap
2056 * the order of the writebacks.
2058 if ((mop
& MO_BSWAP
) == MO_TE
) {
2059 tcg_gen_extr_i128_i64(lo
, hi
, t
);
2061 tcg_gen_extr_i128_i64(hi
, lo
, t
);
2066 g_assert_not_reached();
2069 case GET_ASI_DIRECT
:
2071 TCGv_i64 tmp
= tcg_temp_new_i64();
2073 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2075 /* Note that LE ldda acts as if each 32-bit register
2076 result is byte swapped. Having just performed one
2077 64-bit bswap, we need now to swap the writebacks. */
2078 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2079 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2081 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2087 /* ??? In theory we've handled all of the ASIs that are valid
2088 for ldda, and this should raise DAE_invalid_asi. However,
2089 real hardware allows others. This can be seen with e.g.
2090 FreeBSD 10.3 wrt ASI_IC_TAG. */
2092 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2093 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2094 TCGv_i64 tmp
= tcg_temp_new_i64();
2097 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2100 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2101 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2103 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2109 gen_store_gpr(dc
, rd
, hi
);
2110 gen_store_gpr(dc
, rd
+ 1, lo
);
2113 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2115 TCGv hi
= gen_load_gpr(dc
, rd
);
2116 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2122 case GET_ASI_DTWINX
:
2123 #ifdef TARGET_SPARC64
2125 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2126 TCGv_i128 t
= tcg_temp_new_i128();
2129 * Note that LE twinx acts as if each 64-bit register result is
2130 * byte swapped. We perform one 128-bit LE store, so must swap
2131 * the order of the construction.
2133 if ((mop
& MO_BSWAP
) == MO_TE
) {
2134 tcg_gen_concat_i64_i128(t
, lo
, hi
);
2136 tcg_gen_concat_i64_i128(t
, hi
, lo
);
2138 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
2142 g_assert_not_reached();
2145 case GET_ASI_DIRECT
:
2147 TCGv_i64 t64
= tcg_temp_new_i64();
2149 /* Note that LE stda acts as if each 32-bit register result is
2150 byte swapped. We will perform one 64-bit LE store, so now
2151 we must swap the order of the construction. */
2152 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2153 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2155 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2157 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2162 assert(TARGET_LONG_BITS
== 32);
2163 /* Store 32 bytes of T64 to ADDR. */
2164 /* ??? The original qemu code suggests 8-byte alignment, dropping
2165 the low bits, but the only place I can see this used is in the
2166 Linux kernel with 32 byte alignment, which would make more sense
2167 as a cacheline-style operation. */
2169 TCGv_i64 t64
= tcg_temp_new_i64();
2170 TCGv d_addr
= tcg_temp_new();
2171 TCGv eight
= tcg_constant_tl(8);
2174 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2175 tcg_gen_andi_tl(d_addr
, addr
, -8);
2176 for (i
= 0; i
< 32; i
+= 8) {
2177 tcg_gen_qemu_st_i64(t64
, d_addr
, da
->mem_idx
, da
->memop
);
2178 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2184 /* ??? In theory we've handled all of the ASIs that are valid
2185 for stda, and this should raise DAE_invalid_asi. */
2187 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2188 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2189 TCGv_i64 t64
= tcg_temp_new_i64();
2192 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2193 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2195 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2199 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2205 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2207 #ifdef TARGET_SPARC64
2208 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2209 TCGv_i64 c64
= tcg_temp_new_i64();
2211 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2212 or fold the comparison down to 32 bits and use movcond_i32. Choose
2214 c32
= tcg_temp_new_i32();
2215 tcg_gen_setcondi_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2216 tcg_gen_extrl_i64_i32(c32
, c64
);
2218 s1
= gen_load_fpr_F(dc
, rs
);
2219 s2
= gen_load_fpr_F(dc
, rd
);
2220 dst
= gen_dest_fpr_F(dc
);
2221 zero
= tcg_constant_i32(0);
2223 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2225 gen_store_fpr_F(dc
, rd
, dst
);
2227 qemu_build_not_reached();
2231 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2233 #ifdef TARGET_SPARC64
2234 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2235 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2236 gen_load_fpr_D(dc
, rs
),
2237 gen_load_fpr_D(dc
, rd
));
2238 gen_store_fpr_D(dc
, rd
, dst
);
2240 qemu_build_not_reached();
2244 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2246 #ifdef TARGET_SPARC64
2247 int qd
= QFPREG(rd
);
2248 int qs
= QFPREG(rs
);
2249 TCGv c2
= tcg_constant_tl(cmp
->c2
);
2251 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, c2
,
2252 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2253 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, c2
,
2254 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2256 gen_update_fprs_dirty(dc
, qd
);
2258 qemu_build_not_reached();
2262 #ifdef TARGET_SPARC64
2263 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2265 TCGv_i32 r_tl
= tcg_temp_new_i32();
2267 /* load env->tl into r_tl */
2268 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2270 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2271 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2273 /* calculate offset to current trap state from env->ts, reuse r_tl */
2274 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2275 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2277 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2279 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2280 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2281 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2286 static int extract_dfpreg(DisasContext
*dc
, int x
)
2291 static int extract_qfpreg(DisasContext
*dc
, int x
)
2296 /* Include the auto-generated decoder. */
2297 #include "decode-insns.c.inc"
2299 #define TRANS(NAME, AVAIL, FUNC, ...) \
2300 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2301 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2303 #define avail_ALL(C) true
2304 #ifdef TARGET_SPARC64
2305 # define avail_32(C) false
2306 # define avail_ASR17(C) false
2307 # define avail_CASA(C) true
2308 # define avail_DIV(C) true
2309 # define avail_MUL(C) true
2310 # define avail_POWERDOWN(C) false
2311 # define avail_64(C) true
2312 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2313 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2314 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2315 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2317 # define avail_32(C) true
2318 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2319 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2320 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2321 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2322 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2323 # define avail_64(C) false
2324 # define avail_GL(C) false
2325 # define avail_HYPV(C) false
2326 # define avail_VIS1(C) false
2327 # define avail_VIS2(C) false
2330 /* Default case for non jump instructions. */
2331 static bool advance_pc(DisasContext
*dc
)
2340 case DYNAMIC_PC_LOOKUP
:
2342 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2343 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2347 /* we can do a static jump */
2348 l1
= gen_new_label();
2349 tcg_gen_brcondi_tl(dc
->jump
.cond
, dc
->jump
.c1
, dc
->jump
.c2
, l1
);
2351 /* jump not taken */
2352 gen_goto_tb(dc
, 1, dc
->jump_pc
[1], dc
->jump_pc
[1] + 4);
2356 gen_goto_tb(dc
, 0, dc
->jump_pc
[0], dc
->jump_pc
[0] + 4);
2358 dc
->base
.is_jmp
= DISAS_NORETURN
;
2362 g_assert_not_reached();
2366 dc
->npc
= dc
->npc
+ 4;
2372 * Major opcodes 00 and 01 -- branches, call, and sethi
2375 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2376 bool annul
, int disp
)
2378 target_ulong dest
= address_mask_i(dc
, dc
->pc
+ disp
* 4);
2383 if (cmp
->cond
== TCG_COND_ALWAYS
) {
2394 if (cmp
->cond
== TCG_COND_NEVER
) {
2399 tcg_gen_addi_tl(cpu_pc
, cpu_pc
, 4);
2401 tcg_gen_addi_tl(cpu_npc
, cpu_pc
, 4);
2403 dc
->pc
= npc
+ (annul
? 4 : 0);
2404 dc
->npc
= dc
->pc
+ 4;
2413 TCGLabel
*l1
= gen_new_label();
2415 tcg_gen_brcondi_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2416 gen_goto_tb(dc
, 0, npc
, dest
);
2418 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2420 dc
->base
.is_jmp
= DISAS_NORETURN
;
2425 case DYNAMIC_PC_LOOKUP
:
2426 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2427 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2428 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2429 cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2430 tcg_constant_tl(dest
), cpu_npc
);
2434 g_assert_not_reached();
2440 dc
->jump_pc
[0] = dest
;
2441 dc
->jump_pc
[1] = npc
+ 4;
2443 /* The condition for cpu_cond is always NE -- normalize. */
2444 if (cmp
->cond
== TCG_COND_NE
) {
2445 tcg_gen_xori_tl(cpu_cond
, cmp
->c1
, cmp
->c2
);
2447 tcg_gen_setcondi_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2449 dc
->cpu_cond_live
= true;
2455 static bool raise_priv(DisasContext
*dc
)
2457 gen_exception(dc
, TT_PRIV_INSN
);
2461 static bool raise_unimpfpop(DisasContext
*dc
)
2463 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2467 static bool gen_trap_float128(DisasContext
*dc
)
2469 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2472 return raise_unimpfpop(dc
);
2475 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2479 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2480 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2483 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2484 TRANS(BPcc
, 64, do_bpcc
, a
)
2486 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2490 if (gen_trap_ifnofpu(dc
)) {
2493 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2494 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2497 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2498 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2500 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2504 if (!avail_64(dc
)) {
2507 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
2511 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
2512 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2515 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2517 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2519 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2525 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2528 * For sparc32, always generate the no-coprocessor exception.
2529 * For sparc64, always generate illegal instruction.
2531 #ifdef TARGET_SPARC64
2534 gen_exception(dc
, TT_NCP_INSN
);
2539 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2541 /* Special-case %g0 because that's the canonical nop. */
2543 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2545 return advance_pc(dc
);
2549 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2552 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2553 int rs1
, bool imm
, int rs2_or_imm
)
2555 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2556 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2563 return advance_pc(dc
);
2567 * Immediate traps are the most common case. Since this value is
2568 * live across the branch, it really pays to evaluate the constant.
2570 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2571 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2573 trap
= tcg_temp_new_i32();
2574 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2576 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2578 TCGv_i32 t2
= tcg_temp_new_i32();
2579 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2580 tcg_gen_add_i32(trap
, trap
, t2
);
2582 tcg_gen_andi_i32(trap
, trap
, mask
);
2583 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2591 gen_helper_raise_exception(tcg_env
, trap
);
2592 dc
->base
.is_jmp
= DISAS_NORETURN
;
2596 /* Conditional trap. */
2598 lab
= delay_exceptionv(dc
, trap
);
2599 gen_compare(&cmp
, cc
, cond
, dc
);
2600 tcg_gen_brcondi_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2602 return advance_pc(dc
);
2605 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2607 if (avail_32(dc
) && a
->cc
) {
2610 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2613 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2618 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2621 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2626 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2629 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2631 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2632 return advance_pc(dc
);
2635 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2641 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2642 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2645 /* For #Sync, etc, end the TB to recognize interrupts. */
2646 dc
->base
.is_jmp
= DISAS_EXIT
;
2648 return advance_pc(dc
);
2651 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2652 TCGv (*func
)(DisasContext
*, TCGv
))
2655 return raise_priv(dc
);
2657 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2658 return advance_pc(dc
);
2661 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2666 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2669 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2670 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2671 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2673 if (avail_64(dc
) && a
->rs1
!= 0) {
2676 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2679 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2684 * TODO: There are many more fields to be filled,
2685 * some of which are writable.
2687 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
2688 val
|= 1 << 8; /* [8] V8 */
2690 return tcg_constant_tl(val
);
2693 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2695 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2697 gen_helper_rdccr(dst
, tcg_env
);
2701 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2703 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2705 #ifdef TARGET_SPARC64
2706 return tcg_constant_tl(dc
->asi
);
2708 qemu_build_not_reached();
2712 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2714 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2716 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2718 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2719 if (translator_io_start(&dc
->base
)) {
2720 dc
->base
.is_jmp
= DISAS_EXIT
;
2722 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2723 tcg_constant_i32(dc
->mem_idx
));
2727 /* TODO: non-priv access only allowed when enabled. */
2728 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2730 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2732 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
2735 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
2737 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
2739 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
2743 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
2745 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
2747 gen_trap_ifnofpu(dc
);
2751 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
2753 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
2755 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
2759 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
2761 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
2763 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
2767 /* TODO: non-priv access only allowed when enabled. */
2768 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
2770 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
2772 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2774 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
2775 if (translator_io_start(&dc
->base
)) {
2776 dc
->base
.is_jmp
= DISAS_EXIT
;
2778 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2779 tcg_constant_i32(dc
->mem_idx
));
2783 /* TODO: non-priv access only allowed when enabled. */
2784 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
2786 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
2788 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
2792 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2793 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
2796 * UltraSPARC-T1 Strand status.
2797 * HYPV check maybe not enough, UA2005 & UA2007 describe
2798 * this ASR as impl. dep
2800 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
2802 return tcg_constant_tl(1);
2805 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
2807 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
2809 gen_helper_rdpsr(dst
, tcg_env
);
2813 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
2815 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
2817 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
2821 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
2823 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
2825 TCGv_i32 tl
= tcg_temp_new_i32();
2826 TCGv_ptr tp
= tcg_temp_new_ptr();
2828 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
2829 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
2830 tcg_gen_shli_i32(tl
, tl
, 3);
2831 tcg_gen_ext_i32_ptr(tp
, tl
);
2832 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
2834 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
2838 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
2840 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
2842 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
2846 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
2848 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
2850 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
2854 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
2856 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
2858 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
2862 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
2864 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
2866 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
2870 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
2873 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
2875 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
2879 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
2881 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
2883 #ifdef TARGET_SPARC64
2884 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2886 gen_load_trap_state_at_tl(r_tsptr
);
2887 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
2890 qemu_build_not_reached();
2894 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
2896 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
2898 #ifdef TARGET_SPARC64
2899 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2901 gen_load_trap_state_at_tl(r_tsptr
);
2902 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
2905 qemu_build_not_reached();
2909 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
2911 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
2913 #ifdef TARGET_SPARC64
2914 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2916 gen_load_trap_state_at_tl(r_tsptr
);
2917 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
2920 qemu_build_not_reached();
2924 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
2926 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
2928 #ifdef TARGET_SPARC64
2929 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2931 gen_load_trap_state_at_tl(r_tsptr
);
2932 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
2935 qemu_build_not_reached();
2939 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
2940 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
2942 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
2947 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2948 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
2950 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
2952 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
2956 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
2958 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
2960 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
2964 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
2966 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
2968 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
2972 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
2974 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
2976 gen_helper_rdcwp(dst
, tcg_env
);
2980 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
2982 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
2984 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
2988 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
2990 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
2992 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
2996 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
2999 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3001 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3005 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3007 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3009 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3013 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3015 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3017 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3021 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3023 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3025 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3029 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3031 /* UA2005 strand status */
3032 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3034 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3038 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3040 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3042 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3046 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3048 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3051 gen_helper_flushw(tcg_env
);
3052 return advance_pc(dc
);
3057 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3058 void (*func
)(DisasContext
*, TCGv
))
3062 /* For simplicity, we under-decoded the rs2 form. */
3063 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3067 return raise_priv(dc
);
3070 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3071 src
= tcg_constant_tl(a
->rs2_or_imm
);
3073 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3074 if (a
->rs2_or_imm
== 0) {
3077 src
= tcg_temp_new();
3079 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3081 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3086 return advance_pc(dc
);
3089 static void do_wry(DisasContext
*dc
, TCGv src
)
3091 tcg_gen_ext32u_tl(cpu_y
, src
);
3094 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3096 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3098 gen_helper_wrccr(tcg_env
, src
);
3101 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3103 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3105 TCGv tmp
= tcg_temp_new();
3107 tcg_gen_ext8u_tl(tmp
, src
);
3108 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3109 /* End TB to notice changed ASI. */
3110 dc
->base
.is_jmp
= DISAS_EXIT
;
3113 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3115 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3117 #ifdef TARGET_SPARC64
3118 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3120 dc
->base
.is_jmp
= DISAS_EXIT
;
3122 qemu_build_not_reached();
3126 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3128 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3130 gen_trap_ifnofpu(dc
);
3131 tcg_gen_mov_tl(cpu_gsr
, src
);
3134 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3136 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3138 gen_helper_set_softint(tcg_env
, src
);
3141 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3143 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3145 gen_helper_clear_softint(tcg_env
, src
);
3148 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3150 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3152 gen_helper_write_softint(tcg_env
, src
);
3155 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3157 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3159 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3161 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3162 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3163 translator_io_start(&dc
->base
);
3164 gen_helper_tick_set_limit(r_tickptr
, src
);
3165 /* End TB to handle timer interrupt */
3166 dc
->base
.is_jmp
= DISAS_EXIT
;
3169 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3171 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3173 #ifdef TARGET_SPARC64
3174 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3176 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3177 translator_io_start(&dc
->base
);
3178 gen_helper_tick_set_count(r_tickptr
, src
);
3179 /* End TB to handle timer interrupt */
3180 dc
->base
.is_jmp
= DISAS_EXIT
;
3182 qemu_build_not_reached();
3186 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3188 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3190 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3192 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3193 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3194 translator_io_start(&dc
->base
);
3195 gen_helper_tick_set_limit(r_tickptr
, src
);
3196 /* End TB to handle timer interrupt */
3197 dc
->base
.is_jmp
= DISAS_EXIT
;
3200 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3202 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3206 gen_helper_power_down(tcg_env
);
3209 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3211 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3213 gen_helper_wrpsr(tcg_env
, src
);
3214 dc
->base
.is_jmp
= DISAS_EXIT
;
3217 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3219 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3221 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3222 TCGv tmp
= tcg_temp_new();
3224 tcg_gen_andi_tl(tmp
, src
, mask
);
3225 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3228 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3230 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3232 #ifdef TARGET_SPARC64
3233 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3235 gen_load_trap_state_at_tl(r_tsptr
);
3236 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3238 qemu_build_not_reached();
3242 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3244 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3246 #ifdef TARGET_SPARC64
3247 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3249 gen_load_trap_state_at_tl(r_tsptr
);
3250 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3252 qemu_build_not_reached();
3256 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3258 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3260 #ifdef TARGET_SPARC64
3261 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3263 gen_load_trap_state_at_tl(r_tsptr
);
3264 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3266 qemu_build_not_reached();
3270 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3272 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3274 #ifdef TARGET_SPARC64
3275 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3277 gen_load_trap_state_at_tl(r_tsptr
);
3278 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3280 qemu_build_not_reached();
3284 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3286 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3288 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3290 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3291 translator_io_start(&dc
->base
);
3292 gen_helper_tick_set_count(r_tickptr
, src
);
3293 /* End TB to handle timer interrupt */
3294 dc
->base
.is_jmp
= DISAS_EXIT
;
3297 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3299 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3301 tcg_gen_mov_tl(cpu_tbr
, src
);
3304 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3306 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3309 if (translator_io_start(&dc
->base
)) {
3310 dc
->base
.is_jmp
= DISAS_EXIT
;
3312 gen_helper_wrpstate(tcg_env
, src
);
3313 dc
->npc
= DYNAMIC_PC
;
3316 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3318 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3321 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3322 dc
->npc
= DYNAMIC_PC
;
3325 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3327 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3329 if (translator_io_start(&dc
->base
)) {
3330 dc
->base
.is_jmp
= DISAS_EXIT
;
3332 gen_helper_wrpil(tcg_env
, src
);
3335 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3337 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3339 gen_helper_wrcwp(tcg_env
, src
);
3342 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3344 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3346 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3349 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3351 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3353 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3356 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3358 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3360 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3363 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3365 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3367 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3370 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3372 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3374 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3377 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3379 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3381 gen_helper_wrgl(tcg_env
, src
);
3384 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3386 /* UA2005 strand status */
3387 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3389 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3392 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3394 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3396 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3398 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3399 dc
->base
.is_jmp
= DISAS_EXIT
;
3402 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3404 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3406 TCGv_i32 tl
= tcg_temp_new_i32();
3407 TCGv_ptr tp
= tcg_temp_new_ptr();
3409 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3410 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3411 tcg_gen_shli_i32(tl
, tl
, 3);
3412 tcg_gen_ext_i32_ptr(tp
, tl
);
3413 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3415 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3418 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3420 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3422 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3425 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3427 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3429 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3432 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3434 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3436 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3438 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3439 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3440 translator_io_start(&dc
->base
);
3441 gen_helper_tick_set_limit(r_tickptr
, src
);
3442 /* End TB to handle timer interrupt */
3443 dc
->base
.is_jmp
= DISAS_EXIT
;
3446 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
3449 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
3451 if (!supervisor(dc
)) {
3452 return raise_priv(dc
);
3455 gen_helper_saved(tcg_env
);
3457 gen_helper_restored(tcg_env
);
3459 return advance_pc(dc
);
3462 TRANS(SAVED
, 64, do_saved_restored
, true)
3463 TRANS(RESTORED
, 64, do_saved_restored
, false)
3465 static bool trans_NOP(DisasContext
*dc
, arg_NOP
*a
)
3467 return advance_pc(dc
);
3471 * TODO: Need a feature bit for sparcv8.
3472 * In the meantime, treat all 32-bit cpus like sparcv7.
3474 TRANS(NOP_v7
, 32, trans_NOP
, a
)
3475 TRANS(NOP_v9
, 64, trans_NOP
, a
)
3477 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3478 void (*func
)(TCGv
, TCGv
, TCGv
),
3479 void (*funci
)(TCGv
, TCGv
, target_long
),
3484 /* For simplicity, we under-decoded the rs2 form. */
3485 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3492 dst
= gen_dest_gpr(dc
, a
->rd
);
3494 src1
= gen_load_gpr(dc
, a
->rs1
);
3496 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3498 funci(dst
, src1
, a
->rs2_or_imm
);
3500 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
3503 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
3507 if (TARGET_LONG_BITS
== 64) {
3508 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
3509 tcg_gen_movi_tl(cpu_icc_C
, 0);
3511 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
3512 tcg_gen_movi_tl(cpu_cc_C
, 0);
3513 tcg_gen_movi_tl(cpu_cc_V
, 0);
3516 gen_store_gpr(dc
, a
->rd
, dst
);
3517 return advance_pc(dc
);
3520 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3521 void (*func
)(TCGv
, TCGv
, TCGv
),
3522 void (*funci
)(TCGv
, TCGv
, target_long
),
3523 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
3526 return do_arith_int(dc
, a
, func_cc
, NULL
, false);
3528 return do_arith_int(dc
, a
, func
, funci
, false);
3531 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
3532 void (*func
)(TCGv
, TCGv
, TCGv
),
3533 void (*funci
)(TCGv
, TCGv
, target_long
))
3535 return do_arith_int(dc
, a
, func
, funci
, a
->cc
);
3538 TRANS(ADD
, ALL
, do_arith
, a
, tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_addcc
)
3539 TRANS(SUB
, ALL
, do_arith
, a
, tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_subcc
)
3540 TRANS(ADDC
, ALL
, do_arith
, a
, gen_op_addc
, NULL
, gen_op_addccc
)
3541 TRANS(SUBC
, ALL
, do_arith
, a
, gen_op_subc
, NULL
, gen_op_subccc
)
3543 TRANS(TADDcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcc
)
3544 TRANS(TSUBcc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcc
)
3545 TRANS(TADDccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_taddcctv
)
3546 TRANS(TSUBccTV
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_tsubcctv
)
3548 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
3549 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
3550 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
3551 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
3552 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
3554 TRANS(MULX
, 64, do_arith
, a
, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
3555 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
3556 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
3557 TRANS(MULScc
, ALL
, do_arith
, a
, NULL
, NULL
, gen_op_mulscc
)
3559 TRANS(UDIVcc
, DIV
, do_arith
, a
, NULL
, NULL
, gen_op_udivcc
)
3560 TRANS(SDIV
, DIV
, do_arith
, a
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
3562 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3563 TRANS(POPC
, 64, do_arith
, a
, gen_op_popc
, NULL
, NULL
)
3565 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
3567 /* OR with %g0 is the canonical alias for MOV. */
3568 if (!a
->cc
&& a
->rs1
== 0) {
3569 if (a
->imm
|| a
->rs2_or_imm
== 0) {
3570 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
3571 } else if (a
->rs2_or_imm
& ~0x1f) {
3572 /* For simplicity, we under-decoded the rs2 form. */
3575 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
3577 return advance_pc(dc
);
3579 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
3582 static bool trans_UDIV(DisasContext
*dc
, arg_r_r_ri
*a
)
3587 if (!avail_DIV(dc
)) {
3590 /* For simplicity, we under-decoded the rs2 form. */
3591 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3595 if (unlikely(a
->rs2_or_imm
== 0)) {
3596 gen_exception(dc
, TT_DIV_ZERO
);
3601 t2
= tcg_constant_i64((uint32_t)a
->rs2_or_imm
);
3609 n2
= tcg_temp_new_i32();
3610 tcg_gen_trunc_tl_i32(n2
, cpu_regs
[a
->rs2_or_imm
]);
3612 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3613 tcg_gen_brcondi_i32(TCG_COND_EQ
, n2
, 0, lab
);
3615 t2
= tcg_temp_new_i64();
3616 #ifdef TARGET_SPARC64
3617 tcg_gen_ext32u_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3619 tcg_gen_extu_i32_i64(t2
, cpu_regs
[a
->rs2_or_imm
]);
3623 t1
= tcg_temp_new_i64();
3624 tcg_gen_concat_tl_i64(t1
, gen_load_gpr(dc
, a
->rs1
), cpu_y
);
3626 tcg_gen_divu_i64(t1
, t1
, t2
);
3627 tcg_gen_umin_i64(t1
, t1
, tcg_constant_i64(UINT32_MAX
));
3629 dst
= gen_dest_gpr(dc
, a
->rd
);
3630 tcg_gen_trunc_i64_tl(dst
, t1
);
3631 gen_store_gpr(dc
, a
->rd
, dst
);
3632 return advance_pc(dc
);
3635 static bool trans_UDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3637 TCGv dst
, src1
, src2
;
3639 if (!avail_64(dc
)) {
3642 /* For simplicity, we under-decoded the rs2 form. */
3643 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3647 if (unlikely(a
->rs2_or_imm
== 0)) {
3648 gen_exception(dc
, TT_DIV_ZERO
);
3653 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3660 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3661 src2
= cpu_regs
[a
->rs2_or_imm
];
3662 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3665 dst
= gen_dest_gpr(dc
, a
->rd
);
3666 src1
= gen_load_gpr(dc
, a
->rs1
);
3668 tcg_gen_divu_tl(dst
, src1
, src2
);
3669 gen_store_gpr(dc
, a
->rd
, dst
);
3670 return advance_pc(dc
);
3673 static bool trans_SDIVX(DisasContext
*dc
, arg_r_r_ri
*a
)
3675 TCGv dst
, src1
, src2
;
3677 if (!avail_64(dc
)) {
3680 /* For simplicity, we under-decoded the rs2 form. */
3681 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
3685 if (unlikely(a
->rs2_or_imm
== 0)) {
3686 gen_exception(dc
, TT_DIV_ZERO
);
3690 dst
= gen_dest_gpr(dc
, a
->rd
);
3691 src1
= gen_load_gpr(dc
, a
->rs1
);
3694 if (unlikely(a
->rs2_or_imm
== -1)) {
3695 tcg_gen_neg_tl(dst
, src1
);
3696 gen_store_gpr(dc
, a
->rd
, dst
);
3697 return advance_pc(dc
);
3699 src2
= tcg_constant_tl(a
->rs2_or_imm
);
3707 lab
= delay_exception(dc
, TT_DIV_ZERO
);
3708 src2
= cpu_regs
[a
->rs2_or_imm
];
3709 tcg_gen_brcondi_tl(TCG_COND_EQ
, src2
, 0, lab
);
3712 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3713 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3715 t1
= tcg_temp_new();
3716 t2
= tcg_temp_new();
3717 tcg_gen_setcondi_tl(TCG_COND_EQ
, t1
, src1
, (target_long
)INT64_MIN
);
3718 tcg_gen_setcondi_tl(TCG_COND_EQ
, t2
, src2
, -1);
3719 tcg_gen_and_tl(t1
, t1
, t2
);
3720 tcg_gen_movcond_tl(TCG_COND_NE
, t1
, t1
, tcg_constant_tl(0),
3721 tcg_constant_tl(1), src2
);
3725 tcg_gen_div_tl(dst
, src1
, src2
);
3726 gen_store_gpr(dc
, a
->rd
, dst
);
3727 return advance_pc(dc
);
3730 static bool gen_edge(DisasContext
*dc
, arg_r_r_r
*a
,
3731 int width
, bool cc
, bool left
)
3733 TCGv dst
, s1
, s2
, lo1
, lo2
;
3734 uint64_t amask
, tabl
, tabr
;
3735 int shift
, imask
, omask
;
3737 dst
= gen_dest_gpr(dc
, a
->rd
);
3738 s1
= gen_load_gpr(dc
, a
->rs1
);
3739 s2
= gen_load_gpr(dc
, a
->rs2
);
3742 gen_op_subcc(cpu_cc_N
, s1
, s2
);
3746 * Theory of operation: there are two tables, left and right (not to
3747 * be confused with the left and right versions of the opcode). These
3748 * are indexed by the low 3 bits of the inputs. To make things "easy",
3749 * these tables are loaded into two constants, TABL and TABR below.
3750 * The operation index = (input & imask) << shift calculates the index
3751 * into the constant, while val = (table >> index) & omask calculates
3752 * the value we're looking for.
3760 tabl
= 0x80c0e0f0f8fcfeffULL
;
3761 tabr
= 0xff7f3f1f0f070301ULL
;
3763 tabl
= 0x0103070f1f3f7fffULL
;
3764 tabr
= 0xfffefcf8f0e0c080ULL
;
3784 tabl
= (2 << 2) | 3;
3785 tabr
= (3 << 2) | 1;
3787 tabl
= (1 << 2) | 3;
3788 tabr
= (3 << 2) | 2;
3795 lo1
= tcg_temp_new();
3796 lo2
= tcg_temp_new();
3797 tcg_gen_andi_tl(lo1
, s1
, imask
);
3798 tcg_gen_andi_tl(lo2
, s2
, imask
);
3799 tcg_gen_shli_tl(lo1
, lo1
, shift
);
3800 tcg_gen_shli_tl(lo2
, lo2
, shift
);
3802 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
3803 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
3804 tcg_gen_andi_tl(lo1
, lo1
, omask
);
3805 tcg_gen_andi_tl(lo2
, lo2
, omask
);
3807 amask
= address_mask_i(dc
, -8);
3808 tcg_gen_andi_tl(s1
, s1
, amask
);
3809 tcg_gen_andi_tl(s2
, s2
, amask
);
3811 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3812 tcg_gen_and_tl(lo2
, lo2
, lo1
);
3813 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
3815 gen_store_gpr(dc
, a
->rd
, dst
);
3816 return advance_pc(dc
);
3819 TRANS(EDGE8cc
, VIS1
, gen_edge
, a
, 8, 1, 0)
3820 TRANS(EDGE8Lcc
, VIS1
, gen_edge
, a
, 8, 1, 1)
3821 TRANS(EDGE16cc
, VIS1
, gen_edge
, a
, 16, 1, 0)
3822 TRANS(EDGE16Lcc
, VIS1
, gen_edge
, a
, 16, 1, 1)
3823 TRANS(EDGE32cc
, VIS1
, gen_edge
, a
, 32, 1, 0)
3824 TRANS(EDGE32Lcc
, VIS1
, gen_edge
, a
, 32, 1, 1)
3826 TRANS(EDGE8N
, VIS2
, gen_edge
, a
, 8, 0, 0)
3827 TRANS(EDGE8LN
, VIS2
, gen_edge
, a
, 8, 0, 1)
3828 TRANS(EDGE16N
, VIS2
, gen_edge
, a
, 16, 0, 0)
3829 TRANS(EDGE16LN
, VIS2
, gen_edge
, a
, 16, 0, 1)
3830 TRANS(EDGE32N
, VIS2
, gen_edge
, a
, 32, 0, 0)
3831 TRANS(EDGE32LN
, VIS2
, gen_edge
, a
, 32, 0, 1)
3833 static bool do_rrr(DisasContext
*dc
, arg_r_r_r
*a
,
3834 void (*func
)(TCGv
, TCGv
, TCGv
))
3836 TCGv dst
= gen_dest_gpr(dc
, a
->rd
);
3837 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3838 TCGv src2
= gen_load_gpr(dc
, a
->rs2
);
3840 func(dst
, src1
, src2
);
3841 gen_store_gpr(dc
, a
->rd
, dst
);
3842 return advance_pc(dc
);
3845 TRANS(ARRAY8
, VIS1
, do_rrr
, a
, gen_helper_array8
)
3846 TRANS(ARRAY16
, VIS1
, do_rrr
, a
, gen_op_array16
)
3847 TRANS(ARRAY32
, VIS1
, do_rrr
, a
, gen_op_array32
)
3849 static void gen_op_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
)
3851 #ifdef TARGET_SPARC64
3852 TCGv tmp
= tcg_temp_new();
3854 tcg_gen_add_tl(tmp
, s1
, s2
);
3855 tcg_gen_andi_tl(dst
, tmp
, -8);
3856 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3858 g_assert_not_reached();
3862 static void gen_op_alignaddrl(TCGv dst
, TCGv s1
, TCGv s2
)
3864 #ifdef TARGET_SPARC64
3865 TCGv tmp
= tcg_temp_new();
3867 tcg_gen_add_tl(tmp
, s1
, s2
);
3868 tcg_gen_andi_tl(dst
, tmp
, -8);
3869 tcg_gen_neg_tl(tmp
, tmp
);
3870 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
3872 g_assert_not_reached();
3876 TRANS(ALIGNADDR
, VIS1
, do_rrr
, a
, gen_op_alignaddr
)
3877 TRANS(ALIGNADDRL
, VIS1
, do_rrr
, a
, gen_op_alignaddrl
)
3879 static void gen_op_bmask(TCGv dst
, TCGv s1
, TCGv s2
)
3881 #ifdef TARGET_SPARC64
3882 tcg_gen_add_tl(dst
, s1
, s2
);
3883 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, dst
, 32, 32);
3885 g_assert_not_reached();
3889 TRANS(BMASK
, VIS2
, do_rrr
, a
, gen_op_bmask
)
3891 static bool do_shift_r(DisasContext
*dc
, arg_shiftr
*a
, bool l
, bool u
)
3893 TCGv dst
, src1
, src2
;
3895 /* Reject 64-bit shifts for sparc32. */
3896 if (avail_32(dc
) && a
->x
) {
3900 src2
= tcg_temp_new();
3901 tcg_gen_andi_tl(src2
, gen_load_gpr(dc
, a
->rs2
), a
->x
? 63 : 31);
3902 src1
= gen_load_gpr(dc
, a
->rs1
);
3903 dst
= gen_dest_gpr(dc
, a
->rd
);
3906 tcg_gen_shl_tl(dst
, src1
, src2
);
3908 tcg_gen_ext32u_tl(dst
, dst
);
3912 tcg_gen_ext32u_tl(dst
, src1
);
3915 tcg_gen_shr_tl(dst
, src1
, src2
);
3918 tcg_gen_ext32s_tl(dst
, src1
);
3921 tcg_gen_sar_tl(dst
, src1
, src2
);
3923 gen_store_gpr(dc
, a
->rd
, dst
);
3924 return advance_pc(dc
);
3927 TRANS(SLL_r
, ALL
, do_shift_r
, a
, true, true)
3928 TRANS(SRL_r
, ALL
, do_shift_r
, a
, false, true)
3929 TRANS(SRA_r
, ALL
, do_shift_r
, a
, false, false)
3931 static bool do_shift_i(DisasContext
*dc
, arg_shifti
*a
, bool l
, bool u
)
3935 /* Reject 64-bit shifts for sparc32. */
3936 if (avail_32(dc
) && (a
->x
|| a
->i
>= 32)) {
3940 src1
= gen_load_gpr(dc
, a
->rs1
);
3941 dst
= gen_dest_gpr(dc
, a
->rd
);
3943 if (avail_32(dc
) || a
->x
) {
3945 tcg_gen_shli_tl(dst
, src1
, a
->i
);
3947 tcg_gen_shri_tl(dst
, src1
, a
->i
);
3949 tcg_gen_sari_tl(dst
, src1
, a
->i
);
3953 tcg_gen_deposit_z_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3955 tcg_gen_extract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3957 tcg_gen_sextract_tl(dst
, src1
, a
->i
, 32 - a
->i
);
3960 gen_store_gpr(dc
, a
->rd
, dst
);
3961 return advance_pc(dc
);
3964 TRANS(SLL_i
, ALL
, do_shift_i
, a
, true, true)
3965 TRANS(SRL_i
, ALL
, do_shift_i
, a
, false, true)
3966 TRANS(SRA_i
, ALL
, do_shift_i
, a
, false, false)
3968 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
3970 /* For simplicity, we under-decoded the rs2 form. */
3971 if (!imm
&& rs2_or_imm
& ~0x1f) {
3974 if (imm
|| rs2_or_imm
== 0) {
3975 return tcg_constant_tl(rs2_or_imm
);
3977 return cpu_regs
[rs2_or_imm
];
3981 static bool do_mov_cond(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, TCGv src2
)
3983 TCGv dst
= gen_load_gpr(dc
, rd
);
3984 TCGv c2
= tcg_constant_tl(cmp
->c2
);
3986 tcg_gen_movcond_tl(cmp
->cond
, dst
, cmp
->c1
, c2
, src2
, dst
);
3987 gen_store_gpr(dc
, rd
, dst
);
3988 return advance_pc(dc
);
3991 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
3993 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
3999 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4000 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4003 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
4005 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4011 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4012 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4015 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
4017 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4023 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
4024 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4027 static bool do_add_special(DisasContext
*dc
, arg_r_r_ri
*a
,
4028 bool (*func
)(DisasContext
*dc
, int rd
, TCGv src
))
4032 /* For simplicity, we under-decoded the rs2 form. */
4033 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4038 * Always load the sum into a new temporary.
4039 * This is required to capture the value across a window change,
4040 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4042 sum
= tcg_temp_new();
4043 src1
= gen_load_gpr(dc
, a
->rs1
);
4044 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4045 tcg_gen_addi_tl(sum
, src1
, a
->rs2_or_imm
);
4047 tcg_gen_add_tl(sum
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4049 return func(dc
, a
->rd
, sum
);
4052 static bool do_jmpl(DisasContext
*dc
, int rd
, TCGv src
)
4055 * Preserve pc across advance, so that we can delay
4056 * the writeback to rd until after src is consumed.
4058 target_ulong cur_pc
= dc
->pc
;
4060 gen_check_align(dc
, src
, 3);
4063 tcg_gen_mov_tl(cpu_npc
, src
);
4064 gen_address_mask(dc
, cpu_npc
);
4065 gen_store_gpr(dc
, rd
, tcg_constant_tl(cur_pc
));
4067 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4071 TRANS(JMPL
, ALL
, do_add_special
, a
, do_jmpl
)
4073 static bool do_rett(DisasContext
*dc
, int rd
, TCGv src
)
4075 if (!supervisor(dc
)) {
4076 return raise_priv(dc
);
4079 gen_check_align(dc
, src
, 3);
4082 tcg_gen_mov_tl(cpu_npc
, src
);
4083 gen_helper_rett(tcg_env
);
4085 dc
->npc
= DYNAMIC_PC
;
4089 TRANS(RETT
, 32, do_add_special
, a
, do_rett
)
4091 static bool do_return(DisasContext
*dc
, int rd
, TCGv src
)
4093 gen_check_align(dc
, src
, 3);
4096 tcg_gen_mov_tl(cpu_npc
, src
);
4097 gen_address_mask(dc
, cpu_npc
);
4099 gen_helper_restore(tcg_env
);
4100 dc
->npc
= DYNAMIC_PC_LOOKUP
;
4104 TRANS(RETURN
, 64, do_add_special
, a
, do_return
)
4106 static bool do_save(DisasContext
*dc
, int rd
, TCGv src
)
4108 gen_helper_save(tcg_env
);
4109 gen_store_gpr(dc
, rd
, src
);
4110 return advance_pc(dc
);
4113 TRANS(SAVE
, ALL
, do_add_special
, a
, do_save
)
4115 static bool do_restore(DisasContext
*dc
, int rd
, TCGv src
)
4117 gen_helper_restore(tcg_env
);
4118 gen_store_gpr(dc
, rd
, src
);
4119 return advance_pc(dc
);
4122 TRANS(RESTORE
, ALL
, do_add_special
, a
, do_restore
)
4124 static bool do_done_retry(DisasContext
*dc
, bool done
)
4126 if (!supervisor(dc
)) {
4127 return raise_priv(dc
);
4129 dc
->npc
= DYNAMIC_PC
;
4130 dc
->pc
= DYNAMIC_PC
;
4131 translator_io_start(&dc
->base
);
4133 gen_helper_done(tcg_env
);
4135 gen_helper_retry(tcg_env
);
4140 TRANS(DONE
, 64, do_done_retry
, true)
4141 TRANS(RETRY
, 64, do_done_retry
, false)
4144 * Major opcode 11 -- load and store instructions
4147 static TCGv
gen_ldst_addr(DisasContext
*dc
, int rs1
, bool imm
, int rs2_or_imm
)
4149 TCGv addr
, tmp
= NULL
;
4151 /* For simplicity, we under-decoded the rs2 form. */
4152 if (!imm
&& rs2_or_imm
& ~0x1f) {
4156 addr
= gen_load_gpr(dc
, rs1
);
4158 tmp
= tcg_temp_new();
4160 tcg_gen_addi_tl(tmp
, addr
, rs2_or_imm
);
4162 tcg_gen_add_tl(tmp
, addr
, cpu_regs
[rs2_or_imm
]);
4168 tmp
= tcg_temp_new();
4170 tcg_gen_ext32u_tl(tmp
, addr
);
4176 static bool do_ld_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4178 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4184 da
= resolve_asi(dc
, a
->asi
, mop
);
4186 reg
= gen_dest_gpr(dc
, a
->rd
);
4187 gen_ld_asi(dc
, &da
, reg
, addr
);
4188 gen_store_gpr(dc
, a
->rd
, reg
);
4189 return advance_pc(dc
);
4192 TRANS(LDUW
, ALL
, do_ld_gpr
, a
, MO_TEUL
)
4193 TRANS(LDUB
, ALL
, do_ld_gpr
, a
, MO_UB
)
4194 TRANS(LDUH
, ALL
, do_ld_gpr
, a
, MO_TEUW
)
4195 TRANS(LDSB
, ALL
, do_ld_gpr
, a
, MO_SB
)
4196 TRANS(LDSH
, ALL
, do_ld_gpr
, a
, MO_TESW
)
4197 TRANS(LDSW
, 64, do_ld_gpr
, a
, MO_TESL
)
4198 TRANS(LDX
, 64, do_ld_gpr
, a
, MO_TEUQ
)
4200 static bool do_st_gpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4202 TCGv reg
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4208 da
= resolve_asi(dc
, a
->asi
, mop
);
4210 reg
= gen_load_gpr(dc
, a
->rd
);
4211 gen_st_asi(dc
, &da
, reg
, addr
);
4212 return advance_pc(dc
);
4215 TRANS(STW
, ALL
, do_st_gpr
, a
, MO_TEUL
)
4216 TRANS(STB
, ALL
, do_st_gpr
, a
, MO_UB
)
4217 TRANS(STH
, ALL
, do_st_gpr
, a
, MO_TEUW
)
4218 TRANS(STX
, 64, do_st_gpr
, a
, MO_TEUQ
)
4220 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4228 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4232 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4233 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
4234 return advance_pc(dc
);
4237 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4245 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4249 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4250 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
4251 return advance_pc(dc
);
4254 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4259 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4263 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4265 reg
= gen_dest_gpr(dc
, a
->rd
);
4266 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4267 gen_store_gpr(dc
, a
->rd
, reg
);
4268 return advance_pc(dc
);
4271 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4273 TCGv addr
, dst
, src
;
4276 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4280 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4282 dst
= gen_dest_gpr(dc
, a
->rd
);
4283 src
= gen_load_gpr(dc
, a
->rd
);
4284 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4285 gen_store_gpr(dc
, a
->rd
, dst
);
4286 return advance_pc(dc
);
4289 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4294 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4298 da
= resolve_asi(dc
, a
->asi
, mop
);
4300 o
= gen_dest_gpr(dc
, a
->rd
);
4301 n
= gen_load_gpr(dc
, a
->rd
);
4302 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4303 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4304 gen_store_gpr(dc
, a
->rd
, o
);
4305 return advance_pc(dc
);
4308 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4309 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4311 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4313 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4319 if (gen_trap_ifnofpu(dc
)) {
4322 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4325 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4326 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4327 gen_update_fprs_dirty(dc
, a
->rd
);
4328 return advance_pc(dc
);
4331 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4332 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4333 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4335 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4336 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4337 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4339 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4341 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4347 if (gen_trap_ifnofpu(dc
)) {
4350 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4353 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4354 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4355 return advance_pc(dc
);
4358 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4359 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4360 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4362 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4363 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4364 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4366 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4368 if (!avail_32(dc
)) {
4371 if (!supervisor(dc
)) {
4372 return raise_priv(dc
);
4374 if (gen_trap_ifnofpu(dc
)) {
4377 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4381 static bool do_ldfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
,
4382 target_ulong new_mask
, target_ulong old_mask
)
4384 TCGv tmp
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4388 if (gen_trap_ifnofpu(dc
)) {
4391 tmp
= tcg_temp_new();
4392 tcg_gen_qemu_ld_tl(tmp
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4393 tcg_gen_andi_tl(tmp
, tmp
, new_mask
);
4394 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, old_mask
);
4395 tcg_gen_or_tl(cpu_fsr
, cpu_fsr
, tmp
);
4396 gen_helper_set_fsr(tcg_env
, cpu_fsr
);
4397 return advance_pc(dc
);
4400 TRANS(LDFSR
, ALL
, do_ldfsr
, a
, MO_TEUL
, FSR_LDFSR_MASK
, FSR_LDFSR_OLDMASK
)
4401 TRANS(LDXFSR
, 64, do_ldfsr
, a
, MO_TEUQ
, FSR_LDXFSR_MASK
, FSR_LDXFSR_OLDMASK
)
4403 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4405 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4409 if (gen_trap_ifnofpu(dc
)) {
4412 tcg_gen_qemu_st_tl(cpu_fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4413 return advance_pc(dc
);
4416 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4417 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4419 static bool do_fc(DisasContext
*dc
, int rd
, bool c
)
4423 if (gen_trap_ifnofpu(dc
)) {
4428 mask
= MAKE_64BIT_MASK(0, 32);
4430 mask
= MAKE_64BIT_MASK(32, 32);
4433 tcg_gen_ori_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], mask
);
4435 tcg_gen_andi_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], ~mask
);
4437 gen_update_fprs_dirty(dc
, rd
);
4438 return advance_pc(dc
);
4441 TRANS(FZEROs
, VIS1
, do_fc
, a
->rd
, 0)
4442 TRANS(FONEs
, VIS1
, do_fc
, a
->rd
, 1)
4444 static bool do_dc(DisasContext
*dc
, int rd
, int64_t c
)
4446 if (gen_trap_ifnofpu(dc
)) {
4450 tcg_gen_movi_i64(cpu_fpr
[rd
/ 2], c
);
4451 gen_update_fprs_dirty(dc
, rd
);
4452 return advance_pc(dc
);
4455 TRANS(FZEROd
, VIS1
, do_dc
, a
->rd
, 0)
4456 TRANS(FONEd
, VIS1
, do_dc
, a
->rd
, -1)
4458 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4459 void (*func
)(TCGv_i32
, TCGv_i32
))
4463 if (gen_trap_ifnofpu(dc
)) {
4467 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4469 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4470 return advance_pc(dc
);
4473 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4474 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4475 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4476 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4477 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4479 static bool do_fd(DisasContext
*dc
, arg_r_r
*a
,
4480 void (*func
)(TCGv_i32
, TCGv_i64
))
4485 if (gen_trap_ifnofpu(dc
)) {
4489 dst
= gen_dest_fpr_F(dc
);
4490 src
= gen_load_fpr_D(dc
, a
->rs
);
4492 gen_store_fpr_F(dc
, a
->rd
, dst
);
4493 return advance_pc(dc
);
4496 TRANS(FPACK16
, VIS1
, do_fd
, a
, gen_op_fpack16
)
4497 TRANS(FPACKFIX
, VIS1
, do_fd
, a
, gen_op_fpackfix
)
4499 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4500 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4504 if (gen_trap_ifnofpu(dc
)) {
4508 gen_op_clear_ieee_excp_and_FTT();
4509 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4510 func(tmp
, tcg_env
, tmp
);
4511 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4512 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4513 return advance_pc(dc
);
4516 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4517 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4518 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4520 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4521 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4526 if (gen_trap_ifnofpu(dc
)) {
4530 gen_op_clear_ieee_excp_and_FTT();
4531 dst
= gen_dest_fpr_F(dc
);
4532 src
= gen_load_fpr_D(dc
, a
->rs
);
4533 func(dst
, tcg_env
, src
);
4534 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4535 gen_store_fpr_F(dc
, a
->rd
, dst
);
4536 return advance_pc(dc
);
4539 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4540 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4541 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4543 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4544 void (*func
)(TCGv_i64
, TCGv_i64
))
4548 if (gen_trap_ifnofpu(dc
)) {
4552 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4553 src
= gen_load_fpr_D(dc
, a
->rs
);
4555 gen_store_fpr_D(dc
, a
->rd
, dst
);
4556 return advance_pc(dc
);
4559 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4560 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4561 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4562 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4563 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4565 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4566 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4570 if (gen_trap_ifnofpu(dc
)) {
4574 gen_op_clear_ieee_excp_and_FTT();
4575 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4576 src
= gen_load_fpr_D(dc
, a
->rs
);
4577 func(dst
, tcg_env
, src
);
4578 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4579 gen_store_fpr_D(dc
, a
->rd
, dst
);
4580 return advance_pc(dc
);
4583 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4584 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4585 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4587 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4588 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4593 if (gen_trap_ifnofpu(dc
)) {
4597 gen_op_clear_ieee_excp_and_FTT();
4598 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4599 src
= gen_load_fpr_F(dc
, a
->rs
);
4600 func(dst
, tcg_env
, src
);
4601 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4602 gen_store_fpr_D(dc
, a
->rd
, dst
);
4603 return advance_pc(dc
);
4606 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4607 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4608 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4610 static bool trans_FMOVq(DisasContext
*dc
, arg_FMOVq
*a
)
4614 if (!avail_64(dc
)) {
4617 if (gen_trap_ifnofpu(dc
)) {
4620 if (gen_trap_float128(dc
)) {
4624 gen_op_clear_ieee_excp_and_FTT();
4627 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
4628 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
4629 gen_update_fprs_dirty(dc
, rd
);
4630 return advance_pc(dc
);
4633 static bool do_qq(DisasContext
*dc
, arg_r_r
*a
,
4634 void (*func
)(TCGv_env
))
4636 if (gen_trap_ifnofpu(dc
)) {
4639 if (gen_trap_float128(dc
)) {
4643 gen_op_clear_ieee_excp_and_FTT();
4644 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4646 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4647 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4648 return advance_pc(dc
);
4651 TRANS(FNEGq
, 64, do_qq
, a
, gen_helper_fnegq
)
4652 TRANS(FABSq
, 64, do_qq
, a
, gen_helper_fabsq
)
4654 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4655 void (*func
)(TCGv_env
))
4657 if (gen_trap_ifnofpu(dc
)) {
4660 if (gen_trap_float128(dc
)) {
4664 gen_op_clear_ieee_excp_and_FTT();
4665 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4667 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4668 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4669 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4670 return advance_pc(dc
);
4673 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4675 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4676 void (*func
)(TCGv_i32
, TCGv_env
))
4680 if (gen_trap_ifnofpu(dc
)) {
4683 if (gen_trap_float128(dc
)) {
4687 gen_op_clear_ieee_excp_and_FTT();
4688 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4689 dst
= gen_dest_fpr_F(dc
);
4691 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4692 gen_store_fpr_F(dc
, a
->rd
, dst
);
4693 return advance_pc(dc
);
4696 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4697 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4699 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4700 void (*func
)(TCGv_i64
, TCGv_env
))
4704 if (gen_trap_ifnofpu(dc
)) {
4707 if (gen_trap_float128(dc
)) {
4711 gen_op_clear_ieee_excp_and_FTT();
4712 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4713 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4715 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4716 gen_store_fpr_D(dc
, a
->rd
, dst
);
4717 return advance_pc(dc
);
4720 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4721 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4723 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4724 void (*func
)(TCGv_env
, TCGv_i32
))
4728 if (gen_trap_ifnofpu(dc
)) {
4731 if (gen_trap_float128(dc
)) {
4735 gen_op_clear_ieee_excp_and_FTT();
4736 src
= gen_load_fpr_F(dc
, a
->rs
);
4738 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4739 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4740 return advance_pc(dc
);
4743 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4744 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4746 static bool do_env_qd(DisasContext
*dc
, arg_r_r
*a
,
4747 void (*func
)(TCGv_env
, TCGv_i64
))
4751 if (gen_trap_ifnofpu(dc
)) {
4754 if (gen_trap_float128(dc
)) {
4758 gen_op_clear_ieee_excp_and_FTT();
4759 src
= gen_load_fpr_D(dc
, a
->rs
);
4761 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4762 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4763 return advance_pc(dc
);
4766 TRANS(FdTOq
, ALL
, do_env_qd
, a
, gen_helper_fdtoq
)
4767 TRANS(FxTOq
, 64, do_env_qd
, a
, gen_helper_fxtoq
)
4769 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4770 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4772 TCGv_i32 src1
, src2
;
4774 if (gen_trap_ifnofpu(dc
)) {
4778 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4779 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4780 func(src1
, src1
, src2
);
4781 gen_store_fpr_F(dc
, a
->rd
, src1
);
4782 return advance_pc(dc
);
4785 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4786 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4787 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4788 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4789 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4790 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4791 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4792 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4793 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4794 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4795 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4796 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4798 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4799 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4801 TCGv_i32 src1
, src2
;
4803 if (gen_trap_ifnofpu(dc
)) {
4807 gen_op_clear_ieee_excp_and_FTT();
4808 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4809 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4810 func(src1
, tcg_env
, src1
, src2
);
4811 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4812 gen_store_fpr_F(dc
, a
->rd
, src1
);
4813 return advance_pc(dc
);
4816 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4817 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4818 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4819 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4821 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4822 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4824 TCGv_i64 dst
, src1
, src2
;
4826 if (gen_trap_ifnofpu(dc
)) {
4830 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4831 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4832 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4833 func(dst
, src1
, src2
);
4834 gen_store_fpr_D(dc
, a
->rd
, dst
);
4835 return advance_pc(dc
);
4838 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4839 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4840 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4841 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4842 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4843 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4844 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4845 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4846 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4848 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4849 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4850 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4851 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4852 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4853 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4854 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4855 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4856 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4857 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4858 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4859 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4861 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4862 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4863 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4865 static bool do_rdd(DisasContext
*dc
, arg_r_r_r
*a
,
4866 void (*func
)(TCGv
, TCGv_i64
, TCGv_i64
))
4868 TCGv_i64 src1
, src2
;
4871 if (gen_trap_ifnofpu(dc
)) {
4875 dst
= gen_dest_gpr(dc
, a
->rd
);
4876 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4877 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4878 func(dst
, src1
, src2
);
4879 gen_store_gpr(dc
, a
->rd
, dst
);
4880 return advance_pc(dc
);
4883 TRANS(FPCMPLE16
, VIS1
, do_rdd
, a
, gen_helper_fcmple16
)
4884 TRANS(FPCMPNE16
, VIS1
, do_rdd
, a
, gen_helper_fcmpne16
)
4885 TRANS(FPCMPGT16
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt16
)
4886 TRANS(FPCMPEQ16
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq16
)
4888 TRANS(FPCMPLE32
, VIS1
, do_rdd
, a
, gen_helper_fcmple32
)
4889 TRANS(FPCMPNE32
, VIS1
, do_rdd
, a
, gen_helper_fcmpne32
)
4890 TRANS(FPCMPGT32
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt32
)
4891 TRANS(FPCMPEQ32
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq32
)
4893 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4894 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4896 TCGv_i64 dst
, src1
, src2
;
4898 if (gen_trap_ifnofpu(dc
)) {
4902 gen_op_clear_ieee_excp_and_FTT();
4903 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4904 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4905 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4906 func(dst
, tcg_env
, src1
, src2
);
4907 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4908 gen_store_fpr_D(dc
, a
->rd
, dst
);
4909 return advance_pc(dc
);
4912 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4913 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4914 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4915 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4917 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4920 TCGv_i32 src1
, src2
;
4922 if (gen_trap_ifnofpu(dc
)) {
4925 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4926 return raise_unimpfpop(dc
);
4929 gen_op_clear_ieee_excp_and_FTT();
4930 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4931 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4932 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4933 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4934 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4935 gen_store_fpr_D(dc
, a
->rd
, dst
);
4936 return advance_pc(dc
);
4939 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4940 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4942 TCGv_i64 dst
, src0
, src1
, src2
;
4944 if (gen_trap_ifnofpu(dc
)) {
4948 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4949 src0
= gen_load_fpr_D(dc
, a
->rd
);
4950 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4951 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4952 func(dst
, src0
, src1
, src2
);
4953 gen_store_fpr_D(dc
, a
->rd
, dst
);
4954 return advance_pc(dc
);
4957 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
4959 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
4960 void (*func
)(TCGv_env
))
4962 if (gen_trap_ifnofpu(dc
)) {
4965 if (gen_trap_float128(dc
)) {
4969 gen_op_clear_ieee_excp_and_FTT();
4970 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
4971 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
4973 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4974 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4975 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4976 return advance_pc(dc
);
4979 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
4980 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
4981 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
4982 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
4984 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
4986 TCGv_i64 src1
, src2
;
4988 if (gen_trap_ifnofpu(dc
)) {
4991 if (gen_trap_float128(dc
)) {
4995 gen_op_clear_ieee_excp_and_FTT();
4996 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4997 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4998 gen_helper_fdmulq(tcg_env
, src1
, src2
);
4999 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
5000 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
5001 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
5002 return advance_pc(dc
);
5005 static bool do_fmovr(DisasContext
*dc
, arg_FMOVRs
*a
, bool is_128
,
5006 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5010 if (gen_trap_ifnofpu(dc
)) {
5013 if (is_128
&& gen_trap_float128(dc
)) {
5017 gen_op_clear_ieee_excp_and_FTT();
5018 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
5019 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5020 return advance_pc(dc
);
5023 TRANS(FMOVRs
, 64, do_fmovr
, a
, false, gen_fmovs
)
5024 TRANS(FMOVRd
, 64, do_fmovr
, a
, false, gen_fmovd
)
5025 TRANS(FMOVRq
, 64, do_fmovr
, a
, true, gen_fmovq
)
5027 static bool do_fmovcc(DisasContext
*dc
, arg_FMOVscc
*a
, bool is_128
,
5028 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5032 if (gen_trap_ifnofpu(dc
)) {
5035 if (is_128
&& gen_trap_float128(dc
)) {
5039 gen_op_clear_ieee_excp_and_FTT();
5040 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
5041 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5042 return advance_pc(dc
);
5045 TRANS(FMOVscc
, 64, do_fmovcc
, a
, false, gen_fmovs
)
5046 TRANS(FMOVdcc
, 64, do_fmovcc
, a
, false, gen_fmovd
)
5047 TRANS(FMOVqcc
, 64, do_fmovcc
, a
, true, gen_fmovq
)
5049 static bool do_fmovfcc(DisasContext
*dc
, arg_FMOVsfcc
*a
, bool is_128
,
5050 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
5054 if (gen_trap_ifnofpu(dc
)) {
5057 if (is_128
&& gen_trap_float128(dc
)) {
5061 gen_op_clear_ieee_excp_and_FTT();
5062 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
5063 func(dc
, &cmp
, a
->rd
, a
->rs2
);
5064 return advance_pc(dc
);
5067 TRANS(FMOVsfcc
, 64, do_fmovfcc
, a
, false, gen_fmovs
)
5068 TRANS(FMOVdfcc
, 64, do_fmovfcc
, a
, false, gen_fmovd
)
5069 TRANS(FMOVqfcc
, 64, do_fmovfcc
, a
, true, gen_fmovq
)
5071 static bool do_fcmps(DisasContext
*dc
, arg_FCMPs
*a
, bool e
)
5073 TCGv_i32 src1
, src2
;
5075 if (avail_32(dc
) && a
->cc
!= 0) {
5078 if (gen_trap_ifnofpu(dc
)) {
5082 gen_op_clear_ieee_excp_and_FTT();
5083 src1
= gen_load_fpr_F(dc
, a
->rs1
);
5084 src2
= gen_load_fpr_F(dc
, a
->rs2
);
5086 gen_op_fcmpes(a
->cc
, src1
, src2
);
5088 gen_op_fcmps(a
->cc
, src1
, src2
);
5090 return advance_pc(dc
);
5093 TRANS(FCMPs
, ALL
, do_fcmps
, a
, false)
5094 TRANS(FCMPEs
, ALL
, do_fcmps
, a
, true)
5096 static bool do_fcmpd(DisasContext
*dc
, arg_FCMPd
*a
, bool e
)
5098 TCGv_i64 src1
, src2
;
5100 if (avail_32(dc
) && a
->cc
!= 0) {
5103 if (gen_trap_ifnofpu(dc
)) {
5107 gen_op_clear_ieee_excp_and_FTT();
5108 src1
= gen_load_fpr_D(dc
, a
->rs1
);
5109 src2
= gen_load_fpr_D(dc
, a
->rs2
);
5111 gen_op_fcmped(a
->cc
, src1
, src2
);
5113 gen_op_fcmpd(a
->cc
, src1
, src2
);
5115 return advance_pc(dc
);
5118 TRANS(FCMPd
, ALL
, do_fcmpd
, a
, false)
5119 TRANS(FCMPEd
, ALL
, do_fcmpd
, a
, true)
5121 static bool do_fcmpq(DisasContext
*dc
, arg_FCMPq
*a
, bool e
)
5123 if (avail_32(dc
) && a
->cc
!= 0) {
5126 if (gen_trap_ifnofpu(dc
)) {
5129 if (gen_trap_float128(dc
)) {
5133 gen_op_clear_ieee_excp_and_FTT();
5134 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
5135 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
5137 gen_op_fcmpeq(a
->cc
);
5139 gen_op_fcmpq(a
->cc
);
5141 return advance_pc(dc
);
5144 TRANS(FCMPq
, ALL
, do_fcmpq
, a
, false)
5145 TRANS(FCMPEq
, ALL
, do_fcmpq
, a
, true)
5147 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5149 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5150 CPUSPARCState
*env
= cpu_env(cs
);
5153 dc
->pc
= dc
->base
.pc_first
;
5154 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
5155 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5156 dc
->def
= &env
->def
;
5157 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5158 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5159 #ifndef CONFIG_USER_ONLY
5160 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5162 #ifdef TARGET_SPARC64
5164 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5165 #ifndef CONFIG_USER_ONLY
5166 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5170 * if we reach a page boundary, we stop generation so that the
5171 * PC of a TT_TFAULT exception is always in the right page
5173 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5174 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
5177 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
5181 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5183 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5184 target_ulong npc
= dc
->npc
;
5189 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5190 npc
= dc
->jump_pc
[0] | JUMP_PC
;
5193 case DYNAMIC_PC_LOOKUP
:
5197 g_assert_not_reached();
5200 tcg_gen_insn_start(dc
->pc
, npc
);
5203 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5205 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5206 CPUSPARCState
*env
= cpu_env(cs
);
5209 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5210 dc
->base
.pc_next
+= 4;
5212 if (!decode(dc
, insn
)) {
5213 gen_exception(dc
, TT_ILL_INSN
);
5216 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
5219 if (dc
->pc
!= dc
->base
.pc_next
) {
5220 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
5224 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5226 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5227 DisasDelayException
*e
, *e_next
;
5232 switch (dc
->base
.is_jmp
) {
5234 case DISAS_TOO_MANY
:
5235 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5236 /* static PC and NPC: we can use direct chaining */
5237 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5244 case DYNAMIC_PC_LOOKUP
:
5250 g_assert_not_reached();
5253 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5259 gen_generic_branch(dc
);
5264 case DYNAMIC_PC_LOOKUP
:
5267 g_assert_not_reached();
5270 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5273 tcg_gen_lookup_and_goto_ptr();
5275 tcg_gen_exit_tb(NULL
, 0);
5279 case DISAS_NORETURN
:
5285 tcg_gen_exit_tb(NULL
, 0);
5289 g_assert_not_reached();
5292 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5293 gen_set_label(e
->lab
);
5295 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5296 if (e
->npc
% 4 == 0) {
5297 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5299 gen_helper_raise_exception(tcg_env
, e
->excp
);
5306 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5307 CPUState
*cpu
, FILE *logfile
)
5309 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5310 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5313 static const TranslatorOps sparc_tr_ops
= {
5314 .init_disas_context
= sparc_tr_init_disas_context
,
5315 .tb_start
= sparc_tr_tb_start
,
5316 .insn_start
= sparc_tr_insn_start
,
5317 .translate_insn
= sparc_tr_translate_insn
,
5318 .tb_stop
= sparc_tr_tb_stop
,
5319 .disas_log
= sparc_tr_disas_log
,
5322 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5323 target_ulong pc
, void *host_pc
)
5325 DisasContext dc
= {};
5327 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5330 void sparc_tcg_init(void)
5332 static const char gregnames
[32][4] = {
5333 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5334 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5335 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5336 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5338 static const char fregnames
[32][4] = {
5339 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5340 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5341 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5342 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5345 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5346 #ifdef TARGET_SPARC64
5347 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5348 { &cpu_xcc_Z
, offsetof(CPUSPARCState
, xcc_Z
), "xcc_Z" },
5349 { &cpu_xcc_C
, offsetof(CPUSPARCState
, xcc_C
), "xcc_C" },
5351 { &cpu_cc_N
, offsetof(CPUSPARCState
, cc_N
), "cc_N" },
5352 { &cpu_cc_V
, offsetof(CPUSPARCState
, cc_V
), "cc_V" },
5353 { &cpu_icc_Z
, offsetof(CPUSPARCState
, icc_Z
), "icc_Z" },
5354 { &cpu_icc_C
, offsetof(CPUSPARCState
, icc_C
), "icc_C" },
5355 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5356 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5357 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5358 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5359 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5360 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5365 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5366 offsetof(CPUSPARCState
, regwptr
),
5369 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5370 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5374 for (i
= 1; i
< 8; ++i
) {
5375 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5376 offsetof(CPUSPARCState
, gregs
[i
]),
5380 for (i
= 8; i
< 32; ++i
) {
5381 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5382 (i
- 8) * sizeof(target_ulong
),
5386 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5387 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5388 offsetof(CPUSPARCState
, fpr
[i
]),
5392 #ifdef TARGET_SPARC64
5393 cpu_fprs
= tcg_global_mem_new_i32(tcg_env
,
5394 offsetof(CPUSPARCState
, fprs
), "fprs");
5398 void sparc_restore_state_to_opc(CPUState
*cs
,
5399 const TranslationBlock
*tb
,
5400 const uint64_t *data
)
5402 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5403 CPUSPARCState
*env
= &cpu
->env
;
5404 target_ulong pc
= data
[0];
5405 target_ulong npc
= data
[1];
5408 if (npc
== DYNAMIC_PC
) {
5409 /* dynamic NPC: already stored */
5410 } else if (npc
& JUMP_PC
) {
5411 /* jump PC: use 'cond' and the jump targets of the translation */
5413 env
->npc
= npc
& ~3;