/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_done(E) qemu_build_not_reached()
# define gen_helper_fabsd(D, S) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_fnegd(D, S) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
# define FSR_LDXFSR_MASK 0
# define FSR_LDXFSR_OLDMASK 0
# define MAXTL_MASK 0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X) offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X) env_field_offsetof(X)
#else
# define env32_field_offsetof(X) env_field_offsetof(X)
# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b) sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b) sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))

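/*
 * Worked example (editorial note; assumes the usual SPARC field layout):
 * in a format-3 opcode word, rd occupies bits <29:25> in the manuals'
 * numbering.  GET_FIELD(insn, 2, 6) extracts it with the big-endian
 * numbering used here: shift right by 31 - 6 = 25, then mask with
 * (1 << 5) - 1.  GET_FIELD_SP(insn, 25, 29) names the same bits via the
 * manuals' numbering.
 */
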
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

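/*
 * Editorial example: bit 0 of the 5-bit register field selects the upper
 * bank of the sparc64 FPU.  DFPREG(1) thus yields 32 (%f32) while
 * DFPREG(2) stays 2 (%f2); on 32-bit sparc the odd bit is simply
 * discarded.
 */
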
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

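/*
 * Editorial note: cpu_fpr[] packs two single-precision registers per
 * TCGv_i64, with the even-numbered register in the high half.  So the
 * load above takes the high half for an even src and the low half for an
 * odd src, and the store deposits into the matching half, e.g. a store
 * to %f1 writes bits [31:0] of cpu_fpr[0].
 */
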
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc) false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc) true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc) false
#else
# define AM_CHECK(dc) ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

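/*
 * Worked example (editorial): after a 32-bit ADDcc, carry-out is
 * recovered as an unsigned compare of the truncated result against one
 * operand: 0xffffffff + 2 wraps to 1, and 1 < 2 unsigned, so carry = 1;
 * without wraparound the sum is >= either operand and carry = 0.
 */
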
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

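/*
 * Sketch of the ADD2 trick above (editorial, 32-bit target): add2
 * computes the 64-bit sum {src1:cpu_cc_src} + {src2:cpu_cc_src2},
 * written as {high:low}.  The low halves redo the ADDcc that produced
 * the carry, so their carry-out is exactly PSR.C and it propagates into
 * the high half: dst receives src1 + src2 + C in one operation while
 * the recomputed low sum is discarded.
 */
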
static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
       if (!(env->y & 1))
           T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

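/*
 * Editorial note: MULScc is one step of the V8 iterative multiply.  Per
 * step: rs2 is added only if bit 0 of %y (the next multiplier bit) is
 * set; %y shifts right by one with bit 0 of rs1 entering at bit 31; and
 * the partial sum in rs1 shifts right with (N ^ V) from the previous
 * step entering at bit 31.  Thirty-two such steps build a full 32x32
 * product.
 */
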
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

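/*
 * Worked example for the split shift above (editorial): with
 * GSR.align = 0 the right shift would be 64, which TCG leaves
 * undefined.  Since the count becomes 63 - (8 * align) after the xor,
 * the code shifts s2 right by 63 and then by 1 more, i.e. 64 in total,
 * correctly producing 0; for align = 3 it shifts by 39 + 1 = 40, which
 * is 64 - 24.
 */
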
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

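/*
 * Editorial example: after SUBcc (CC_OP_SUB), a branch such as "ble"
 * needs no flags at all; it becomes TCG_COND_LE directly on the saved
 * operands, i.e. cpu_cc_src <= cpu_cc_src2 signed, with both operands
 * sign-extended from 32 bits when %icc is selected on sparc64.
 */
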
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

// Inverted logic
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled, to pass through
         * the permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

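/*
 * Usage sketch (editorial, not from this file): callers resolve the ASI
 * once and dispatch on the returned type, e.g. for a plain non-alternate
 * load:
 *
 *     DisasASI da = resolve_asi(dc, -1, MO_TEUL);  // -> GET_ASI_DIRECT
 *     gen_ld_asi(dc, &da, dst, addr);
 *
 * A type of GET_ASI_EXCP means resolve_asi already raised an exception
 * and the memory access must be suppressed.
 */
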
1896 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1897 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1898 TCGv_i32 asi, TCGv_i32 mop)
1899 {
1900 g_assert_not_reached();
1901 }
1902
1903 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1904 TCGv_i32 asi, TCGv_i32 mop)
1905 {
1906 g_assert_not_reached();
1907 }
1908 #endif
1909
1910 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1911 {
1912 switch (da->type) {
1913 case GET_ASI_EXCP:
1914 break;
1915 case GET_ASI_DTWINX: /* Reserved for ldda. */
1916 gen_exception(dc, TT_ILL_INSN);
1917 break;
1918 case GET_ASI_DIRECT:
1919 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1920 break;
1921 default:
1922 {
1923 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1924 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1925
1926 save_state(dc);
1927 #ifdef TARGET_SPARC64
1928 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1929 #else
1930 {
1931 TCGv_i64 t64 = tcg_temp_new_i64();
1932 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1933 tcg_gen_trunc_i64_tl(dst, t64);
1934 }
1935 #endif
1936 }
1937 break;
1938 }
1939 }
1940
1941 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1942 {
1943 switch (da->type) {
1944 case GET_ASI_EXCP:
1945 break;
1946
1947 case GET_ASI_DTWINX: /* Reserved for stda. */
1948 if (TARGET_LONG_BITS == 32) {
1949 gen_exception(dc, TT_ILL_INSN);
1950 break;
1951 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1952 /* Pre OpenSPARC CPUs don't have these */
1953 gen_exception(dc, TT_ILL_INSN);
1954 break;
1955 }
1956 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1957 /* fall through */
1958
1959 case GET_ASI_DIRECT:
1960 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1961 break;
1962
1963 case GET_ASI_BCOPY:
1964 assert(TARGET_LONG_BITS == 32);
1965 /* Copy 32 bytes from the address in SRC to ADDR. */
1966 /* ??? The original qemu code suggests 4-byte alignment, dropping
1967 the low bits, but the only place I can see this used is in the
1968 Linux kernel with 32 byte alignment, which would make more sense
1969 as a cacheline-style operation. */
1970 {
1971 TCGv saddr = tcg_temp_new();
1972 TCGv daddr = tcg_temp_new();
1973 TCGv four = tcg_constant_tl(4);
1974 TCGv_i32 tmp = tcg_temp_new_i32();
1975 int i;
1976
1977 tcg_gen_andi_tl(saddr, src, -4);
1978 tcg_gen_andi_tl(daddr, addr, -4);
1979 for (i = 0; i < 32; i += 4) {
1980 /* Since the loads and stores are paired, allow the
1981 copy to happen in the host endianness. */
1982 tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
1983 tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
1984 tcg_gen_add_tl(saddr, saddr, four);
1985 tcg_gen_add_tl(daddr, daddr, four);
1986 }
1987 }
1988 break;
1989
1990 default:
1991 {
1992 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1993 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1994
1995 save_state(dc);
1996 #ifdef TARGET_SPARC64
1997 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1998 #else
1999 {
2000 TCGv_i64 t64 = tcg_temp_new_i64();
2001 tcg_gen_extu_tl_i64(t64, src);
2002 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2003 }
2004 #endif
2005
2006 /* A write to a TLB register may alter page maps. End the TB. */
2007 dc->npc = DYNAMIC_PC;
2008 }
2009 break;
2010 }
2011 }
2012
2013 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
2014 TCGv dst, TCGv src, TCGv addr)
2015 {
2016 switch (da->type) {
2017 case GET_ASI_EXCP:
2018 break;
2019 case GET_ASI_DIRECT:
2020 tcg_gen_atomic_xchg_tl(dst, addr, src,
2021 da->mem_idx, da->memop | MO_ALIGN);
2022 break;
2023 default:
2024 /* ??? Should be DAE_invalid_asi. */
2025 gen_exception(dc, TT_DATA_ACCESS);
2026 break;
2027 }
2028 }
2029
2030 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
2031 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
2032 {
2033 switch (da->type) {
2034 case GET_ASI_EXCP:
2035 return;
2036 case GET_ASI_DIRECT:
2037 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
2038 da->mem_idx, da->memop | MO_ALIGN);
2039 break;
2040 default:
2041 /* ??? Should be DAE_invalid_asi. */
2042 gen_exception(dc, TT_DATA_ACCESS);
2043 break;
2044 }
2045 }
2046
2047 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
2048 {
2049 switch (da->type) {
2050 case GET_ASI_EXCP:
2051 break;
2052 case GET_ASI_DIRECT:
2053 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
2054 da->mem_idx, MO_UB);
2055 break;
2056 default:
2057 /* ??? In theory, this should be raise DAE_invalid_asi.
2058 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2059 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2060 gen_helper_exit_atomic(tcg_env);
2061 } else {
2062 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2063 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
2064 TCGv_i64 s64, t64;
2065
2066 save_state(dc);
2067 t64 = tcg_temp_new_i64();
2068 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2069
2070 s64 = tcg_constant_i64(0xff);
2071 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
2072
2073 tcg_gen_trunc_i64_tl(dst, t64);
2074
2075 /* End the TB. */
2076 dc->npc = DYNAMIC_PC;
2077 }
2078 break;
2079 }
2080 }
2081
2082 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2083 TCGv addr, int rd)
2084 {
2085 MemOp memop = da->memop;
2086 MemOp size = memop & MO_SIZE;
2087 TCGv_i32 d32;
2088 TCGv_i64 d64;
2089 TCGv addr_tmp;
2090
2091 /* TODO: Use 128-bit load/store below. */
2092 if (size == MO_128) {
2093 memop = (memop & ~MO_SIZE) | MO_64;
2094 }
2095
2096 switch (da->type) {
2097 case GET_ASI_EXCP:
2098 break;
2099
2100 case GET_ASI_DIRECT:
2101 memop |= MO_ALIGN_4;
2102 switch (size) {
2103 case MO_32:
2104 d32 = gen_dest_fpr_F(dc);
2105 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
2106 gen_store_fpr_F(dc, rd, d32);
2107 break;
2108
2109 case MO_64:
2110 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
2111 break;
2112
2113 case MO_128:
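            /* Load the first half into a temporary so that a fault on the
               second access leaves the destination register pair
               unmodified. */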
2114 d64 = tcg_temp_new_i64();
2115 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2116 addr_tmp = tcg_temp_new();
2117 tcg_gen_addi_tl(addr_tmp, addr, 8);
2118 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2119 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2120 break;
2121 default:
2122 g_assert_not_reached();
2123 }
2124 break;
2125
2126 case GET_ASI_BLOCK:
2127 /* Valid for lddfa on aligned registers only. */
2128 if (orig_size == MO_64 && (rd & 7) == 0) {
2129 /* The first operation checks required alignment. */
2130 addr_tmp = tcg_temp_new();
2131 for (int i = 0; ; ++i) {
2132 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2133 memop | (i == 0 ? MO_ALIGN_64 : 0));
2134 if (i == 7) {
2135 break;
2136 }
2137 tcg_gen_addi_tl(addr_tmp, addr, 8);
2138 addr = addr_tmp;
2139 }
2140 } else {
2141 gen_exception(dc, TT_ILL_INSN);
2142 }
2143 break;
2144
2145 case GET_ASI_SHORT:
2146 /* Valid for lddfa only. */
2147 if (orig_size == MO_64) {
2148 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2149 memop | MO_ALIGN);
2150 } else {
2151 gen_exception(dc, TT_ILL_INSN);
2152 }
2153 break;
2154
2155 default:
2156 {
2157 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2158 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2159
2160 save_state(dc);
2161 /* According to the table in the UA2011 manual, the only
2162 other asis that are valid for ldfa/lddfa/ldqfa are
2163 the NO_FAULT asis. We still need a helper for these,
2164 but we can just use the integer asi helper for them. */
2165 switch (size) {
2166 case MO_32:
2167 d64 = tcg_temp_new_i64();
2168 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2169 d32 = gen_dest_fpr_F(dc);
2170 tcg_gen_extrl_i64_i32(d32, d64);
2171 gen_store_fpr_F(dc, rd, d32);
2172 break;
2173 case MO_64:
2174 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
2175 r_asi, r_mop);
2176 break;
2177 case MO_128:
2178 d64 = tcg_temp_new_i64();
2179 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2180 addr_tmp = tcg_temp_new();
2181 tcg_gen_addi_tl(addr_tmp, addr, 8);
2182 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
2183 r_asi, r_mop);
2184 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2185 break;
2186 default:
2187 g_assert_not_reached();
2188 }
2189 }
2190 break;
2191 }
2192 }
2193
2194 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2195 TCGv addr, int rd)
2196 {
2197 MemOp memop = da->memop;
2198 MemOp size = memop & MO_SIZE;
2199 TCGv_i32 d32;
2200 TCGv addr_tmp;
2201
2202 /* TODO: Use 128-bit load/store below. */
2203 if (size == MO_128) {
2204 memop = (memop & ~MO_SIZE) | MO_64;
2205 }
2206
2207 switch (da->type) {
2208 case GET_ASI_EXCP:
2209 break;
2210
2211 case GET_ASI_DIRECT:
2212 memop |= MO_ALIGN_4;
2213 switch (size) {
2214 case MO_32:
2215 d32 = gen_load_fpr_F(dc, rd);
2216 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2217 break;
2218 case MO_64:
2219 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2220 memop | MO_ALIGN_4);
2221 break;
2222 case MO_128:
2223 /* Only 4-byte alignment required. However, it is legal for the
2224 cpu to signal the alignment fault, and the OS trap handler is
2225 required to fix it up. Requiring 16-byte alignment here avoids
2226 having to probe the second page before performing the first
2227 write. */
2228 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2229 memop | MO_ALIGN_16);
2230 addr_tmp = tcg_temp_new();
2231 tcg_gen_addi_tl(addr_tmp, addr, 8);
2232 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2233 break;
2234 default:
2235 g_assert_not_reached();
2236 }
2237 break;
2238
2239 case GET_ASI_BLOCK:
2240 /* Valid for stdfa on aligned registers only. */
2241 if (orig_size == MO_64 && (rd & 7) == 0) {
2242 /* The first operation checks required alignment. */
2243 addr_tmp = tcg_temp_new();
2244 for (int i = 0; ; ++i) {
2245 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2246 memop | (i == 0 ? MO_ALIGN_64 : 0));
2247 if (i == 7) {
2248 break;
2249 }
2250 tcg_gen_addi_tl(addr_tmp, addr, 8);
2251 addr = addr_tmp;
2252 }
2253 } else {
2254 gen_exception(dc, TT_ILL_INSN);
2255 }
2256 break;
2257
2258 case GET_ASI_SHORT:
2259 /* Valid for stdfa only. */
2260 if (orig_size == MO_64) {
2261 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2262 memop | MO_ALIGN);
2263 } else {
2264 gen_exception(dc, TT_ILL_INSN);
2265 }
2266 break;
2267
2268 default:
2269         /* According to the table in the UA2011 manual, the only
2270            other asis that are valid for stfa/stdfa/stqfa are
2271            the PST* asis, which aren't currently handled.  */
2272 gen_exception(dc, TT_ILL_INSN);
2273 break;
2274 }
2275 }
2276
2277 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2278 {
2279 TCGv hi = gen_dest_gpr(dc, rd);
2280 TCGv lo = gen_dest_gpr(dc, rd + 1);
2281
2282 switch (da->type) {
2283 case GET_ASI_EXCP:
2284 return;
2285
2286 case GET_ASI_DTWINX:
2287 #ifdef TARGET_SPARC64
2288 {
2289 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2290 TCGv_i128 t = tcg_temp_new_i128();
2291
2292 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2293 /*
2294 * Note that LE twinx acts as if each 64-bit register result is
2295 * byte swapped. We perform one 128-bit LE load, so must swap
2296 * the order of the writebacks.
2297 */
2298 if ((mop & MO_BSWAP) == MO_TE) {
2299 tcg_gen_extr_i128_i64(lo, hi, t);
2300 } else {
2301 tcg_gen_extr_i128_i64(hi, lo, t);
2302 }
2303 }
2304 break;
2305 #else
2306 g_assert_not_reached();
2307 #endif
2308
2309 case GET_ASI_DIRECT:
2310 {
2311 TCGv_i64 tmp = tcg_temp_new_i64();
2312
2313 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2314
2315             /* Note that LE ldda acts as if each 32-bit register
2316                result is byte swapped.  Having just performed one
2317                64-bit bswap, we now need to swap the writebacks.  */
2318 if ((da->memop & MO_BSWAP) == MO_TE) {
2319 tcg_gen_extr_i64_tl(lo, hi, tmp);
2320 } else {
2321 tcg_gen_extr_i64_tl(hi, lo, tmp);
2322 }
2323 }
2324 break;
2325
2326 default:
2327 /* ??? In theory we've handled all of the ASIs that are valid
2328 for ldda, and this should raise DAE_invalid_asi. However,
2329 real hardware allows others. This can be seen with e.g.
2330 FreeBSD 10.3 wrt ASI_IC_TAG. */
2331 {
2332 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2333 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2334 TCGv_i64 tmp = tcg_temp_new_i64();
2335
2336 save_state(dc);
2337 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2338
2339 /* See above. */
2340 if ((da->memop & MO_BSWAP) == MO_TE) {
2341 tcg_gen_extr_i64_tl(lo, hi, tmp);
2342 } else {
2343 tcg_gen_extr_i64_tl(hi, lo, tmp);
2344 }
2345 }
2346 break;
2347 }
2348
2349 gen_store_gpr(dc, rd, hi);
2350 gen_store_gpr(dc, rd + 1, lo);
2351 }
2352
2353 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2354 {
2355 TCGv hi = gen_load_gpr(dc, rd);
2356 TCGv lo = gen_load_gpr(dc, rd + 1);
2357
2358 switch (da->type) {
2359 case GET_ASI_EXCP:
2360 break;
2361
2362 case GET_ASI_DTWINX:
2363 #ifdef TARGET_SPARC64
2364 {
2365 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2366 TCGv_i128 t = tcg_temp_new_i128();
2367
2368 /*
2369 * Note that LE twinx acts as if each 64-bit register result is
2370 * byte swapped. We perform one 128-bit LE store, so must swap
2371 * the order of the construction.
2372 */
2373 if ((mop & MO_BSWAP) == MO_TE) {
2374 tcg_gen_concat_i64_i128(t, lo, hi);
2375 } else {
2376 tcg_gen_concat_i64_i128(t, hi, lo);
2377 }
2378 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2379 }
2380 break;
2381 #else
2382 g_assert_not_reached();
2383 #endif
2384
2385 case GET_ASI_DIRECT:
2386 {
2387 TCGv_i64 t64 = tcg_temp_new_i64();
2388
2389 /* Note that LE stda acts as if each 32-bit register result is
2390 byte swapped. We will perform one 64-bit LE store, so now
2391 we must swap the order of the construction. */
2392 if ((da->memop & MO_BSWAP) == MO_TE) {
2393 tcg_gen_concat_tl_i64(t64, lo, hi);
2394 } else {
2395 tcg_gen_concat_tl_i64(t64, hi, lo);
2396 }
2397 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2398 }
2399 break;
2400
2401 case GET_ASI_BFILL:
2402 assert(TARGET_LONG_BITS == 32);
2403         /* Store the 8-byte T64 value four times, 32 bytes total, at ADDR. */
2404 /* ??? The original qemu code suggests 8-byte alignment, dropping
2405 the low bits, but the only place I can see this used is in the
2406            Linux kernel with 32-byte alignment, which would make more sense
2407 as a cacheline-style operation. */
2408 {
2409 TCGv_i64 t64 = tcg_temp_new_i64();
2410 TCGv d_addr = tcg_temp_new();
2411 TCGv eight = tcg_constant_tl(8);
2412 int i;
2413
2414 tcg_gen_concat_tl_i64(t64, lo, hi);
2415 tcg_gen_andi_tl(d_addr, addr, -8);
2416 for (i = 0; i < 32; i += 8) {
2417 tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
2418 tcg_gen_add_tl(d_addr, d_addr, eight);
2419 }
2420 }
2421 break;
2422
2423 default:
2424 /* ??? In theory we've handled all of the ASIs that are valid
2425 for stda, and this should raise DAE_invalid_asi. */
2426 {
2427 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2428 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2429 TCGv_i64 t64 = tcg_temp_new_i64();
2430
2431 /* See above. */
2432 if ((da->memop & MO_BSWAP) == MO_TE) {
2433 tcg_gen_concat_tl_i64(t64, lo, hi);
2434 } else {
2435 tcg_gen_concat_tl_i64(t64, hi, lo);
2436 }
2437
2438 save_state(dc);
2439 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2440 }
2441 break;
2442 }
2443 }
2444
2445 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2446 {
2447 #ifdef TARGET_SPARC64
2448 TCGv_i32 c32, zero, dst, s1, s2;
2449
2450     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2451        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2452        the latter.  */
2453 c32 = tcg_temp_new_i32();
2454 if (cmp->is_bool) {
2455 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2456 } else {
2457 TCGv_i64 c64 = tcg_temp_new_i64();
2458 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2459 tcg_gen_extrl_i64_i32(c32, c64);
2460 }
2461
2462 s1 = gen_load_fpr_F(dc, rs);
2463 s2 = gen_load_fpr_F(dc, rd);
2464 dst = gen_dest_fpr_F(dc);
2465 zero = tcg_constant_i32(0);
2466
2467 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2468
2469 gen_store_fpr_F(dc, rd, dst);
2470 #else
2471 qemu_build_not_reached();
2472 #endif
2473 }
2474
2475 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2476 {
2477 #ifdef TARGET_SPARC64
2478 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2479 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2480 gen_load_fpr_D(dc, rs),
2481 gen_load_fpr_D(dc, rd));
2482 gen_store_fpr_D(dc, rd, dst);
2483 #else
2484 qemu_build_not_reached();
2485 #endif
2486 }
2487
2488 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2489 {
2490 #ifdef TARGET_SPARC64
2491 int qd = QFPREG(rd);
2492 int qs = QFPREG(rs);
2493
2494 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2495 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2496 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2497 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2498
2499 gen_update_fprs_dirty(dc, qd);
2500 #else
2501 qemu_build_not_reached();
2502 #endif
2503 }
2504
2505 #ifdef TARGET_SPARC64
2506 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2507 {
2508 TCGv_i32 r_tl = tcg_temp_new_i32();
2509
2510 /* load env->tl into r_tl */
2511 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2512
2513     /* tl = [0 ... MAXTL_MASK], where MAXTL_MASK must be a power of 2 minus 1 */
2514 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2515
2516 /* calculate offset to current trap state from env->ts, reuse r_tl */
2517 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2518 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2519
2520 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2521 {
2522 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2523 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2524 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2525 }
2526 }
2527 #endif
2528
2529 static int extract_dfpreg(DisasContext *dc, int x)
2530 {
2531 return DFPREG(x);
2532 }
2533
2534 static int extract_qfpreg(DisasContext *dc, int x)
2535 {
2536 return QFPREG(x);
2537 }
2538
2539 /* Include the auto-generated decoder. */
2540 #include "decode-insns.c.inc"
2541
2542 #define TRANS(NAME, AVAIL, FUNC, ...) \
2543 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2544 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2545
2546 #define avail_ALL(C) true
2547 #ifdef TARGET_SPARC64
2548 # define avail_32(C) false
2549 # define avail_ASR17(C) false
2550 # define avail_CASA(C) true
2551 # define avail_DIV(C) true
2552 # define avail_MUL(C) true
2553 # define avail_POWERDOWN(C) false
2554 # define avail_64(C) true
2555 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2556 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2557 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2558 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2559 #else
2560 # define avail_32(C) true
2561 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2562 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2563 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2564 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2565 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2566 # define avail_64(C) false
2567 # define avail_GL(C) false
2568 # define avail_HYPV(C) false
2569 # define avail_VIS1(C) false
2570 # define avail_VIS2(C) false
2571 #endif
2572
2573 /* Default case for non-jump instructions. */
2574 static bool advance_pc(DisasContext *dc)
2575 {
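    /*
     * npc values below 4 are not addresses but sentinels; they are
     * deliberately unaligned so that "npc & 3" distinguishes
     * DYNAMIC_PC, DYNAMIC_PC_LOOKUP and JUMP_PC from any real,
     * word-aligned next pc.
     */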
2576 if (dc->npc & 3) {
2577 switch (dc->npc) {
2578 case DYNAMIC_PC:
2579 case DYNAMIC_PC_LOOKUP:
2580 dc->pc = dc->npc;
2581 gen_op_next_insn();
2582 break;
2583 case JUMP_PC:
2584 /* we can do a static jump */
2585 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
2586 dc->base.is_jmp = DISAS_NORETURN;
2587 break;
2588 default:
2589 g_assert_not_reached();
2590 }
2591 } else {
2592 dc->pc = dc->npc;
2593 dc->npc = dc->npc + 4;
2594 }
2595 return true;
2596 }
2597
2598 /*
2599 * Major opcodes 00 and 01 -- branches, call, and sethi
2600 */
2601
2602 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2603 {
2604 if (annul) {
2605 dc->pc = dc->npc + 4;
2606 dc->npc = dc->pc + 4;
2607 } else {
2608 dc->pc = dc->npc;
2609 dc->npc = dc->pc + 4;
2610 }
2611 return true;
2612 }
2613
2614 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2615 target_ulong dest)
2616 {
2617 if (annul) {
2618 dc->pc = dest;
2619 dc->npc = dest + 4;
2620 } else {
2621 dc->pc = dc->npc;
2622 dc->npc = dest;
2623 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2624 }
2625 return true;
2626 }
2627
2628 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2629 bool annul, target_ulong dest)
2630 {
2631 target_ulong npc = dc->npc;
2632
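    /*
     * With annul set, the delay slot at npc is executed only when the
     * branch is taken: the taken path goes through npc, while the
     * not-taken path jumps straight to npc + 4.
     */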
2633 if (annul) {
2634 TCGLabel *l1 = gen_new_label();
2635
2636 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2637 gen_goto_tb(dc, 0, npc, dest);
2638 gen_set_label(l1);
2639 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2640
2641 dc->base.is_jmp = DISAS_NORETURN;
2642 } else {
2643 if (npc & 3) {
2644 switch (npc) {
2645 case DYNAMIC_PC:
2646 case DYNAMIC_PC_LOOKUP:
2647 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2648 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2649 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2650 cmp->c1, cmp->c2,
2651 tcg_constant_tl(dest), cpu_npc);
2652 dc->pc = npc;
2653 break;
2654 default:
2655 g_assert_not_reached();
2656 }
2657 } else {
2658 dc->pc = npc;
2659 dc->jump_pc[0] = dest;
2660 dc->jump_pc[1] = npc + 4;
2661 dc->npc = JUMP_PC;
2662 if (cmp->is_bool) {
2663 tcg_gen_mov_tl(cpu_cond, cmp->c1);
2664 } else {
2665 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2666 }
2667 }
2668 }
2669 return true;
2670 }
2671
2672 static bool raise_priv(DisasContext *dc)
2673 {
2674 gen_exception(dc, TT_PRIV_INSN);
2675 return true;
2676 }
2677
2678 static bool raise_unimpfpop(DisasContext *dc)
2679 {
2680 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2681 return true;
2682 }
2683
2684 static bool gen_trap_float128(DisasContext *dc)
2685 {
2686 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2687 return false;
2688 }
2689 return raise_unimpfpop(dc);
2690 }
2691
2692 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2693 {
2694 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2695 DisasCompare cmp;
2696
2697 switch (a->cond) {
2698 case 0x0:
2699 return advance_jump_uncond_never(dc, a->a);
2700 case 0x8:
2701 return advance_jump_uncond_always(dc, a->a, target);
2702 default:
2703 flush_cond(dc);
2704
2705 gen_compare(&cmp, a->cc, a->cond, dc);
2706 return advance_jump_cond(dc, &cmp, a->a, target);
2707 }
2708 }
2709
2710 TRANS(Bicc, ALL, do_bpcc, a)
2711 TRANS(BPcc, 64, do_bpcc, a)
2712
2713 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2714 {
2715 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2716 DisasCompare cmp;
2717
2718 if (gen_trap_ifnofpu(dc)) {
2719 return true;
2720 }
2721 switch (a->cond) {
2722 case 0x0:
2723 return advance_jump_uncond_never(dc, a->a);
2724 case 0x8:
2725 return advance_jump_uncond_always(dc, a->a, target);
2726 default:
2727 flush_cond(dc);
2728
2729 gen_fcompare(&cmp, a->cc, a->cond);
2730 return advance_jump_cond(dc, &cmp, a->a, target);
2731 }
2732 }
2733
2734 TRANS(FBPfcc, 64, do_fbpfcc, a)
2735 TRANS(FBfcc, ALL, do_fbpfcc, a)
2736
2737 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2738 {
2739 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2740 DisasCompare cmp;
2741
2742 if (!avail_64(dc)) {
2743 return false;
2744 }
2745 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2746 return false;
2747 }
2748
2749 flush_cond(dc);
2750 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2751 return advance_jump_cond(dc, &cmp, a->a, target);
2752 }
2753
2754 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2755 {
2756 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2757
2758 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2759 gen_mov_pc_npc(dc);
2760 dc->npc = target;
2761 return true;
2762 }
2763
2764 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2765 {
2766 /*
2767 * For sparc32, always generate the no-coprocessor exception.
2768 * For sparc64, always generate illegal instruction.
2769 */
2770 #ifdef TARGET_SPARC64
2771 return false;
2772 #else
2773 gen_exception(dc, TT_NCP_INSN);
2774 return true;
2775 #endif
2776 }
2777
2778 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2779 {
2780 /* Special-case %g0 because that's the canonical nop. */
2781 if (a->rd) {
2782 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2783 }
2784 return advance_pc(dc);
2785 }
2786
2787 /*
2788 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2789 */
2790
2791 static bool do_tcc(DisasContext *dc, int cond, int cc,
2792 int rs1, bool imm, int rs2_or_imm)
2793 {
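    /* V8 software trap numbers are 7 bits; the UA2005 hypervisor
       extension allows a supervisor to use the full 8 bits. */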
2794 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2795 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2796 DisasCompare cmp;
2797 TCGLabel *lab;
2798 TCGv_i32 trap;
2799
2800 /* Trap never. */
2801 if (cond == 0) {
2802 return advance_pc(dc);
2803 }
2804
2805 /*
2806 * Immediate traps are the most common case. Since this value is
2807 * live across the branch, it really pays to evaluate the constant.
2808 */
2809 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2810 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2811 } else {
2812 trap = tcg_temp_new_i32();
2813 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2814 if (imm) {
2815 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2816 } else {
2817 TCGv_i32 t2 = tcg_temp_new_i32();
2818 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2819 tcg_gen_add_i32(trap, trap, t2);
2820 }
2821 tcg_gen_andi_i32(trap, trap, mask);
2822 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2823 }
2824
2825 /* Trap always. */
2826 if (cond == 8) {
2827 save_state(dc);
2828 gen_helper_raise_exception(tcg_env, trap);
2829 dc->base.is_jmp = DISAS_NORETURN;
2830 return true;
2831 }
2832
2833 /* Conditional trap. */
2834 flush_cond(dc);
2835 lab = delay_exceptionv(dc, trap);
2836 gen_compare(&cmp, cc, cond, dc);
2837 tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2838
2839 return advance_pc(dc);
2840 }
2841
2842 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2843 {
2844 if (avail_32(dc) && a->cc) {
2845 return false;
2846 }
2847 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2848 }
2849
2850 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2851 {
2852 if (avail_64(dc)) {
2853 return false;
2854 }
2855 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2856 }
2857
2858 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2859 {
2860 if (avail_32(dc)) {
2861 return false;
2862 }
2863 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2864 }
2865
2866 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2867 {
2868 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2869 return advance_pc(dc);
2870 }
2871
2872 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2873 {
2874 if (avail_32(dc)) {
2875 return false;
2876 }
2877 if (a->mmask) {
2878 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2879 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2880 }
2881 if (a->cmask) {
2882 /* For #Sync, etc, end the TB to recognize interrupts. */
2883 dc->base.is_jmp = DISAS_EXIT;
2884 }
2885 return advance_pc(dc);
2886 }
2887
2888 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2889 TCGv (*func)(DisasContext *, TCGv))
2890 {
2891 if (!priv) {
2892 return raise_priv(dc);
2893 }
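    /* FUNC either fills in DST or returns another TCGv (a global or a
       constant); store whichever value it returns. */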
2894 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2895 return advance_pc(dc);
2896 }
2897
2898 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2899 {
2900 return cpu_y;
2901 }
2902
2903 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2904 {
2905 /*
2906 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2907 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2908      * This pattern matches after all other ASRs, so Leon3 ASR17 is handled first.
2909 */
2910 if (avail_64(dc) && a->rs1 != 0) {
2911 return false;
2912 }
2913 return do_rd_special(dc, true, a->rd, do_rdy);
2914 }
2915
2916 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2917 {
2918 uint32_t val;
2919
2920 /*
2921 * TODO: There are many more fields to be filled,
2922 * some of which are writable.
2923 */
2924 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2925 val |= 1 << 8; /* [8] V8 */
2926
2927 return tcg_constant_tl(val);
2928 }
2929
2930 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2931
2932 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2933 {
2934 update_psr(dc);
2935 gen_helper_rdccr(dst, tcg_env);
2936 return dst;
2937 }
2938
2939 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2940
2941 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2942 {
2943 #ifdef TARGET_SPARC64
2944 return tcg_constant_tl(dc->asi);
2945 #else
2946 qemu_build_not_reached();
2947 #endif
2948 }
2949
2950 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2951
2952 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2953 {
2954 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2955
2956 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2957 if (translator_io_start(&dc->base)) {
2958 dc->base.is_jmp = DISAS_EXIT;
2959 }
2960 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2961 tcg_constant_i32(dc->mem_idx));
2962 return dst;
2963 }
2964
2965 /* TODO: non-priv access only allowed when enabled. */
2966 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2967
2968 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2969 {
2970 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2971 }
2972
2973 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2974
2975 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2976 {
2977 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2978 return dst;
2979 }
2980
2981 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2982
2983 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2984 {
2985 gen_trap_ifnofpu(dc);
2986 return cpu_gsr;
2987 }
2988
2989 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2990
2991 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2992 {
2993 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2994 return dst;
2995 }
2996
2997 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2998
2999 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
3000 {
3001 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
3002 return dst;
3003 }
3004
3005 /* TODO: non-priv access only allowed when enabled. */
3006 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3007
3008 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
3009 {
3010 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3011
3012 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3013 if (translator_io_start(&dc->base)) {
3014 dc->base.is_jmp = DISAS_EXIT;
3015 }
3016 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3017 tcg_constant_i32(dc->mem_idx));
3018 return dst;
3019 }
3020
3021 /* TODO: non-priv access only allowed when enabled. */
3022 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3023
3024 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3025 {
3026 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3027 return dst;
3028 }
3029
3030 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3031 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3032
3033 /*
3034 * UltraSPARC-T1 Strand status.
3035  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
3036  * this ASR as implementation dependent.
3037 */
3038 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3039 {
3040 return tcg_constant_tl(1);
3041 }
3042
3043 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3044
3045 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3046 {
3047 update_psr(dc);
3048 gen_helper_rdpsr(dst, tcg_env);
3049 return dst;
3050 }
3051
3052 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3053
3054 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3055 {
3056 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3057 return dst;
3058 }
3059
3060 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3061
3062 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3063 {
3064 TCGv_i32 tl = tcg_temp_new_i32();
3065 TCGv_ptr tp = tcg_temp_new_ptr();
3066
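    /* htstate is an array of 64-bit words indexed by the trap level;
       shifting the masked tl left by 3 forms the byte offset. */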
3067 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3068 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3069 tcg_gen_shli_i32(tl, tl, 3);
3070 tcg_gen_ext_i32_ptr(tp, tl);
3071 tcg_gen_add_ptr(tp, tp, tcg_env);
3072
3073 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3074 return dst;
3075 }
3076
3077 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3078
3079 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3080 {
3081 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3082 return dst;
3083 }
3084
3085 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3086
3087 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3088 {
3089 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3090 return dst;
3091 }
3092
3093 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3094
3095 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3096 {
3097 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3098 return dst;
3099 }
3100
3101 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3102
3103 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3104 {
3105 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3106 return dst;
3107 }
3108
3109 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3110 do_rdhstick_cmpr)
3111
3112 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3113 {
3114 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3115 return dst;
3116 }
3117
3118 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3119
3120 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3121 {
3122 #ifdef TARGET_SPARC64
3123 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3124
3125 gen_load_trap_state_at_tl(r_tsptr);
3126 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3127 return dst;
3128 #else
3129 qemu_build_not_reached();
3130 #endif
3131 }
3132
3133 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3134
3135 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3136 {
3137 #ifdef TARGET_SPARC64
3138 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3139
3140 gen_load_trap_state_at_tl(r_tsptr);
3141 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3142 return dst;
3143 #else
3144 qemu_build_not_reached();
3145 #endif
3146 }
3147
3148 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3149
3150 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3151 {
3152 #ifdef TARGET_SPARC64
3153 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3154
3155 gen_load_trap_state_at_tl(r_tsptr);
3156 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3157 return dst;
3158 #else
3159 qemu_build_not_reached();
3160 #endif
3161 }
3162
3163 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3164
3165 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3166 {
3167 #ifdef TARGET_SPARC64
3168 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3169
3170 gen_load_trap_state_at_tl(r_tsptr);
3171 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3172 return dst;
3173 #else
3174 qemu_build_not_reached();
3175 #endif
3176 }
3177
3178 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3179 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3180
3181 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3182 {
3183 return cpu_tbr;
3184 }
3185
3186 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3187 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3188
3189 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3190 {
3191 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3192 return dst;
3193 }
3194
3195 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3196
3197 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3198 {
3199 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3200 return dst;
3201 }
3202
3203 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3204
3205 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3206 {
3207 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3208 return dst;
3209 }
3210
3211 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3212
3213 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3214 {
3215 gen_helper_rdcwp(dst, tcg_env);
3216 return dst;
3217 }
3218
3219 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3220
3221 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3222 {
3223 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3224 return dst;
3225 }
3226
3227 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3228
3229 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3230 {
3231 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3232 return dst;
3233 }
3234
3235 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3236 do_rdcanrestore)
3237
3238 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3239 {
3240 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3241 return dst;
3242 }
3243
3244 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3245
3246 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3247 {
3248 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3249 return dst;
3250 }
3251
3252 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3253
3254 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3255 {
3256 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3257 return dst;
3258 }
3259
3260 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3261
3262 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3263 {
3264 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3265 return dst;
3266 }
3267
3268 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3269
3270 /* UA2005 strand status */
3271 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3272 {
3273 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3274 return dst;
3275 }
3276
3277 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3278
3279 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3280 {
3281 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3282 return dst;
3283 }
3284
3285 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3286
3287 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3288 {
3289 if (avail_64(dc)) {
3290 gen_helper_flushw(tcg_env);
3291 return advance_pc(dc);
3292 }
3293 return false;
3294 }
3295
3296 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3297 void (*func)(DisasContext *, TCGv))
3298 {
3299 TCGv src;
3300
3301 /* For simplicity, we under-decoded the rs2 form. */
3302 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3303 return false;
3304 }
3305 if (!priv) {
3306 return raise_priv(dc);
3307 }
3308
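    /* The source operand for WRASR and WRPR is rs1 XOR (rs2 or simm13);
       the special cases below just avoid emitting a redundant xor. */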
3309 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3310 src = tcg_constant_tl(a->rs2_or_imm);
3311 } else {
3312 TCGv src1 = gen_load_gpr(dc, a->rs1);
3313 if (a->rs2_or_imm == 0) {
3314 src = src1;
3315 } else {
3316 src = tcg_temp_new();
3317 if (a->imm) {
3318 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3319 } else {
3320 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3321 }
3322 }
3323 }
3324 func(dc, src);
3325 return advance_pc(dc);
3326 }
3327
3328 static void do_wry(DisasContext *dc, TCGv src)
3329 {
3330 tcg_gen_ext32u_tl(cpu_y, src);
3331 }
3332
3333 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3334
3335 static void do_wrccr(DisasContext *dc, TCGv src)
3336 {
3337 gen_helper_wrccr(tcg_env, src);
3338 }
3339
3340 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3341
3342 static void do_wrasi(DisasContext *dc, TCGv src)
3343 {
3344 TCGv tmp = tcg_temp_new();
3345
3346 tcg_gen_ext8u_tl(tmp, src);
3347 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3348 /* End TB to notice changed ASI. */
3349 dc->base.is_jmp = DISAS_EXIT;
3350 }
3351
3352 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3353
3354 static void do_wrfprs(DisasContext *dc, TCGv src)
3355 {
3356 #ifdef TARGET_SPARC64
3357 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3358 dc->fprs_dirty = 0;
3359 dc->base.is_jmp = DISAS_EXIT;
3360 #else
3361 qemu_build_not_reached();
3362 #endif
3363 }
3364
3365 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3366
3367 static void do_wrgsr(DisasContext *dc, TCGv src)
3368 {
3369 gen_trap_ifnofpu(dc);
3370 tcg_gen_mov_tl(cpu_gsr, src);
3371 }
3372
3373 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3374
3375 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3376 {
3377 gen_helper_set_softint(tcg_env, src);
3378 }
3379
3380 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3381
3382 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3383 {
3384 gen_helper_clear_softint(tcg_env, src);
3385 }
3386
3387 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3388
3389 static void do_wrsoftint(DisasContext *dc, TCGv src)
3390 {
3391 gen_helper_write_softint(tcg_env, src);
3392 }
3393
3394 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3395
3396 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3397 {
3398 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3399
3400 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3401 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3402 translator_io_start(&dc->base);
3403 gen_helper_tick_set_limit(r_tickptr, src);
3404 /* End TB to handle timer interrupt */
3405 dc->base.is_jmp = DISAS_EXIT;
3406 }
3407
3408 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3409
3410 static void do_wrstick(DisasContext *dc, TCGv src)
3411 {
3412 #ifdef TARGET_SPARC64
3413 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3414
3415 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3416 translator_io_start(&dc->base);
3417 gen_helper_tick_set_count(r_tickptr, src);
3418 /* End TB to handle timer interrupt */
3419 dc->base.is_jmp = DISAS_EXIT;
3420 #else
3421 qemu_build_not_reached();
3422 #endif
3423 }
3424
3425 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3426
3427 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3428 {
3429 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3430
3431 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3432 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3433 translator_io_start(&dc->base);
3434 gen_helper_tick_set_limit(r_tickptr, src);
3435 /* End TB to handle timer interrupt */
3436 dc->base.is_jmp = DISAS_EXIT;
3437 }
3438
3439 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3440
3441 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3442 {
3443 save_state(dc);
3444 gen_helper_power_down(tcg_env);
3445 }
3446
3447 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3448
3449 static void do_wrpsr(DisasContext *dc, TCGv src)
3450 {
3451 gen_helper_wrpsr(tcg_env, src);
3452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3453 dc->cc_op = CC_OP_FLAGS;
3454 dc->base.is_jmp = DISAS_EXIT;
3455 }
3456
3457 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3458
3459 static void do_wrwim(DisasContext *dc, TCGv src)
3460 {
3461 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3462 TCGv tmp = tcg_temp_new();
3463
3464 tcg_gen_andi_tl(tmp, src, mask);
3465 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3466 }
3467
3468 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3469
3470 static void do_wrtpc(DisasContext *dc, TCGv src)
3471 {
3472 #ifdef TARGET_SPARC64
3473 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3474
3475 gen_load_trap_state_at_tl(r_tsptr);
3476 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3477 #else
3478 qemu_build_not_reached();
3479 #endif
3480 }
3481
3482 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3483
3484 static void do_wrtnpc(DisasContext *dc, TCGv src)
3485 {
3486 #ifdef TARGET_SPARC64
3487 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3488
3489 gen_load_trap_state_at_tl(r_tsptr);
3490 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3491 #else
3492 qemu_build_not_reached();
3493 #endif
3494 }
3495
3496 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3497
3498 static void do_wrtstate(DisasContext *dc, TCGv src)
3499 {
3500 #ifdef TARGET_SPARC64
3501 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3502
3503 gen_load_trap_state_at_tl(r_tsptr);
3504 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3505 #else
3506 qemu_build_not_reached();
3507 #endif
3508 }
3509
3510 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3511
3512 static void do_wrtt(DisasContext *dc, TCGv src)
3513 {
3514 #ifdef TARGET_SPARC64
3515 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3516
3517 gen_load_trap_state_at_tl(r_tsptr);
3518 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3519 #else
3520 qemu_build_not_reached();
3521 #endif
3522 }
3523
3524 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3525
3526 static void do_wrtick(DisasContext *dc, TCGv src)
3527 {
3528 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3529
3530 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3531 translator_io_start(&dc->base);
3532 gen_helper_tick_set_count(r_tickptr, src);
3533 /* End TB to handle timer interrupt */
3534 dc->base.is_jmp = DISAS_EXIT;
3535 }
3536
3537 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3538
3539 static void do_wrtba(DisasContext *dc, TCGv src)
3540 {
3541 tcg_gen_mov_tl(cpu_tbr, src);
3542 }
3543
3544 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3545
3546 static void do_wrpstate(DisasContext *dc, TCGv src)
3547 {
3548 save_state(dc);
3549 if (translator_io_start(&dc->base)) {
3550 dc->base.is_jmp = DISAS_EXIT;
3551 }
3552 gen_helper_wrpstate(tcg_env, src);
3553 dc->npc = DYNAMIC_PC;
3554 }
3555
3556 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3557
3558 static void do_wrtl(DisasContext *dc, TCGv src)
3559 {
3560 save_state(dc);
3561 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3562 dc->npc = DYNAMIC_PC;
3563 }
3564
3565 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3566
3567 static void do_wrpil(DisasContext *dc, TCGv src)
3568 {
3569 if (translator_io_start(&dc->base)) {
3570 dc->base.is_jmp = DISAS_EXIT;
3571 }
3572 gen_helper_wrpil(tcg_env, src);
3573 }
3574
3575 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3576
3577 static void do_wrcwp(DisasContext *dc, TCGv src)
3578 {
3579 gen_helper_wrcwp(tcg_env, src);
3580 }
3581
3582 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3583
3584 static void do_wrcansave(DisasContext *dc, TCGv src)
3585 {
3586 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3587 }
3588
3589 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3590
3591 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3592 {
3593 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3594 }
3595
3596 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3597
3598 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3599 {
3600 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3601 }
3602
3603 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3604
3605 static void do_wrotherwin(DisasContext *dc, TCGv src)
3606 {
3607 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3608 }
3609
3610 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3611
3612 static void do_wrwstate(DisasContext *dc, TCGv src)
3613 {
3614 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3615 }
3616
3617 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3618
3619 static void do_wrgl(DisasContext *dc, TCGv src)
3620 {
3621 gen_helper_wrgl(tcg_env, src);
3622 }
3623
3624 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3625
3626 /* UA2005 strand status */
3627 static void do_wrssr(DisasContext *dc, TCGv src)
3628 {
3629 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3630 }
3631
3632 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3633
3634 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3635
3636 static void do_wrhpstate(DisasContext *dc, TCGv src)
3637 {
3638 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3639 dc->base.is_jmp = DISAS_EXIT;
3640 }
3641
3642 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3643
3644 static void do_wrhtstate(DisasContext *dc, TCGv src)
3645 {
3646 TCGv_i32 tl = tcg_temp_new_i32();
3647 TCGv_ptr tp = tcg_temp_new_ptr();
3648
3649 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3650 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3651 tcg_gen_shli_i32(tl, tl, 3);
3652 tcg_gen_ext_i32_ptr(tp, tl);
3653 tcg_gen_add_ptr(tp, tp, tcg_env);
3654
3655 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3656 }
3657
3658 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3659
3660 static void do_wrhintp(DisasContext *dc, TCGv src)
3661 {
3662 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3663 }
3664
3665 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3666
3667 static void do_wrhtba(DisasContext *dc, TCGv src)
3668 {
3669 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3670 }
3671
3672 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3673
3674 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3675 {
3676 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3677
3678 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3679 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3680 translator_io_start(&dc->base);
3681 gen_helper_tick_set_limit(r_tickptr, src);
3682 /* End TB to handle timer interrupt */
3683 dc->base.is_jmp = DISAS_EXIT;
3684 }
3685
3686 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3687 do_wrhstick_cmpr)
3688
3689 static bool do_saved_restored(DisasContext *dc, bool saved)
3690 {
3691 if (!supervisor(dc)) {
3692 return raise_priv(dc);
3693 }
3694 if (saved) {
3695 gen_helper_saved(tcg_env);
3696 } else {
3697 gen_helper_restored(tcg_env);
3698 }
3699 return advance_pc(dc);
3700 }
3701
3702 TRANS(SAVED, 64, do_saved_restored, true)
3703 TRANS(RESTORED, 64, do_saved_restored, false)
3704
3705 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3706 {
3707 return advance_pc(dc);
3708 }
3709
3710 /*
3711 * TODO: Need a feature bit for sparcv8.
3712 * In the meantime, treat all 32-bit cpus like sparcv7.
3713 */
3714 TRANS(NOP_v7, 32, trans_NOP, a)
3715 TRANS(NOP_v9, 64, trans_NOP, a)
3716
3717 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3718 void (*func)(TCGv, TCGv, TCGv),
3719 void (*funci)(TCGv, TCGv, target_long))
3720 {
3721 TCGv dst, src1;
3722
3723 /* For simplicity, we under-decoded the rs2 form. */
3724 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3725 return false;
3726 }
3727
3728 if (a->cc) {
3729 dst = cpu_cc_dst;
3730 } else {
3731 dst = gen_dest_gpr(dc, a->rd);
3732 }
3733 src1 = gen_load_gpr(dc, a->rs1);
3734
3735 if (a->imm || a->rs2_or_imm == 0) {
3736 if (funci) {
3737 funci(dst, src1, a->rs2_or_imm);
3738 } else {
3739 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3740 }
3741 } else {
3742 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3743 }
3744 gen_store_gpr(dc, a->rd, dst);
3745
3746 if (a->cc) {
3747 tcg_gen_movi_i32(cpu_cc_op, cc_op);
3748 dc->cc_op = cc_op;
3749 }
3750 return advance_pc(dc);
3751 }
3752
3753 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3754 void (*func)(TCGv, TCGv, TCGv),
3755 void (*funci)(TCGv, TCGv, target_long),
3756 void (*func_cc)(TCGv, TCGv, TCGv))
3757 {
3758 if (a->cc) {
3759 assert(cc_op >= 0);
3760 return do_arith_int(dc, a, cc_op, func_cc, NULL);
3761 }
3762 return do_arith_int(dc, a, cc_op, func, funci);
3763 }
3764
3765 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3766 void (*func)(TCGv, TCGv, TCGv),
3767 void (*funci)(TCGv, TCGv, target_long))
3768 {
3769 return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
3770 }
3771
3772 TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
3773 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
3774 TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
3775 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
3776
3777 TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
3778 TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
3779 TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
3780 TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
3781
3782 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3783 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3784 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3785 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3786 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3787
3788 TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3789 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3790 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3791
3792 TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
3793 TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
3794 TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
3795 TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
3796
3797 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3798 TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
3799
3800 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3801 {
3802 /* OR with %g0 is the canonical alias for MOV. */
3803 if (!a->cc && a->rs1 == 0) {
3804 if (a->imm || a->rs2_or_imm == 0) {
3805 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3806 } else if (a->rs2_or_imm & ~0x1f) {
3807 /* For simplicity, we under-decoded the rs2 form. */
3808 return false;
3809 } else {
3810 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3811 }
3812 return advance_pc(dc);
3813 }
3814 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3815 }
3816
3817 static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
3818 {
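    /* How the carry-in is reconstructed depends on how the flags were
       last computed, so dispatch on the current cc_op. */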
3819 switch (dc->cc_op) {
3820 case CC_OP_DIV:
3821 case CC_OP_LOGIC:
3822 /* Carry is known to be zero. Fall back to plain ADD. */
3823 return do_arith(dc, a, CC_OP_ADD,
3824 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
3825 case CC_OP_ADD:
3826 case CC_OP_TADD:
3827 case CC_OP_TADDTV:
3828 return do_arith(dc, a, CC_OP_ADDX,
3829 gen_op_addc_add, NULL, gen_op_addccc_add);
3830 case CC_OP_SUB:
3831 case CC_OP_TSUB:
3832 case CC_OP_TSUBTV:
3833 return do_arith(dc, a, CC_OP_ADDX,
3834 gen_op_addc_sub, NULL, gen_op_addccc_sub);
3835 default:
3836 return do_arith(dc, a, CC_OP_ADDX,
3837 gen_op_addc_generic, NULL, gen_op_addccc_generic);
3838 }
3839 }
3840
3841 static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
3842 {
3843 switch (dc->cc_op) {
3844 case CC_OP_DIV:
3845 case CC_OP_LOGIC:
3846 /* Carry is known to be zero. Fall back to plain SUB. */
3847 return do_arith(dc, a, CC_OP_SUB,
3848 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
3849 case CC_OP_ADD:
3850 case CC_OP_TADD:
3851 case CC_OP_TADDTV:
3852 return do_arith(dc, a, CC_OP_SUBX,
3853 gen_op_subc_add, NULL, gen_op_subccc_add);
3854 case CC_OP_SUB:
3855 case CC_OP_TSUB:
3856 case CC_OP_TSUBTV:
3857 return do_arith(dc, a, CC_OP_SUBX,
3858 gen_op_subc_sub, NULL, gen_op_subccc_sub);
3859 default:
3860 return do_arith(dc, a, CC_OP_SUBX,
3861 gen_op_subc_generic, NULL, gen_op_subccc_generic);
3862 }
3863 }
3864
3865 static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
3866 {
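    /* MULScc shifts (icc.N xor icc.V) into the partial product, so the
       flags must be up to date before gen_op_mulscc runs. */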
3867 update_psr(dc);
3868 return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
3869 }
3870
3871 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3872 int width, bool cc, bool left)
3873 {
3874 TCGv dst, s1, s2, lo1, lo2;
3875 uint64_t amask, tabl, tabr;
3876 int shift, imask, omask;
3877
3878 dst = gen_dest_gpr(dc, a->rd);
3879 s1 = gen_load_gpr(dc, a->rs1);
3880 s2 = gen_load_gpr(dc, a->rs2);
3881
3882 if (cc) {
3883 tcg_gen_mov_tl(cpu_cc_src, s1);
3884 tcg_gen_mov_tl(cpu_cc_src2, s2);
3885 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3886 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3887 dc->cc_op = CC_OP_SUB;
3888 }
3889
3890 /*
3891 * Theory of operation: there are two tables, left and right (not to
3892 * be confused with the left and right versions of the opcode). These
3893 * are indexed by the low 3 bits of the inputs. To make things "easy",
3894 * these tables are loaded into two constants, TABL and TABR below.
3895 * The operation index = (input & imask) << shift calculates the index
3896 * into the constant, while val = (table >> index) & omask calculates
3897 * the value we're looking for.
3898 */
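    /*
     * For example, EDGE8 (width 8, !left) with (s1 & 7) == 2 gives
     * index = 2 << 3 = 16, so lo1 = (0x0103070f1f3f7fff >> 16) & 0xff
     * = 0x3f: the edge mask with the first two bytes excluded.
     */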
3899 switch (width) {
3900 case 8:
3901 imask = 0x7;
3902 shift = 3;
3903 omask = 0xff;
3904 if (left) {
3905 tabl = 0x80c0e0f0f8fcfeffULL;
3906 tabr = 0xff7f3f1f0f070301ULL;
3907 } else {
3908 tabl = 0x0103070f1f3f7fffULL;
3909 tabr = 0xfffefcf8f0e0c080ULL;
3910 }
3911 break;
3912 case 16:
3913 imask = 0x6;
3914 shift = 1;
3915 omask = 0xf;
3916 if (left) {
3917 tabl = 0x8cef;
3918 tabr = 0xf731;
3919 } else {
3920 tabl = 0x137f;
3921 tabr = 0xfec8;
3922 }
3923 break;
3924 case 32:
3925 imask = 0x4;
3926 shift = 0;
3927 omask = 0x3;
3928 if (left) {
3929 tabl = (2 << 2) | 3;
3930 tabr = (3 << 2) | 1;
3931 } else {
3932 tabl = (1 << 2) | 3;
3933 tabr = (3 << 2) | 2;
3934 }
3935 break;
3936 default:
3937 abort();
3938 }
3939
3940 lo1 = tcg_temp_new();
3941 lo2 = tcg_temp_new();
3942 tcg_gen_andi_tl(lo1, s1, imask);
3943 tcg_gen_andi_tl(lo2, s2, imask);
3944 tcg_gen_shli_tl(lo1, lo1, shift);
3945 tcg_gen_shli_tl(lo2, lo2, shift);
3946
3947 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3948 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3949 tcg_gen_andi_tl(lo1, lo1, omask);
3950 tcg_gen_andi_tl(lo2, lo2, omask);
3951
3952 amask = address_mask_i(dc, -8);
3953 tcg_gen_andi_tl(s1, s1, amask);
3954 tcg_gen_andi_tl(s2, s2, amask);
3955
3956 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3957 tcg_gen_and_tl(lo2, lo2, lo1);
3958 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3959
3960 gen_store_gpr(dc, a->rd, dst);
3961 return advance_pc(dc);
3962 }
3963
3964 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3965 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3966 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3967 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3968 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3969 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3970
3971 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3972 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3973 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3974 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3975 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3976 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3977
3978 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3979 void (*func)(TCGv, TCGv, TCGv))
3980 {
3981 TCGv dst = gen_dest_gpr(dc, a->rd);
3982 TCGv src1 = gen_load_gpr(dc, a->rs1);
3983 TCGv src2 = gen_load_gpr(dc, a->rs2);
3984
3985 func(dst, src1, src2);
3986 gen_store_gpr(dc, a->rd, dst);
3987 return advance_pc(dc);
3988 }
3989
3990 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3991 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3992 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3993
3994 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3995 {
3996 #ifdef TARGET_SPARC64
3997 TCGv tmp = tcg_temp_new();
3998
3999 tcg_gen_add_tl(tmp, s1, s2);
4000 tcg_gen_andi_tl(dst, tmp, -8);
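    /* Record the discarded low 3 bits in GSR.align, to be consumed by a
       later FALIGNDATA. */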
4001 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4002 #else
4003 g_assert_not_reached();
4004 #endif
4005 }
4006
4007 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4008 {
4009 #ifdef TARGET_SPARC64
4010 TCGv tmp = tcg_temp_new();
4011
4012 tcg_gen_add_tl(tmp, s1, s2);
4013 tcg_gen_andi_tl(dst, tmp, -8);
4014 tcg_gen_neg_tl(tmp, tmp);
4015 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4016 #else
4017 g_assert_not_reached();
4018 #endif
4019 }
4020
4021 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4022 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4023
4024 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4025 {
4026 #ifdef TARGET_SPARC64
4027 tcg_gen_add_tl(dst, s1, s2);
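    /* BMASK also latches the 32-bit sum into GSR.mask (bits 63:32), to
       be consumed by a subsequent BSHUFFLE. */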
4028 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4029 #else
4030 g_assert_not_reached();
4031 #endif
4032 }
4033
4034 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4035
4036 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4037 {
4038 TCGv dst, src1, src2;
4039
4040 /* Reject 64-bit shifts for sparc32. */
4041 if (avail_32(dc) && a->x) {
4042 return false;
4043 }
4044
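    /* Per the ISA, only the low 5 (or 6, for x=1) bits of the count
       are used. */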
4045 src2 = tcg_temp_new();
4046 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4047 src1 = gen_load_gpr(dc, a->rs1);
4048 dst = gen_dest_gpr(dc, a->rd);
4049
4050 if (l) {
4051 tcg_gen_shl_tl(dst, src1, src2);
4052 if (!a->x) {
4053 tcg_gen_ext32u_tl(dst, dst);
4054 }
4055 } else if (u) {
4056 if (!a->x) {
4057 tcg_gen_ext32u_tl(dst, src1);
4058 src1 = dst;
4059 }
4060 tcg_gen_shr_tl(dst, src1, src2);
4061 } else {
4062 if (!a->x) {
4063 tcg_gen_ext32s_tl(dst, src1);
4064 src1 = dst;
4065 }
4066 tcg_gen_sar_tl(dst, src1, src2);
4067 }
4068 gen_store_gpr(dc, a->rd, dst);
4069 return advance_pc(dc);
4070 }
4071
4072 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4073 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4074 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4075
4076 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4077 {
4078 TCGv dst, src1;
4079
4080 /* Reject 64-bit shifts for sparc32. */
4081 if (avail_32(dc) && (a->x || a->i >= 32)) {
4082 return false;
4083 }
4084
4085 src1 = gen_load_gpr(dc, a->rs1);
4086 dst = gen_dest_gpr(dc, a->rd);
4087
4088 if (avail_32(dc) || a->x) {
4089 if (l) {
4090 tcg_gen_shli_tl(dst, src1, a->i);
4091 } else if (u) {
4092 tcg_gen_shri_tl(dst, src1, a->i);
4093 } else {
4094 tcg_gen_sari_tl(dst, src1, a->i);
4095 }
4096 } else {
4097 if (l) {
4098 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4099 } else if (u) {
4100 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4101 } else {
4102 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4103 }
4104 }
4105 gen_store_gpr(dc, a->rd, dst);
4106 return advance_pc(dc);
4107 }
4108
4109 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4110 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4111 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4112
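/*
 * Return rs2 as a TCGv: the simm13 constant, %g0 as constant zero,
 * or the register; NULL if the under-decoded rs2 field is invalid.
 */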
4113 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4114 {
4115 /* For simplicity, we under-decoded the rs2 form. */
4116 if (!imm && rs2_or_imm & ~0x1f) {
4117 return NULL;
4118 }
4119 if (imm || rs2_or_imm == 0) {
4120 return tcg_constant_tl(rs2_or_imm);
4121 } else {
4122 return cpu_regs[rs2_or_imm];
4123 }
4124 }
4125
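/*
 * Conditional move: rd = src2 if CMP holds, else rd keeps its old
 * value, which is why rd is loaded rather than a fresh destination.
 */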
4126 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4127 {
4128 TCGv dst = gen_load_gpr(dc, rd);
4129
4130 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4131 gen_store_gpr(dc, rd, dst);
4132 return advance_pc(dc);
4133 }
4134
4135 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4136 {
4137 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4138 DisasCompare cmp;
4139
4140 if (src2 == NULL) {
4141 return false;
4142 }
4143 gen_compare(&cmp, a->cc, a->cond, dc);
4144 return do_mov_cond(dc, &cmp, a->rd, src2);
4145 }
4146
4147 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4148 {
4149 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4150 DisasCompare cmp;
4151
4152 if (src2 == NULL) {
4153 return false;
4154 }
4155 gen_fcompare(&cmp, a->cc, a->cond);
4156 return do_mov_cond(dc, &cmp, a->rd, src2);
4157 }
4158
4159 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4160 {
4161 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4162 DisasCompare cmp;
4163
4164 if (src2 == NULL) {
4165 return false;
4166 }
4167 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4168 return do_mov_cond(dc, &cmp, a->rd, src2);
4169 }
4170
4171 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4172 bool (*func)(DisasContext *dc, int rd, TCGv src))
4173 {
4174 TCGv src1, sum;
4175
4176 /* For simplicity, we under-decoded the rs2 form. */
4177 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4178 return false;
4179 }
4180
4181 /*
4182 * Always load the sum into a new temporary.
4183 * This is required to capture the value across a window change,
4184 * e.g. SAVE and RESTORE, where the source window is switched out.
4185 */
4186 sum = tcg_temp_new();
4187 src1 = gen_load_gpr(dc, a->rs1);
4188 if (a->imm || a->rs2_or_imm == 0) {
4189 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4190 } else {
4191 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4192 }
4193 return func(dc, a->rd, sum);
4194 }
4195
4196 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4197 {
4198 /*
4199 * Preserve pc across advance, so that we can delay
4200 * the writeback to rd until after src is consumed.
4201 */
4202 target_ulong cur_pc = dc->pc;
4203
4204 gen_check_align(dc, src, 3);
4205
4206 gen_mov_pc_npc(dc);
4207 tcg_gen_mov_tl(cpu_npc, src);
4208 gen_address_mask(dc, cpu_npc);
4209 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4210
4211 dc->npc = DYNAMIC_PC_LOOKUP;
4212 return true;
4213 }
4214
4215 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4216
4217 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4218 {
4219 if (!supervisor(dc)) {
4220 return raise_priv(dc);
4221 }
4222
4223 gen_check_align(dc, src, 3);
4224
4225 gen_mov_pc_npc(dc);
4226 tcg_gen_mov_tl(cpu_npc, src);
4227 gen_helper_rett(tcg_env);
4228
4229 dc->npc = DYNAMIC_PC;
4230 return true;
4231 }
4232
4233 TRANS(RETT, 32, do_add_special, a, do_rett)
4234
4235 static bool do_return(DisasContext *dc, int rd, TCGv src)
4236 {
4237 gen_check_align(dc, src, 3);
4238
4239 gen_mov_pc_npc(dc);
4240 tcg_gen_mov_tl(cpu_npc, src);
4241 gen_address_mask(dc, cpu_npc);
4242
4243 gen_helper_restore(tcg_env);
4244 dc->npc = DYNAMIC_PC_LOOKUP;
4245 return true;
4246 }
4247
4248 TRANS(RETURN, 64, do_add_special, a, do_return)
4249
4250 static bool do_save(DisasContext *dc, int rd, TCGv src)
4251 {
4252 gen_helper_save(tcg_env);
4253 gen_store_gpr(dc, rd, src);
4254 return advance_pc(dc);
4255 }
4256
4257 TRANS(SAVE, ALL, do_add_special, a, do_save)
4258
4259 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4260 {
4261 gen_helper_restore(tcg_env);
4262 gen_store_gpr(dc, rd, src);
4263 return advance_pc(dc);
4264 }
4265
4266 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4267
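/*
 * DONE and RETRY return from a trap handler: DONE resumes execution
 * at TNPC, RETRY re-executes the trapped instruction at TPC.  Both
 * leave pc/npc dynamic, so the TB must end here.
 */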
4268 static bool do_done_retry(DisasContext *dc, bool done)
4269 {
4270 if (!supervisor(dc)) {
4271 return raise_priv(dc);
4272 }
4273 dc->npc = DYNAMIC_PC;
4274 dc->pc = DYNAMIC_PC;
4275 translator_io_start(&dc->base);
4276 if (done) {
4277 gen_helper_done(tcg_env);
4278 } else {
4279 gen_helper_retry(tcg_env);
4280 }
4281 return true;
4282 }
4283
4284 TRANS(DONE, 64, do_done_retry, true)
4285 TRANS(RETRY, 64, do_done_retry, false)
4286
4287 /*
4288 * Major opcode 11 -- load and store instructions
4289 */
4290
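/*
 * Form the effective address rs1 + (rs2 or simm13), zero-extending
 * to 32 bits when address masking (PSTATE.AM) is in effect.
 * Returns NULL for an invalid under-decoded rs2 field.
 */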
4291 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4292 {
4293 TCGv addr, tmp = NULL;
4294
4295 /* For simplicity, we under-decoded the rs2 form. */
4296 if (!imm && rs2_or_imm & ~0x1f) {
4297 return NULL;
4298 }
4299
4300 addr = gen_load_gpr(dc, rs1);
4301 if (rs2_or_imm) {
4302 tmp = tcg_temp_new();
4303 if (imm) {
4304 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4305 } else {
4306 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4307 }
4308 addr = tmp;
4309 }
4310 if (AM_CHECK(dc)) {
4311 if (!tmp) {
4312 tmp = tcg_temp_new();
4313 }
4314 tcg_gen_ext32u_tl(tmp, addr);
4315 addr = tmp;
4316 }
4317 return addr;
4318 }
4319
4320 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4321 {
4322 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4323 DisasASI da;
4324
4325 if (addr == NULL) {
4326 return false;
4327 }
4328 da = resolve_asi(dc, a->asi, mop);
4329
4330 reg = gen_dest_gpr(dc, a->rd);
4331 gen_ld_asi(dc, &da, reg, addr);
4332 gen_store_gpr(dc, a->rd, reg);
4333 return advance_pc(dc);
4334 }
4335
4336 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4337 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4338 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4339 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4340 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4341 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4342 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4343
4344 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4345 {
4346 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4347 DisasASI da;
4348
4349 if (addr == NULL) {
4350 return false;
4351 }
4352 da = resolve_asi(dc, a->asi, mop);
4353
4354 reg = gen_load_gpr(dc, a->rd);
4355 gen_st_asi(dc, &da, reg, addr);
4356 return advance_pc(dc);
4357 }
4358
4359 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4360 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4361 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4362 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4363
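/* LDD and STD access an even/odd register pair; an odd rd is illegal. */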
4364 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4365 {
4366 TCGv addr;
4367 DisasASI da;
4368
4369 if (a->rd & 1) {
4370 return false;
4371 }
4372 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4373 if (addr == NULL) {
4374 return false;
4375 }
4376 da = resolve_asi(dc, a->asi, MO_TEUQ);
4377 gen_ldda_asi(dc, &da, addr, a->rd);
4378 return advance_pc(dc);
4379 }
4380
4381 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4382 {
4383 TCGv addr;
4384 DisasASI da;
4385
4386 if (a->rd & 1) {
4387 return false;
4388 }
4389 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4390 if (addr == NULL) {
4391 return false;
4392 }
4393 da = resolve_asi(dc, a->asi, MO_TEUQ);
4394 gen_stda_asi(dc, &da, addr, a->rd);
4395 return advance_pc(dc);
4396 }
4397
4398 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4399 {
4400 TCGv addr, reg;
4401 DisasASI da;
4402
4403 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4404 if (addr == NULL) {
4405 return false;
4406 }
4407 da = resolve_asi(dc, a->asi, MO_UB);
4408
4409 reg = gen_dest_gpr(dc, a->rd);
4410 gen_ldstub_asi(dc, &da, reg, addr);
4411 gen_store_gpr(dc, a->rd, reg);
4412 return advance_pc(dc);
4413 }
4414
4415 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4416 {
4417 TCGv addr, dst, src;
4418 DisasASI da;
4419
4420 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4421 if (addr == NULL) {
4422 return false;
4423 }
4424 da = resolve_asi(dc, a->asi, MO_TEUL);
4425
4426 dst = gen_dest_gpr(dc, a->rd);
4427 src = gen_load_gpr(dc, a->rd);
4428 gen_swap_asi(dc, &da, dst, src, addr);
4429 gen_store_gpr(dc, a->rd, dst);
4430 return advance_pc(dc);
4431 }
4432
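/*
 * Compare-and-swap, e.g. "casa [rs1] imm_asi, rs2, rd": the word at
 * [rs1] is compared with rs2 and swapped with rd if equal; rd always
 * receives the old memory value.
 */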
4433 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4434 {
4435 TCGv addr, o, n, c;
4436 DisasASI da;
4437
4438 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4439 if (addr == NULL) {
4440 return false;
4441 }
4442 da = resolve_asi(dc, a->asi, mop);
4443
4444 o = gen_dest_gpr(dc, a->rd);
4445 n = gen_load_gpr(dc, a->rd);
4446 c = gen_load_gpr(dc, a->rs2_or_imm);
4447 gen_cas_asi(dc, &da, o, n, c, addr);
4448 gen_store_gpr(dc, a->rd, o);
4449 return advance_pc(dc);
4450 }
4451
4452 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4453 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4454
4455 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4456 {
4457 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4458 DisasASI da;
4459
4460 if (addr == NULL) {
4461 return false;
4462 }
4463 if (gen_trap_ifnofpu(dc)) {
4464 return true;
4465 }
4466 if (sz == MO_128 && gen_trap_float128(dc)) {
4467 return true;
4468 }
4469 da = resolve_asi(dc, a->asi, MO_TE | sz);
4470 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4471 gen_update_fprs_dirty(dc, a->rd);
4472 return advance_pc(dc);
4473 }
4474
4475 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4476 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4477 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4478
4479 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4480 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4481 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4482
4483 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4484 {
4485 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4486 DisasASI da;
4487
4488 if (addr == NULL) {
4489 return false;
4490 }
4491 if (gen_trap_ifnofpu(dc)) {
4492 return true;
4493 }
4494 if (sz == MO_128 && gen_trap_float128(dc)) {
4495 return true;
4496 }
4497 da = resolve_asi(dc, a->asi, MO_TE | sz);
4498 gen_stf_asi(dc, &da, sz, addr, a->rd);
4499 return advance_pc(dc);
4500 }
4501
4502 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4503 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4504 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4505
4506 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4507 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4508 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4509
4510 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4511 {
4512 if (!avail_32(dc)) {
4513 return false;
4514 }
4515 if (!supervisor(dc)) {
4516 return raise_priv(dc);
4517 }
4518 if (gen_trap_ifnofpu(dc)) {
4519 return true;
4520 }
4521 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4522 return true;
4523 }
4524
4525 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4526 target_ulong new_mask, target_ulong old_mask)
4527 {
4528 TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4529 if (addr == NULL) {
4530 return false;
4531 }
4532 if (gen_trap_ifnofpu(dc)) {
4533 return true;
4534 }
4535 tmp = tcg_temp_new();
4536 tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
4537 tcg_gen_andi_tl(tmp, tmp, new_mask);
4538 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
4539 tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
4540 gen_helper_set_fsr(tcg_env, cpu_fsr);
4541 return advance_pc(dc);
4542 }
4543
4544 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4545 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4546
4547 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4548 {
4549 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4550 if (addr == NULL) {
4551 return false;
4552 }
4553 if (gen_trap_ifnofpu(dc)) {
4554 return true;
4555 }
4556 tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4557 return advance_pc(dc);
4558 }
4559
4560 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4561 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4562
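/*
 * Unary/binary FPop templates: f/d/q denote 32-, 64- and 128-bit
 * operands.  The do_env_* variants pass tcg_env so the helper can
 * raise IEEE exceptions, which are then checked into FSR.
 */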
4563 static bool do_ff(DisasContext *dc, arg_r_r *a,
4564 void (*func)(TCGv_i32, TCGv_i32))
4565 {
4566 TCGv_i32 tmp;
4567
4568 if (gen_trap_ifnofpu(dc)) {
4569 return true;
4570 }
4571
4572 tmp = gen_load_fpr_F(dc, a->rs);
4573 func(tmp, tmp);
4574 gen_store_fpr_F(dc, a->rd, tmp);
4575 return advance_pc(dc);
4576 }
4577
4578 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4579 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4580 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4581 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4582 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4583
4584 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4585 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4586 {
4587 TCGv_i32 tmp;
4588
4589 if (gen_trap_ifnofpu(dc)) {
4590 return true;
4591 }
4592
4593 gen_op_clear_ieee_excp_and_FTT();
4594 tmp = gen_load_fpr_F(dc, a->rs);
4595 func(tmp, tcg_env, tmp);
4596 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4597 gen_store_fpr_F(dc, a->rd, tmp);
4598 return advance_pc(dc);
4599 }
4600
4601 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4602 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4603 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4604
4605 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4606 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4607 {
4608 TCGv_i32 dst;
4609 TCGv_i64 src;
4610
4611 if (gen_trap_ifnofpu(dc)) {
4612 return true;
4613 }
4614
4615 gen_op_clear_ieee_excp_and_FTT();
4616 dst = gen_dest_fpr_F(dc);
4617 src = gen_load_fpr_D(dc, a->rs);
4618 func(dst, tcg_env, src);
4619 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4620 gen_store_fpr_F(dc, a->rd, dst);
4621 return advance_pc(dc);
4622 }
4623
4624 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4625 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4626 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4627
4628 static bool do_dd(DisasContext *dc, arg_r_r *a,
4629 void (*func)(TCGv_i64, TCGv_i64))
4630 {
4631 TCGv_i64 dst, src;
4632
4633 if (gen_trap_ifnofpu(dc)) {
4634 return true;
4635 }
4636
4637 dst = gen_dest_fpr_D(dc, a->rd);
4638 src = gen_load_fpr_D(dc, a->rs);
4639 func(dst, src);
4640 gen_store_fpr_D(dc, a->rd, dst);
4641 return advance_pc(dc);
4642 }
4643
4644 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4645 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4646 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4647 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4648 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4649
4650 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4651 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4652 {
4653 TCGv_i64 dst, src;
4654
4655 if (gen_trap_ifnofpu(dc)) {
4656 return true;
4657 }
4658
4659 gen_op_clear_ieee_excp_and_FTT();
4660 dst = gen_dest_fpr_D(dc, a->rd);
4661 src = gen_load_fpr_D(dc, a->rs);
4662 func(dst, tcg_env, src);
4663 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4664 gen_store_fpr_D(dc, a->rd, dst);
4665 return advance_pc(dc);
4666 }
4667
4668 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4669 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4670 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4671
4672 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4673 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4674 {
4675 TCGv_i64 dst;
4676 TCGv_i32 src;
4677
4678 if (gen_trap_ifnofpu(dc)) {
4679 return true;
4680 }
4681
4682 gen_op_clear_ieee_excp_and_FTT();
4683 dst = gen_dest_fpr_D(dc, a->rd);
4684 src = gen_load_fpr_F(dc, a->rs);
4685 func(dst, tcg_env, src);
4686 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4687 gen_store_fpr_D(dc, a->rd, dst);
4688 return advance_pc(dc);
4689 }
4690
4691 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4692 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4693 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4694
4695 static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
4696 {
4697 int rd, rs;
4698
4699 if (!avail_64(dc)) {
4700 return false;
4701 }
4702 if (gen_trap_ifnofpu(dc)) {
4703 return true;
4704 }
4705 if (gen_trap_float128(dc)) {
4706 return true;
4707 }
4708
4709 gen_op_clear_ieee_excp_and_FTT();
4710 rd = QFPREG(a->rd);
4711 rs = QFPREG(a->rs);
4712 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
4713 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
4714 gen_update_fprs_dirty(dc, rd);
4715 return advance_pc(dc);
4716 }
4717
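/*
 * Float128 operands are exchanged with helpers via the QT0/QT1
 * scratch registers in CPUSPARCState, not as TCG values.
 */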
4718 static bool do_qq(DisasContext *dc, arg_r_r *a,
4719 void (*func)(TCGv_env))
4720 {
4721 if (gen_trap_ifnofpu(dc)) {
4722 return true;
4723 }
4724 if (gen_trap_float128(dc)) {
4725 return true;
4726 }
4727
4728 gen_op_clear_ieee_excp_and_FTT();
4729 gen_op_load_fpr_QT1(QFPREG(a->rs));
4730 func(tcg_env);
4731 gen_op_store_QT0_fpr(QFPREG(a->rd));
4732 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4733 return advance_pc(dc);
4734 }
4735
4736 TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
4737 TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
4738
4739 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4740 void (*func)(TCGv_env))
4741 {
4742 if (gen_trap_ifnofpu(dc)) {
4743 return true;
4744 }
4745 if (gen_trap_float128(dc)) {
4746 return true;
4747 }
4748
4749 gen_op_clear_ieee_excp_and_FTT();
4750 gen_op_load_fpr_QT1(QFPREG(a->rs));
4751 func(tcg_env);
4752 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4753 gen_op_store_QT0_fpr(QFPREG(a->rd));
4754 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4755 return advance_pc(dc);
4756 }
4757
4758 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4759
4760 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4761 void (*func)(TCGv_i32, TCGv_env))
4762 {
4763 TCGv_i32 dst;
4764
4765 if (gen_trap_ifnofpu(dc)) {
4766 return true;
4767 }
4768 if (gen_trap_float128(dc)) {
4769 return true;
4770 }
4771
4772 gen_op_clear_ieee_excp_and_FTT();
4773 gen_op_load_fpr_QT1(QFPREG(a->rs));
4774 dst = gen_dest_fpr_F(dc);
4775 func(dst, tcg_env);
4776 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4777 gen_store_fpr_F(dc, a->rd, dst);
4778 return advance_pc(dc);
4779 }
4780
4781 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4782 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4783
4784 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4785 void (*func)(TCGv_i64, TCGv_env))
4786 {
4787 TCGv_i64 dst;
4788
4789 if (gen_trap_ifnofpu(dc)) {
4790 return true;
4791 }
4792 if (gen_trap_float128(dc)) {
4793 return true;
4794 }
4795
4796 gen_op_clear_ieee_excp_and_FTT();
4797 gen_op_load_fpr_QT1(QFPREG(a->rs));
4798 dst = gen_dest_fpr_D(dc, a->rd);
4799 func(dst, tcg_env);
4800 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4801 gen_store_fpr_D(dc, a->rd, dst);
4802 return advance_pc(dc);
4803 }
4804
4805 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4806 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4807
4808 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4809 void (*func)(TCGv_env, TCGv_i32))
4810 {
4811 TCGv_i32 src;
4812
4813 if (gen_trap_ifnofpu(dc)) {
4814 return true;
4815 }
4816 if (gen_trap_float128(dc)) {
4817 return true;
4818 }
4819
4820 gen_op_clear_ieee_excp_and_FTT();
4821 src = gen_load_fpr_F(dc, a->rs);
4822 func(tcg_env, src);
4823 gen_op_store_QT0_fpr(QFPREG(a->rd));
4824 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4825 return advance_pc(dc);
4826 }
4827
4828 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4829 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4830
4831 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4832 void (*func)(TCGv_env, TCGv_i64))
4833 {
4834 TCGv_i64 src;
4835
4836 if (gen_trap_ifnofpu(dc)) {
4837 return true;
4838 }
4839 if (gen_trap_float128(dc)) {
4840 return true;
4841 }
4842
4843 gen_op_clear_ieee_excp_and_FTT();
4844 src = gen_load_fpr_D(dc, a->rs);
4845 func(tcg_env, src);
4846 gen_op_store_QT0_fpr(QFPREG(a->rd));
4847 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4848 return advance_pc(dc);
4849 }
4850
4851 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4852 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4853
4854 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4855 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4856 {
4857 TCGv_i32 src1, src2;
4858
4859 if (gen_trap_ifnofpu(dc)) {
4860 return true;
4861 }
4862
4863 src1 = gen_load_fpr_F(dc, a->rs1);
4864 src2 = gen_load_fpr_F(dc, a->rs2);
4865 func(src1, src1, src2);
4866 gen_store_fpr_F(dc, a->rd, src1);
4867 return advance_pc(dc);
4868 }
4869
4870 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4871 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4872 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4873 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4874 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4875 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4876 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4877 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4878 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4879 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4880 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4881 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4882
4883 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4884 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4885 {
4886 TCGv_i32 src1, src2;
4887
4888 if (gen_trap_ifnofpu(dc)) {
4889 return true;
4890 }
4891
4892 gen_op_clear_ieee_excp_and_FTT();
4893 src1 = gen_load_fpr_F(dc, a->rs1);
4894 src2 = gen_load_fpr_F(dc, a->rs2);
4895 func(src1, tcg_env, src1, src2);
4896 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4897 gen_store_fpr_F(dc, a->rd, src1);
4898 return advance_pc(dc);
4899 }
4900
4901 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4902 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4903 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4904 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4905
4906 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4907 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4908 {
4909 TCGv_i64 dst, src1, src2;
4910
4911 if (gen_trap_ifnofpu(dc)) {
4912 return true;
4913 }
4914
4915 dst = gen_dest_fpr_D(dc, a->rd);
4916 src1 = gen_load_fpr_D(dc, a->rs1);
4917 src2 = gen_load_fpr_D(dc, a->rs2);
4918 func(dst, src1, src2);
4919 gen_store_fpr_D(dc, a->rd, dst);
4920 return advance_pc(dc);
4921 }
4922
4923 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4924 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4925 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4926 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4927 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4928 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4929 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4930 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4931 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4932
4933 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4934 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4935 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4936 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4937 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4938 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4939 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4940 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4941 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4942 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4943 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4944 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4945
4946 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4947 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4948 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4949
4950 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4951 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4952 {
4953 TCGv_i64 dst, src1, src2;
4954
4955 if (gen_trap_ifnofpu(dc)) {
4956 return true;
4957 }
4958
4959 gen_op_clear_ieee_excp_and_FTT();
4960 dst = gen_dest_fpr_D(dc, a->rd);
4961 src1 = gen_load_fpr_D(dc, a->rs1);
4962 src2 = gen_load_fpr_D(dc, a->rs2);
4963 func(dst, tcg_env, src1, src2);
4964 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4965 gen_store_fpr_D(dc, a->rd, dst);
4966 return advance_pc(dc);
4967 }
4968
4969 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4970 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4971 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4972 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4973
4974 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4975 {
4976 TCGv_i64 dst;
4977 TCGv_i32 src1, src2;
4978
4979 if (gen_trap_ifnofpu(dc)) {
4980 return true;
4981 }
4982 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4983 return raise_unimpfpop(dc);
4984 }
4985
4986 gen_op_clear_ieee_excp_and_FTT();
4987 dst = gen_dest_fpr_D(dc, a->rd);
4988 src1 = gen_load_fpr_F(dc, a->rs1);
4989 src2 = gen_load_fpr_F(dc, a->rs2);
4990 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4991 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4992 gen_store_fpr_D(dc, a->rd, dst);
4993 return advance_pc(dc);
4994 }
4995
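/*
 * rd is both source and destination: PDIST accumulates the sum of
 * absolute byte differences into the existing rd value.
 */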
4996 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4997 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4998 {
4999 TCGv_i64 dst, src0, src1, src2;
5000
5001 if (gen_trap_ifnofpu(dc)) {
5002 return true;
5003 }
5004
5005 dst = gen_dest_fpr_D(dc, a->rd);
5006 src0 = gen_load_fpr_D(dc, a->rd);
5007 src1 = gen_load_fpr_D(dc, a->rs1);
5008 src2 = gen_load_fpr_D(dc, a->rs2);
5009 func(dst, src0, src1, src2);
5010 gen_store_fpr_D(dc, a->rd, dst);
5011 return advance_pc(dc);
5012 }
5013
5014 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5015
5016 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5017 void (*func)(TCGv_env))
5018 {
5019 if (gen_trap_ifnofpu(dc)) {
5020 return true;
5021 }
5022 if (gen_trap_float128(dc)) {
5023 return true;
5024 }
5025
5026 gen_op_clear_ieee_excp_and_FTT();
5027 gen_op_load_fpr_QT0(QFPREG(a->rs1));
5028 gen_op_load_fpr_QT1(QFPREG(a->rs2));
5029 func(tcg_env);
5030 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5031 gen_op_store_QT0_fpr(QFPREG(a->rd));
5032 gen_update_fprs_dirty(dc, QFPREG(a->rd));
5033 return advance_pc(dc);
5034 }
5035
5036 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5037 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5038 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5039 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5040
5041 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5042 {
5043 TCGv_i64 src1, src2;
5044
5045 if (gen_trap_ifnofpu(dc)) {
5046 return true;
5047 }
5048 if (gen_trap_float128(dc)) {
5049 return true;
5050 }
5051
5052 gen_op_clear_ieee_excp_and_FTT();
5053 src1 = gen_load_fpr_D(dc, a->rs1);
5054 src2 = gen_load_fpr_D(dc, a->rs2);
5055 gen_helper_fdmulq(tcg_env, src1, src2);
5056 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5057 gen_op_store_QT0_fpr(QFPREG(a->rd));
5058 gen_update_fprs_dirty(dc, QFPREG(a->rd));
5059 return advance_pc(dc);
5060 }
5061
5062 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5063 void (*func)(DisasContext *, DisasCompare *, int, int))
5064 {
5065 DisasCompare cmp;
5066
5067 if (gen_trap_ifnofpu(dc)) {
5068 return true;
5069 }
5070 if (is_128 && gen_trap_float128(dc)) {
5071 return true;
5072 }
5073
5074 gen_op_clear_ieee_excp_and_FTT();
5075 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
5076 func(dc, &cmp, a->rd, a->rs2);
5077 return advance_pc(dc);
5078 }
5079
5080 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5081 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5082 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5083
5084 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5085 void (*func)(DisasContext *, DisasCompare *, int, int))
5086 {
5087 DisasCompare cmp;
5088
5089 if (gen_trap_ifnofpu(dc)) {
5090 return true;
5091 }
5092 if (is_128 && gen_trap_float128(dc)) {
5093 return true;
5094 }
5095
5096 gen_op_clear_ieee_excp_and_FTT();
5097 gen_compare(&cmp, a->cc, a->cond, dc);
5098 func(dc, &cmp, a->rd, a->rs2);
5099 return advance_pc(dc);
5100 }
5101
5102 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5103 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5104 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5105
5106 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5107 void (*func)(DisasContext *, DisasCompare *, int, int))
5108 {
5109 DisasCompare cmp;
5110
5111 if (gen_trap_ifnofpu(dc)) {
5112 return true;
5113 }
5114 if (is_128 && gen_trap_float128(dc)) {
5115 return true;
5116 }
5117
5118 gen_op_clear_ieee_excp_and_FTT();
5119 gen_fcompare(&cmp, a->cc, a->cond);
5120 func(dc, &cmp, a->rd, a->rs2);
5121 return advance_pc(dc);
5122 }
5123
5124 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5125 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5126 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5127
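/*
 * FCMP/FCMPE set the condition field selected by 'cc'.  Only v9
 * provides %fcc1-%fcc3, so a non-zero cc is rejected on sparc32.
 */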
5128 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5129 {
5130 TCGv_i32 src1, src2;
5131
5132 if (avail_32(dc) && a->cc != 0) {
5133 return false;
5134 }
5135 if (gen_trap_ifnofpu(dc)) {
5136 return true;
5137 }
5138
5139 gen_op_clear_ieee_excp_and_FTT();
5140 src1 = gen_load_fpr_F(dc, a->rs1);
5141 src2 = gen_load_fpr_F(dc, a->rs2);
5142 if (e) {
5143 gen_op_fcmpes(a->cc, src1, src2);
5144 } else {
5145 gen_op_fcmps(a->cc, src1, src2);
5146 }
5147 return advance_pc(dc);
5148 }
5149
5150 TRANS(FCMPs, ALL, do_fcmps, a, false)
5151 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5152
5153 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5154 {
5155 TCGv_i64 src1, src2;
5156
5157 if (avail_32(dc) && a->cc != 0) {
5158 return false;
5159 }
5160 if (gen_trap_ifnofpu(dc)) {
5161 return true;
5162 }
5163
5164 gen_op_clear_ieee_excp_and_FTT();
5165 src1 = gen_load_fpr_D(dc, a->rs1);
5166 src2 = gen_load_fpr_D(dc, a->rs2);
5167 if (e) {
5168 gen_op_fcmped(a->cc, src1, src2);
5169 } else {
5170 gen_op_fcmpd(a->cc, src1, src2);
5171 }
5172 return advance_pc(dc);
5173 }
5174
5175 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5176 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5177
5178 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5179 {
5180 if (avail_32(dc) && a->cc != 0) {
5181 return false;
5182 }
5183 if (gen_trap_ifnofpu(dc)) {
5184 return true;
5185 }
5186 if (gen_trap_float128(dc)) {
5187 return true;
5188 }
5189
5190 gen_op_clear_ieee_excp_and_FTT();
5191 gen_op_load_fpr_QT0(QFPREG(a->rs1));
5192 gen_op_load_fpr_QT1(QFPREG(a->rs2));
5193 if (e) {
5194 gen_op_fcmpeq(a->cc);
5195 } else {
5196 gen_op_fcmpq(a->cc);
5197 }
5198 return advance_pc(dc);
5199 }
5200
5201 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5202 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5203
5204 #define CHECK_IU_FEATURE(dc, FEATURE) \
5205 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5206 goto illegal_insn;
5207 #define CHECK_FPU_FEATURE(dc, FEATURE) \
5208 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5209 goto nfpu_insn;
5210
5211 /* before an instruction, dc->pc must be static */
5212 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
5213 {
5214 unsigned int opc = GET_FIELD(insn, 0, 1);
5215
5216 switch (opc) {
5217 case 0:
5218 goto illegal_insn; /* in decodetree */
5219 case 1:
5220 g_assert_not_reached(); /* in decodetree */
5221 case 2: /* FPU & Logical Operations */
5222 {
5223 unsigned int xop = GET_FIELD(insn, 7, 12);
5224
5225 if (xop == 0x34) { /* FPU Operations */
5226 goto illegal_insn; /* in decodetree */
5227 } else if (xop == 0x35) { /* FPU Operations */
5228 goto illegal_insn; /* in decodetree */
5229 } else if (xop == 0x36) {
5230 #ifdef TARGET_SPARC64
5231 /* VIS */
5232 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
5233 TCGv_i32 cpu_dst_32;
5234 TCGv cpu_dst = tcg_temp_new();
5235 int opf = GET_FIELD_SP(insn, 5, 13);
5236 int rs1 = GET_FIELD(insn, 13, 17);
5237 int rs2 = GET_FIELD(insn, 27, 31);
5238 int rd = GET_FIELD(insn, 2, 6);
5239
5240 if (gen_trap_ifnofpu(dc)) {
5241 goto jmp_insn;
5242 }
5243
5244 switch (opf) {
5245 case 0x000: /* VIS I edge8cc */
5246 case 0x001: /* VIS II edge8n */
5247 case 0x002: /* VIS I edge8lcc */
5248 case 0x003: /* VIS II edge8ln */
5249 case 0x004: /* VIS I edge16cc */
5250 case 0x005: /* VIS II edge16n */
5251 case 0x006: /* VIS I edge16lcc */
5252 case 0x007: /* VIS II edge16ln */
5253 case 0x008: /* VIS I edge32cc */
5254 case 0x009: /* VIS II edge32n */
5255 case 0x00a: /* VIS I edge32lcc */
5256 case 0x00b: /* VIS II edge32ln */
5257 case 0x010: /* VIS I array8 */
5258 case 0x012: /* VIS I array16 */
5259 case 0x014: /* VIS I array32 */
5260 case 0x018: /* VIS I alignaddr */
5261 case 0x01a: /* VIS I alignaddrl */
5262 case 0x019: /* VIS II bmask */
5263 case 0x067: /* VIS I fnot2s */
5264 case 0x06b: /* VIS I fnot1s */
5265 case 0x075: /* VIS I fsrc1s */
5266 case 0x079: /* VIS I fsrc2s */
5267 case 0x066: /* VIS I fnot2 */
5268 case 0x06a: /* VIS I fnot1 */
5269 case 0x074: /* VIS I fsrc1 */
5270 case 0x078: /* VIS I fsrc2 */
5271 case 0x051: /* VIS I fpadd16s */
5272 case 0x053: /* VIS I fpadd32s */
5273 case 0x055: /* VIS I fpsub16s */
5274 case 0x057: /* VIS I fpsub32s */
5275 case 0x063: /* VIS I fnors */
5276 case 0x065: /* VIS I fandnot2s */
5277 case 0x069: /* VIS I fandnot1s */
5278 case 0x06d: /* VIS I fxors */
5279 case 0x06f: /* VIS I fnands */
5280 case 0x071: /* VIS I fands */
5281 case 0x073: /* VIS I fxnors */
5282 case 0x077: /* VIS I fornot2s */
5283 case 0x07b: /* VIS I fornot1s */
5284 case 0x07d: /* VIS I fors */
5285 case 0x050: /* VIS I fpadd16 */
5286 case 0x052: /* VIS I fpadd32 */
5287 case 0x054: /* VIS I fpsub16 */
5288 case 0x056: /* VIS I fpsub32 */
5289 case 0x062: /* VIS I fnor */
5290 case 0x064: /* VIS I fandnot2 */
5291 case 0x068: /* VIS I fandnot1 */
5292 case 0x06c: /* VIS I fxor */
5293 case 0x06e: /* VIS I fnand */
5294 case 0x070: /* VIS I fand */
5295 case 0x072: /* VIS I fxnor */
5296 case 0x076: /* VIS I fornot2 */
5297 case 0x07a: /* VIS I fornot1 */
5298 case 0x07c: /* VIS I for */
5299 case 0x031: /* VIS I fmul8x16 */
5300 case 0x033: /* VIS I fmul8x16au */
5301 case 0x035: /* VIS I fmul8x16al */
5302 case 0x036: /* VIS I fmul8sux16 */
5303 case 0x037: /* VIS I fmul8ulx16 */
5304 case 0x038: /* VIS I fmuld8sux16 */
5305 case 0x039: /* VIS I fmuld8ulx16 */
5306 case 0x04b: /* VIS I fpmerge */
5307 case 0x04d: /* VIS I fexpand */
5308 case 0x03e: /* VIS I pdist */
5309 case 0x03a: /* VIS I fpack32 */
5310 case 0x048: /* VIS I faligndata */
5311 case 0x04c: /* VIS II bshuffle */
5312 g_assert_not_reached(); /* in decodetree */
5313 case 0x020: /* VIS I fcmple16 */
5314 CHECK_FPU_FEATURE(dc, VIS1);
5315 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5316 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5317 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
5318 gen_store_gpr(dc, rd, cpu_dst);
5319 break;
5320 case 0x022: /* VIS I fcmpne16 */
5321 CHECK_FPU_FEATURE(dc, VIS1);
5322 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5323 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5324 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
5325 gen_store_gpr(dc, rd, cpu_dst);
5326 break;
5327 case 0x024: /* VIS I fcmple32 */
5328 CHECK_FPU_FEATURE(dc, VIS1);
5329 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5330 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5331 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
5332 gen_store_gpr(dc, rd, cpu_dst);
5333 break;
5334 case 0x026: /* VIS I fcmpne32 */
5335 CHECK_FPU_FEATURE(dc, VIS1);
5336 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5337 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5338 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
5339 gen_store_gpr(dc, rd, cpu_dst);
5340 break;
5341 case 0x028: /* VIS I fcmpgt16 */
5342 CHECK_FPU_FEATURE(dc, VIS1);
5343 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5344 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5345 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
5346 gen_store_gpr(dc, rd, cpu_dst);
5347 break;
5348 case 0x02a: /* VIS I fcmpeq16 */
5349 CHECK_FPU_FEATURE(dc, VIS1);
5350 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5351 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5352 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
5353 gen_store_gpr(dc, rd, cpu_dst);
5354 break;
5355 case 0x02c: /* VIS I fcmpgt32 */
5356 CHECK_FPU_FEATURE(dc, VIS1);
5357 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5358 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5359 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
5360 gen_store_gpr(dc, rd, cpu_dst);
5361 break;
5362 case 0x02e: /* VIS I fcmpeq32 */
5363 CHECK_FPU_FEATURE(dc, VIS1);
5364 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5365 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5366 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
5367 gen_store_gpr(dc, rd, cpu_dst);
5368 break;
5369 case 0x03b: /* VIS I fpack16 */
5370 CHECK_FPU_FEATURE(dc, VIS1);
5371 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5372 cpu_dst_32 = gen_dest_fpr_F(dc);
5373 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5374 gen_store_fpr_F(dc, rd, cpu_dst_32);
5375 break;
5376 case 0x03d: /* VIS I fpackfix */
5377 CHECK_FPU_FEATURE(dc, VIS1);
5378 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5379 cpu_dst_32 = gen_dest_fpr_F(dc);
5380 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5381 gen_store_fpr_F(dc, rd, cpu_dst_32);
5382 break;
5383 case 0x060: /* VIS I fzero */
5384 CHECK_FPU_FEATURE(dc, VIS1);
5385 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5386 tcg_gen_movi_i64(cpu_dst_64, 0);
5387 gen_store_fpr_D(dc, rd, cpu_dst_64);
5388 break;
5389 case 0x061: /* VIS I fzeros */
5390 CHECK_FPU_FEATURE(dc, VIS1);
5391 cpu_dst_32 = gen_dest_fpr_F(dc);
5392 tcg_gen_movi_i32(cpu_dst_32, 0);
5393 gen_store_fpr_F(dc, rd, cpu_dst_32);
5394 break;
5395 case 0x07e: /* VIS I fone */
5396 CHECK_FPU_FEATURE(dc, VIS1);
5397 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5398 tcg_gen_movi_i64(cpu_dst_64, -1);
5399 gen_store_fpr_D(dc, rd, cpu_dst_64);
5400 break;
5401 case 0x07f: /* VIS I fones */
5402 CHECK_FPU_FEATURE(dc, VIS1);
5403 cpu_dst_32 = gen_dest_fpr_F(dc);
5404 tcg_gen_movi_i32(cpu_dst_32, -1);
5405 gen_store_fpr_F(dc, rd, cpu_dst_32);
5406 break;
5407 case 0x080: /* VIS I shutdown */
5408 case 0x081: /* VIS II siam */
5409 // XXX: unimplemented; treat as an illegal instruction for now
5410 goto illegal_insn;
5411 default:
5412 goto illegal_insn;
5413 }
5414 #endif
5415 } else {
5416 goto illegal_insn; /* in decodetree */
5417 }
5418 }
5419 break;
5420 case 3: /* load/store instructions */
5421 goto illegal_insn; /* in decodetree */
5422 }
5423 advance_pc(dc);
5424 #ifdef TARGET_SPARC64
5425 jmp_insn:
5426 #endif
5427 return;
5428 illegal_insn:
5429 gen_exception(dc, TT_ILL_INSN);
5430 return;
5431 #ifdef TARGET_SPARC64
5432 nfpu_insn:
5433 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5434 return;
5435 #endif
5436 }
5437
5438 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5439 {
5440 DisasContext *dc = container_of(dcbase, DisasContext, base);
5441 CPUSPARCState *env = cpu_env(cs);
5442 int bound;
5443
5444 dc->pc = dc->base.pc_first;
5445 dc->npc = (target_ulong)dc->base.tb->cs_base;
5446 dc->cc_op = CC_OP_DYNAMIC;
5447 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5448 dc->def = &env->def;
5449 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5450 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5451 #ifndef CONFIG_USER_ONLY
5452 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5453 #endif
5454 #ifdef TARGET_SPARC64
5455 dc->fprs_dirty = 0;
5456 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5457 #ifndef CONFIG_USER_ONLY
5458 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5459 #endif
5460 #endif
5461 /*
5462 * if we reach a page boundary, we stop generation so that the
5463 * PC of a TT_TFAULT exception is always in the right page
5464 */
5465 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5466 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5467 }
5468
5469 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5470 {
5471 }
5472
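/*
 * npc values with the low bits set encode a dynamic state (JUMP_PC,
 * DYNAMIC_PC or DYNAMIC_PC_LOOKUP) rather than an address; fold
 * them to a canonical value before recording the insn start.
 */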
5473 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5474 {
5475 DisasContext *dc = container_of(dcbase, DisasContext, base);
5476 target_ulong npc = dc->npc;
5477
5478 if (npc & 3) {
5479 switch (npc) {
5480 case JUMP_PC:
5481 assert(dc->jump_pc[1] == dc->pc + 4);
5482 npc = dc->jump_pc[0] | JUMP_PC;
5483 break;
5484 case DYNAMIC_PC:
5485 case DYNAMIC_PC_LOOKUP:
5486 npc = DYNAMIC_PC;
5487 break;
5488 default:
5489 g_assert_not_reached();
5490 }
5491 }
5492 tcg_gen_insn_start(dc->pc, npc);
5493 }
5494
5495 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5496 {
5497 DisasContext *dc = container_of(dcbase, DisasContext, base);
5498 CPUSPARCState *env = cpu_env(cs);
5499 unsigned int insn;
5500
5501 insn = translator_ldl(env, &dc->base, dc->pc);
5502 dc->base.pc_next += 4;
5503
5504 if (!decode(dc, insn)) {
5505 disas_sparc_legacy(dc, insn);
5506 }
5507
5508 if (dc->base.is_jmp == DISAS_NORETURN) {
5509 return;
5510 }
5511 if (dc->pc != dc->base.pc_next) {
5512 dc->base.is_jmp = DISAS_TOO_MANY;
5513 }
5514 }
5515
5516 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5517 {
5518 DisasContext *dc = container_of(dcbase, DisasContext, base);
5519 DisasDelayException *e, *e_next;
5520 bool may_lookup;
5521
5522 switch (dc->base.is_jmp) {
5523 case DISAS_NEXT:
5524 case DISAS_TOO_MANY:
5525 if (((dc->pc | dc->npc) & 3) == 0) {
5526 /* static PC and NPC: we can use direct chaining */
5527 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5528 break;
5529 }
5530
5531 may_lookup = true;
5532 if (dc->pc & 3) {
5533 switch (dc->pc) {
5534 case DYNAMIC_PC_LOOKUP:
5535 break;
5536 case DYNAMIC_PC:
5537 may_lookup = false;
5538 break;
5539 default:
5540 g_assert_not_reached();
5541 }
5542 } else {
5543 tcg_gen_movi_tl(cpu_pc, dc->pc);
5544 }
5545
5546 if (dc->npc & 3) {
5547 switch (dc->npc) {
5548 case JUMP_PC:
5549 gen_generic_branch(dc);
5550 break;
5551 case DYNAMIC_PC:
5552 may_lookup = false;
5553 break;
5554 case DYNAMIC_PC_LOOKUP:
5555 break;
5556 default:
5557 g_assert_not_reached();
5558 }
5559 } else {
5560 tcg_gen_movi_tl(cpu_npc, dc->npc);
5561 }
5562 if (may_lookup) {
5563 tcg_gen_lookup_and_goto_ptr();
5564 } else {
5565 tcg_gen_exit_tb(NULL, 0);
5566 }
5567 break;
5568
5569 case DISAS_NORETURN:
5570 break;
5571
5572 case DISAS_EXIT:
5573 /* Exit TB */
5574 save_state(dc);
5575 tcg_gen_exit_tb(NULL, 0);
5576 break;
5577
5578 default:
5579 g_assert_not_reached();
5580 }
5581
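/* Emit out-of-line code for any exceptions deferred within the TB. */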
5582 for (e = dc->delay_excp_list; e ; e = e_next) {
5583 gen_set_label(e->lab);
5584
5585 tcg_gen_movi_tl(cpu_pc, e->pc);
5586 if (e->npc % 4 == 0) {
5587 tcg_gen_movi_tl(cpu_npc, e->npc);
5588 }
5589 gen_helper_raise_exception(tcg_env, e->excp);
5590
5591 e_next = e->next;
5592 g_free(e);
5593 }
5594 }
5595
5596 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5597 CPUState *cpu, FILE *logfile)
5598 {
5599 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5600 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5601 }
5602
5603 static const TranslatorOps sparc_tr_ops = {
5604 .init_disas_context = sparc_tr_init_disas_context,
5605 .tb_start = sparc_tr_tb_start,
5606 .insn_start = sparc_tr_insn_start,
5607 .translate_insn = sparc_tr_translate_insn,
5608 .tb_stop = sparc_tr_tb_stop,
5609 .disas_log = sparc_tr_disas_log,
5610 };
5611
5612 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5613 target_ulong pc, void *host_pc)
5614 {
5615 DisasContext dc = {};
5616
5617 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5618 }
5619
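/*
 * Allocate TCG globals for the architectural state.  %g0 reads as
 * zero, so cpu_regs[0] stays NULL; window registers o/l/i are
 * accessed indirectly through regwptr.
 */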
5620 void sparc_tcg_init(void)
5621 {
5622 static const char gregnames[32][4] = {
5623 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5624 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5625 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5626 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5627 };
5628 static const char fregnames[32][4] = {
5629 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5630 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5631 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5632 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5633 };
5634
5635 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5636 #ifdef TARGET_SPARC64
5637 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5638 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5639 #endif
5640 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5641 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5642 };
5643
5644 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5645 #ifdef TARGET_SPARC64
5646 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5647 #endif
5648 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5649 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5650 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5651 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5652 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5653 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5654 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5655 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5656 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5657 };
5658
5659 unsigned int i;
5660
5661 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5662 offsetof(CPUSPARCState, regwptr),
5663 "regwptr");
5664
5665 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5666 *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5667 }
5668
5669 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5670 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5671 }
5672
5673 cpu_regs[0] = NULL;
5674 for (i = 1; i < 8; ++i) {
5675 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5676 offsetof(CPUSPARCState, gregs[i]),
5677 gregnames[i]);
5678 }
5679
5680 for (i = 8; i < 32; ++i) {
5681 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5682 (i - 8) * sizeof(target_ulong),
5683 gregnames[i]);
5684 }
5685
5686 for (i = 0; i < TARGET_DPREGS; i++) {
5687 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5688 offsetof(CPUSPARCState, fpr[i]),
5689 fregnames[i]);
5690 }
5691 }
5692
5693 void sparc_restore_state_to_opc(CPUState *cs,
5694 const TranslationBlock *tb,
5695 const uint64_t *data)
5696 {
5697 SPARCCPU *cpu = SPARC_CPU(cs);
5698 CPUSPARCState *env = &cpu->env;
5699 target_ulong pc = data[0];
5700 target_ulong npc = data[1];
5701
5702 env->pc = pc;
5703 if (npc == DYNAMIC_PC) {
5704 /* dynamic NPC: already stored */
5705 } else if (npc & JUMP_PC) {
5706 /* jump PC: use 'cond' and the jump targets of the translation */
5707 if (env->cond) {
5708 env->npc = npc & ~3;
5709 } else {
5710 env->npc = pc + 4;
5711 }
5712 } else {
5713 env->npc = npc;
5714 }
5715 }