/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

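/*
 * Decode the 5-bit double/quad register fields: on V9, bit 0 of the
 * encoding supplies bit 5 of the register number, making %d32..%d62
 * (and the corresponding quad registers) reachable.
 */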
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
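/*
 * Each TCGv_i64 in cpu_fpr[] backs a pair of 32-bit registers: the
 * even-numbered single-precision register sits in the high 32 bits,
 * the odd-numbered one in the low 32 bits.
 */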
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

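/*
 * On sparc64, truncate addresses to 32 bits when PSTATE.AM is in
 * effect (always for the 32-bit ABI); a no-op elsewhere.
 */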
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

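/*
 * %g0 always reads as zero and discards writes: loading it returns a
 * fresh zeroed temporary, and gen_dest_gpr hands back a scratch whose
 * final value is simply dropped.
 */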
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}

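/*
 * MULScc: one step of the V8 iterative multiply.  Bit 0 of %y selects
 * whether src2 participates in the add; src1's bit 0 shifts into the
 * top of %y, and N ^ V shifts into the top of the right-shifted src1.
 */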
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
       if (!(env->y & 1))
           T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

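/*
 * Queue an exception to be raised on a conditional path: callers
 * branch to the returned label, and the code that actually raises
 * EXCP with the saved pc/npc is emitted later, from delay_excp_list,
 * when the TB is finalized.
 */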
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

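/*
 * Map a SPARC integer condition code to a DisasCompare.  When the
 * tracked cc_op shows the flags came from a subcc or a logic op, the
 * saved operands are compared directly instead of materializing the
 * flags via gen_helper_compute_psr().
 */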
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

// Inverted logic
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

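/*
 * Wrappers for translating FPops: the suffix letters give the operand
 * widths (F = 32-bit, D = 64-bit, Q = 128-bit via the QT0/QT1 slots),
 * and the gen_ne_* variants skip the IEEE exception check.
 */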
static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}

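/* LDSTUB: atomically exchange the addressed byte with 0xff. */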
static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_constant_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
}

/* asi moves */
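/*
 * Classify an ASI up front so that the common cases can be emitted as
 * inline memory operations instead of calls to the out-of-line
 * ld_asi/st_asi helpers.
 */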
1903 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1904 typedef enum {
1905 GET_ASI_HELPER,
1906 GET_ASI_EXCP,
1907 GET_ASI_DIRECT,
1908 GET_ASI_DTWINX,
1909 GET_ASI_BLOCK,
1910 GET_ASI_SHORT,
1911 GET_ASI_BCOPY,
1912 GET_ASI_BFILL,
1913 } ASIType;
1914
1915 typedef struct {
1916 ASIType type;
1917 int asi;
1918 int mem_idx;
1919 MemOp memop;
1920 } DisasASI;
1921
1922 static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
1923 {
1924 int asi = GET_FIELD(insn, 19, 26);
1925 ASIType type = GET_ASI_HELPER;
1926 int mem_idx = dc->mem_idx;
1927
1928 #ifndef TARGET_SPARC64
1929 /* Before v9, all asis are immediate and privileged. */
1930 if (IS_IMM) {
1931 gen_exception(dc, TT_ILL_INSN);
1932 type = GET_ASI_EXCP;
1933 } else if (supervisor(dc)
1934 /* Note that LEON accepts ASI_USERDATA in user mode, for
1935 use with CASA. Also note that previous versions of
1936 QEMU allowed (and old versions of gcc emitted) ASI_P
1937 for LEON, which is incorrect. */
1938 || (asi == ASI_USERDATA
1939 && (dc->def->features & CPU_FEATURE_CASA))) {
1940 switch (asi) {
1941 case ASI_USERDATA: /* User data access */
1942 mem_idx = MMU_USER_IDX;
1943 type = GET_ASI_DIRECT;
1944 break;
1945 case ASI_KERNELDATA: /* Supervisor data access */
1946 mem_idx = MMU_KERNEL_IDX;
1947 type = GET_ASI_DIRECT;
1948 break;
1949 case ASI_M_BYPASS: /* MMU passthrough */
1950 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1951 mem_idx = MMU_PHYS_IDX;
1952 type = GET_ASI_DIRECT;
1953 break;
1954 case ASI_M_BCOPY: /* Block copy, sta access */
1955 mem_idx = MMU_KERNEL_IDX;
1956 type = GET_ASI_BCOPY;
1957 break;
1958 case ASI_M_BFILL: /* Block fill, stda access */
1959 mem_idx = MMU_KERNEL_IDX;
1960 type = GET_ASI_BFILL;
1961 break;
1962 }
1963
1964 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1965 * permissions check in get_physical_address(..).
1966 */
1967 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1968 } else {
1969 gen_exception(dc, TT_PRIV_INSN);
1970 type = GET_ASI_EXCP;
1971 }
1972 #else
1973 if (IS_IMM) {
1974 asi = dc->asi;
1975 }
1976 /* With v9, all asis below 0x80 are privileged. */
1977 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1978 down that bit into DisasContext. For the moment that's ok,
1979 since the direct implementations below doesn't have any ASIs
1980 in the restricted [0x30, 0x7f] range, and the check will be
1981 done properly in the helper. */
1982 if (!supervisor(dc) && asi < 0x80) {
1983 gen_exception(dc, TT_PRIV_ACT);
1984 type = GET_ASI_EXCP;
1985 } else {
1986 switch (asi) {
1987 case ASI_REAL: /* Bypass */
1988 case ASI_REAL_IO: /* Bypass, non-cacheable */
1989 case ASI_REAL_L: /* Bypass LE */
1990 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1991 case ASI_TWINX_REAL: /* Real address, twinx */
1992 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1993 case ASI_QUAD_LDD_PHYS:
1994 case ASI_QUAD_LDD_PHYS_L:
1995 mem_idx = MMU_PHYS_IDX;
1996 break;
1997 case ASI_N: /* Nucleus */
1998 case ASI_NL: /* Nucleus LE */
1999 case ASI_TWINX_N:
2000 case ASI_TWINX_NL:
2001 case ASI_NUCLEUS_QUAD_LDD:
2002 case ASI_NUCLEUS_QUAD_LDD_L:
2003 if (hypervisor(dc)) {
2004 mem_idx = MMU_PHYS_IDX;
2005 } else {
2006 mem_idx = MMU_NUCLEUS_IDX;
2007 }
2008 break;
2009 case ASI_AIUP: /* As if user primary */
2010 case ASI_AIUPL: /* As if user primary LE */
2011 case ASI_TWINX_AIUP:
2012 case ASI_TWINX_AIUP_L:
2013 case ASI_BLK_AIUP_4V:
2014 case ASI_BLK_AIUP_L_4V:
2015 case ASI_BLK_AIUP:
2016 case ASI_BLK_AIUPL:
2017 mem_idx = MMU_USER_IDX;
2018 break;
2019 case ASI_AIUS: /* As if user secondary */
2020 case ASI_AIUSL: /* As if user secondary LE */
2021 case ASI_TWINX_AIUS:
2022 case ASI_TWINX_AIUS_L:
2023 case ASI_BLK_AIUS_4V:
2024 case ASI_BLK_AIUS_L_4V:
2025 case ASI_BLK_AIUS:
2026 case ASI_BLK_AIUSL:
2027 mem_idx = MMU_USER_SECONDARY_IDX;
2028 break;
2029 case ASI_S: /* Secondary */
2030 case ASI_SL: /* Secondary LE */
2031 case ASI_TWINX_S:
2032 case ASI_TWINX_SL:
2033 case ASI_BLK_COMMIT_S:
2034 case ASI_BLK_S:
2035 case ASI_BLK_SL:
2036 case ASI_FL8_S:
2037 case ASI_FL8_SL:
2038 case ASI_FL16_S:
2039 case ASI_FL16_SL:
2040 if (mem_idx == MMU_USER_IDX) {
2041 mem_idx = MMU_USER_SECONDARY_IDX;
2042 } else if (mem_idx == MMU_KERNEL_IDX) {
2043 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2044 }
2045 break;
2046 case ASI_P: /* Primary */
2047 case ASI_PL: /* Primary LE */
2048 case ASI_TWINX_P:
2049 case ASI_TWINX_PL:
2050 case ASI_BLK_COMMIT_P:
2051 case ASI_BLK_P:
2052 case ASI_BLK_PL:
2053 case ASI_FL8_P:
2054 case ASI_FL8_PL:
2055 case ASI_FL16_P:
2056 case ASI_FL16_PL:
2057 break;
2058 }
2059 switch (asi) {
2060 case ASI_REAL:
2061 case ASI_REAL_IO:
2062 case ASI_REAL_L:
2063 case ASI_REAL_IO_L:
2064 case ASI_N:
2065 case ASI_NL:
2066 case ASI_AIUP:
2067 case ASI_AIUPL:
2068 case ASI_AIUS:
2069 case ASI_AIUSL:
2070 case ASI_S:
2071 case ASI_SL:
2072 case ASI_P:
2073 case ASI_PL:
2074 type = GET_ASI_DIRECT;
2075 break;
2076 case ASI_TWINX_REAL:
2077 case ASI_TWINX_REAL_L:
2078 case ASI_TWINX_N:
2079 case ASI_TWINX_NL:
2080 case ASI_TWINX_AIUP:
2081 case ASI_TWINX_AIUP_L:
2082 case ASI_TWINX_AIUS:
2083 case ASI_TWINX_AIUS_L:
2084 case ASI_TWINX_P:
2085 case ASI_TWINX_PL:
2086 case ASI_TWINX_S:
2087 case ASI_TWINX_SL:
2088 case ASI_QUAD_LDD_PHYS:
2089 case ASI_QUAD_LDD_PHYS_L:
2090 case ASI_NUCLEUS_QUAD_LDD:
2091 case ASI_NUCLEUS_QUAD_LDD_L:
2092 type = GET_ASI_DTWINX;
2093 break;
2094 case ASI_BLK_COMMIT_P:
2095 case ASI_BLK_COMMIT_S:
2096 case ASI_BLK_AIUP_4V:
2097 case ASI_BLK_AIUP_L_4V:
2098 case ASI_BLK_AIUP:
2099 case ASI_BLK_AIUPL:
2100 case ASI_BLK_AIUS_4V:
2101 case ASI_BLK_AIUS_L_4V:
2102 case ASI_BLK_AIUS:
2103 case ASI_BLK_AIUSL:
2104 case ASI_BLK_S:
2105 case ASI_BLK_SL:
2106 case ASI_BLK_P:
2107 case ASI_BLK_PL:
2108 type = GET_ASI_BLOCK;
2109 break;
2110 case ASI_FL8_S:
2111 case ASI_FL8_SL:
2112 case ASI_FL8_P:
2113 case ASI_FL8_PL:
2114 memop = MO_UB;
2115 type = GET_ASI_SHORT;
2116 break;
2117 case ASI_FL16_S:
2118 case ASI_FL16_SL:
2119 case ASI_FL16_P:
2120 case ASI_FL16_PL:
2121 memop = MO_TEUW;
2122 type = GET_ASI_SHORT;
2123 break;
2124 }
2125 /* The little-endian asis all have bit 3 set. */
2126 if (asi & 8) {
2127 memop ^= MO_BSWAP;
2128 }
2129 }
2130 #endif
2131
2132 return (DisasASI){ type, asi, mem_idx, memop };
2133 }
2134
2135 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2136 int insn, MemOp memop)
2137 {
2138 DisasASI da = get_asi(dc, insn, memop);
2139
2140 switch (da.type) {
2141 case GET_ASI_EXCP:
2142 break;
2143 case GET_ASI_DTWINX: /* Reserved for ldda. */
2144 gen_exception(dc, TT_ILL_INSN);
2145 break;
2146 case GET_ASI_DIRECT:
2147 gen_address_mask(dc, addr);
2148 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
2149 break;
2150 default:
2151 {
2152 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2153 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2154
2155 save_state(dc);
2156 #ifdef TARGET_SPARC64
2157 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
2158 #else
2159 {
2160 TCGv_i64 t64 = tcg_temp_new_i64();
2161 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2162 tcg_gen_trunc_i64_tl(dst, t64);
2163 }
2164 #endif
2165 }
2166 break;
2167 }
2168 }
2169
2170 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2171 int insn, MemOp memop)
2172 {
2173 DisasASI da = get_asi(dc, insn, memop);
2174
2175 switch (da.type) {
2176 case GET_ASI_EXCP:
2177 break;
2178 case GET_ASI_DTWINX: /* Reserved for stda. */
2179 #ifndef TARGET_SPARC64
2180 gen_exception(dc, TT_ILL_INSN);
2181 break;
2182 #else
2183 if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2184 /* Pre-OpenSPARC CPUs don't have these. */
2185 gen_exception(dc, TT_ILL_INSN);
2186 return;
2187 }
2188 /* In OpenSPARC T1+ CPUs, TWINX ASIs used in store
2189 * instructions are ST_BLKINIT_ ASIs. */
2190 #endif
2191 /* fall through */
2192 case GET_ASI_DIRECT:
2193 gen_address_mask(dc, addr);
2194 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
2195 break;
2196 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2197 case GET_ASI_BCOPY:
2198 /* Copy 32 bytes from the address in SRC to ADDR. */
2199 /* ??? The original qemu code suggests 4-byte alignment, dropping
2200 the low bits, but the only place I can see this used is in the
2201 Linux kernel with 32-byte alignment, which would make more sense
2202 as a cacheline-style operation. */
2203 {
2204 TCGv saddr = tcg_temp_new();
2205 TCGv daddr = tcg_temp_new();
2206 TCGv four = tcg_constant_tl(4);
2207 TCGv_i32 tmp = tcg_temp_new_i32();
2208 int i;
2209
2210 tcg_gen_andi_tl(saddr, src, -4);
2211 tcg_gen_andi_tl(daddr, addr, -4);
2212 for (i = 0; i < 32; i += 4) {
2213 /* Since the loads and stores are paired, allow the
2214 copy to happen in the host endianness. */
2215 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2216 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2217 tcg_gen_add_tl(saddr, saddr, four);
2218 tcg_gen_add_tl(daddr, daddr, four);
2219 }
2220 }
2221 break;
2222 #endif
2223 default:
2224 {
2225 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2226 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2227
2228 save_state(dc);
2229 #ifdef TARGET_SPARC64
2230 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
2231 #else
2232 {
2233 TCGv_i64 t64 = tcg_temp_new_i64();
2234 tcg_gen_extu_tl_i64(t64, src);
2235 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2236 }
2237 #endif
2238
2239 /* A write to a TLB register may alter page maps. End the TB. */
2240 dc->npc = DYNAMIC_PC;
2241 }
2242 break;
2243 }
2244 }
2245
2246 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2247 TCGv addr, int insn)
2248 {
2249 DisasASI da = get_asi(dc, insn, MO_TEUL);
2250
2251 switch (da.type) {
2252 case GET_ASI_EXCP:
2253 break;
2254 case GET_ASI_DIRECT:
2255 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2256 break;
2257 default:
2258 /* ??? Should be DAE_invalid_asi. */
2259 gen_exception(dc, TT_DATA_ACCESS);
2260 break;
2261 }
2262 }
2263
2264 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2265 int insn, int rd)
2266 {
2267 DisasASI da = get_asi(dc, insn, MO_TEUL);
2268 TCGv oldv;
2269
2270 switch (da.type) {
2271 case GET_ASI_EXCP:
2272 return;
2273 case GET_ASI_DIRECT:
2274 oldv = tcg_temp_new();
2275 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2276 da.mem_idx, da.memop | MO_ALIGN);
2277 gen_store_gpr(dc, rd, oldv);
2278 break;
2279 default:
2280 /* ??? Should be DAE_invalid_asi. */
2281 gen_exception(dc, TT_DATA_ACCESS);
2282 break;
2283 }
2284 }
2285
2286 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2287 {
2288 DisasASI da = get_asi(dc, insn, MO_UB);
2289
2290 switch (da.type) {
2291 case GET_ASI_EXCP:
2292 break;
2293 case GET_ASI_DIRECT:
2294 gen_ldstub(dc, dst, addr, da.mem_idx);
2295 break;
2296 default:
2297 /* ??? In theory, this should raise DAE_invalid_asi.
2298 But the SS-20 ROMs do ldstuba [%l0] #ASI_M_CTL, %o1. */
2299 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2300 gen_helper_exit_atomic(tcg_env);
2301 } else {
2302 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2303 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
2304 TCGv_i64 s64, t64;
2305
2306 save_state(dc);
2307 t64 = tcg_temp_new_i64();
2308 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2309
2310 s64 = tcg_constant_i64(0xff);
2311 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
2312
2313 tcg_gen_trunc_i64_tl(dst, t64);
2314
2315 /* End the TB. */
2316 dc->npc = DYNAMIC_PC;
2317 }
2318 break;
2319 }
2320 }
2321 #endif
2322
2323 #ifdef TARGET_SPARC64
2324 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2325 int insn, int size, int rd)
2326 {
2327 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2328 TCGv_i32 d32;
2329 TCGv_i64 d64;
2330
2331 switch (da.type) {
2332 case GET_ASI_EXCP:
2333 break;
2334
2335 case GET_ASI_DIRECT:
2336 gen_address_mask(dc, addr);
2337 switch (size) {
2338 case 4:
2339 d32 = gen_dest_fpr_F(dc);
2340 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2341 gen_store_fpr_F(dc, rd, d32);
2342 break;
2343 case 8:
2344 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2345 da.memop | MO_ALIGN_4);
2346 break;
2347 case 16:
2348 d64 = tcg_temp_new_i64();
2349 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2350 tcg_gen_addi_tl(addr, addr, 8);
2351 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da.mem_idx,
2352 da.memop | MO_ALIGN_4);
2353 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2354 break;
2355 default:
2356 g_assert_not_reached();
2357 }
2358 break;
2359
2360 case GET_ASI_BLOCK:
2361 /* Valid for lddfa on aligned registers only. */
2362 if (size == 8 && (rd & 7) == 0) {
2363 MemOp memop;
2364 TCGv eight;
2365 int i;
2366
2367 gen_address_mask(dc, addr);
2368
2369 /* The first operation checks required alignment. */
2370 memop = da.memop | MO_ALIGN_64;
2371 eight = tcg_constant_tl(8);
2372 for (i = 0; ; ++i) {
2373 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2374 da.mem_idx, memop);
2375 if (i == 7) {
2376 break;
2377 }
2378 tcg_gen_add_tl(addr, addr, eight);
2379 memop = da.memop;
2380 }
2381 } else {
2382 gen_exception(dc, TT_ILL_INSN);
2383 }
2384 break;
2385
2386 case GET_ASI_SHORT:
2387 /* Valid for lddfa only. */
2388 if (size == 8) {
2389 gen_address_mask(dc, addr);
2390 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2391 da.memop | MO_ALIGN);
2392 } else {
2393 gen_exception(dc, TT_ILL_INSN);
2394 }
2395 break;
2396
2397 default:
2398 {
2399 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2400 TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
2401
2402 save_state(dc);
2403 /* According to the table in the UA2011 manual, the only
2404 other ASIs that are valid for ldfa/lddfa/ldqfa are
2405 the NO_FAULT ASIs. We still need a helper for these,
2406 but we can just use the integer ASI helper for them. */
2407 switch (size) {
2408 case 4:
2409 d64 = tcg_temp_new_i64();
2410 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2411 d32 = gen_dest_fpr_F(dc);
2412 tcg_gen_extrl_i64_i32(d32, d64);
2413 gen_store_fpr_F(dc, rd, d32);
2414 break;
2415 case 8:
2416 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
2417 break;
2418 case 16:
2419 d64 = tcg_temp_new_i64();
2420 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2421 tcg_gen_addi_tl(addr, addr, 8);
2422 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr, r_asi, r_mop);
2423 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2424 break;
2425 default:
2426 g_assert_not_reached();
2427 }
2428 }
2429 break;
2430 }
2431 }
2432
2433 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2434 int insn, int size, int rd)
2435 {
2436 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2437 TCGv_i32 d32;
2438
2439 switch (da.type) {
2440 case GET_ASI_EXCP:
2441 break;
2442
2443 case GET_ASI_DIRECT:
2444 gen_address_mask(dc, addr);
2445 switch (size) {
2446 case 4:
2447 d32 = gen_load_fpr_F(dc, rd);
2448 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2449 break;
2450 case 8:
2451 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2452 da.memop | MO_ALIGN_4);
2453 break;
2454 case 16:
2455 /* Only 4-byte alignment required. However, it is legal for the
2456 cpu to signal the alignment fault, and the OS trap handler is
2457 required to fix it up. Requiring 16-byte alignment here avoids
2458 having to probe the second page before performing the first
2459 write. */
2460 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2461 da.memop | MO_ALIGN_16);
2462 tcg_gen_addi_tl(addr, addr, 8);
2463 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr, da.mem_idx, da.memop);
2464 break;
2465 default:
2466 g_assert_not_reached();
2467 }
2468 break;
2469
2470 case GET_ASI_BLOCK:
2471 /* Valid for stdfa on aligned registers only. */
2472 if (size == 8 && (rd & 7) == 0) {
2473 MemOp memop;
2474 TCGv eight;
2475 int i;
2476
2477 gen_address_mask(dc, addr);
2478
2479 /* The first operation checks required alignment. */
2480 memop = da.memop | MO_ALIGN_64;
2481 eight = tcg_constant_tl(8);
2482 for (i = 0; ; ++i) {
2483 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2484 da.mem_idx, memop);
2485 if (i == 7) {
2486 break;
2487 }
2488 tcg_gen_add_tl(addr, addr, eight);
2489 memop = da.memop;
2490 }
2491 } else {
2492 gen_exception(dc, TT_ILL_INSN);
2493 }
2494 break;
2495
2496 case GET_ASI_SHORT:
2497 /* Valid for stdfa only. */
2498 if (size == 8) {
2499 gen_address_mask(dc, addr);
2500 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2501 da.memop | MO_ALIGN);
2502 } else {
2503 gen_exception(dc, TT_ILL_INSN);
2504 }
2505 break;
2506
2507 default:
2508 /* According to the table in the UA2011 manual, the only
2509 other ASIs that are valid for stfa/stdfa/stqfa are
2510 the PST* ASIs, which aren't currently handled. */
2511 gen_exception(dc, TT_ILL_INSN);
2512 break;
2513 }
2514 }
2515
2516 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2517 {
2518 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2519 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2520 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2521
2522 switch (da.type) {
2523 case GET_ASI_EXCP:
2524 return;
2525
2526 case GET_ASI_DTWINX:
2527 gen_address_mask(dc, addr);
2528 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2529 tcg_gen_addi_tl(addr, addr, 8);
2530 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2531 break;
2532
2533 case GET_ASI_DIRECT:
2534 {
2535 TCGv_i64 tmp = tcg_temp_new_i64();
2536
2537 gen_address_mask(dc, addr);
2538 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
2539
2540 /* Note that LE ldda acts as if each 32-bit register
2541 result is byte swapped. Having just performed one
2542 64-bit bswap, we now need to swap the writebacks. */
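/* Concretely: in the big-endian case the pair receives
   (hi, lo) = (tmp[63:32], tmp[31:0]), matching the two words
   as they sit in memory; the LE case swaps the extraction so
   each 32-bit half looks as if it had been fetched by its own
   little-endian 32-bit load. */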
2543 if ((da.memop & MO_BSWAP) == MO_TE) {
2544 tcg_gen_extr32_i64(lo, hi, tmp);
2545 } else {
2546 tcg_gen_extr32_i64(hi, lo, tmp);
2547 }
2548 }
2549 break;
2550
2551 default:
2552 /* ??? In theory we've handled all of the ASIs that are valid
2553 for ldda, and this should raise DAE_invalid_asi. However,
2554 real hardware allows others. This can be seen with e.g.
2555 FreeBSD 10.3 wrt ASI_IC_TAG. */
2556 {
2557 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2558 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2559 TCGv_i64 tmp = tcg_temp_new_i64();
2560
2561 save_state(dc);
2562 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2563
2564 /* See above. */
2565 if ((da.memop & MO_BSWAP) == MO_TE) {
2566 tcg_gen_extr32_i64(lo, hi, tmp);
2567 } else {
2568 tcg_gen_extr32_i64(hi, lo, tmp);
2569 }
2570 }
2571 break;
2572 }
2573
2574 gen_store_gpr(dc, rd, hi);
2575 gen_store_gpr(dc, rd + 1, lo);
2576 }
2577
2578 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2579 int insn, int rd)
2580 {
2581 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2582 TCGv lo = gen_load_gpr(dc, rd + 1);
2583
2584 switch (da.type) {
2585 case GET_ASI_EXCP:
2586 break;
2587
2588 case GET_ASI_DTWINX:
2589 gen_address_mask(dc, addr);
2590 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2591 tcg_gen_addi_tl(addr, addr, 8);
2592 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2593 break;
2594
2595 case GET_ASI_DIRECT:
2596 {
2597 TCGv_i64 t64 = tcg_temp_new_i64();
2598
2599 /* Note that LE stda acts as if each 32-bit register result is
2600 byte swapped. We will perform one 64-bit LE store, so now
2601 we must swap the order of the construction. */
2602 if ((da.memop & MO_BSWAP) == MO_TE) {
2603 tcg_gen_concat32_i64(t64, lo, hi);
2604 } else {
2605 tcg_gen_concat32_i64(t64, hi, lo);
2606 }
2607 gen_address_mask(dc, addr);
2608 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2609 }
2610 break;
2611
2612 default:
2613 /* ??? In theory we've handled all of the ASIs that are valid
2614 for stda, and this should raise DAE_invalid_asi. */
2615 {
2616 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2617 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2618 TCGv_i64 t64 = tcg_temp_new_i64();
2619
2620 /* See above. */
2621 if ((da.memop & MO_BSWAP) == MO_TE) {
2622 tcg_gen_concat32_i64(t64, lo, hi);
2623 } else {
2624 tcg_gen_concat32_i64(t64, hi, lo);
2625 }
2626
2627 save_state(dc);
2628 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2629 }
2630 break;
2631 }
2632 }
2633
2634 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2635 int insn, int rd)
2636 {
2637 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2638 TCGv oldv;
2639
2640 switch (da.type) {
2641 case GET_ASI_EXCP:
2642 return;
2643 case GET_ASI_DIRECT:
2644 oldv = tcg_temp_new();
2645 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2646 da.mem_idx, da.memop | MO_ALIGN);
2647 gen_store_gpr(dc, rd, oldv);
2648 break;
2649 default:
2650 /* ??? Should be DAE_invalid_asi. */
2651 gen_exception(dc, TT_DATA_ACCESS);
2652 break;
2653 }
2654 }
2655
2656 #elif !defined(CONFIG_USER_ONLY)
2657 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2658 {
2659 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2660 whereby "rd + 1" elicits "error: array subscript is above array".
2661 Since we have already asserted that rd is even, the semantics
2662 are unchanged. */
2663 TCGv lo = gen_dest_gpr(dc, rd | 1);
2664 TCGv hi = gen_dest_gpr(dc, rd);
2665 TCGv_i64 t64 = tcg_temp_new_i64();
2666 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2667
2668 switch (da.type) {
2669 case GET_ASI_EXCP:
2670 return;
2671 case GET_ASI_DIRECT:
2672 gen_address_mask(dc, addr);
2673 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2674 break;
2675 default:
2676 {
2677 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2678 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2679
2680 save_state(dc);
2681 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2682 }
2683 break;
2684 }
2685
2686 tcg_gen_extr_i64_i32(lo, hi, t64);
2687 gen_store_gpr(dc, rd | 1, lo);
2688 gen_store_gpr(dc, rd, hi);
2689 }
2690
2691 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2692 int insn, int rd)
2693 {
2694 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2695 TCGv lo = gen_load_gpr(dc, rd + 1);
2696 TCGv_i64 t64 = tcg_temp_new_i64();
2697
2698 tcg_gen_concat_tl_i64(t64, lo, hi);
2699
2700 switch (da.type) {
2701 case GET_ASI_EXCP:
2702 break;
2703 case GET_ASI_DIRECT:
2704 gen_address_mask(dc, addr);
2705 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2706 break;
2707 case GET_ASI_BFILL:
2708 /* Store 32 bytes of T64 to ADDR. */
2709 /* ??? The original qemu code suggests 8-byte alignment, dropping
2710 the low bits, but the only place I can see this used is in the
2711 Linux kernel with 32-byte alignment, which would make more sense
2712 as a cacheline-style operation. */
2713 {
2714 TCGv d_addr = tcg_temp_new();
2715 TCGv eight = tcg_constant_tl(8);
2716 int i;
2717
2718 tcg_gen_andi_tl(d_addr, addr, -8);
2719 for (i = 0; i < 32; i += 8) {
2720 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2721 tcg_gen_add_tl(d_addr, d_addr, eight);
2722 }
2723 }
2724 break;
2725 default:
2726 {
2727 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2728 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2729
2730 save_state(dc);
2731 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2732 }
2733 break;
2734 }
2735 }
2736 #endif
2737
2738 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2739 {
2740 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2741 return gen_load_gpr(dc, rs1);
2742 }
2743
2744 #ifdef TARGET_SPARC64
2745 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2746 {
2747 TCGv_i32 c32, zero, dst, s1, s2;
2748
2749 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2750 or fold the comparison down to 32 bits and use movcond_i32. Choose
2751 the latter. */
2752 c32 = tcg_temp_new_i32();
2753 if (cmp->is_bool) {
2754 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2755 } else {
2756 TCGv_i64 c64 = tcg_temp_new_i64();
2757 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2758 tcg_gen_extrl_i64_i32(c32, c64);
2759 }
2760
2761 s1 = gen_load_fpr_F(dc, rs);
2762 s2 = gen_load_fpr_F(dc, rd);
2763 dst = gen_dest_fpr_F(dc);
2764 zero = tcg_constant_i32(0);
2765
2766 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2767
2768 gen_store_fpr_F(dc, rd, dst);
2769 }
2770
2771 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2772 {
2773 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2774 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2775 gen_load_fpr_D(dc, rs),
2776 gen_load_fpr_D(dc, rd));
2777 gen_store_fpr_D(dc, rd, dst);
2778 }
2779
2780 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2781 {
2782 int qd = QFPREG(rd);
2783 int qs = QFPREG(rs);
2784
2785 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2786 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2787 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2788 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2789
2790 gen_update_fprs_dirty(dc, qd);
2791 }
2792
2793 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2794 {
2795 TCGv_i32 r_tl = tcg_temp_new_i32();
2796
2797 /* load env->tl into r_tl */
2798 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2799
2800 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2801 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2802
2803 /* calculate offset to current trap state from env->ts, reuse r_tl */
2804 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2805 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2806
2807 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2808 {
2809 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2810 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2811 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2812 }
2813 }
2814
2815 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2816 int width, bool cc, bool left)
2817 {
2818 TCGv lo1, lo2;
2819 uint64_t amask, tabl, tabr;
2820 int shift, imask, omask;
2821
2822 if (cc) {
2823 tcg_gen_mov_tl(cpu_cc_src, s1);
2824 tcg_gen_mov_tl(cpu_cc_src2, s2);
2825 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2826 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2827 dc->cc_op = CC_OP_SUB;
2828 }
2829
2830 /* Theory of operation: there are two tables, left and right (not to
2831 be confused with the left and right versions of the opcode). These
2832 are indexed by the low 3 bits of the inputs. To make things "easy",
2833 these tables are loaded into two constants, TABL and TABR below.
2834 The operation index = (input & imask) << shift calculates the index
2835 into the constant, while val = (table >> index) & omask calculates
2836 the value we're looking for. */
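/* A mechanical example for width 8 with "left": if the low three
   bits of s1 are 2, then index = 2 << 3 = 16 and
   (0x80c0e0f0f8fcfeffULL >> 16) & 0xff = 0xfc, i.e. the third
   byte of TABL counted from the least significant end. */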
2837 switch (width) {
2838 case 8:
2839 imask = 0x7;
2840 shift = 3;
2841 omask = 0xff;
2842 if (left) {
2843 tabl = 0x80c0e0f0f8fcfeffULL;
2844 tabr = 0xff7f3f1f0f070301ULL;
2845 } else {
2846 tabl = 0x0103070f1f3f7fffULL;
2847 tabr = 0xfffefcf8f0e0c080ULL;
2848 }
2849 break;
2850 case 16:
2851 imask = 0x6;
2852 shift = 1;
2853 omask = 0xf;
2854 if (left) {
2855 tabl = 0x8cef;
2856 tabr = 0xf731;
2857 } else {
2858 tabl = 0x137f;
2859 tabr = 0xfec8;
2860 }
2861 break;
2862 case 32:
2863 imask = 0x4;
2864 shift = 0;
2865 omask = 0x3;
2866 if (left) {
2867 tabl = (2 << 2) | 3;
2868 tabr = (3 << 2) | 1;
2869 } else {
2870 tabl = (1 << 2) | 3;
2871 tabr = (3 << 2) | 2;
2872 }
2873 break;
2874 default:
2875 g_assert_not_reached();
2876 }
2877
2878 lo1 = tcg_temp_new();
2879 lo2 = tcg_temp_new();
2880 tcg_gen_andi_tl(lo1, s1, imask);
2881 tcg_gen_andi_tl(lo2, s2, imask);
2882 tcg_gen_shli_tl(lo1, lo1, shift);
2883 tcg_gen_shli_tl(lo2, lo2, shift);
2884
2885 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
2886 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
2887 tcg_gen_andi_tl(lo1, lo1, omask);
2888 tcg_gen_andi_tl(lo2, lo2, omask);
2889
2890 amask = -8;
2891 if (AM_CHECK(dc)) {
2892 amask &= 0xffffffffULL;
2893 }
2894 tcg_gen_andi_tl(s1, s1, amask);
2895 tcg_gen_andi_tl(s2, s2, amask);
2896
2897 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2898 tcg_gen_and_tl(lo2, lo2, lo1);
2899 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
2900 }
2901
2902 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2903 {
2904 TCGv tmp = tcg_temp_new();
2905
2906 tcg_gen_add_tl(tmp, s1, s2);
2907 tcg_gen_andi_tl(dst, tmp, -8);
2908 if (left) {
2909 tcg_gen_neg_tl(tmp, tmp);
2910 }
2911 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2912 }
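/* E.g. for the non-"left" form: with s1 + s2 = 0x1003, dst
   receives 0x1000 and the GSR alignment field (bits 2:0)
   receives 3, which gen_faligndata below then uses to stitch
   the two dword loads back together. */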
2913
2914 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2915 {
2916 TCGv t1, t2, shift;
2917
2918 t1 = tcg_temp_new();
2919 t2 = tcg_temp_new();
2920 shift = tcg_temp_new();
2921
2922 tcg_gen_andi_tl(shift, gsr, 7);
2923 tcg_gen_shli_tl(shift, shift, 3);
2924 tcg_gen_shl_tl(t1, s1, shift);
2925
2926 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2927 shift of (up to 63) followed by a constant shift of 1. */
2928 tcg_gen_xori_tl(shift, shift, 63);
2929 tcg_gen_shr_tl(t2, s2, shift);
2930 tcg_gen_shri_tl(t2, t2, 1);
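    /* Combined effect: t2 = s2 >> (64 - shift), computed safely even
       when shift == 0 -- the xori then yields 63 and the two right
       shifts amount to a shift by 64, so t2 is 0 and dst = t1 = s1. */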
2931
2932 tcg_gen_or_tl(dst, t1, t2);
2933 }
2934 #endif
2935
2936 /* Include the auto-generated decoder. */
2937 #include "decode-insns.c.inc"
2938
2939 #define TRANS(NAME, AVAIL, FUNC, ...) \
2940 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2941 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
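/*
 * For instance, TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
 * below expands to:
 *
 *   static bool trans_WRY(DisasContext *dc, arg_WRY *a)
 *   { return avail_ALL(dc) && do_wr_special(dc, a, true, do_wry); }
 *
 * so instructions whose availability test fails fall back to the
 * decoder's "return false" path and are treated as illegal.
 */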
2942
2943 #define avail_ALL(C) true
2944 #ifdef TARGET_SPARC64
2945 # define avail_32(C) false
2946 # define avail_ASR17(C) false
2947 # define avail_DIV(C) true
2948 # define avail_MUL(C) true
2949 # define avail_POWERDOWN(C) false
2950 # define avail_64(C) true
2951 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2952 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2953 #else
2954 # define avail_32(C) true
2955 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2956 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2957 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2958 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2959 # define avail_64(C) false
2960 # define avail_GL(C) false
2961 # define avail_HYPV(C) false
2962 #endif
2963
2964 /* Default case for non-jump instructions; the symbolic npc values are all unaligned, which is what the npc & 3 test below detects. */
2965 static bool advance_pc(DisasContext *dc)
2966 {
2967 if (dc->npc & 3) {
2968 switch (dc->npc) {
2969 case DYNAMIC_PC:
2970 case DYNAMIC_PC_LOOKUP:
2971 dc->pc = dc->npc;
2972 gen_op_next_insn();
2973 break;
2974 case JUMP_PC:
2975 /* we can do a static jump */
2976 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
2977 dc->base.is_jmp = DISAS_NORETURN;
2978 break;
2979 default:
2980 g_assert_not_reached();
2981 }
2982 } else {
2983 dc->pc = dc->npc;
2984 dc->npc = dc->npc + 4;
2985 }
2986 return true;
2987 }
2988
2989 /*
2990 * Major opcodes 00 and 01 -- branches, call, and sethi
2991 */
2992
2993 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2994 {
2995 if (annul) {
2996 dc->pc = dc->npc + 4;
2997 dc->npc = dc->pc + 4;
2998 } else {
2999 dc->pc = dc->npc;
3000 dc->npc = dc->pc + 4;
3001 }
3002 return true;
3003 }
3004
3005 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
3006 target_ulong dest)
3007 {
3008 if (annul) {
3009 dc->pc = dest;
3010 dc->npc = dest + 4;
3011 } else {
3012 dc->pc = dc->npc;
3013 dc->npc = dest;
3014 tcg_gen_mov_tl(cpu_pc, cpu_npc);
3015 }
3016 return true;
3017 }
3018
3019 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
3020 bool annul, target_ulong dest)
3021 {
3022 target_ulong npc = dc->npc;
3023
3024 if (annul) {
3025 TCGLabel *l1 = gen_new_label();
3026
3027 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
3028 gen_goto_tb(dc, 0, npc, dest);
3029 gen_set_label(l1);
3030 gen_goto_tb(dc, 1, npc + 4, npc + 8);
3031
3032 dc->base.is_jmp = DISAS_NORETURN;
3033 } else {
3034 if (npc & 3) {
3035 switch (npc) {
3036 case DYNAMIC_PC:
3037 case DYNAMIC_PC_LOOKUP:
3038 tcg_gen_mov_tl(cpu_pc, cpu_npc);
3039 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
3040 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
3041 cmp->c1, cmp->c2,
3042 tcg_constant_tl(dest), cpu_npc);
3043 dc->pc = npc;
3044 break;
3045 default:
3046 g_assert_not_reached();
3047 }
3048 } else {
3049 dc->pc = npc;
3050 dc->jump_pc[0] = dest;
3051 dc->jump_pc[1] = npc + 4;
3052 dc->npc = JUMP_PC;
3053 if (cmp->is_bool) {
3054 tcg_gen_mov_tl(cpu_cond, cmp->c1);
3055 } else {
3056 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
3057 }
3058 }
3059 }
3060 return true;
3061 }
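/*
 * To illustrate the annul case above (standard SPARC B*cc
 * semantics: the delay slot of an annulled conditional branch is
 * executed only when the branch is taken): for "bne,a dest", the
 * inverted brcond jumps to l1 when the condition fails, skipping
 * both the delay slot and the target via (npc + 4, npc + 8);
 * when it holds, the fall-through goto_tb keeps (npc, dest), so
 * the delay instruction at npc still runs before dest.
 */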
3062
3063 static bool raise_priv(DisasContext *dc)
3064 {
3065 gen_exception(dc, TT_PRIV_INSN);
3066 return true;
3067 }
3068
3069 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
3070 {
3071 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3072 DisasCompare cmp;
3073
3074 switch (a->cond) {
3075 case 0x0:
3076 return advance_jump_uncond_never(dc, a->a);
3077 case 0x8:
3078 return advance_jump_uncond_always(dc, a->a, target);
3079 default:
3080 flush_cond(dc);
3081
3082 gen_compare(&cmp, a->cc, a->cond, dc);
3083 return advance_jump_cond(dc, &cmp, a->a, target);
3084 }
3085 }
3086
3087 TRANS(Bicc, ALL, do_bpcc, a)
3088 TRANS(BPcc, 64, do_bpcc, a)
3089
3090 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
3091 {
3092 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3093 DisasCompare cmp;
3094
3095 if (gen_trap_ifnofpu(dc)) {
3096 return true;
3097 }
3098 switch (a->cond) {
3099 case 0x0:
3100 return advance_jump_uncond_never(dc, a->a);
3101 case 0x8:
3102 return advance_jump_uncond_always(dc, a->a, target);
3103 default:
3104 flush_cond(dc);
3105
3106 gen_fcompare(&cmp, a->cc, a->cond);
3107 return advance_jump_cond(dc, &cmp, a->a, target);
3108 }
3109 }
3110
3111 TRANS(FBPfcc, 64, do_fbpfcc, a)
3112 TRANS(FBfcc, ALL, do_fbpfcc, a)
3113
3114 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
3115 {
3116 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3117 DisasCompare cmp;
3118
3119 if (!avail_64(dc)) {
3120 return false;
3121 }
3122 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
3123 return false;
3124 }
3125
3126 flush_cond(dc);
3127 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
3128 return advance_jump_cond(dc, &cmp, a->a, target);
3129 }
3130
3131 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
3132 {
3133 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3134
3135 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
3136 gen_mov_pc_npc(dc);
3137 dc->npc = target;
3138 return true;
3139 }
3140
3141 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
3142 {
3143 /*
3144 * For sparc32, always generate the no-coprocessor exception.
3145 * For sparc64, always generate illegal instruction.
3146 */
3147 #ifdef TARGET_SPARC64
3148 return false;
3149 #else
3150 gen_exception(dc, TT_NCP_INSN);
3151 return true;
3152 #endif
3153 }
3154
3155 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
3156 {
3157 /* Special-case %g0 because that's the canonical nop. */
3158 if (a->rd) {
3159 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
3160 }
3161 return advance_pc(dc);
3162 }
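/*
 * Worked example: "sethi %hi(0x12345678), %o0" carries
 * i = 0x12345678 >> 10 = 0x48d15, so %o0 receives
 * 0x48d15 << 10 = 0x12345400; the low 10 bits are zeroed.
 */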
3163
3164 /*
3165 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3166 */
3167
3168 static bool do_tcc(DisasContext *dc, int cond, int cc,
3169 int rs1, bool imm, int rs2_or_imm)
3170 {
3171 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3172 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3173 DisasCompare cmp;
3174 TCGLabel *lab;
3175 TCGv_i32 trap;
3176
3177 /* Trap never. */
3178 if (cond == 0) {
3179 return advance_pc(dc);
3180 }
3181
3182 /*
3183 * Immediate traps are the most common case. Since this value is
3184 * live across the branch, it really pays to evaluate the constant.
3185 */
3186 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
3187 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
3188 } else {
3189 trap = tcg_temp_new_i32();
3190 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
3191 if (imm) {
3192 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
3193 } else {
3194 TCGv_i32 t2 = tcg_temp_new_i32();
3195 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
3196 tcg_gen_add_i32(trap, trap, t2);
3197 }
3198 tcg_gen_andi_i32(trap, trap, mask);
3199 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3200 }
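    /*
     * For example, the Linux sparc32 syscall trap "ta 0x10" has
     * rs1 == %g0 and an immediate operand, so it folds here to
     * tcg_constant_i32((0x10 & mask) + TT_TRAP) with no run-time
     * arithmetic at all.
     */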
3201
3202 /* Trap always. */
3203 if (cond == 8) {
3204 save_state(dc);
3205 gen_helper_raise_exception(tcg_env, trap);
3206 dc->base.is_jmp = DISAS_NORETURN;
3207 return true;
3208 }
3209
3210 /* Conditional trap. */
3211 flush_cond(dc);
3212 lab = delay_exceptionv(dc, trap);
3213 gen_compare(&cmp, cc, cond, dc);
3214 tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
3215
3216 return advance_pc(dc);
3217 }
3218
3219 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
3220 {
3221 if (avail_32(dc) && a->cc) {
3222 return false;
3223 }
3224 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
3225 }
3226
3227 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
3228 {
3229 if (avail_64(dc)) {
3230 return false;
3231 }
3232 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
3233 }
3234
3235 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
3236 {
3237 if (avail_32(dc)) {
3238 return false;
3239 }
3240 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
3241 }
3242
3243 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
3244 {
3245 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
3246 return advance_pc(dc);
3247 }
3248
3249 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
3250 {
3251 if (avail_32(dc)) {
3252 return false;
3253 }
3254 if (a->mmask) {
3255 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
3256 tcg_gen_mb(a->mmask | TCG_BAR_SC);
3257 }
3258 if (a->cmask) {
3259 /* For #Sync, etc, end the TB to recognize interrupts. */
3260 dc->base.is_jmp = DISAS_EXIT;
3261 }
3262 return advance_pc(dc);
3263 }
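/*
 * Example of the mmask correspondence noted above (assuming the
 * usual TCG_MO_* encoding): "membar #StoreStore" has mmask bit 3
 * set, i.e. 0x8, which is exactly TCG_MO_ST_ST -- the same
 * barrier STBAR emits unconditionally.
 */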
3264
3265 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
3266 TCGv (*func)(DisasContext *, TCGv))
3267 {
3268 if (!priv) {
3269 return raise_priv(dc);
3270 }
3271 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
3272 return advance_pc(dc);
3273 }
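/*
 * Note the callback may ignore the passed-in dst and return some
 * other live TCGv instead -- do_rdy below simply returns the
 * cpu_y global -- and gen_store_gpr() copies whichever value
 * comes back into %rd.
 */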
3274
3275 static TCGv do_rdy(DisasContext *dc, TCGv dst)
3276 {
3277 return cpu_y;
3278 }
3279
3280 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
3281 {
3282 /*
3283 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
3284 * 32-bit cpus like sparcv7, which ignores the rs1 field.
3285 * This pattern matches after all other ASRs, so the Leon3 %asr17 is handled first.
3286 */
3287 if (avail_64(dc) && a->rs1 != 0) {
3288 return false;
3289 }
3290 return do_rd_special(dc, true, a->rd, do_rdy);
3291 }
3292
3293 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
3294 {
3295 uint32_t val;
3296
3297 /*
3298 * TODO: There are many more fields to be filled,
3299 * some of which are writable.
3300 */
3301 val = dc->def->nwindows - 1; /* [4:0] NWIN */
3302 val |= 1 << 8; /* [8] V8 */
3303
3304 return tcg_constant_tl(val);
3305 }
3306
3307 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
3308
3309 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
3310 {
3311 update_psr(dc);
3312 gen_helper_rdccr(dst, tcg_env);
3313 return dst;
3314 }
3315
3316 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
3317
3318 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
3319 {
3320 #ifdef TARGET_SPARC64
3321 return tcg_constant_tl(dc->asi);
3322 #else
3323 qemu_build_not_reached();
3324 #endif
3325 }
3326
3327 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
3328
3329 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
3330 {
3331 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3332
3333 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3334 if (translator_io_start(&dc->base)) {
3335 dc->base.is_jmp = DISAS_EXIT;
3336 }
3337 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3338 tcg_constant_i32(dc->mem_idx));
3339 return dst;
3340 }
3341
3342 /* TODO: non-priv access only allowed when enabled. */
3343 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
3344
3345 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
3346 {
3347 return tcg_constant_tl(address_mask_i(dc, dc->pc));
3348 }
3349
3350 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3351
3352 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
3353 {
3354 tcg_gen_ext_i32_tl(dst, cpu_fprs);
3355 return dst;
3356 }
3357
3358 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
3359
3360 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
3361 {
3362 gen_trap_ifnofpu(dc);
3363 return cpu_gsr;
3364 }
3365
3366 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
3367
3368 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
3369 {
3370 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
3371 return dst;
3372 }
3373
3374 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
3375
3376 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
3377 {
3378 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
3379 return dst;
3380 }
3381
3382 /* TODO: non-priv access only allowed when enabled. */
3383 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3384
3385 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
3386 {
3387 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3388
3389 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3390 if (translator_io_start(&dc->base)) {
3391 dc->base.is_jmp = DISAS_EXIT;
3392 }
3393 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3394 tcg_constant_i32(dc->mem_idx));
3395 return dst;
3396 }
3397
3398 /* TODO: non-priv access only allowed when enabled. */
3399 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3400
3401 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3402 {
3403 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3404 return dst;
3405 }
3406
3407 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3408 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3409
3410 /*
3411 * UltraSPARC-T1 Strand status.
3412 * The HYPV check may not be enough: UA2005 and UA2007 describe
3413 * this ASR as implementation-dependent.
3414 */
3415 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3416 {
3417 return tcg_constant_tl(1);
3418 }
3419
3420 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3421
3422 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3423 {
3424 update_psr(dc);
3425 gen_helper_rdpsr(dst, tcg_env);
3426 return dst;
3427 }
3428
3429 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3430
3431 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3432 {
3433 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3434 return dst;
3435 }
3436
3437 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3438
3439 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3440 {
3441 TCGv_i32 tl = tcg_temp_new_i32();
3442 TCGv_ptr tp = tcg_temp_new_ptr();
3443
3444 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3445 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3446 tcg_gen_shli_i32(tl, tl, 3);
3447 tcg_gen_ext_i32_ptr(tp, tl);
3448 tcg_gen_add_ptr(tp, tp, tcg_env);
3449
3450 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3451 return dst;
3452 }
3453
3454 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3455
3456 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3457 {
3458 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3459 return dst;
3460 }
3461
3462 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3463
3464 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3465 {
3466 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3467 return dst;
3468 }
3469
3470 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3471
3472 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3473 {
3474 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3475 return dst;
3476 }
3477
3478 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3479
3480 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3481 {
3482 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3483 return dst;
3484 }
3485
3486 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3487 do_rdhstick_cmpr)
3488
3489 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3490 {
3491 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3492 return dst;
3493 }
3494
3495 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3496
3497 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3498 {
3499 #ifdef TARGET_SPARC64
3500 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3501
3502 gen_load_trap_state_at_tl(r_tsptr);
3503 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3504 return dst;
3505 #else
3506 qemu_build_not_reached();
3507 #endif
3508 }
3509
3510 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3511
3512 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3513 {
3514 #ifdef TARGET_SPARC64
3515 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3516
3517 gen_load_trap_state_at_tl(r_tsptr);
3518 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3519 return dst;
3520 #else
3521 qemu_build_not_reached();
3522 #endif
3523 }
3524
3525 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3526
3527 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3528 {
3529 #ifdef TARGET_SPARC64
3530 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3531
3532 gen_load_trap_state_at_tl(r_tsptr);
3533 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3534 return dst;
3535 #else
3536 qemu_build_not_reached();
3537 #endif
3538 }
3539
3540 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3541
3542 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3543 {
3544 #ifdef TARGET_SPARC64
3545 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3546
3547 gen_load_trap_state_at_tl(r_tsptr);
3548 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3549 return dst;
3550 #else
3551 qemu_build_not_reached();
3552 #endif
3553 }
3554
3555 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3556 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3557
3558 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3559 {
3560 return cpu_tbr;
3561 }
3562
3563 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3564 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3565
3566 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3567 {
3568 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3569 return dst;
3570 }
3571
3572 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3573
3574 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3575 {
3576 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3577 return dst;
3578 }
3579
3580 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3581
3582 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3583 {
3584 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3585 return dst;
3586 }
3587
3588 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3589
3590 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3591 {
3592 gen_helper_rdcwp(dst, tcg_env);
3593 return dst;
3594 }
3595
3596 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3597
3598 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3599 {
3600 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3601 return dst;
3602 }
3603
3604 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3605
3606 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3607 {
3608 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3609 return dst;
3610 }
3611
3612 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3613 do_rdcanrestore)
3614
3615 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3616 {
3617 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3618 return dst;
3619 }
3620
3621 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3622
3623 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3624 {
3625 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3626 return dst;
3627 }
3628
3629 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3630
3631 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3632 {
3633 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3634 return dst;
3635 }
3636
3637 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3638
3639 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3640 {
3641 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3642 return dst;
3643 }
3644
3645 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3646
3647 /* UA2005 strand status */
3648 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3649 {
3650 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3651 return dst;
3652 }
3653
3654 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3655
3656 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3657 {
3658 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3659 return dst;
3660 }
3661
3662 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3663
3664 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3665 {
3666 if (avail_64(dc)) {
3667 gen_helper_flushw(tcg_env);
3668 return advance_pc(dc);
3669 }
3670 return false;
3671 }
3672
3673 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3674 void (*func)(DisasContext *, TCGv))
3675 {
3676 TCGv src;
3677
3678 /* For simplicity, we under-decoded the rs2 form. */
3679 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3680 return false;
3681 }
3682 if (!priv) {
3683 return raise_priv(dc);
3684 }
3685
3686 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3687 src = tcg_constant_tl(a->rs2_or_imm);
3688 } else {
3689 TCGv src1 = gen_load_gpr(dc, a->rs1);
3690 if (a->rs2_or_imm == 0) {
3691 src = src1;
3692 } else {
3693 src = tcg_temp_new();
3694 if (a->imm) {
3695 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3696 } else {
3697 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3698 }
3699 }
3700 }
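    /*
     * Note the xor above is not an optimization trick: per the
     * WRASR/WRY definition, the value written is r[rs1] XOR
     * (r[rs2] or sign_ext(simm13)).  E.g. "wr %g0, 0x13, %y"
     * writes 0 ^ 0x13 = 0x13.
     */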
3701 func(dc, src);
3702 return advance_pc(dc);
3703 }
3704
3705 static void do_wry(DisasContext *dc, TCGv src)
3706 {
3707 tcg_gen_ext32u_tl(cpu_y, src);
3708 }
3709
3710 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3711
3712 static void do_wrccr(DisasContext *dc, TCGv src)
3713 {
3714 gen_helper_wrccr(tcg_env, src);
3715 }
3716
3717 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3718
3719 static void do_wrasi(DisasContext *dc, TCGv src)
3720 {
3721 TCGv tmp = tcg_temp_new();
3722
3723 tcg_gen_ext8u_tl(tmp, src);
3724 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3725 /* End TB to notice changed ASI. */
3726 dc->base.is_jmp = DISAS_EXIT;
3727 }
3728
3729 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3730
3731 static void do_wrfprs(DisasContext *dc, TCGv src)
3732 {
3733 #ifdef TARGET_SPARC64
3734 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3735 dc->fprs_dirty = 0;
3736 dc->base.is_jmp = DISAS_EXIT;
3737 #else
3738 qemu_build_not_reached();
3739 #endif
3740 }
3741
3742 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3743
3744 static void do_wrgsr(DisasContext *dc, TCGv src)
3745 {
3746 gen_trap_ifnofpu(dc);
3747 tcg_gen_mov_tl(cpu_gsr, src);
3748 }
3749
3750 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3751
3752 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3753 {
3754 gen_helper_set_softint(tcg_env, src);
3755 }
3756
3757 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3758
3759 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3760 {
3761 gen_helper_clear_softint(tcg_env, src);
3762 }
3763
3764 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3765
3766 static void do_wrsoftint(DisasContext *dc, TCGv src)
3767 {
3768 gen_helper_write_softint(tcg_env, src);
3769 }
3770
3771 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3772
3773 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3774 {
3775 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3776
3777 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3778 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3779 translator_io_start(&dc->base);
3780 gen_helper_tick_set_limit(r_tickptr, src);
3781 /* End TB to handle timer interrupt */
3782 dc->base.is_jmp = DISAS_EXIT;
3783 }
3784
3785 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3786
3787 static void do_wrstick(DisasContext *dc, TCGv src)
3788 {
3789 #ifdef TARGET_SPARC64
3790 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3791
3792 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3793 translator_io_start(&dc->base);
3794 gen_helper_tick_set_count(r_tickptr, src);
3795 /* End TB to handle timer interrupt */
3796 dc->base.is_jmp = DISAS_EXIT;
3797 #else
3798 qemu_build_not_reached();
3799 #endif
3800 }
3801
3802 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3803
3804 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3805 {
3806 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3807
3808 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3809 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3810 translator_io_start(&dc->base);
3811 gen_helper_tick_set_limit(r_tickptr, src);
3812 /* End TB to handle timer interrupt */
3813 dc->base.is_jmp = DISAS_EXIT;
3814 }
3815
3816 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3817
3818 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3819 {
3820 save_state(dc);
3821 gen_helper_power_down(tcg_env);
3822 }
3823
3824 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3825
3826 static void do_wrpsr(DisasContext *dc, TCGv src)
3827 {
3828 gen_helper_wrpsr(tcg_env, src);
3829 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3830 dc->cc_op = CC_OP_FLAGS;
3831 dc->base.is_jmp = DISAS_EXIT;
3832 }
3833
3834 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3835
3836 static void do_wrwim(DisasContext *dc, TCGv src)
3837 {
3838 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3839 TCGv tmp = tcg_temp_new();
3840
3841 tcg_gen_andi_tl(tmp, src, mask);
3842 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3843 }
3844
3845 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3846
3847 static void do_wrtpc(DisasContext *dc, TCGv src)
3848 {
3849 #ifdef TARGET_SPARC64
3850 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3851
3852 gen_load_trap_state_at_tl(r_tsptr);
3853 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3854 #else
3855 qemu_build_not_reached();
3856 #endif
3857 }
3858
3859 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3860
3861 static void do_wrtnpc(DisasContext *dc, TCGv src)
3862 {
3863 #ifdef TARGET_SPARC64
3864 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3865
3866 gen_load_trap_state_at_tl(r_tsptr);
3867 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3868 #else
3869 qemu_build_not_reached();
3870 #endif
3871 }
3872
3873 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3874
3875 static void do_wrtstate(DisasContext *dc, TCGv src)
3876 {
3877 #ifdef TARGET_SPARC64
3878 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3879
3880 gen_load_trap_state_at_tl(r_tsptr);
3881 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3882 #else
3883 qemu_build_not_reached();
3884 #endif
3885 }
3886
3887 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3888
3889 static void do_wrtt(DisasContext *dc, TCGv src)
3890 {
3891 #ifdef TARGET_SPARC64
3892 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3893
3894 gen_load_trap_state_at_tl(r_tsptr);
3895 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3896 #else
3897 qemu_build_not_reached();
3898 #endif
3899 }
3900
3901 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3902
3903 static void do_wrtick(DisasContext *dc, TCGv src)
3904 {
3905 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3906
3907 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3908 translator_io_start(&dc->base);
3909 gen_helper_tick_set_count(r_tickptr, src);
3910 /* End TB to handle timer interrupt */
3911 dc->base.is_jmp = DISAS_EXIT;
3912 }
3913
3914 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3915
3916 static void do_wrtba(DisasContext *dc, TCGv src)
3917 {
3918 tcg_gen_mov_tl(cpu_tbr, src);
3919 }
3920
3921 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3922
3923 static void do_wrpstate(DisasContext *dc, TCGv src)
3924 {
3925 save_state(dc);
3926 if (translator_io_start(&dc->base)) {
3927 dc->base.is_jmp = DISAS_EXIT;
3928 }
3929 gen_helper_wrpstate(tcg_env, src);
3930 dc->npc = DYNAMIC_PC;
3931 }
3932
3933 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3934
3935 static void do_wrtl(DisasContext *dc, TCGv src)
3936 {
3937 save_state(dc);
3938 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3939 dc->npc = DYNAMIC_PC;
3940 }
3941
3942 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3943
3944 static void do_wrpil(DisasContext *dc, TCGv src)
3945 {
3946 if (translator_io_start(&dc->base)) {
3947 dc->base.is_jmp = DISAS_EXIT;
3948 }
3949 gen_helper_wrpil(tcg_env, src);
3950 }
3951
3952 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3953
3954 static void do_wrcwp(DisasContext *dc, TCGv src)
3955 {
3956 gen_helper_wrcwp(tcg_env, src);
3957 }
3958
3959 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3960
3961 static void do_wrcansave(DisasContext *dc, TCGv src)
3962 {
3963 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3964 }
3965
3966 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3967
3968 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3969 {
3970 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3971 }
3972
3973 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3974
3975 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3976 {
3977 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3978 }
3979
3980 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3981
3982 static void do_wrotherwin(DisasContext *dc, TCGv src)
3983 {
3984 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3985 }
3986
3987 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3988
3989 static void do_wrwstate(DisasContext *dc, TCGv src)
3990 {
3991 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3992 }
3993
3994 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3995
3996 static void do_wrgl(DisasContext *dc, TCGv src)
3997 {
3998 gen_helper_wrgl(tcg_env, src);
3999 }
4000
4001 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
4002
4003 /* UA2005 strand status */
4004 static void do_wrssr(DisasContext *dc, TCGv src)
4005 {
4006 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
4007 }
4008
4009 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
4010
4011 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
4012
4013 static void do_wrhpstate(DisasContext *dc, TCGv src)
4014 {
4015 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
4016 dc->base.is_jmp = DISAS_EXIT;
4017 }
4018
4019 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
4020
4021 static void do_wrhtstate(DisasContext *dc, TCGv src)
4022 {
4023 TCGv_i32 tl = tcg_temp_new_i32();
4024 TCGv_ptr tp = tcg_temp_new_ptr();
4025
4026 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
4027 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
4028 tcg_gen_shli_i32(tl, tl, 3);
4029 tcg_gen_ext_i32_ptr(tp, tl);
4030 tcg_gen_add_ptr(tp, tp, tcg_env);
4031
4032 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
4033 }
4034
4035 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
4036
4037 static void do_wrhintp(DisasContext *dc, TCGv src)
4038 {
4039 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
4040 }
4041
4042 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
4043
4044 static void do_wrhtba(DisasContext *dc, TCGv src)
4045 {
4046 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
4047 }
4048
4049 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
4050
4051 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
4052 {
4053 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
4054
4055 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
4056 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
4057 translator_io_start(&dc->base);
4058 gen_helper_tick_set_limit(r_tickptr, src);
4059 /* End TB to handle timer interrupt */
4060 dc->base.is_jmp = DISAS_EXIT;
4061 }
4062
4063 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
4064 do_wrhstick_cmpr)
4065
4066 static bool do_saved_restored(DisasContext *dc, bool saved)
4067 {
4068 if (!supervisor(dc)) {
4069 return raise_priv(dc);
4070 }
4071 if (saved) {
4072 gen_helper_saved(tcg_env);
4073 } else {
4074 gen_helper_restored(tcg_env);
4075 }
4076 return advance_pc(dc);
4077 }
4078
4079 TRANS(SAVED, 64, do_saved_restored, true)
4080 TRANS(RESTORED, 64, do_saved_restored, false)
4081
4082 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
4083 {
4084 return advance_pc(dc);
4085 }
4086
4087 static bool trans_NOP_v7(DisasContext *dc, arg_NOP_v7 *a)
4088 {
4089 /*
4090 * TODO: Need a feature bit for sparcv8.
4091 * In the meantime, treat all 32-bit cpus like sparcv7.
4092 */
4093 if (avail_32(dc)) {
4094 return advance_pc(dc);
4095 }
4096 return false;
4097 }
4098
4099 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4100 void (*func)(TCGv, TCGv, TCGv),
4101 void (*funci)(TCGv, TCGv, target_long))
4102 {
4103 TCGv dst, src1;
4104
4105 /* For simplicity, we under-decoded the rs2 form. */
4106 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4107 return false;
4108 }
4109
4110 if (a->cc) {
4111 dst = cpu_cc_dst;
4112 } else {
4113 dst = gen_dest_gpr(dc, a->rd);
4114 }
4115 src1 = gen_load_gpr(dc, a->rs1);
4116
4117 if (a->imm || a->rs2_or_imm == 0) {
4118 if (funci) {
4119 funci(dst, src1, a->rs2_or_imm);
4120 } else {
4121 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
4122 }
4123 } else {
4124 func(dst, src1, cpu_regs[a->rs2_or_imm]);
4125 }
4126 gen_store_gpr(dc, a->rd, dst);
4127
4128 if (a->cc) {
4129 tcg_gen_movi_i32(cpu_cc_op, cc_op);
4130 dc->cc_op = cc_op;
4131 }
4132 return advance_pc(dc);
4133 }
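/*
 * A note on the cc path above (assuming the lazy-flags scheme
 * used throughout this file): when a->cc is set, the result is
 * written to cpu_cc_dst and cpu_cc_op records how it was
 * produced, so a later update_psr()/gen_compare() can derive
 * N/Z/V/C from cpu_cc_src, cpu_cc_src2 and cpu_cc_dst on demand
 * instead of computing flags eagerly for every instruction.
 */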
4134
4135 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4136 void (*func)(TCGv, TCGv, TCGv),
4137 void (*funci)(TCGv, TCGv, target_long),
4138 void (*func_cc)(TCGv, TCGv, TCGv))
4139 {
4140 if (a->cc) {
4141 assert(cc_op >= 0);
4142 return do_arith_int(dc, a, cc_op, func_cc, NULL);
4143 }
4144 return do_arith_int(dc, a, cc_op, func, funci);
4145 }
4146
4147 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
4148 void (*func)(TCGv, TCGv, TCGv),
4149 void (*funci)(TCGv, TCGv, target_long))
4150 {
4151 return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
4152 }
4153
4154 TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
4155 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
4156 TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
4157 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
4158
4159 TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
4160 TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
4161 TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
4162 TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
4163
4164 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
4165 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
4166 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
4167 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
4168 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
4169
4170 TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
4171 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
4172 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
4173
4174 TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
4175 TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
4176 TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
4177 TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
4178
4179 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
4180 TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
4181
4182 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
4183 {
4184 /* OR with %g0 is the canonical alias for MOV. */
4185 if (!a->cc && a->rs1 == 0) {
4186 if (a->imm || a->rs2_or_imm == 0) {
4187 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
4188 } else if (a->rs2_or_imm & ~0x1f) {
4189 /* For simplicity, we under-decoded the rs2 form. */
4190 return false;
4191 } else {
4192 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
4193 }
4194 return advance_pc(dc);
4195 }
4196 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
4197 }
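
/*
 * e.g. the assembler synthetics "mov 5, %g2" -> "or %g0, 5, %g2" and
 * "mov %o0, %g2" -> "or %g0, %o0, %g2" both hit the fast path above
 * without disturbing the condition codes.
 */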
4198
4199 static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
4200 {
4201 switch (dc->cc_op) {
4202 case CC_OP_DIV:
4203 case CC_OP_LOGIC:
4204 /* Carry is known to be zero. Fall back to plain ADD. */
4205 return do_arith(dc, a, CC_OP_ADD,
4206 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
4207 case CC_OP_ADD:
4208 case CC_OP_TADD:
4209 case CC_OP_TADDTV:
4210 return do_arith(dc, a, CC_OP_ADDX,
4211 gen_op_addc_add, NULL, gen_op_addccc_add);
4212 case CC_OP_SUB:
4213 case CC_OP_TSUB:
4214 case CC_OP_TSUBTV:
4215 return do_arith(dc, a, CC_OP_ADDX,
4216 gen_op_addc_sub, NULL, gen_op_addccc_sub);
4217 default:
4218 return do_arith(dc, a, CC_OP_ADDX,
4219 gen_op_addc_generic, NULL, gen_op_addccc_generic);
4220 }
4221 }
4222
4223 static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
4224 {
4225 switch (dc->cc_op) {
4226 case CC_OP_DIV:
4227 case CC_OP_LOGIC:
4228 /* Carry is known to be zero. Fall back to plain SUB. */
4229 return do_arith(dc, a, CC_OP_SUB,
4230 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
4231 case CC_OP_ADD:
4232 case CC_OP_TADD:
4233 case CC_OP_TADDTV:
4234 return do_arith(dc, a, CC_OP_SUBX,
4235 gen_op_subc_add, NULL, gen_op_subccc_add);
4236 case CC_OP_SUB:
4237 case CC_OP_TSUB:
4238 case CC_OP_TSUBTV:
4239 return do_arith(dc, a, CC_OP_SUBX,
4240 gen_op_subc_sub, NULL, gen_op_subccc_sub);
4241 default:
4242 return do_arith(dc, a, CC_OP_SUBX,
4243 gen_op_subc_generic, NULL, gen_op_subccc_generic);
4244 }
4245 }
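
/*
 * A minimal sketch (not built) of the idea behind the cc_op dispatch
 * above: when the flags provably came from ADDcc, the carry-in for
 * ADDC is recomputable from the recorded operands with one setcond,
 * for 32-bit operands C = (uint32_t)cc_dst < (uint32_t)cc_src, while
 * the generic fallback must invoke the full PSR carry computation.
 * The real gen_op_addc_* and gen_op_subc_* helpers appear earlier in
 * this file.
 */
#if 0
static void sketch_carry_from_addcc(TCGv carry)
{
    /* Unsigned "result < operand" detects carry out of a 32-bit add. */
    tcg_gen_setcond_tl(TCG_COND_LTU, carry, cpu_cc_dst, cpu_cc_src);
}
#endif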
4246
4247 static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
4248 {
4249 update_psr(dc);
4250 return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
4251 }
4252
4253 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4254 {
4255 TCGv dst, src1, src2;
4256
4257 /* Reject 64-bit shifts for sparc32. */
4258 if (avail_32(dc) && a->x) {
4259 return false;
4260 }
4261
4262 src2 = tcg_temp_new();
4263 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4264 src1 = gen_load_gpr(dc, a->rs1);
4265 dst = gen_dest_gpr(dc, a->rd);
4266
4267 if (l) {
4268 tcg_gen_shl_tl(dst, src1, src2);
4269 if (!a->x) {
4270 tcg_gen_ext32u_tl(dst, dst);
4271 }
4272 } else if (u) {
4273 if (!a->x) {
4274 tcg_gen_ext32u_tl(dst, src1);
4275 src1 = dst;
4276 }
4277 tcg_gen_shr_tl(dst, src1, src2);
4278 } else {
4279 if (!a->x) {
4280 tcg_gen_ext32s_tl(dst, src1);
4281 src1 = dst;
4282 }
4283 tcg_gen_sar_tl(dst, src1, src2);
4284 }
4285 gen_store_gpr(dc, a->rd, dst);
4286 return advance_pc(dc);
4287 }
4288
4289 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4290 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4291 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
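
/*
 * Note on do_shift_r above: SPARC defines register shifts to use only
 * the low 5 (32-bit) or 6 (64-bit) bits of rs2, and TCG shift results
 * are undefined for counts >= the operand width; the andi mask above
 * satisfies both at once.
 */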
4292
4293 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4294 {
4295 TCGv dst, src1;
4296
4297 /* Reject 64-bit shifts for sparc32. */
4298 if (avail_32(dc) && (a->x || a->i >= 32)) {
4299 return false;
4300 }
4301
4302 src1 = gen_load_gpr(dc, a->rs1);
4303 dst = gen_dest_gpr(dc, a->rd);
4304
4305 if (avail_32(dc) || a->x) {
4306 if (l) {
4307 tcg_gen_shli_tl(dst, src1, a->i);
4308 } else if (u) {
4309 tcg_gen_shri_tl(dst, src1, a->i);
4310 } else {
4311 tcg_gen_sari_tl(dst, src1, a->i);
4312 }
4313 } else {
4314 if (l) {
4315 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4316 } else if (u) {
4317 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4318 } else {
4319 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4320 }
4321 }
4322 gen_store_gpr(dc, a->rd, dst);
4323 return advance_pc(dc);
4324 }
4325
4326 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4327 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4328 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
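
/*
 * Illustrative note on the 64-bit !x case above: one TCG op replaces
 * a shift-plus-extend pair.  For SRL, extract(src, i, 32 - i) reads
 * bits [31:i] of src and zero-extends, which is exactly
 * (uint32_t)src >> i.  SLL uses deposit_z (a shift left into an
 * all-zero base) and SRA uses sextract in the same way.
 */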
4329
4330 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4331 {
4332 /* For simplicity, we under-decoded the rs2 form. */
4333 if (!imm && rs2_or_imm & ~0x1f) {
4334 return NULL;
4335 }
4336 if (imm || rs2_or_imm == 0) {
4337 return tcg_constant_tl(rs2_or_imm);
4338 } else {
4339 return cpu_regs[rs2_or_imm];
4340 }
4341 }
4342
4343 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4344 {
4345 TCGv dst = gen_load_gpr(dc, rd);
4346
4347 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4348 gen_store_gpr(dc, rd, dst);
4349 return advance_pc(dc);
4350 }
4351
4352 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4353 {
4354 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4355 DisasCompare cmp;
4356
4357 if (src2 == NULL) {
4358 return false;
4359 }
4360 gen_compare(&cmp, a->cc, a->cond, dc);
4361 return do_mov_cond(dc, &cmp, a->rd, src2);
4362 }
4363
4364 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4365 {
4366 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4367 DisasCompare cmp;
4368
4369 if (src2 == NULL) {
4370 return false;
4371 }
4372 gen_fcompare(&cmp, a->cc, a->cond);
4373 return do_mov_cond(dc, &cmp, a->rd, src2);
4374 }
4375
4376 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4377 {
4378 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4379 DisasCompare cmp;
4380
4381 if (src2 == NULL) {
4382 return false;
4383 }
4384 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4385 return do_mov_cond(dc, &cmp, a->rd, src2);
4386 }
4387
4388 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4389 bool (*func)(DisasContext *dc, int rd, TCGv src))
4390 {
4391 TCGv src1, sum;
4392
4393 /* For simplicity, we under-decoded the rs2 form. */
4394 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4395 return false;
4396 }
4397
4398 /*
4399      * Always load the sum into a new temporary.  This captures the
4400      * value across a window change (e.g. SAVE and RESTORE), where a
4401      * direct register reference would observe the new window instead.
4402 */
4403 sum = tcg_temp_new();
4404 src1 = gen_load_gpr(dc, a->rs1);
4405 if (a->imm || a->rs2_or_imm == 0) {
4406 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4407 } else {
4408 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4409 }
4410 return func(dc, a->rd, sum);
4411 }
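
/*
 * Concrete instance of the comment above, for "save %sp, -96, %sp":
 * the sum must be computed from the old window's %sp but is written,
 * by do_save() below, into the new window's %sp after helper_save()
 * has rotated regwptr; passing cpu_regs[14] straight through would
 * re-read it against the new window.
 */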
4412
4413 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4414 {
4415 /*
4416 * Preserve pc across advance, so that we can delay
4417 * the writeback to rd until after src is consumed.
4418 */
4419 target_ulong cur_pc = dc->pc;
4420
4421 gen_check_align(dc, src, 3);
4422
4423 gen_mov_pc_npc(dc);
4424 tcg_gen_mov_tl(cpu_npc, src);
4425 gen_address_mask(dc, cpu_npc);
4426 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4427
4428 dc->npc = DYNAMIC_PC_LOOKUP;
4429 return true;
4430 }
4431
4432 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
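
/*
 * The synthetic returns are plain JMPL: "ret" is "jmpl %i7+8, %g0"
 * and "retl" is "jmpl %o7+8, %g0".  The link value written to rd is
 * the address of the JMPL itself, which is why do_jmpl() captures
 * dc->pc before gen_mov_pc_npc() advances the pair.
 */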
4433
4434 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4435 {
4436 if (!supervisor(dc)) {
4437 return raise_priv(dc);
4438 }
4439
4440 gen_check_align(dc, src, 3);
4441
4442 gen_mov_pc_npc(dc);
4443 tcg_gen_mov_tl(cpu_npc, src);
4444 gen_helper_rett(tcg_env);
4445
4446 dc->npc = DYNAMIC_PC;
4447 return true;
4448 }
4449
4450 TRANS(RETT, 32, do_add_special, a, do_rett)
4451
4452 static bool do_return(DisasContext *dc, int rd, TCGv src)
4453 {
4454 gen_check_align(dc, src, 3);
4455
4456 gen_mov_pc_npc(dc);
4457 tcg_gen_mov_tl(cpu_npc, src);
4458 gen_address_mask(dc, cpu_npc);
4459
4460 gen_helper_restore(tcg_env);
4461 dc->npc = DYNAMIC_PC_LOOKUP;
4462 return true;
4463 }
4464
4465 TRANS(RETURN, 64, do_add_special, a, do_return)
4466
4467 static bool do_save(DisasContext *dc, int rd, TCGv src)
4468 {
4469 gen_helper_save(tcg_env);
4470 gen_store_gpr(dc, rd, src);
4471 return advance_pc(dc);
4472 }
4473
4474 TRANS(SAVE, ALL, do_add_special, a, do_save)
4475
4476 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4477 {
4478 gen_helper_restore(tcg_env);
4479 gen_store_gpr(dc, rd, src);
4480 return advance_pc(dc);
4481 }
4482
4483 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4484
4485 static bool do_done_retry(DisasContext *dc, bool done)
4486 {
4487 if (!supervisor(dc)) {
4488 return raise_priv(dc);
4489 }
4490 dc->npc = DYNAMIC_PC;
4491 dc->pc = DYNAMIC_PC;
4492 translator_io_start(&dc->base);
4493 if (done) {
4494 gen_helper_done(tcg_env);
4495 } else {
4496 gen_helper_retry(tcg_env);
4497 }
4498 return true;
4499 }
4500
4501 TRANS(DONE, 64, do_done_retry, true)
4502 TRANS(RETRY, 64, do_done_retry, false)
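
/*
 * Per SPARC V9: DONE sets pc = tnpc[tl] and npc = tnpc[tl] + 4 (skip
 * the trapped instruction); RETRY sets pc = tpc[tl] and npc = tnpc[tl]
 * (re-execute it).  Both pop the trap level, so neither target is
 * knowable at translation time, hence DYNAMIC_PC for pc and npc above.
 */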
4503
4504 #define CHECK_IU_FEATURE(dc, FEATURE) \
4505 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4506 goto illegal_insn;
4507 #define CHECK_FPU_FEATURE(dc, FEATURE) \
4508 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4509 goto nfpu_insn;
4510
4511 /* before an instruction, dc->pc must be static */
4512 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
4513 {
4514 unsigned int opc, rs1, rs2, rd;
4515 TCGv cpu_src1;
4516 TCGv cpu_src2 __attribute__((unused));
4517 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
4518 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
4519 target_long simm;
4520
4521 opc = GET_FIELD(insn, 0, 1);
4522 rd = GET_FIELD(insn, 2, 6);
4523
4524 switch (opc) {
4525 case 0:
4526 goto illegal_insn; /* in decodetree */
4527 case 1:
4528 g_assert_not_reached(); /* in decodetree */
4529 case 2: /* FPU & Logical Operations */
4530 {
4531 unsigned int xop = GET_FIELD(insn, 7, 12);
4532 TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
4533
4534             if (xop == 0x34) { /* FPU arithmetic and conversions */
4535 if (gen_trap_ifnofpu(dc)) {
4536 goto jmp_insn;
4537 }
4538 gen_op_clear_ieee_excp_and_FTT();
4539 rs1 = GET_FIELD(insn, 13, 17);
4540 rs2 = GET_FIELD(insn, 27, 31);
4541 xop = GET_FIELD(insn, 18, 26);
4542
4543 switch (xop) {
4544 case 0x1: /* fmovs */
4545 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4546 gen_store_fpr_F(dc, rd, cpu_src1_32);
4547 break;
4548 case 0x5: /* fnegs */
4549 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
4550 break;
4551 case 0x9: /* fabss */
4552 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
4553 break;
4554 case 0x29: /* fsqrts */
4555 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
4556 break;
4557 case 0x2a: /* fsqrtd */
4558 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
4559 break;
4560 case 0x2b: /* fsqrtq */
4561 CHECK_FPU_FEATURE(dc, FLOAT128);
4562 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
4563 break;
4564 case 0x41: /* fadds */
4565 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
4566 break;
4567 case 0x42: /* faddd */
4568 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
4569 break;
4570 case 0x43: /* faddq */
4571 CHECK_FPU_FEATURE(dc, FLOAT128);
4572 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
4573 break;
4574 case 0x45: /* fsubs */
4575 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
4576 break;
4577 case 0x46: /* fsubd */
4578 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
4579 break;
4580 case 0x47: /* fsubq */
4581 CHECK_FPU_FEATURE(dc, FLOAT128);
4582 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
4583 break;
4584 case 0x49: /* fmuls */
4585 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
4586 break;
4587 case 0x4a: /* fmuld */
4588 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
4589 break;
4590 case 0x4b: /* fmulq */
4591 CHECK_FPU_FEATURE(dc, FLOAT128);
4592 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
4593 break;
4594 case 0x4d: /* fdivs */
4595 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
4596 break;
4597 case 0x4e: /* fdivd */
4598 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
4599 break;
4600 case 0x4f: /* fdivq */
4601 CHECK_FPU_FEATURE(dc, FLOAT128);
4602 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
4603 break;
4604 case 0x69: /* fsmuld */
4605 CHECK_FPU_FEATURE(dc, FSMULD);
4606 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
4607 break;
4608 case 0x6e: /* fdmulq */
4609 CHECK_FPU_FEATURE(dc, FLOAT128);
4610 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
4611 break;
4612 case 0xc4: /* fitos */
4613 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
4614 break;
4615 case 0xc6: /* fdtos */
4616 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
4617 break;
4618 case 0xc7: /* fqtos */
4619 CHECK_FPU_FEATURE(dc, FLOAT128);
4620 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
4621 break;
4622 case 0xc8: /* fitod */
4623 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
4624 break;
4625 case 0xc9: /* fstod */
4626 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
4627 break;
4628 case 0xcb: /* fqtod */
4629 CHECK_FPU_FEATURE(dc, FLOAT128);
4630 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
4631 break;
4632 case 0xcc: /* fitoq */
4633 CHECK_FPU_FEATURE(dc, FLOAT128);
4634 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
4635 break;
4636 case 0xcd: /* fstoq */
4637 CHECK_FPU_FEATURE(dc, FLOAT128);
4638 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
4639 break;
4640 case 0xce: /* fdtoq */
4641 CHECK_FPU_FEATURE(dc, FLOAT128);
4642 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
4643 break;
4644 case 0xd1: /* fstoi */
4645 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
4646 break;
4647 case 0xd2: /* fdtoi */
4648 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
4649 break;
4650 case 0xd3: /* fqtoi */
4651 CHECK_FPU_FEATURE(dc, FLOAT128);
4652 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
4653 break;
4654 #ifdef TARGET_SPARC64
4655 case 0x2: /* V9 fmovd */
4656 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4657 gen_store_fpr_D(dc, rd, cpu_src1_64);
4658 break;
4659 case 0x3: /* V9 fmovq */
4660 CHECK_FPU_FEATURE(dc, FLOAT128);
4661 gen_move_Q(dc, rd, rs2);
4662 break;
4663 case 0x6: /* V9 fnegd */
4664 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
4665 break;
4666 case 0x7: /* V9 fnegq */
4667 CHECK_FPU_FEATURE(dc, FLOAT128);
4668 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
4669 break;
4670 case 0xa: /* V9 fabsd */
4671 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
4672 break;
4673 case 0xb: /* V9 fabsq */
4674 CHECK_FPU_FEATURE(dc, FLOAT128);
4675 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
4676 break;
4677 case 0x81: /* V9 fstox */
4678 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
4679 break;
4680 case 0x82: /* V9 fdtox */
4681 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
4682 break;
4683 case 0x83: /* V9 fqtox */
4684 CHECK_FPU_FEATURE(dc, FLOAT128);
4685 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
4686 break;
4687 case 0x84: /* V9 fxtos */
4688 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
4689 break;
4690 case 0x88: /* V9 fxtod */
4691 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
4692 break;
4693 case 0x8c: /* V9 fxtoq */
4694 CHECK_FPU_FEATURE(dc, FLOAT128);
4695 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
4696 break;
4697 #endif
4698 default:
4699 goto illegal_insn;
4700 }
4701             } else if (xop == 0x35) { /* FPU compares and conditional moves */
4702 #ifdef TARGET_SPARC64
4703 int cond;
4704 #endif
4705 if (gen_trap_ifnofpu(dc)) {
4706 goto jmp_insn;
4707 }
4708 gen_op_clear_ieee_excp_and_FTT();
4709 rs1 = GET_FIELD(insn, 13, 17);
4710 rs2 = GET_FIELD(insn, 27, 31);
4711 xop = GET_FIELD(insn, 18, 26);
4712
4713 #ifdef TARGET_SPARC64
4714 #define FMOVR(sz) \
4715 do { \
4716 DisasCompare cmp; \
4717 cond = GET_FIELD_SP(insn, 10, 12); \
4718 cpu_src1 = get_src1(dc, insn); \
4719 gen_compare_reg(&cmp, cond, cpu_src1); \
4720 gen_fmov##sz(dc, &cmp, rd, rs2); \
4721 } while (0)
4722
4723 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
4724 FMOVR(s);
4725 break;
4726 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
4727 FMOVR(d);
4728 break;
4729 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
4730 CHECK_FPU_FEATURE(dc, FLOAT128);
4731 FMOVR(q);
4732 break;
4733 }
4734 #undef FMOVR
4735 #endif
4736 switch (xop) {
4737 #ifdef TARGET_SPARC64
4738 #define FMOVCC(fcc, sz) \
4739 do { \
4740 DisasCompare cmp; \
4741 cond = GET_FIELD_SP(insn, 14, 17); \
4742 gen_fcompare(&cmp, fcc, cond); \
4743 gen_fmov##sz(dc, &cmp, rd, rs2); \
4744 } while (0)
4745
4746 case 0x001: /* V9 fmovscc %fcc0 */
4747 FMOVCC(0, s);
4748 break;
4749 case 0x002: /* V9 fmovdcc %fcc0 */
4750 FMOVCC(0, d);
4751 break;
4752 case 0x003: /* V9 fmovqcc %fcc0 */
4753 CHECK_FPU_FEATURE(dc, FLOAT128);
4754 FMOVCC(0, q);
4755 break;
4756 case 0x041: /* V9 fmovscc %fcc1 */
4757 FMOVCC(1, s);
4758 break;
4759 case 0x042: /* V9 fmovdcc %fcc1 */
4760 FMOVCC(1, d);
4761 break;
4762 case 0x043: /* V9 fmovqcc %fcc1 */
4763 CHECK_FPU_FEATURE(dc, FLOAT128);
4764 FMOVCC(1, q);
4765 break;
4766 case 0x081: /* V9 fmovscc %fcc2 */
4767 FMOVCC(2, s);
4768 break;
4769 case 0x082: /* V9 fmovdcc %fcc2 */
4770 FMOVCC(2, d);
4771 break;
4772 case 0x083: /* V9 fmovqcc %fcc2 */
4773 CHECK_FPU_FEATURE(dc, FLOAT128);
4774 FMOVCC(2, q);
4775 break;
4776 case 0x0c1: /* V9 fmovscc %fcc3 */
4777 FMOVCC(3, s);
4778 break;
4779 case 0x0c2: /* V9 fmovdcc %fcc3 */
4780 FMOVCC(3, d);
4781 break;
4782 case 0x0c3: /* V9 fmovqcc %fcc3 */
4783 CHECK_FPU_FEATURE(dc, FLOAT128);
4784 FMOVCC(3, q);
4785 break;
4786 #undef FMOVCC
4787 #define FMOVCC(xcc, sz) \
4788 do { \
4789 DisasCompare cmp; \
4790 cond = GET_FIELD_SP(insn, 14, 17); \
4791 gen_compare(&cmp, xcc, cond, dc); \
4792 gen_fmov##sz(dc, &cmp, rd, rs2); \
4793 } while (0)
4794
4795 case 0x101: /* V9 fmovscc %icc */
4796 FMOVCC(0, s);
4797 break;
4798 case 0x102: /* V9 fmovdcc %icc */
4799 FMOVCC(0, d);
4800 break;
4801 case 0x103: /* V9 fmovqcc %icc */
4802 CHECK_FPU_FEATURE(dc, FLOAT128);
4803 FMOVCC(0, q);
4804 break;
4805 case 0x181: /* V9 fmovscc %xcc */
4806 FMOVCC(1, s);
4807 break;
4808 case 0x182: /* V9 fmovdcc %xcc */
4809 FMOVCC(1, d);
4810 break;
4811 case 0x183: /* V9 fmovqcc %xcc */
4812 CHECK_FPU_FEATURE(dc, FLOAT128);
4813 FMOVCC(1, q);
4814 break;
4815 #undef FMOVCC
4816 #endif
4817 case 0x51: /* fcmps, V9 %fcc */
4818 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4819 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4820 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
4821 break;
4822 case 0x52: /* fcmpd, V9 %fcc */
4823 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4824 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4825 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
4826 break;
4827 case 0x53: /* fcmpq, V9 %fcc */
4828 CHECK_FPU_FEATURE(dc, FLOAT128);
4829 gen_op_load_fpr_QT0(QFPREG(rs1));
4830 gen_op_load_fpr_QT1(QFPREG(rs2));
4831 gen_op_fcmpq(rd & 3);
4832 break;
4833 case 0x55: /* fcmpes, V9 %fcc */
4834 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4835 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4836 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
4837 break;
4838 case 0x56: /* fcmped, V9 %fcc */
4839 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4840 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4841 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
4842 break;
4843 case 0x57: /* fcmpeq, V9 %fcc */
4844 CHECK_FPU_FEATURE(dc, FLOAT128);
4845 gen_op_load_fpr_QT0(QFPREG(rs1));
4846 gen_op_load_fpr_QT1(QFPREG(rs2));
4847 gen_op_fcmpeq(rd & 3);
4848 break;
4849 default:
4850 goto illegal_insn;
4851 }
4852 } else if (xop == 0x36) {
4853 #ifdef TARGET_SPARC64
4854 /* VIS */
4855 int opf = GET_FIELD_SP(insn, 5, 13);
4856 rs1 = GET_FIELD(insn, 13, 17);
4857 rs2 = GET_FIELD(insn, 27, 31);
4858 if (gen_trap_ifnofpu(dc)) {
4859 goto jmp_insn;
4860 }
4861
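                /*
                 * gen_edge() arguments below read (..., width, cc, left):
                 * edge8cc -> (8, 1, 0), edge8n -> (8, 0, 0),
                 * edge8lcc -> (8, 1, 1), edge8ln -> (8, 0, 1), and
                 * likewise for the 16- and 32-bit variants.
                 */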
4862 switch (opf) {
4863 case 0x000: /* VIS I edge8cc */
4864 CHECK_FPU_FEATURE(dc, VIS1);
4865 cpu_src1 = gen_load_gpr(dc, rs1);
4866 cpu_src2 = gen_load_gpr(dc, rs2);
4867 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4868 gen_store_gpr(dc, rd, cpu_dst);
4869 break;
4870 case 0x001: /* VIS II edge8n */
4871 CHECK_FPU_FEATURE(dc, VIS2);
4872 cpu_src1 = gen_load_gpr(dc, rs1);
4873 cpu_src2 = gen_load_gpr(dc, rs2);
4874 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4875 gen_store_gpr(dc, rd, cpu_dst);
4876 break;
4877 case 0x002: /* VIS I edge8lcc */
4878 CHECK_FPU_FEATURE(dc, VIS1);
4879 cpu_src1 = gen_load_gpr(dc, rs1);
4880 cpu_src2 = gen_load_gpr(dc, rs2);
4881 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4882 gen_store_gpr(dc, rd, cpu_dst);
4883 break;
4884 case 0x003: /* VIS II edge8ln */
4885 CHECK_FPU_FEATURE(dc, VIS2);
4886 cpu_src1 = gen_load_gpr(dc, rs1);
4887 cpu_src2 = gen_load_gpr(dc, rs2);
4888 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4889 gen_store_gpr(dc, rd, cpu_dst);
4890 break;
4891 case 0x004: /* VIS I edge16cc */
4892 CHECK_FPU_FEATURE(dc, VIS1);
4893 cpu_src1 = gen_load_gpr(dc, rs1);
4894 cpu_src2 = gen_load_gpr(dc, rs2);
4895 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4896 gen_store_gpr(dc, rd, cpu_dst);
4897 break;
4898 case 0x005: /* VIS II edge16n */
4899 CHECK_FPU_FEATURE(dc, VIS2);
4900 cpu_src1 = gen_load_gpr(dc, rs1);
4901 cpu_src2 = gen_load_gpr(dc, rs2);
4902 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4903 gen_store_gpr(dc, rd, cpu_dst);
4904 break;
4905 case 0x006: /* VIS I edge16lcc */
4906 CHECK_FPU_FEATURE(dc, VIS1);
4907 cpu_src1 = gen_load_gpr(dc, rs1);
4908 cpu_src2 = gen_load_gpr(dc, rs2);
4909 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4910 gen_store_gpr(dc, rd, cpu_dst);
4911 break;
4912 case 0x007: /* VIS II edge16ln */
4913 CHECK_FPU_FEATURE(dc, VIS2);
4914 cpu_src1 = gen_load_gpr(dc, rs1);
4915 cpu_src2 = gen_load_gpr(dc, rs2);
4916 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4917 gen_store_gpr(dc, rd, cpu_dst);
4918 break;
4919 case 0x008: /* VIS I edge32cc */
4920 CHECK_FPU_FEATURE(dc, VIS1);
4921 cpu_src1 = gen_load_gpr(dc, rs1);
4922 cpu_src2 = gen_load_gpr(dc, rs2);
4923 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4924 gen_store_gpr(dc, rd, cpu_dst);
4925 break;
4926 case 0x009: /* VIS II edge32n */
4927 CHECK_FPU_FEATURE(dc, VIS2);
4928 cpu_src1 = gen_load_gpr(dc, rs1);
4929 cpu_src2 = gen_load_gpr(dc, rs2);
4930 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4931 gen_store_gpr(dc, rd, cpu_dst);
4932 break;
4933 case 0x00a: /* VIS I edge32lcc */
4934 CHECK_FPU_FEATURE(dc, VIS1);
4935 cpu_src1 = gen_load_gpr(dc, rs1);
4936 cpu_src2 = gen_load_gpr(dc, rs2);
4937 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4938 gen_store_gpr(dc, rd, cpu_dst);
4939 break;
4940 case 0x00b: /* VIS II edge32ln */
4941 CHECK_FPU_FEATURE(dc, VIS2);
4942 cpu_src1 = gen_load_gpr(dc, rs1);
4943 cpu_src2 = gen_load_gpr(dc, rs2);
4944 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4945 gen_store_gpr(dc, rd, cpu_dst);
4946 break;
4947 case 0x010: /* VIS I array8 */
4948 CHECK_FPU_FEATURE(dc, VIS1);
4949 cpu_src1 = gen_load_gpr(dc, rs1);
4950 cpu_src2 = gen_load_gpr(dc, rs2);
4951 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4952 gen_store_gpr(dc, rd, cpu_dst);
4953 break;
4954 case 0x012: /* VIS I array16 */
4955 CHECK_FPU_FEATURE(dc, VIS1);
4956 cpu_src1 = gen_load_gpr(dc, rs1);
4957 cpu_src2 = gen_load_gpr(dc, rs2);
4958 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4959 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4960 gen_store_gpr(dc, rd, cpu_dst);
4961 break;
4962 case 0x014: /* VIS I array32 */
4963 CHECK_FPU_FEATURE(dc, VIS1);
4964 cpu_src1 = gen_load_gpr(dc, rs1);
4965 cpu_src2 = gen_load_gpr(dc, rs2);
4966 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4967 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4968 gen_store_gpr(dc, rd, cpu_dst);
4969 break;
4970 case 0x018: /* VIS I alignaddr */
4971 CHECK_FPU_FEATURE(dc, VIS1);
4972 cpu_src1 = gen_load_gpr(dc, rs1);
4973 cpu_src2 = gen_load_gpr(dc, rs2);
4974 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4975 gen_store_gpr(dc, rd, cpu_dst);
4976 break;
4977 case 0x01a: /* VIS I alignaddrl */
4978 CHECK_FPU_FEATURE(dc, VIS1);
4979 cpu_src1 = gen_load_gpr(dc, rs1);
4980 cpu_src2 = gen_load_gpr(dc, rs2);
4981 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4982 gen_store_gpr(dc, rd, cpu_dst);
4983 break;
4984 case 0x019: /* VIS II bmask */
4985 CHECK_FPU_FEATURE(dc, VIS2);
4986 cpu_src1 = gen_load_gpr(dc, rs1);
4987 cpu_src2 = gen_load_gpr(dc, rs2);
4988 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4989 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4990 gen_store_gpr(dc, rd, cpu_dst);
4991 break;
4992 case 0x020: /* VIS I fcmple16 */
4993 CHECK_FPU_FEATURE(dc, VIS1);
4994 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4995 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4996 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4997 gen_store_gpr(dc, rd, cpu_dst);
4998 break;
4999 case 0x022: /* VIS I fcmpne16 */
5000 CHECK_FPU_FEATURE(dc, VIS1);
5001 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5002 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5003 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
5004 gen_store_gpr(dc, rd, cpu_dst);
5005 break;
5006 case 0x024: /* VIS I fcmple32 */
5007 CHECK_FPU_FEATURE(dc, VIS1);
5008 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5009 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5010 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
5011 gen_store_gpr(dc, rd, cpu_dst);
5012 break;
5013 case 0x026: /* VIS I fcmpne32 */
5014 CHECK_FPU_FEATURE(dc, VIS1);
5015 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5016 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5017 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
5018 gen_store_gpr(dc, rd, cpu_dst);
5019 break;
5020 case 0x028: /* VIS I fcmpgt16 */
5021 CHECK_FPU_FEATURE(dc, VIS1);
5022 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5023 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5024 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
5025 gen_store_gpr(dc, rd, cpu_dst);
5026 break;
5027 case 0x02a: /* VIS I fcmpeq16 */
5028 CHECK_FPU_FEATURE(dc, VIS1);
5029 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5030 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5031 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
5032 gen_store_gpr(dc, rd, cpu_dst);
5033 break;
5034 case 0x02c: /* VIS I fcmpgt32 */
5035 CHECK_FPU_FEATURE(dc, VIS1);
5036 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5037 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5038 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
5039 gen_store_gpr(dc, rd, cpu_dst);
5040 break;
5041 case 0x02e: /* VIS I fcmpeq32 */
5042 CHECK_FPU_FEATURE(dc, VIS1);
5043 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5044 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5045 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
5046 gen_store_gpr(dc, rd, cpu_dst);
5047 break;
5048 case 0x031: /* VIS I fmul8x16 */
5049 CHECK_FPU_FEATURE(dc, VIS1);
5050 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
5051 break;
5052 case 0x033: /* VIS I fmul8x16au */
5053 CHECK_FPU_FEATURE(dc, VIS1);
5054 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
5055 break;
5056 case 0x035: /* VIS I fmul8x16al */
5057 CHECK_FPU_FEATURE(dc, VIS1);
5058 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
5059 break;
5060 case 0x036: /* VIS I fmul8sux16 */
5061 CHECK_FPU_FEATURE(dc, VIS1);
5062 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
5063 break;
5064 case 0x037: /* VIS I fmul8ulx16 */
5065 CHECK_FPU_FEATURE(dc, VIS1);
5066 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
5067 break;
5068 case 0x038: /* VIS I fmuld8sux16 */
5069 CHECK_FPU_FEATURE(dc, VIS1);
5070 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
5071 break;
5072 case 0x039: /* VIS I fmuld8ulx16 */
5073 CHECK_FPU_FEATURE(dc, VIS1);
5074 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
5075 break;
5076 case 0x03a: /* VIS I fpack32 */
5077 CHECK_FPU_FEATURE(dc, VIS1);
5078 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
5079 break;
5080 case 0x03b: /* VIS I fpack16 */
5081 CHECK_FPU_FEATURE(dc, VIS1);
5082 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5083 cpu_dst_32 = gen_dest_fpr_F(dc);
5084 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5085 gen_store_fpr_F(dc, rd, cpu_dst_32);
5086 break;
5087 case 0x03d: /* VIS I fpackfix */
5088 CHECK_FPU_FEATURE(dc, VIS1);
5089 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5090 cpu_dst_32 = gen_dest_fpr_F(dc);
5091 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5092 gen_store_fpr_F(dc, rd, cpu_dst_32);
5093 break;
5094 case 0x03e: /* VIS I pdist */
5095 CHECK_FPU_FEATURE(dc, VIS1);
5096 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
5097 break;
5098 case 0x048: /* VIS I faligndata */
5099 CHECK_FPU_FEATURE(dc, VIS1);
5100 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
5101 break;
5102 case 0x04b: /* VIS I fpmerge */
5103 CHECK_FPU_FEATURE(dc, VIS1);
5104 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
5105 break;
5106 case 0x04c: /* VIS II bshuffle */
5107 CHECK_FPU_FEATURE(dc, VIS2);
5108 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
5109 break;
5110 case 0x04d: /* VIS I fexpand */
5111 CHECK_FPU_FEATURE(dc, VIS1);
5112 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5113 break;
5114 case 0x050: /* VIS I fpadd16 */
5115 CHECK_FPU_FEATURE(dc, VIS1);
5116 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5117 break;
5118 case 0x051: /* VIS I fpadd16s */
5119 CHECK_FPU_FEATURE(dc, VIS1);
5120 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5121 break;
5122 case 0x052: /* VIS I fpadd32 */
5123 CHECK_FPU_FEATURE(dc, VIS1);
5124 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5125 break;
5126 case 0x053: /* VIS I fpadd32s */
5127 CHECK_FPU_FEATURE(dc, VIS1);
5128 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5129 break;
5130 case 0x054: /* VIS I fpsub16 */
5131 CHECK_FPU_FEATURE(dc, VIS1);
5132 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5133 break;
5134 case 0x055: /* VIS I fpsub16s */
5135 CHECK_FPU_FEATURE(dc, VIS1);
5136 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5137 break;
5138 case 0x056: /* VIS I fpsub32 */
5139 CHECK_FPU_FEATURE(dc, VIS1);
5140 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5141 break;
5142 case 0x057: /* VIS I fpsub32s */
5143 CHECK_FPU_FEATURE(dc, VIS1);
5144 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5145 break;
5146 case 0x060: /* VIS I fzero */
5147 CHECK_FPU_FEATURE(dc, VIS1);
5148 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5149 tcg_gen_movi_i64(cpu_dst_64, 0);
5150 gen_store_fpr_D(dc, rd, cpu_dst_64);
5151 break;
5152 case 0x061: /* VIS I fzeros */
5153 CHECK_FPU_FEATURE(dc, VIS1);
5154 cpu_dst_32 = gen_dest_fpr_F(dc);
5155 tcg_gen_movi_i32(cpu_dst_32, 0);
5156 gen_store_fpr_F(dc, rd, cpu_dst_32);
5157 break;
5158 case 0x062: /* VIS I fnor */
5159 CHECK_FPU_FEATURE(dc, VIS1);
5160 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5161 break;
5162 case 0x063: /* VIS I fnors */
5163 CHECK_FPU_FEATURE(dc, VIS1);
5164 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5165 break;
5166 case 0x064: /* VIS I fandnot2 */
5167 CHECK_FPU_FEATURE(dc, VIS1);
5168 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5169 break;
5170 case 0x065: /* VIS I fandnot2s */
5171 CHECK_FPU_FEATURE(dc, VIS1);
5172 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5173 break;
5174 case 0x066: /* VIS I fnot2 */
5175 CHECK_FPU_FEATURE(dc, VIS1);
5176 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5177 break;
5178 case 0x067: /* VIS I fnot2s */
5179 CHECK_FPU_FEATURE(dc, VIS1);
5180 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5181 break;
5182 case 0x068: /* VIS I fandnot1 */
5183 CHECK_FPU_FEATURE(dc, VIS1);
5184 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5185 break;
5186 case 0x069: /* VIS I fandnot1s */
5187 CHECK_FPU_FEATURE(dc, VIS1);
5188 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5189 break;
5190 case 0x06a: /* VIS I fnot1 */
5191 CHECK_FPU_FEATURE(dc, VIS1);
5192 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5193 break;
5194 case 0x06b: /* VIS I fnot1s */
5195 CHECK_FPU_FEATURE(dc, VIS1);
5196 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5197 break;
5198 case 0x06c: /* VIS I fxor */
5199 CHECK_FPU_FEATURE(dc, VIS1);
5200 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5201 break;
5202 case 0x06d: /* VIS I fxors */
5203 CHECK_FPU_FEATURE(dc, VIS1);
5204 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5205 break;
5206 case 0x06e: /* VIS I fnand */
5207 CHECK_FPU_FEATURE(dc, VIS1);
5208 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5209 break;
5210 case 0x06f: /* VIS I fnands */
5211 CHECK_FPU_FEATURE(dc, VIS1);
5212 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5213 break;
5214 case 0x070: /* VIS I fand */
5215 CHECK_FPU_FEATURE(dc, VIS1);
5216 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5217 break;
5218 case 0x071: /* VIS I fands */
5219 CHECK_FPU_FEATURE(dc, VIS1);
5220 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5221 break;
5222 case 0x072: /* VIS I fxnor */
5223 CHECK_FPU_FEATURE(dc, VIS1);
5224 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5225 break;
5226 case 0x073: /* VIS I fxnors */
5227 CHECK_FPU_FEATURE(dc, VIS1);
5228 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5229 break;
5230 case 0x074: /* VIS I fsrc1 */
5231 CHECK_FPU_FEATURE(dc, VIS1);
5232 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5233 gen_store_fpr_D(dc, rd, cpu_src1_64);
5234 break;
5235 case 0x075: /* VIS I fsrc1s */
5236 CHECK_FPU_FEATURE(dc, VIS1);
5237 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5238 gen_store_fpr_F(dc, rd, cpu_src1_32);
5239 break;
5240 case 0x076: /* VIS I fornot2 */
5241 CHECK_FPU_FEATURE(dc, VIS1);
5242 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5243 break;
5244 case 0x077: /* VIS I fornot2s */
5245 CHECK_FPU_FEATURE(dc, VIS1);
5246 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5247 break;
5248 case 0x078: /* VIS I fsrc2 */
5249 CHECK_FPU_FEATURE(dc, VIS1);
5250 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5251 gen_store_fpr_D(dc, rd, cpu_src1_64);
5252 break;
5253 case 0x079: /* VIS I fsrc2s */
5254 CHECK_FPU_FEATURE(dc, VIS1);
5255 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5256 gen_store_fpr_F(dc, rd, cpu_src1_32);
5257 break;
5258 case 0x07a: /* VIS I fornot1 */
5259 CHECK_FPU_FEATURE(dc, VIS1);
5260 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5261 break;
5262 case 0x07b: /* VIS I fornot1s */
5263 CHECK_FPU_FEATURE(dc, VIS1);
5264 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5265 break;
5266 case 0x07c: /* VIS I for */
5267 CHECK_FPU_FEATURE(dc, VIS1);
5268 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5269 break;
5270 case 0x07d: /* VIS I fors */
5271 CHECK_FPU_FEATURE(dc, VIS1);
5272 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5273 break;
5274 case 0x07e: /* VIS I fone */
5275 CHECK_FPU_FEATURE(dc, VIS1);
5276 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5277 tcg_gen_movi_i64(cpu_dst_64, -1);
5278 gen_store_fpr_D(dc, rd, cpu_dst_64);
5279 break;
5280 case 0x07f: /* VIS I fones */
5281 CHECK_FPU_FEATURE(dc, VIS1);
5282 cpu_dst_32 = gen_dest_fpr_F(dc);
5283 tcg_gen_movi_i32(cpu_dst_32, -1);
5284 gen_store_fpr_F(dc, rd, cpu_dst_32);
5285 break;
5286 case 0x080: /* VIS I shutdown */
5287 case 0x081: /* VIS II siam */
5288                     // XXX: not implemented, treated as illegal
5289 goto illegal_insn;
5290 default:
5291 goto illegal_insn;
5292 }
5293 #endif
5294 } else {
5295 goto illegal_insn; /* in decodetree */
5296 }
5297 }
5298 break;
5299 case 3: /* load/store instructions */
5300 {
5301 unsigned int xop = GET_FIELD(insn, 7, 12);
5302 /* ??? gen_address_mask prevents us from using a source
5303 register directly. Always generate a temporary. */
5304 TCGv cpu_addr = tcg_temp_new();
5305
5306 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5307 if (xop == 0x3c || xop == 0x3e) {
5308                 /* V9 casa/casxa: no offset */
5309 } else if (IS_IMM) { /* immediate */
5310 simm = GET_FIELDs(insn, 19, 31);
5311 if (simm != 0) {
5312 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5313 }
5314 } else { /* register */
5315 rs2 = GET_FIELD(insn, 27, 31);
5316 if (rs2 != 0) {
5317 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5318 }
5319 }
5320 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5321                 (xop > 0x17 && xop <= 0x1d) ||
5322 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5323 TCGv cpu_val = gen_dest_gpr(dc, rd);
5324
5325 switch (xop) {
5326 case 0x0: /* ld, V9 lduw, load unsigned word */
5327 gen_address_mask(dc, cpu_addr);
5328 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5329 dc->mem_idx, MO_TEUL | MO_ALIGN);
5330 break;
5331 case 0x1: /* ldub, load unsigned byte */
5332 gen_address_mask(dc, cpu_addr);
5333 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5334 dc->mem_idx, MO_UB);
5335 break;
5336 case 0x2: /* lduh, load unsigned halfword */
5337 gen_address_mask(dc, cpu_addr);
5338 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5339 dc->mem_idx, MO_TEUW | MO_ALIGN);
5340 break;
5341 case 0x3: /* ldd, load double word */
5342 if (rd & 1)
5343 goto illegal_insn;
5344 else {
5345 TCGv_i64 t64;
5346
5347 gen_address_mask(dc, cpu_addr);
5348 t64 = tcg_temp_new_i64();
5349 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5350 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5351 tcg_gen_trunc_i64_tl(cpu_val, t64);
5352 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5353 gen_store_gpr(dc, rd + 1, cpu_val);
5354 tcg_gen_shri_i64(t64, t64, 32);
5355 tcg_gen_trunc_i64_tl(cpu_val, t64);
5356 tcg_gen_ext32u_tl(cpu_val, cpu_val);
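                        /*
                         * cpu_val now holds the word from the lower address;
                         * the shared gen_store_gpr(dc, rd, cpu_val) after the
                         * switch writes it to the even register, while the odd
                         * register rd + 1 already received the word from
                         * addr + 4 above.
                         */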
5357 }
5358 break;
5359 case 0x9: /* ldsb, load signed byte */
5360 gen_address_mask(dc, cpu_addr);
5361 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
5362 break;
5363 case 0xa: /* ldsh, load signed halfword */
5364 gen_address_mask(dc, cpu_addr);
5365 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5366 dc->mem_idx, MO_TESW | MO_ALIGN);
5367 break;
5368 case 0xd: /* ldstub */
5369 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5370 break;
5371 case 0x0f:
5372                     /* swap: atomically exchange a register with memory */
5373 cpu_src1 = gen_load_gpr(dc, rd);
5374 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5375 dc->mem_idx, MO_TEUL);
5376 break;
5377 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5378 case 0x10: /* lda, V9 lduwa, load word alternate */
5379 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5380 break;
5381 case 0x11: /* lduba, load unsigned byte alternate */
5382 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5383 break;
5384 case 0x12: /* lduha, load unsigned halfword alternate */
5385 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5386 break;
5387 case 0x13: /* ldda, load double word alternate */
5388 if (rd & 1) {
5389 goto illegal_insn;
5390 }
5391 gen_ldda_asi(dc, cpu_addr, insn, rd);
5392 goto skip_move;
5393 case 0x19: /* ldsba, load signed byte alternate */
5394 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5395 break;
5396 case 0x1a: /* ldsha, load signed halfword alternate */
5397 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5398 break;
5399                 case 0x1d: /* ldstuba -- XXX: should be atomic */
5400 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5401 break;
5402                 case 0x1f: /* swapa: atomically exchange a register
5403                                with alternate-space memory */
5404 cpu_src1 = gen_load_gpr(dc, rd);
5405 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5406 break;
5407 #endif
5408 #ifdef TARGET_SPARC64
5409 case 0x08: /* V9 ldsw */
5410 gen_address_mask(dc, cpu_addr);
5411 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5412 dc->mem_idx, MO_TESL | MO_ALIGN);
5413 break;
5414 case 0x0b: /* V9 ldx */
5415 gen_address_mask(dc, cpu_addr);
5416 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5417 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5418 break;
5419 case 0x18: /* V9 ldswa */
5420 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5421 break;
5422 case 0x1b: /* V9 ldxa */
5423 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5424 break;
5425 case 0x2d: /* V9 prefetch, no effect */
5426 goto skip_move;
5427 case 0x30: /* V9 ldfa */
5428 if (gen_trap_ifnofpu(dc)) {
5429 goto jmp_insn;
5430 }
5431 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5432 gen_update_fprs_dirty(dc, rd);
5433 goto skip_move;
5434 case 0x33: /* V9 lddfa */
5435 if (gen_trap_ifnofpu(dc)) {
5436 goto jmp_insn;
5437 }
5438 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5439 gen_update_fprs_dirty(dc, DFPREG(rd));
5440 goto skip_move;
5441 case 0x3d: /* V9 prefetcha, no effect */
5442 goto skip_move;
5443 case 0x32: /* V9 ldqfa */
5444 CHECK_FPU_FEATURE(dc, FLOAT128);
5445 if (gen_trap_ifnofpu(dc)) {
5446 goto jmp_insn;
5447 }
5448 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5449 gen_update_fprs_dirty(dc, QFPREG(rd));
5450 goto skip_move;
5451 #endif
5452 default:
5453 goto illegal_insn;
5454 }
5455 gen_store_gpr(dc, rd, cpu_val);
5456 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5457 skip_move: ;
5458 #endif
5459 } else if (xop >= 0x20 && xop < 0x24) {
5460 if (gen_trap_ifnofpu(dc)) {
5461 goto jmp_insn;
5462 }
5463 switch (xop) {
5464 case 0x20: /* ldf, load fpreg */
5465 gen_address_mask(dc, cpu_addr);
5466 cpu_dst_32 = gen_dest_fpr_F(dc);
5467 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5468 dc->mem_idx, MO_TEUL | MO_ALIGN);
5469 gen_store_fpr_F(dc, rd, cpu_dst_32);
5470 break;
5471 case 0x21: /* ldfsr, V9 ldxfsr */
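                    /*
                     * rd == 1 selects the 64-bit V9 ldxfsr; any other rd
                     * falls through to the 32-bit ldfsr form.
                     */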
5472 #ifdef TARGET_SPARC64
5473 gen_address_mask(dc, cpu_addr);
5474 if (rd == 1) {
5475 TCGv_i64 t64 = tcg_temp_new_i64();
5476 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5477 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5478 gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
5479 break;
5480 }
5481 #endif
5482 cpu_dst_32 = tcg_temp_new_i32();
5483 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5484 dc->mem_idx, MO_TEUL | MO_ALIGN);
5485 gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
5486 break;
5487 case 0x22: /* ldqf, load quad fpreg */
5488 CHECK_FPU_FEATURE(dc, FLOAT128);
5489 gen_address_mask(dc, cpu_addr);
5490 cpu_src1_64 = tcg_temp_new_i64();
5491 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5492 MO_TEUQ | MO_ALIGN_4);
5493 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5494 cpu_src2_64 = tcg_temp_new_i64();
5495 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5496 MO_TEUQ | MO_ALIGN_4);
5497 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5498 break;
5499 case 0x23: /* lddf, load double fpreg */
5500 gen_address_mask(dc, cpu_addr);
5501 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5502 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5503 MO_TEUQ | MO_ALIGN_4);
5504 gen_store_fpr_D(dc, rd, cpu_dst_64);
5505 break;
5506 default:
5507 goto illegal_insn;
5508 }
5509 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5510 xop == 0xe || xop == 0x1e) {
5511 TCGv cpu_val = gen_load_gpr(dc, rd);
5512
5513 switch (xop) {
5514 case 0x4: /* st, store word */
5515 gen_address_mask(dc, cpu_addr);
5516 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5517 dc->mem_idx, MO_TEUL | MO_ALIGN);
5518 break;
5519 case 0x5: /* stb, store byte */
5520 gen_address_mask(dc, cpu_addr);
5521 tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
5522 break;
5523 case 0x6: /* sth, store halfword */
5524 gen_address_mask(dc, cpu_addr);
5525 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5526 dc->mem_idx, MO_TEUW | MO_ALIGN);
5527 break;
5528 case 0x7: /* std, store double word */
5529 if (rd & 1)
5530 goto illegal_insn;
5531 else {
5532 TCGv_i64 t64;
5533 TCGv lo;
5534
5535 gen_address_mask(dc, cpu_addr);
5536 lo = gen_load_gpr(dc, rd + 1);
5537 t64 = tcg_temp_new_i64();
5538 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5539 tcg_gen_qemu_st_i64(t64, cpu_addr,
5540 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5541 }
5542 break;
5543 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5544 case 0x14: /* sta, V9 stwa, store word alternate */
5545 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5546 break;
5547 case 0x15: /* stba, store byte alternate */
5548 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5549 break;
5550 case 0x16: /* stha, store halfword alternate */
5551 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5552 break;
5553 case 0x17: /* stda, store double word alternate */
5554 if (rd & 1) {
5555 goto illegal_insn;
5556 }
5557 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5558 break;
5559 #endif
5560 #ifdef TARGET_SPARC64
5561 case 0x0e: /* V9 stx */
5562 gen_address_mask(dc, cpu_addr);
5563 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5564 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5565 break;
5566 case 0x1e: /* V9 stxa */
5567 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5568 break;
5569 #endif
5570 default:
5571 goto illegal_insn;
5572 }
5573 } else if (xop > 0x23 && xop < 0x28) {
5574 if (gen_trap_ifnofpu(dc)) {
5575 goto jmp_insn;
5576 }
5577 switch (xop) {
5578 case 0x24: /* stf, store fpreg */
5579 gen_address_mask(dc, cpu_addr);
5580 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5581 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5582 dc->mem_idx, MO_TEUL | MO_ALIGN);
5583 break;
5584 case 0x25: /* stfsr, V9 stxfsr */
5585 {
5586 #ifdef TARGET_SPARC64
5587 gen_address_mask(dc, cpu_addr);
5588 if (rd == 1) {
5589 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5590 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5591 break;
5592 }
5593 #endif
5594 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5595 dc->mem_idx, MO_TEUL | MO_ALIGN);
5596 }
5597 break;
5598 case 0x26:
5599 #ifdef TARGET_SPARC64
5600 /* V9 stqf, store quad fpreg */
5601 CHECK_FPU_FEATURE(dc, FLOAT128);
5602 gen_address_mask(dc, cpu_addr);
5603 /* ??? While stqf only requires 4-byte alignment, it is
5604 legal for the cpu to signal the unaligned exception.
5605 The OS trap handler is then required to fix it up.
5606                        For QEMU, this avoids having to probe the second page
5607 before performing the first write. */
5608 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5609 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5610 dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
5611 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5612 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5613                     tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5614 dc->mem_idx, MO_TEUQ);
5615 break;
5616 #else /* !TARGET_SPARC64 */
5617 /* stdfq, store floating point queue */
5618 #if defined(CONFIG_USER_ONLY)
5619 goto illegal_insn;
5620 #else
5621 if (!supervisor(dc))
5622 goto priv_insn;
5623 if (gen_trap_ifnofpu(dc)) {
5624 goto jmp_insn;
5625 }
5626 goto nfq_insn;
5627 #endif
5628 #endif
5629 case 0x27: /* stdf, store double fpreg */
5630 gen_address_mask(dc, cpu_addr);
5631 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5632 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5633 MO_TEUQ | MO_ALIGN_4);
5634 break;
5635 default:
5636 goto illegal_insn;
5637 }
5638 } else if (xop > 0x33 && xop < 0x3f) {
5639 switch (xop) {
5640 #ifdef TARGET_SPARC64
5641 case 0x34: /* V9 stfa */
5642 if (gen_trap_ifnofpu(dc)) {
5643 goto jmp_insn;
5644 }
5645 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5646 break;
5647 case 0x36: /* V9 stqfa */
5648 {
5649 CHECK_FPU_FEATURE(dc, FLOAT128);
5650 if (gen_trap_ifnofpu(dc)) {
5651 goto jmp_insn;
5652 }
5653 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5654 }
5655 break;
5656 case 0x37: /* V9 stdfa */
5657 if (gen_trap_ifnofpu(dc)) {
5658 goto jmp_insn;
5659 }
5660 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5661 break;
5662 case 0x3e: /* V9 casxa */
5663 rs2 = GET_FIELD(insn, 27, 31);
5664 cpu_src2 = gen_load_gpr(dc, rs2);
5665 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5666 break;
5667 #endif
5668 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5669 case 0x3c: /* V9 or LEON3 casa */
5670 #ifndef TARGET_SPARC64
5671 CHECK_IU_FEATURE(dc, CASA);
5672 #endif
5673 rs2 = GET_FIELD(insn, 27, 31);
5674 cpu_src2 = gen_load_gpr(dc, rs2);
5675 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5676 break;
5677 #endif
5678 default:
5679 goto illegal_insn;
5680 }
5681 } else {
5682 goto illegal_insn;
5683 }
5684 }
5685 break;
5686 }
5687 advance_pc(dc);
5688 jmp_insn:
5689 return;
5690 illegal_insn:
5691 gen_exception(dc, TT_ILL_INSN);
5692 return;
5693 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5694 priv_insn:
5695 gen_exception(dc, TT_PRIV_INSN);
5696 return;
5697 #endif
5698 nfpu_insn:
5699 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5700 return;
5701 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5702 nfq_insn:
5703 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5704 return;
5705 #endif
5706 }
5707
5708 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5709 {
5710 DisasContext *dc = container_of(dcbase, DisasContext, base);
5711 CPUSPARCState *env = cpu_env(cs);
5712 int bound;
5713
5714 dc->pc = dc->base.pc_first;
5715 dc->npc = (target_ulong)dc->base.tb->cs_base;
5716 dc->cc_op = CC_OP_DYNAMIC;
5717 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5718 dc->def = &env->def;
5719 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5720 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5721 #ifndef CONFIG_USER_ONLY
5722 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5723 #endif
5724 #ifdef TARGET_SPARC64
5725 dc->fprs_dirty = 0;
5726 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5727 #ifndef CONFIG_USER_ONLY
5728 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5729 #endif
5730 #endif
5731 /*
5732      * If we reach a page boundary, we stop generation so that the
5733      * PC of a TT_TFAULT exception is always in the right page.
5734 */
5735 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5736 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5737 }
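
/*
 * Worked example for the bound computation above, assuming 4 KiB pages
 * (TARGET_PAGE_MASK == ~0xfff): with pc_first == 0x40000ff8,
 * pc_first | TARGET_PAGE_MASK is -8 as a signed value, so
 * -(pc_first | TARGET_PAGE_MASK) / 4 == 2, exactly the two 4-byte
 * instruction slots left before the page boundary.
 */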
5738
5739 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5740 {
5741 }
5742
5743 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5744 {
5745 DisasContext *dc = container_of(dcbase, DisasContext, base);
5746 target_ulong npc = dc->npc;
5747
5748 if (npc & 3) {
5749 switch (npc) {
5750 case JUMP_PC:
5751 assert(dc->jump_pc[1] == dc->pc + 4);
5752 npc = dc->jump_pc[0] | JUMP_PC;
5753 break;
5754 case DYNAMIC_PC:
5755 case DYNAMIC_PC_LOOKUP:
5756 npc = DYNAMIC_PC;
5757 break;
5758 default:
5759 g_assert_not_reached();
5760 }
5761 }
5762 tcg_gen_insn_start(dc->pc, npc);
5763 }
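
/*
 * Real pc/npc values are 4-byte aligned, so a nonzero (npc & 3) can
 * only be one of the sentinel tags.  JUMP_PC additionally carries the
 * taken-branch target in the aligned upper bits: that is what
 * "dc->jump_pc[0] | JUMP_PC" packs above and what
 * sparc_restore_state_to_opc() unpacks again with "npc & ~3".
 */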
5764
5765 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5766 {
5767 DisasContext *dc = container_of(dcbase, DisasContext, base);
5768 CPUSPARCState *env = cpu_env(cs);
5769 unsigned int insn;
5770
5771 insn = translator_ldl(env, &dc->base, dc->pc);
5772 dc->base.pc_next += 4;
5773
5774 if (!decode(dc, insn)) {
5775 disas_sparc_legacy(dc, insn);
5776 }
5777
5778 if (dc->base.is_jmp == DISAS_NORETURN) {
5779 return;
5780 }
5781 if (dc->pc != dc->base.pc_next) {
5782 dc->base.is_jmp = DISAS_TOO_MANY;
5783 }
5784 }
5785
5786 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5787 {
5788 DisasContext *dc = container_of(dcbase, DisasContext, base);
5789 DisasDelayException *e, *e_next;
5790 bool may_lookup;
5791
5792 switch (dc->base.is_jmp) {
5793 case DISAS_NEXT:
5794 case DISAS_TOO_MANY:
5795 if (((dc->pc | dc->npc) & 3) == 0) {
5796 /* static PC and NPC: we can use direct chaining */
5797 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5798 break;
5799 }
5800
5801 may_lookup = true;
5802 if (dc->pc & 3) {
5803 switch (dc->pc) {
5804 case DYNAMIC_PC_LOOKUP:
5805 break;
5806 case DYNAMIC_PC:
5807 may_lookup = false;
5808 break;
5809 default:
5810 g_assert_not_reached();
5811 }
5812 } else {
5813 tcg_gen_movi_tl(cpu_pc, dc->pc);
5814 }
5815
5816 if (dc->npc & 3) {
5817 switch (dc->npc) {
5818 case JUMP_PC:
5819 gen_generic_branch(dc);
5820 break;
5821 case DYNAMIC_PC:
5822 may_lookup = false;
5823 break;
5824 case DYNAMIC_PC_LOOKUP:
5825 break;
5826 default:
5827 g_assert_not_reached();
5828 }
5829 } else {
5830 tcg_gen_movi_tl(cpu_npc, dc->npc);
5831 }
5832 if (may_lookup) {
5833 tcg_gen_lookup_and_goto_ptr();
5834 } else {
5835 tcg_gen_exit_tb(NULL, 0);
5836 }
5837 break;
5838
5839 case DISAS_NORETURN:
5840 break;
5841
5842 case DISAS_EXIT:
5843 /* Exit TB */
5844 save_state(dc);
5845 tcg_gen_exit_tb(NULL, 0);
5846 break;
5847
5848 default:
5849 g_assert_not_reached();
5850 }
5851
5852 for (e = dc->delay_excp_list; e ; e = e_next) {
5853 gen_set_label(e->lab);
5854
5855 tcg_gen_movi_tl(cpu_pc, e->pc);
5856 if (e->npc % 4 == 0) {
5857 tcg_gen_movi_tl(cpu_npc, e->npc);
5858 }
5859 gen_helper_raise_exception(tcg_env, e->excp);
5860
5861 e_next = e->next;
5862 g_free(e);
5863 }
5864 }
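
/*
 * The trailing loop above emits the out-of-line stubs for delayed
 * exceptions: each DisasDelayException records pc/npc and a label at
 * translation time, so only paths that actually branch to e->lab pay
 * for the exception.  An npc carrying a sentinel tag (e->npc % 4 != 0)
 * is skipped because cpu_npc was already written dynamically.
 */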
5865
5866 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5867 CPUState *cpu, FILE *logfile)
5868 {
5869 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5870 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5871 }
5872
5873 static const TranslatorOps sparc_tr_ops = {
5874 .init_disas_context = sparc_tr_init_disas_context,
5875 .tb_start = sparc_tr_tb_start,
5876 .insn_start = sparc_tr_insn_start,
5877 .translate_insn = sparc_tr_translate_insn,
5878 .tb_stop = sparc_tr_tb_stop,
5879 .disas_log = sparc_tr_disas_log,
5880 };
5881
5882 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5883 target_ulong pc, void *host_pc)
5884 {
5885 DisasContext dc = {};
5886
5887 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5888 }
5889
5890 void sparc_tcg_init(void)
5891 {
5892 static const char gregnames[32][4] = {
5893 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5894 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5895 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5896 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5897 };
5898 static const char fregnames[32][4] = {
5899 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5900 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5901 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5902 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5903 };
5904
5905 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5906 #ifdef TARGET_SPARC64
5907 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5908 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5909 #endif
5910 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5911 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5912 };
5913
5914 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5915 #ifdef TARGET_SPARC64
5916 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5917 #endif
5918 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5919 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5920 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5921 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5922 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5923 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5924 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5925 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5926 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5927 };
5928
5929 unsigned int i;
5930
5931 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5932 offsetof(CPUSPARCState, regwptr),
5933 "regwptr");
5934
5935 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5936 *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5937 }
5938
5939 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5940 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5941 }
5942
5943 cpu_regs[0] = NULL;
5944 for (i = 1; i < 8; ++i) {
5945 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5946 offsetof(CPUSPARCState, gregs[i]),
5947 gregnames[i]);
5948 }
5949
5950 for (i = 8; i < 32; ++i) {
5951 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5952 (i - 8) * sizeof(target_ulong),
5953 gregnames[i]);
5954 }
5955
5956 for (i = 0; i < TARGET_DPREGS; i++) {
5957 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5958 offsetof(CPUSPARCState, fpr[i]),
5959 fregnames[i]);
5960 }
5961 }
5962
5963 void sparc_restore_state_to_opc(CPUState *cs,
5964 const TranslationBlock *tb,
5965 const uint64_t *data)
5966 {
5967 SPARCCPU *cpu = SPARC_CPU(cs);
5968 CPUSPARCState *env = &cpu->env;
5969 target_ulong pc = data[0];
5970 target_ulong npc = data[1];
5971
5972 env->pc = pc;
5973 if (npc == DYNAMIC_PC) {
5974 /* dynamic NPC: already stored */
5975 } else if (npc & JUMP_PC) {
5976 /* jump PC: use 'cond' and the jump targets of the translation */
5977 if (env->cond) {
5978 env->npc = npc & ~3;
5979 } else {
5980 env->npc = pc + 4;
5981 }
5982 } else {
5983 env->npc = npc;
5984 }
5985 }
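
/*
 * Example of the JUMP_PC case above: a TB ending in a conditional
 * branch whose outcome was still unknown at translation time records
 * (pc, jump_pc[0] | JUMP_PC).  On restore, env->cond, set by the
 * compare that fed the branch, picks between the taken target
 * (npc & ~3) and the fall-through pc + 4, reproducing the choice
 * gen_generic_branch() makes at runtime.
 */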