/*
 * target/sparc/translate.c — SPARC instruction translation
 * (history point: "target/sparc: Move ADDC to decodetree")
 */
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28
29 #include "exec/helper-gen.h"
30
31 #include "exec/translator.h"
32 #include "exec/log.h"
33 #include "asi.h"
34
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef HELPER_H
38
/*
 * Helpers that exist for only one word size are stubbed out for the
 * other configuration, so that a use from shared code fails at compile
 * time (qemu_build_not_reached()) rather than at link time.
 */
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
/* These helpers exist only on TARGET_SPARC64. */
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
# define MAXTL_MASK 0
#endif
62
/*
 * Marker values stored in dc->pc / dc->npc in place of a real address.
 * Real instruction addresses are 4-byte aligned, so the low two bits
 * (tested as "npc & 3" below) distinguish markers from addresses.
 */
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
/* Operands/result latched for lazy condition-code evaluation. */
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
/* Poison sparc64-only globals on sparc32: evaluating them is a build error. */
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

/* offsetof() wrappers that poison fields valid only for the other word size */
#define env_field_offsetof(X) offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X) env_field_offsetof(X)
#else
# define env32_field_offsetof(X) env_field_offsetof(X)
# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
#endif
100
/*
 * An exception to be raised on an out-of-line path (see delay_exceptionv):
 * the code branches to 'lab' and the saved pc/npc are restored before
 * raising 'excp'.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;   /* singly-linked list per TB */
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;  /* FPRS bits already set within this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

/* A comparison to be tested: cond(c1, c2); is_bool marks a 0/1 value in c1. */
typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;
139
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Signed variants of the field extractors. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

/*
 * Map a double/quad FP register number from its instruction encoding.
 * On sparc64 bit 0 of the field encodes bit 5 of the register number
 * (registers 32..63); on sparc32 the low bit(s) are simply masked off.
 */
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
161
/*
 * Sign-extend the low LEN bits of X to a full int (1 <= len <= 32).
 *
 * The shift is done on an unsigned value: the original form
 * "(x << len) >> len" left-shifts a signed int, which is undefined
 * behavior when x is negative.  The final arithmetic right shift of a
 * negative signed value is implementation-defined but behaves as an
 * arithmetic shift on all compilers QEMU supports.
 */
static int sign_extend(int x, int len)
{
    int shift = 32 - len;
    return (int32_t)((uint32_t)x << shift) >> shift;
}
167
168 #define IS_IMM (insn & (1<<13))
169
/*
 * Mark FP register rd as modified in cpu_fprs (sparc64 only):
 * bit 1 (DL) for regs < 32, bit 2 (DU) for the upper bank.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
182
183 /* floating point registers moves */
184 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
185 {
186 TCGv_i32 ret = tcg_temp_new_i32();
187 if (src & 1) {
188 tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
189 } else {
190 tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
191 }
192 return ret;
193 }
194
195 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
196 {
197 TCGv_i64 t = tcg_temp_new_i64();
198
199 tcg_gen_extu_i32_i64(t, v);
200 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
201 (dst & 1 ? 0 : 32), 32);
202 gen_update_fprs_dirty(dc, dst);
203 }
204
/* Allocate a destination temporary for a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
209
210 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
211 {
212 src = DFPREG(src);
213 return cpu_fpr[src / 2];
214 }
215
216 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
217 {
218 dst = DFPREG(dst);
219 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
220 gen_update_fprs_dirty(dc, dst);
221 }
222
/* Return the i64 global to write for a double-precision result in DST. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
227
228 static void gen_op_load_fpr_QT0(unsigned int src)
229 {
230 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
231 offsetof(CPU_QuadU, ll.upper));
232 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
233 offsetof(CPU_QuadU, ll.lower));
234 }
235
236 static void gen_op_load_fpr_QT1(unsigned int src)
237 {
238 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
239 offsetof(CPU_QuadU, ll.upper));
240 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
241 offsetof(CPU_QuadU, ll.lower));
242 }
243
244 static void gen_op_store_QT0_fpr(unsigned int dst)
245 {
246 tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
247 offsetof(CPU_QuadU, ll.upper));
248 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
249 offsetof(CPU_QuadU, ll.lower));
250 }
251
252 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
253 TCGv_i64 v1, TCGv_i64 v2)
254 {
255 dst = QFPREG(dst);
256
257 tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
258 tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
259 gen_update_fprs_dirty(dc, dst);
260 }
261
#ifdef TARGET_SPARC64
/* Return the i64 global holding the upper half of quad register SRC. */
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

/* Return the i64 global holding the lower half of quad register SRC. */
static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

/* Copy quad register RS to quad register RD and mark FPRS dirty. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
285
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/*
 * AM_CHECK: whether addresses must be truncated to 32 bits
 * (sparc64 PSTATE.AM; constant-folded where the answer is fixed).
 */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc) false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc) true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc) false
#else
# define AM_CHECK(dc) ((dc)->address_mask_32bit)
#endif
309
310 static void gen_address_mask(DisasContext *dc, TCGv addr)
311 {
312 if (AM_CHECK(dc)) {
313 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
314 }
315 }
316
317 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
318 {
319 return AM_CHECK(dc) ? (uint32_t)addr : addr;
320 }
321
322 static TCGv gen_load_gpr(DisasContext *dc, int reg)
323 {
324 if (reg > 0) {
325 assert(reg < 32);
326 return cpu_regs[reg];
327 } else {
328 TCGv t = tcg_temp_new();
329 tcg_gen_movi_tl(t, 0);
330 return t;
331 }
332 }
333
334 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
335 {
336 if (reg > 0) {
337 assert(reg < 32);
338 tcg_gen_mov_tl(cpu_regs[reg], v);
339 }
340 }
341
342 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
343 {
344 if (reg > 0) {
345 assert(reg < 32);
346 return cpu_regs[reg];
347 } else {
348 return tcg_temp_new();
349 }
350 }
351
352 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
353 {
354 return translator_use_goto_tb(&s->base, pc) &&
355 translator_use_goto_tb(&s->base, npc);
356 }
357
358 static void gen_goto_tb(DisasContext *s, int tb_num,
359 target_ulong pc, target_ulong npc)
360 {
361 if (use_goto_tb(s, pc, npc)) {
362 /* jump to same page: we can use a direct jump */
363 tcg_gen_goto_tb(tb_num);
364 tcg_gen_movi_tl(cpu_pc, pc);
365 tcg_gen_movi_tl(cpu_npc, npc);
366 tcg_gen_exit_tb(s->base.tb, tb_num);
367 } else {
368 /* jump to another page: we can use an indirect jump */
369 tcg_gen_movi_tl(cpu_pc, pc);
370 tcg_gen_movi_tl(cpu_npc, npc);
371 tcg_gen_lookup_and_goto_ptr();
372 }
373 }
374
// XXX suboptimal
/* Extract individual PSR flag bits (N/Z/V/C) into a target-long 0/1. */
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
399
/*
 * dst = src1 + src2, latching operands and result into the lazy
 * condition-code registers (the flags are computed on demand).
 */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/* Recover the 32-bit carry out of a previously latched CC_OP_ADD. */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

/* Recover the 32-bit borrow out of a previously latched CC_OP_SUB. */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

/*
 * dst = src1 + src2 + carry_32, optionally latching operands for a
 * subsequent CC computation (result must already be cpu_cc_dst then).
 */
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    /* On sparc32, TCGv is TCGv_i32, so no widening is needed. */
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

/* ADDC where the carry-in comes from a previous CC_OP_ADD. */
static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

/* ADDC where the carry-in comes from a previous CC_OP_SUB (borrow). */
static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

/* ADDC when the previous CC state is unknown: ask the helper for C. */
static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}
532
/*
 * dst = src1 - src2, latching operands and result into the lazy
 * condition-code registers.
 */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/*
 * SUBX/SUBXcc: dst = src1 - src2 - C, where the carry-in is recovered
 * from the current lazy CC state (dc->cc_op) as cheaply as possible.
 */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, tcg_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
605
/*
 * MULScc: one step of the V8 iterative multiply.  Shifts Y right by
 * one (capturing the low bit of src1), conditionally zeroes the
 * addend when Y bit 0 was clear, and adds with (N^V) shifted into
 * the top bit of src1.  Result and operands are latched for CC.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/*
 * 32x32 -> 64 multiply: low 32 bits in dst, high 32 bits in %y.
 * sign_ext selects signed vs unsigned extension of the operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
681
/*
 * Evaluate an integer condition code from PSR bits into a 0/1 value
 * in DST.  The comment above each function gives the boolean formula
 * in terms of N/Z/V/C.
 */

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

/* The negated forms compute the base condition, then xor with 1. */

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
795
/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
/* Extract FCC0 / FCC1 of the condition field at fcc_offset into a 0/1. */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/*
 * Evaluate an FP condition into a 0/1 in DST.  The comment above each
 * function lists the FCC encodings (see table above) it accepts.
 */

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
936
/*
 * Emit a two-way TB exit: continue at PC1 if r_cond is non-zero,
 * otherwise at PC2 (each with npc = pc + 4).
 */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
949
950 static void gen_generic_branch(DisasContext *dc)
951 {
952 TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
953 TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
954 TCGv zero = tcg_constant_tl(0);
955
956 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
957 }
958
959 /* call this function before using the condition register as it may
960 have been set for a jump */
961 static void flush_cond(DisasContext *dc)
962 {
963 if (dc->npc == JUMP_PC) {
964 gen_generic_branch(dc);
965 dc->npc = DYNAMIC_PC_LOOKUP;
966 }
967 }
968
/*
 * Make cpu_npc valid: materialize a static npc, resolve JUMP_PC, or
 * leave an already-dynamic npc alone.  (Low two bits set means a
 * marker value rather than a real address.)
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the correct value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
987
988 static void update_psr(DisasContext *dc)
989 {
990 if (dc->cc_op != CC_OP_FLAGS) {
991 dc->cc_op = CC_OP_FLAGS;
992 gen_helper_compute_psr(tcg_env);
993 }
994 }
995
/* Write the translator's pc/npc state out to the cpu globals. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

/* Raise exception WHICH at the current insn; the TB does not return. */
static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1008
1009 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1010 {
1011 DisasDelayException *e = g_new0(DisasDelayException, 1);
1012
1013 e->next = dc->delay_excp_list;
1014 dc->delay_excp_list = e;
1015
1016 e->lab = gen_new_label();
1017 e->excp = excp;
1018 e->pc = dc->pc;
1019 /* Caller must have used flush_cond before branch. */
1020 assert(e->npc != JUMP_PC);
1021 e->npc = dc->npc;
1022
1023 return e->lab;
1024 }
1025
/* Convenience wrapper: delayed exception with a constant trap number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

/* Branch to a delayed TT_UNALIGNED trap if (addr & mask) != 0. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

/* Advance pc to npc (for delayed control transfers), resolving any
   dynamic npc state into cpu_pc. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

/* pc = npc; npc += 4 — the normal sequential advance. */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1070
/*
 * Fill *cmp with a comparison equivalent to integer condition COND
 * (branch encoding) on the icc (or xcc, if xcc is set) flags.  Where
 * the lazy CC state permits, the comparison is expressed directly on
 * the latched operands; otherwise the flags are materialized and a
 * boolean is computed.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* For CC_OP_SUB, compare the latched operands directly. */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* For CC_OP_LOGIC, C and V are zero: compare the result vs 0. */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc on sparc64: compare only the low 32 bits. */
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1238
/*
 * Fill in *CMP for the floating-point condition COND (the 4-bit cond
 * field of FBfcc/FMOVcc), evaluated on the fcc field selected by CC
 * (0 = fcc0 ... 3 = fcc3) within cpu_fsr.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Bit offset of the selected fcc field, relative to fcc0;
       fcc1/2/3 live 22/24/26 bits above it (hence the "- 10").  */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    /* Each eval helper sets r_dst to 0/1 for the given fp condition.  */
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1317
/*
 * TCG conditions for the register-against-zero conditions, indexed by
 * the 3-bit rcond field.  NOTE: the table stores each condition
 * INVERTED; gen_compare_reg() applies tcg_invert_cond() to recover
 * the real test.
 */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE, /* inverse of EQ:  rs == 0 */
    TCG_COND_GT, /* inverse of LE:  rs <= 0 */
    TCG_COND_GE, /* inverse of LT:  rs <  0 */
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ, /* inverse of NE:  rs != 0 */
    TCG_COND_LE, /* inverse of GT:  rs >  0 */
    TCG_COND_LT, /* inverse of GE:  rs >= 0 */
};
1329
1330 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1331 {
1332 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1333 cmp->is_bool = false;
1334 cmp->c1 = r_src;
1335 cmp->c2 = tcg_constant_tl(0);
1336 }
1337
#ifdef TARGET_SPARC64
/*
 * Float compare code generation.  Sparc64 has four fcc fields; each
 * gen_op_fcmp* dispatches on FCCNO (0..3) to the helper that writes
 * the matching fcc field of cpu_fsr.  The "e" variants correspond to
 * the FCMPE* (signalling) instructions.
 */

/* Compare two 32-bit float registers; result goes to fcc[fccno].  */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Compare two 64-bit float registers; result goes to fcc[fccno].  */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Compare quad operands previously loaded into the QT0/QT1 env slots.  */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

/* Signalling compare of two 32-bit float registers.  */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Signalling compare of two 64-bit float registers.  */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

/* Signalling compare of quad operands in QT0/QT1.  */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

/* Pre-v9 cpus have only fcc0, so FCCNO is ignored in these stubs.  */

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif
1479
/*
 * Raise a TT_FP_EXCP trap, first recording FSR_FLAGS in the FSR
 * (the previous ftt bits are cleared via FSR_FTT_NMASK).
 */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1486
1487 static int gen_trap_ifnofpu(DisasContext *dc)
1488 {
1489 #if !defined(CONFIG_USER_ONLY)
1490 if (!dc->fpu_enabled) {
1491 gen_exception(dc, TT_NFPU_INSN);
1492 return 1;
1493 }
1494 #endif
1495 return 0;
1496 }
1497
/* Clear the FSR ftt and current-exception (cexc) fields, per the
   FSR_FTT_CEXC_NMASK mask, before a new fp operation. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1502
1503 static void gen_fop_FF(DisasContext *dc, int rd, int rs,
1504 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1505 {
1506 TCGv_i32 dst, src;
1507
1508 src = gen_load_fpr_F(dc, rs);
1509 dst = gen_dest_fpr_F(dc);
1510
1511 gen(dst, tcg_env, src);
1512 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1513
1514 gen_store_fpr_F(dc, rd, dst);
1515 }
1516
1517 static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1518 void (*gen)(TCGv_i32, TCGv_i32))
1519 {
1520 TCGv_i32 dst, src;
1521
1522 src = gen_load_fpr_F(dc, rs);
1523 dst = gen_dest_fpr_F(dc);
1524
1525 gen(dst, src);
1526
1527 gen_store_fpr_F(dc, rd, dst);
1528 }
1529
1530 static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1531 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1532 {
1533 TCGv_i32 dst, src1, src2;
1534
1535 src1 = gen_load_fpr_F(dc, rs1);
1536 src2 = gen_load_fpr_F(dc, rs2);
1537 dst = gen_dest_fpr_F(dc);
1538
1539 gen(dst, tcg_env, src1, src2);
1540 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1541
1542 gen_store_fpr_F(dc, rd, dst);
1543 }
1544
#ifdef TARGET_SPARC64
/* rd = gen(rs1, rs2): 32-bit float binary op with no exception check. */
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 lhs = gen_load_fpr_F(dc, rs1);
    TCGv_i32 rhs = gen_load_fpr_F(dc, rs2);
    TCGv_i32 dst = gen_dest_fpr_F(dc);

    gen(dst, lhs, rhs);
    gen_store_fpr_F(dc, rd, dst);
}
#endif
1560
1561 static void gen_fop_DD(DisasContext *dc, int rd, int rs,
1562 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1563 {
1564 TCGv_i64 dst, src;
1565
1566 src = gen_load_fpr_D(dc, rs);
1567 dst = gen_dest_fpr_D(dc, rd);
1568
1569 gen(dst, tcg_env, src);
1570 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1571
1572 gen_store_fpr_D(dc, rd, dst);
1573 }
1574
1575 #ifdef TARGET_SPARC64
1576 static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1577 void (*gen)(TCGv_i64, TCGv_i64))
1578 {
1579 TCGv_i64 dst, src;
1580
1581 src = gen_load_fpr_D(dc, rs);
1582 dst = gen_dest_fpr_D(dc, rd);
1583
1584 gen(dst, src);
1585
1586 gen_store_fpr_D(dc, rd, dst);
1587 }
1588 #endif
1589
1590 static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1591 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1592 {
1593 TCGv_i64 dst, src1, src2;
1594
1595 src1 = gen_load_fpr_D(dc, rs1);
1596 src2 = gen_load_fpr_D(dc, rs2);
1597 dst = gen_dest_fpr_D(dc, rd);
1598
1599 gen(dst, tcg_env, src1, src2);
1600 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1601
1602 gen_store_fpr_D(dc, rd, dst);
1603 }
1604
#ifdef TARGET_SPARC64
/* rd = gen(rs1, rs2): 64-bit binary op with no exception checking. */
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs = gen_load_fpr_D(dc, rs1);
    TCGv_i64 rhs = gen_load_fpr_D(dc, rs2);
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);

    gen(dst, lhs, rhs);
    gen_store_fpr_D(dc, rd, dst);
}

/* rd = gen(gsr, rs1, rs2): 64-bit binary op that also reads cpu_gsr. */
static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs = gen_load_fpr_D(dc, rs1);
    TCGv_i64 rhs = gen_load_fpr_D(dc, rs2);
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, lhs, rhs);
    gen_store_fpr_D(dc, rd, dst);
}

/* rd = gen(rd, rs1, rs2): three-source op that reads the old rd too. */
static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 lhs = gen_load_fpr_D(dc, rs1);
    TCGv_i64 rhs = gen_load_fpr_D(dc, rs2);
    TCGv_i64 acc = gen_load_fpr_D(dc, rd);
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);

    gen(dst, acc, lhs, rhs);
    gen_store_fpr_D(dc, rd, dst);
}
#endif
1649
/*
 * Quad (128-bit) ops pass operands through the QT0/QT1 env slots
 * rather than TCG values: load the source quad register(s), run the
 * helper, then store QT0 back to the destination quad register and
 * mark it dirty.  This variant includes the IEEE exception check.
 */
static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
/* As gen_fop_QQ, but without the IEEE exception check. */
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

/* Quad binary op: QT0 = helper(QT0 <- rs1, QT1 <- rs2), IEEE checked. */
static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1687
1688 static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1689 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1690 {
1691 TCGv_i64 dst;
1692 TCGv_i32 src1, src2;
1693
1694 src1 = gen_load_fpr_F(dc, rs1);
1695 src2 = gen_load_fpr_F(dc, rs2);
1696 dst = gen_dest_fpr_D(dc, rd);
1697
1698 gen(dst, tcg_env, src1, src2);
1699 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1700
1701 gen_store_fpr_D(dc, rd, dst);
1702 }
1703
1704 static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1705 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1706 {
1707 TCGv_i64 src1, src2;
1708
1709 src1 = gen_load_fpr_D(dc, rs1);
1710 src2 = gen_load_fpr_D(dc, rs2);
1711
1712 gen(tcg_env, src1, src2);
1713 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1714
1715 gen_op_store_QT0_fpr(QFPREG(rd));
1716 gen_update_fprs_dirty(dc, QFPREG(rd));
1717 }
1718
1719 #ifdef TARGET_SPARC64
1720 static void gen_fop_DF(DisasContext *dc, int rd, int rs,
1721 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1722 {
1723 TCGv_i64 dst;
1724 TCGv_i32 src;
1725
1726 src = gen_load_fpr_F(dc, rs);
1727 dst = gen_dest_fpr_D(dc, rd);
1728
1729 gen(dst, tcg_env, src);
1730 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1731
1732 gen_store_fpr_D(dc, rd, dst);
1733 }
1734 #endif
1735
1736 static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1737 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1738 {
1739 TCGv_i64 dst;
1740 TCGv_i32 src;
1741
1742 src = gen_load_fpr_F(dc, rs);
1743 dst = gen_dest_fpr_D(dc, rd);
1744
1745 gen(dst, tcg_env, src);
1746
1747 gen_store_fpr_D(dc, rd, dst);
1748 }
1749
1750 static void gen_fop_FD(DisasContext *dc, int rd, int rs,
1751 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1752 {
1753 TCGv_i32 dst;
1754 TCGv_i64 src;
1755
1756 src = gen_load_fpr_D(dc, rs);
1757 dst = gen_dest_fpr_F(dc);
1758
1759 gen(dst, tcg_env, src);
1760 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1761
1762 gen_store_fpr_F(dc, rd, dst);
1763 }
1764
1765 static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1766 void (*gen)(TCGv_i32, TCGv_ptr))
1767 {
1768 TCGv_i32 dst;
1769
1770 gen_op_load_fpr_QT1(QFPREG(rs));
1771 dst = gen_dest_fpr_F(dc);
1772
1773 gen(dst, tcg_env);
1774 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1775
1776 gen_store_fpr_F(dc, rd, dst);
1777 }
1778
1779 static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1780 void (*gen)(TCGv_i64, TCGv_ptr))
1781 {
1782 TCGv_i64 dst;
1783
1784 gen_op_load_fpr_QT1(QFPREG(rs));
1785 dst = gen_dest_fpr_D(dc, rd);
1786
1787 gen(dst, tcg_env);
1788 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1789
1790 gen_store_fpr_D(dc, rd, dst);
1791 }
1792
1793 static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1794 void (*gen)(TCGv_ptr, TCGv_i32))
1795 {
1796 TCGv_i32 src;
1797
1798 src = gen_load_fpr_F(dc, rs);
1799
1800 gen(tcg_env, src);
1801
1802 gen_op_store_QT0_fpr(QFPREG(rd));
1803 gen_update_fprs_dirty(dc, QFPREG(rd));
1804 }
1805
1806 static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1807 void (*gen)(TCGv_ptr, TCGv_i64))
1808 {
1809 TCGv_i64 src;
1810
1811 src = gen_load_fpr_D(dc, rs);
1812
1813 gen(tcg_env, src);
1814
1815 gen_op_store_QT0_fpr(QFPREG(rd));
1816 gen_update_fprs_dirty(dc, QFPREG(rd));
1817 }
1818
/* Atomic exchange: DST <- mem[ADDR], mem[ADDR] <- SRC, aligned,
   using mmu index MMU_IDX.  Used by the SWAP/SWAPA insns. */
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}
1825
1826 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
1827 {
1828 TCGv m1 = tcg_constant_tl(0xff);
1829 gen_address_mask(dc, addr);
1830 tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
1831 }
1832
/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* How an ASI access should be implemented, as classified by get_asi(). */
typedef enum {
    GET_ASI_HELPER,   /* no direct translation; call the ld/st_asi helper */
    GET_ASI_EXCP,     /* an exception has already been generated */
    GET_ASI_DIRECT,   /* plain load/store with the chosen mmu index */
    GET_ASI_DTWINX,   /* 16-byte twin/quad access (ldda/stda family) */
    GET_ASI_BLOCK,    /* 64-byte block fp transfer (8 x 8-byte regs) */
    GET_ASI_SHORT,    /* 8- or 16-bit fp load/store (ASI_FL8*/ /*FL16*) */
    GET_ASI_BCOPY,    /* sparc32 block-copy store (ASI_M_BCOPY) */
    GET_ASI_BFILL,    /* sparc32 block-fill store (ASI_M_BFILL) */
} ASIType;

/* A decoded ASI: its classification, the raw asi number, the mmu index
   to use for the access, and the memop (with any ASI-implied byteswap
   already folded in). */
typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;
1852
/*
 * Decode the ASI of the load/store-alternate instruction INSN:
 * classify it (DisasASI.type), select the mmu index the access must
 * use, and fold any ASI-implied endianness flip into MEMOP.  May
 * itself generate an exception, in which case type == GET_ASI_EXCP
 * and the caller must emit nothing further.
 */
static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First pass: pick the mmu index implied by the ASI.  */
        switch (asi) {
        case ASI_REAL:       /* Bypass */
        case ASI_REAL_IO:    /* Bypass, non-cacheable */
        case ASI_REAL_L:     /* Bypass LE */
        case ASI_REAL_IO_L:  /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL: /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second pass: classify the access type.  */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
2065
/*
 * Generate an alternate-space integer load of width MEMOP from ADDR
 * into DST, for the ASI encoded in INSN.
 */
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may fault, so the cpu state must be current.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow to the 32-bit
                   target register width.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
2100
/*
 * Generate an alternate-space integer store of width MEMOP of SRC to
 * ADDR, for the ASI encoded in INSN.
 */
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
#endif
        /* fall through */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may fault, so the cpu state must be current.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; widen the source.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2176
/*
 * SWAPA: atomically exchange the 32-bit word at ADDR with SRC,
 * placing the old memory value in DST.
 */
static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
2194
/*
 * CASA: compare the 32-bit word at ADDR with CMPV and, if equal,
 * store the value of register RD; the old memory value is always
 * written back to RD.
 */
static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop | MO_ALIGN);
        gen_store_gpr(dc, rd, oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
2216
/*
 * LDSTUBA: atomically read the byte at ADDR into DST and set the
 * memory byte to 0xff, through the ASI encoded in INSN.
 */
static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_ldstub(dc, dst, addr, da.mem_idx);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Non-atomic helper sequence below is unsafe when other
               cpus run concurrently; punt to the slow path.  */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2252 #endif
2253
2254 #ifdef TARGET_SPARC64
/*
 * Generate an alternate-space floating-point load into register RD.
 * SIZE is the access width in bytes: 4 (ldfa), 8 (lddfa) or 16 (ldqfa).
 */
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;
    TCGv_i64 d64;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
            gen_store_fpr_F(dc, rd, d32);
            break;
        case 8:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Load into a temp first so the second (possibly faulting)
               load cannot leave rd half-written.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_constant_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case 4:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case 8:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
                break;
            case 16:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_addi_tl(addr, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2363
/*
 * Generate an alternate-space floating-point store from register RD.
 * SIZE is the access width in bytes: 4 (stfa), 8 (stdfa) or 16 (stqfa).
 */
static void gen_stf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
            break;
        case 8:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_16);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_constant_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2446
/*
 * LDDA (64-bit target): load a doubleword -- or, for the DTWINX asis,
 * a 16-byte twin -- from an alternate address space into the register
 * pair rd (high word) / rd+1 (low word).
 */
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv_i64 hi = gen_dest_gpr(dc, rd);
    TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
        /* Two 8-byte loads; only the first checks the 16-byte alignment. */
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks. */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above regarding the LE writeback swap. */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2508
/*
 * STDA (64-bit target): store the register pair HI / rd+1 as a
 * doubleword (or 16-byte twin for DTWINX) to an alternate address space.
 */
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
        /* Two 8-byte stores; only the first checks the 16-byte alignment. */
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction. */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above regarding the LE construction swap. */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2564
/*
 * CASXA: 64-bit atomic compare-and-swap in an alternate address space.
 * Compares memory at ADDR with CMPV; on equality stores r[rd].  The
 * previous memory value is written back to r[rd] in either case.
 */
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop | MO_ALIGN);
        gen_store_gpr(dc, rd, oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
2586
2587 #elif !defined(CONFIG_USER_ONLY)
/*
 * LDDA (32-bit target): load a doubleword from an alternate address
 * space into the register pair rd (high word) / rd+1 (low word).
 */
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged. */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEUQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        /* Unhandled ASI: defer to the runtime helper. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            save_state(dc);
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
        }
        break;
    }

    /* Split the 64-bit result into the two 32-bit destination regs. */
    tcg_gen_extr_i64_i32(lo, hi, t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}
2621
/*
 * STDA (32-bit target): store the register pair HI / rd+1 as one
 * doubleword to an alternate address space.
 */
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR. */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation. */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;
    default:
        /* Unhandled ASI: defer to the runtime helper. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2667 #endif
2668
2669 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2670 {
2671 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2672 return gen_load_gpr(dc, rs1);
2673 }
2674
2675 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2676 {
2677 if (IS_IMM) { /* immediate */
2678 target_long simm = GET_FIELDs(insn, 19, 31);
2679 TCGv t = tcg_temp_new();
2680 tcg_gen_movi_tl(t, simm);
2681 return t;
2682 } else { /* register */
2683 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2684 return gen_load_gpr(dc, rs2);
2685 }
2686 }
2687
2688 #ifdef TARGET_SPARC64
/*
 * FMOVS<cond>: conditionally move single-precision fp register rs to rd,
 * according to the comparison described by CMP.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter. */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        /* c1 already holds a 0/1 comparison result. */
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? rs : rd -- i.e. keep rd if the cond is false. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
}
2714
2715 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2716 {
2717 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2718 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2719 gen_load_fpr_D(dc, rs),
2720 gen_load_fpr_D(dc, rd));
2721 gen_store_fpr_D(dc, rd, dst);
2722 }
2723
/*
 * FMOVQ<cond>: conditionally move quad-precision fp register rs to rd,
 * as two 64-bit conditional moves over the backing cpu_fpr halves.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    /* The destination was written directly; mark it dirty. */
    gen_update_fprs_dirty(dc, qd);
}
2736
/*
 * Compute a pointer to the current trap-state entry,
 * r_tsptr = &env->ts[env->tl & MAXTL_MASK], at runtime.
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
2758
/*
 * VIS EDGE8/16/32[L][CC]: compute the edge mask for a partial store at
 * the boundaries of a WIDTH-bit-element region.  If CC, also set the
 * condition codes from s1 - s2 as the instruction requires.
 */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* The CC variants update the flags as a subcc of the operands. */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    /* lo1/lo2 = table-lookup edge masks for the two addresses. */
    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(lo1, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* Compare the 8-byte-aligned addresses (32-bit masked in AM mode). */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
}
2845
2846 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2847 {
2848 TCGv tmp = tcg_temp_new();
2849
2850 tcg_gen_add_tl(tmp, s1, s2);
2851 tcg_gen_andi_tl(dst, tmp, -8);
2852 if (left) {
2853 tcg_gen_neg_tl(tmp, tmp);
2854 }
2855 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2856 }
2857
/*
 * VIS FALIGNDATA: concatenate s1:s2 and extract the 8 bytes starting
 * at the byte offset held in gsr's low 3 bits.
 */
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Byte offset * 8 = bit shift amount. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1. */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
}
2878 #endif
2879
2880 /* Include the auto-generated decoder. */
2881 #include "decode-insns.c.inc"
2882
/*
 * TRANS expands into the trans_<NAME> hook required by the decodetree
 * output, gating FUNC behind the avail_<AVAIL> feature test.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Feature predicates used as the AVAIL argument of TRANS. */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
#endif
2903
/*
 * Default case for non jump instructions: advance pc/npc by one insn,
 * handling the cases where npc is dynamic or a pending branch decision.
 */
static bool advance_pc(DisasContext *dc)
{
    if (dc->npc & 3) {
        /* npc is one of the special non-aligned sentinel values. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2928
2929 /*
2930 * Major opcodes 00 and 01 -- branches, call, and sethi
2931 */
2932
2933 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2934 {
2935 if (annul) {
2936 dc->pc = dc->npc + 4;
2937 dc->npc = dc->pc + 4;
2938 } else {
2939 dc->pc = dc->npc;
2940 dc->npc = dc->pc + 4;
2941 }
2942 return true;
2943 }
2944
2945 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2946 target_ulong dest)
2947 {
2948 if (annul) {
2949 dc->pc = dest;
2950 dc->npc = dest + 4;
2951 } else {
2952 dc->pc = dc->npc;
2953 dc->npc = dest;
2954 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2955 }
2956 return true;
2957 }
2958
/*
 * Conditional branch to DEST with comparison CMP.  For the annulled
 * form we resolve the branch immediately with two exit TBs; otherwise
 * the decision is deferred into cpu_cond / JUMP_PC state.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /* Taken: branch to dest with the delay slot executed;
           not taken: skip the (annulled) delay slot. */
        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            /* npc is a dynamic sentinel value (see advance_pc). */
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Record both possible targets; the delay-slot insn will
               resolve the branch via cpu_cond (see JUMP_PC handling). */
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;
            if (cmp->is_bool) {
                tcg_gen_mov_tl(cpu_cond, cmp->c1);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
3002
/* Raise a privileged-instruction trap; always "handles" the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
3008
/*
 * Shared body of Bicc / BPcc: integer condition-code branches.
 * Conditions 0 (never) and 8 (always) are special-cased.
 */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)
3029
/*
 * Shared body of FBfcc / FBPfcc: floating-point condition-code
 * branches.  Traps first if the FPU is disabled.
 */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)
3053
/* BPr: v9 branch on the contents of register rs1 compared against zero. */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Reserved condition encodings decode to an illegal insn. */
    if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
        return false;
    }

    flush_cond(dc);
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return advance_jump_cond(dc, &cmp, a->a, target);
}
3070
/* CALL: store the return address into %o7 (r15) and jump to target. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
3080
/* Coprocessor opcodes, which no supported cpu implements. */
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
3094
3095 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
3096 {
3097 /* Special-case %g0 because that's the canonical nop. */
3098 if (a->rd) {
3099 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
3100 }
3101 return advance_pc(dc);
3102 }
3103
3104 /*
3105 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3106 */
3107
/*
 * Shared body of the Tcc trap instructions: conditionally raise a
 * software trap whose number is (rs1 + rs2_or_imm) masked and biased
 * by TT_TRAP.  COND 0 is never, COND 8 is always.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use the wider mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
3158
3159 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
3160 {
3161 if (avail_32(dc) && a->cc) {
3162 return false;
3163 }
3164 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
3165 }
3166
3167 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
3168 {
3169 if (avail_64(dc)) {
3170 return false;
3171 }
3172 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
3173 }
3174
3175 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
3176 {
3177 if (avail_32(dc)) {
3178 return false;
3179 }
3180 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
3181 }
3182
/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
3188
/* MEMBAR (v9 only): memory barrier with mmask/cmask controls. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
3204
/*
 * Common body for read-ASR/PR/HPR insns: when PRIV permits, write the
 * value produced by FUNC to rd and advance; otherwise raise priv trap.
 * FUNC may fill in and return the supplied destination, or return some
 * other TCGv holding the value.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
3214
/* RDY: the %y register is kept live in cpu_y; no copy needed. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
3232
3233 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
3234 {
3235 uint32_t val;
3236
3237 /*
3238 * TODO: There are many more fields to be filled,
3239 * some of which are writable.
3240 */
3241 val = dc->def->nwindows - 1; /* [4:0] NWIN */
3242 val |= 1 << 8; /* [8] V8 */
3243
3244 return tcg_constant_tl(val);
3245 }
3246
3247 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
3248
/* RDCCR: materialize %ccr from the lazily-evaluated condition codes. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current %asi is a translation-time constant. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* RDTICK: read the %tick counter via its timer helper. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        /* I/O mode was entered mid-TB; end the TB after this insn. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3291
/* RDFPRS: read the floating-point register state. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: %gsr is kept live in cpu_gsr; requires an enabled FPU. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: read the pending software interrupt bits. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* RDTICK_CMPR: read the %tick compare register. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* RDSTICK: read the system %stick counter via its timer helper. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        /* I/O mode was entered mid-TB; end the TB after this insn. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: read the %stick compare register. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3349
3350 /*
3351 * UltraSPARC-T1 Strand status.
3352 * HYPV check maybe not enough, UA2005 & UA2007 describe
3353 * this ASR as impl. dep
3354 */
3355 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3356 {
3357 return tcg_constant_tl(1);
3358 }
3359
3360 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3361
3362 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3363 {
3364 update_psr(dc);
3365 gen_helper_rdpsr(dst, tcg_env);
3366 return dst;
3367 }
3368
3369 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3370
/* RDHPR %hpstate: hyperprivileged state register. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* RDHPR %htstate: indexed by the current trap level at runtime. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = tcg_env + 8 * (env->tl & MAXTL_MASK) */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* RDHPR %hintp: hypervisor interrupt pending. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* RDHPR %htba: hypervisor trap base address. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* RDHPR %hver: hypervisor version register. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* RDHPR %hstick_cmpr: hypervisor %stick compare register. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

/* RDWIM (32-bit): window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3436
/* RDPR %tpc: trap pc at the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* RDPR %tnpc: trap npc at the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: trap state at the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* RDPR %tt: trap type at the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3497
/* RDTBR / RDPR %tba: the trap base register is kept live in cpu_tbr. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* RDPR %pstate: processor state register. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* RDPR %tl: current trap level. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* RDPR %pil: processor interrupt level. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* RDPR %cwp: current window pointer, via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* RDPR %cansave: windows available to SAVE. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* RDPR %canrestore: windows available to RESTORE. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* RDPR %cleanwin: count of clean register windows. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* RDPR %otherwin: windows reserved for the other address space. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* RDPR %wstate: window state register. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* RDPR %gl: global register level (UA2005). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* RDPR %ver: version register. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3603
3604 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3605 {
3606 if (avail_64(dc)) {
3607 gen_helper_flushw(tcg_env);
3608 return advance_pc(dc);
3609 }
3610 return false;
3611 }
3612
/*
 * Common handler for the WR* family of instructions.  Per the
 * architecture, the value written is rs1 XOR (rs2 or simm13); that
 * value is handed to FUNC after checking the PRIV predicate supplied
 * by the caller.  Returns false to reject an invalid encoding.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With rs1 == %g0 the xor degenerates to the second operand. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: rs1 passes through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3644
/* %y: only 32 bits are architected; zero-extend on write. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* %ccr is written via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* %asi: keep only the low 8 bits. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/*
 * %fprs: restart the fprs-dirty tracking and end the TB so that
 * translation re-evaluates FPU enablement.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* %gsr requires an enabled FPU. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    /*
     * NOTE(review): the return value of gen_trap_ifnofpu is ignored;
     * presumably the generated trap ends the TB so the mov below is
     * dead code in that case -- confirm.
     */
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* %softint_set: set bits in the softint register via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* %softint_clr: clear bits in the softint register via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* %softint: replace the whole softint register via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3712
/* %tick_cmpr: store the compare value and reprogram the tick timer. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* %stick: set the system tick counter itself. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* %stick_cmpr: store the compare value and reprogram the stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3757
/*
 * Power down the cpu.  State is saved first; the helper presumably
 * does not return normally to the generated code.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/*
 * v8 %psr: the helper installs the new value; afterwards the live
 * condition codes are those stored in env, so switch to CC_OP_FLAGS
 * and end the TB (psr affects translation-time state).
 */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* v8 %wim: mask to the number of implemented windows before storing. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3786
/* %tpc of the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* %tnpc of the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* %tstate of the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* %tt of the current trap level; the field is only 32 bits wide. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3842
/* %tick: set the tick counter itself. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* %tba: stored in a live global TCGv. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/*
 * %pstate: the helper may change interrupt/trap state, so force npc
 * dynamic; end the TB if icount is in use.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* %tl: changing the trap level invalidates the static npc. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* %pil: written via helper; end the TB if icount is in use. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* %cwp: written via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3900
/* Register-window counter %cansave. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* Register-window counter %canrestore. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* Register-window counter %cleanwin. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* Register-window counter %otherwin. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* Window state register %wstate. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* Global-level register %gl (sun4v), written via helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* The v8 WRTBR shares its implementation with the v9 %tba writer. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3952
/* %hpstate: affects translation-time state, so end the TB. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * %htstate of the current trap level: compute &env->htstate[tl]
 * at runtime.  The shift by 3 scales %tl by the 8-byte element size.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* Hypervisor interrupt pending register, %hintp. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* Hypervisor trap base address, %htba. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* %hstick_cmpr: store the compare value and reprogram the hstick timer. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
4005
/*
 * Common handler for SAVED and RESTORED: privileged, dispatch to the
 * corresponding helper.  (The helpers are if/else'd rather than
 * selected by function pointer because they are build-error macros
 * on 32-bit configurations.)
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
4021
4022 static bool trans_NOP_v7(DisasContext *dc, arg_NOP_v7 *a)
4023 {
4024 /*
4025 * TODO: Need a feature bit for sparcv8.
4026 * In the meantime, treat all 32-bit cpus like sparcv7.
4027 */
4028 if (avail_32(dc)) {
4029 return advance_pc(dc);
4030 }
4031 return false;
4032 }
4033
/*
 * Common handler for the two-source arithmetic/logic formats.
 * FUNC is the register-register generator; FUNCI, when non-NULL, is
 * the immediate variant.  When a->cc is set, the result is computed
 * into cpu_cc_dst and cc_op is recorded so the condition codes can
 * be derived lazily later.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (a->cc) {
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    /* %g0 as rs2 is handled like an immediate zero. */
    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}
4069
4070 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4071 void (*func)(TCGv, TCGv, TCGv),
4072 void (*funci)(TCGv, TCGv, target_long),
4073 void (*func_cc)(TCGv, TCGv, TCGv))
4074 {
4075 if (a->cc) {
4076 return do_arith_int(dc, a, cc_op, func_cc, NULL);
4077 }
4078 return do_arith_int(dc, a, cc_op, func, funci);
4079 }
4080
/* Logical ops all share CC_OP_LOGIC; no separate CC generator needed. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
}
4087
/* ADD/SUB use a dedicated CC generator when the flags are wanted. */
TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

/* Logical ops; NULL means no immediate TCG opcode exists for the form. */
TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
4098
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov simm13 (or %g0 | %g0 == 0). */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov %rs2. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    /* Everything else is an ordinary logical OR. */
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
4115
/*
 * ADDC/ADDCcc: add with the carry from the current condition codes.
 * How that carry is recovered depends on which operation last set
 * the flags, so specialize on the translation-time dc->cc_op.
 */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry left behind by a previous addition. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Carry (borrow) left behind by a previous subtraction. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* Unknown flag state: recover the carry generically. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}
4139
/*
 * Feature checks for the remaining legacy decoder below: jump to the
 * local illegal_insn / nfpu_insn labels when the cpu model lacks the
 * required feature.
 */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
4146
4147 /* before an instruction, dc->pc must be static */
4148 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
4149 {
4150 unsigned int opc, rs1, rs2, rd;
4151 TCGv cpu_src1, cpu_src2;
4152 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
4153 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
4154 target_long simm;
4155
4156 opc = GET_FIELD(insn, 0, 1);
4157 rd = GET_FIELD(insn, 2, 6);
4158
4159 switch (opc) {
4160 case 0:
4161 goto illegal_insn; /* in decodetree */
4162 case 1:
4163 g_assert_not_reached(); /* in decodetree */
4164 case 2: /* FPU & Logical Operations */
4165 {
4166 unsigned int xop __attribute__((unused)) = GET_FIELD(insn, 7, 12);
4167 TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
4168 TCGv cpu_tmp0 __attribute__((unused));
4169
4170 if (xop == 0x34) { /* FPU Operations */
4171 if (gen_trap_ifnofpu(dc)) {
4172 goto jmp_insn;
4173 }
4174 gen_op_clear_ieee_excp_and_FTT();
4175 rs1 = GET_FIELD(insn, 13, 17);
4176 rs2 = GET_FIELD(insn, 27, 31);
4177 xop = GET_FIELD(insn, 18, 26);
4178
4179 switch (xop) {
4180 case 0x1: /* fmovs */
4181 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4182 gen_store_fpr_F(dc, rd, cpu_src1_32);
4183 break;
4184 case 0x5: /* fnegs */
4185 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
4186 break;
4187 case 0x9: /* fabss */
4188 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
4189 break;
4190 case 0x29: /* fsqrts */
4191 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
4192 break;
4193 case 0x2a: /* fsqrtd */
4194 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
4195 break;
4196 case 0x2b: /* fsqrtq */
4197 CHECK_FPU_FEATURE(dc, FLOAT128);
4198 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
4199 break;
4200 case 0x41: /* fadds */
4201 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
4202 break;
4203 case 0x42: /* faddd */
4204 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
4205 break;
4206 case 0x43: /* faddq */
4207 CHECK_FPU_FEATURE(dc, FLOAT128);
4208 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
4209 break;
4210 case 0x45: /* fsubs */
4211 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
4212 break;
4213 case 0x46: /* fsubd */
4214 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
4215 break;
4216 case 0x47: /* fsubq */
4217 CHECK_FPU_FEATURE(dc, FLOAT128);
4218 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
4219 break;
4220 case 0x49: /* fmuls */
4221 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
4222 break;
4223 case 0x4a: /* fmuld */
4224 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
4225 break;
4226 case 0x4b: /* fmulq */
4227 CHECK_FPU_FEATURE(dc, FLOAT128);
4228 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
4229 break;
4230 case 0x4d: /* fdivs */
4231 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
4232 break;
4233 case 0x4e: /* fdivd */
4234 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
4235 break;
4236 case 0x4f: /* fdivq */
4237 CHECK_FPU_FEATURE(dc, FLOAT128);
4238 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
4239 break;
4240 case 0x69: /* fsmuld */
4241 CHECK_FPU_FEATURE(dc, FSMULD);
4242 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
4243 break;
4244 case 0x6e: /* fdmulq */
4245 CHECK_FPU_FEATURE(dc, FLOAT128);
4246 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
4247 break;
4248 case 0xc4: /* fitos */
4249 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
4250 break;
4251 case 0xc6: /* fdtos */
4252 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
4253 break;
4254 case 0xc7: /* fqtos */
4255 CHECK_FPU_FEATURE(dc, FLOAT128);
4256 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
4257 break;
4258 case 0xc8: /* fitod */
4259 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
4260 break;
4261 case 0xc9: /* fstod */
4262 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
4263 break;
4264 case 0xcb: /* fqtod */
4265 CHECK_FPU_FEATURE(dc, FLOAT128);
4266 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
4267 break;
4268 case 0xcc: /* fitoq */
4269 CHECK_FPU_FEATURE(dc, FLOAT128);
4270 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
4271 break;
4272 case 0xcd: /* fstoq */
4273 CHECK_FPU_FEATURE(dc, FLOAT128);
4274 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
4275 break;
4276 case 0xce: /* fdtoq */
4277 CHECK_FPU_FEATURE(dc, FLOAT128);
4278 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
4279 break;
4280 case 0xd1: /* fstoi */
4281 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
4282 break;
4283 case 0xd2: /* fdtoi */
4284 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
4285 break;
4286 case 0xd3: /* fqtoi */
4287 CHECK_FPU_FEATURE(dc, FLOAT128);
4288 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
4289 break;
4290 #ifdef TARGET_SPARC64
4291 case 0x2: /* V9 fmovd */
4292 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4293 gen_store_fpr_D(dc, rd, cpu_src1_64);
4294 break;
4295 case 0x3: /* V9 fmovq */
4296 CHECK_FPU_FEATURE(dc, FLOAT128);
4297 gen_move_Q(dc, rd, rs2);
4298 break;
4299 case 0x6: /* V9 fnegd */
4300 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
4301 break;
4302 case 0x7: /* V9 fnegq */
4303 CHECK_FPU_FEATURE(dc, FLOAT128);
4304 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
4305 break;
4306 case 0xa: /* V9 fabsd */
4307 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
4308 break;
4309 case 0xb: /* V9 fabsq */
4310 CHECK_FPU_FEATURE(dc, FLOAT128);
4311 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
4312 break;
4313 case 0x81: /* V9 fstox */
4314 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
4315 break;
4316 case 0x82: /* V9 fdtox */
4317 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
4318 break;
4319 case 0x83: /* V9 fqtox */
4320 CHECK_FPU_FEATURE(dc, FLOAT128);
4321 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
4322 break;
4323 case 0x84: /* V9 fxtos */
4324 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
4325 break;
4326 case 0x88: /* V9 fxtod */
4327 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
4328 break;
4329 case 0x8c: /* V9 fxtoq */
4330 CHECK_FPU_FEATURE(dc, FLOAT128);
4331 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
4332 break;
4333 #endif
4334 default:
4335 goto illegal_insn;
4336 }
4337 } else if (xop == 0x35) { /* FPU Operations */
4338 #ifdef TARGET_SPARC64
4339 int cond;
4340 #endif
4341 if (gen_trap_ifnofpu(dc)) {
4342 goto jmp_insn;
4343 }
4344 gen_op_clear_ieee_excp_and_FTT();
4345 rs1 = GET_FIELD(insn, 13, 17);
4346 rs2 = GET_FIELD(insn, 27, 31);
4347 xop = GET_FIELD(insn, 18, 26);
4348
4349 #ifdef TARGET_SPARC64
4350 #define FMOVR(sz) \
4351 do { \
4352 DisasCompare cmp; \
4353 cond = GET_FIELD_SP(insn, 10, 12); \
4354 cpu_src1 = get_src1(dc, insn); \
4355 gen_compare_reg(&cmp, cond, cpu_src1); \
4356 gen_fmov##sz(dc, &cmp, rd, rs2); \
4357 } while (0)
4358
4359 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
4360 FMOVR(s);
4361 break;
4362 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
4363 FMOVR(d);
4364 break;
4365 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
4366 CHECK_FPU_FEATURE(dc, FLOAT128);
4367 FMOVR(q);
4368 break;
4369 }
4370 #undef FMOVR
4371 #endif
4372 switch (xop) {
4373 #ifdef TARGET_SPARC64
4374 #define FMOVCC(fcc, sz) \
4375 do { \
4376 DisasCompare cmp; \
4377 cond = GET_FIELD_SP(insn, 14, 17); \
4378 gen_fcompare(&cmp, fcc, cond); \
4379 gen_fmov##sz(dc, &cmp, rd, rs2); \
4380 } while (0)
4381
4382 case 0x001: /* V9 fmovscc %fcc0 */
4383 FMOVCC(0, s);
4384 break;
4385 case 0x002: /* V9 fmovdcc %fcc0 */
4386 FMOVCC(0, d);
4387 break;
4388 case 0x003: /* V9 fmovqcc %fcc0 */
4389 CHECK_FPU_FEATURE(dc, FLOAT128);
4390 FMOVCC(0, q);
4391 break;
4392 case 0x041: /* V9 fmovscc %fcc1 */
4393 FMOVCC(1, s);
4394 break;
4395 case 0x042: /* V9 fmovdcc %fcc1 */
4396 FMOVCC(1, d);
4397 break;
4398 case 0x043: /* V9 fmovqcc %fcc1 */
4399 CHECK_FPU_FEATURE(dc, FLOAT128);
4400 FMOVCC(1, q);
4401 break;
4402 case 0x081: /* V9 fmovscc %fcc2 */
4403 FMOVCC(2, s);
4404 break;
4405 case 0x082: /* V9 fmovdcc %fcc2 */
4406 FMOVCC(2, d);
4407 break;
4408 case 0x083: /* V9 fmovqcc %fcc2 */
4409 CHECK_FPU_FEATURE(dc, FLOAT128);
4410 FMOVCC(2, q);
4411 break;
4412 case 0x0c1: /* V9 fmovscc %fcc3 */
4413 FMOVCC(3, s);
4414 break;
4415 case 0x0c2: /* V9 fmovdcc %fcc3 */
4416 FMOVCC(3, d);
4417 break;
4418 case 0x0c3: /* V9 fmovqcc %fcc3 */
4419 CHECK_FPU_FEATURE(dc, FLOAT128);
4420 FMOVCC(3, q);
4421 break;
4422 #undef FMOVCC
4423 #define FMOVCC(xcc, sz) \
4424 do { \
4425 DisasCompare cmp; \
4426 cond = GET_FIELD_SP(insn, 14, 17); \
4427 gen_compare(&cmp, xcc, cond, dc); \
4428 gen_fmov##sz(dc, &cmp, rd, rs2); \
4429 } while (0)
4430
4431 case 0x101: /* V9 fmovscc %icc */
4432 FMOVCC(0, s);
4433 break;
4434 case 0x102: /* V9 fmovdcc %icc */
4435 FMOVCC(0, d);
4436 break;
4437 case 0x103: /* V9 fmovqcc %icc */
4438 CHECK_FPU_FEATURE(dc, FLOAT128);
4439 FMOVCC(0, q);
4440 break;
4441 case 0x181: /* V9 fmovscc %xcc */
4442 FMOVCC(1, s);
4443 break;
4444 case 0x182: /* V9 fmovdcc %xcc */
4445 FMOVCC(1, d);
4446 break;
4447 case 0x183: /* V9 fmovqcc %xcc */
4448 CHECK_FPU_FEATURE(dc, FLOAT128);
4449 FMOVCC(1, q);
4450 break;
4451 #undef FMOVCC
4452 #endif
4453 case 0x51: /* fcmps, V9 %fcc */
4454 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4455 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4456 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
4457 break;
4458 case 0x52: /* fcmpd, V9 %fcc */
4459 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4460 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4461 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
4462 break;
4463 case 0x53: /* fcmpq, V9 %fcc */
4464 CHECK_FPU_FEATURE(dc, FLOAT128);
4465 gen_op_load_fpr_QT0(QFPREG(rs1));
4466 gen_op_load_fpr_QT1(QFPREG(rs2));
4467 gen_op_fcmpq(rd & 3);
4468 break;
4469 case 0x55: /* fcmpes, V9 %fcc */
4470 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4471 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4472 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
4473 break;
4474 case 0x56: /* fcmped, V9 %fcc */
4475 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4476 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4477 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
4478 break;
4479 case 0x57: /* fcmpeq, V9 %fcc */
4480 CHECK_FPU_FEATURE(dc, FLOAT128);
4481 gen_op_load_fpr_QT0(QFPREG(rs1));
4482 gen_op_load_fpr_QT1(QFPREG(rs2));
4483 gen_op_fcmpeq(rd & 3);
4484 break;
4485 default:
4486 goto illegal_insn;
4487 }
4488 #ifdef TARGET_SPARC64
4489 } else if (xop == 0x25) { /* sll, V9 sllx */
4490 cpu_src1 = get_src1(dc, insn);
4491 if (IS_IMM) { /* immediate */
4492 simm = GET_FIELDs(insn, 20, 31);
4493 if (insn & (1 << 12)) {
4494 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4495 } else {
4496 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4497 }
4498 } else { /* register */
4499 rs2 = GET_FIELD(insn, 27, 31);
4500 cpu_src2 = gen_load_gpr(dc, rs2);
4501 cpu_tmp0 = tcg_temp_new();
4502 if (insn & (1 << 12)) {
4503 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4504 } else {
4505 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4506 }
4507 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4508 }
4509 gen_store_gpr(dc, rd, cpu_dst);
4510 } else if (xop == 0x26) { /* srl, V9 srlx */
4511 cpu_src1 = get_src1(dc, insn);
4512 if (IS_IMM) { /* immediate */
4513 simm = GET_FIELDs(insn, 20, 31);
4514 if (insn & (1 << 12)) {
4515 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4516 } else {
4517 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4518 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4519 }
4520 } else { /* register */
4521 rs2 = GET_FIELD(insn, 27, 31);
4522 cpu_src2 = gen_load_gpr(dc, rs2);
4523 cpu_tmp0 = tcg_temp_new();
4524 if (insn & (1 << 12)) {
4525 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4526 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4527 } else {
4528 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4529 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4530 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4531 }
4532 }
4533 gen_store_gpr(dc, rd, cpu_dst);
4534 } else if (xop == 0x27) { /* sra, V9 srax */
4535 cpu_src1 = get_src1(dc, insn);
4536 if (IS_IMM) { /* immediate */
4537 simm = GET_FIELDs(insn, 20, 31);
4538 if (insn & (1 << 12)) {
4539 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4540 } else {
4541 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4542 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4543 }
4544 } else { /* register */
4545 rs2 = GET_FIELD(insn, 27, 31);
4546 cpu_src2 = gen_load_gpr(dc, rs2);
4547 cpu_tmp0 = tcg_temp_new();
4548 if (insn & (1 << 12)) {
4549 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4550 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4551 } else {
4552 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4553 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4554 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4555 }
4556 }
4557 gen_store_gpr(dc, rd, cpu_dst);
4558 #endif
4559 } else if (xop < 0x36) {
4560 if (xop < 0x20) {
4561 cpu_src1 = get_src1(dc, insn);
4562 cpu_src2 = get_src2(dc, insn);
4563 switch (xop & ~0x10) {
4564 #ifdef TARGET_SPARC64
4565 case 0x9: /* V9 mulx */
4566 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4567 break;
4568 #endif
4569 case 0xa: /* umul */
4570 CHECK_IU_FEATURE(dc, MUL);
4571 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4572 if (xop & 0x10) {
4573 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4574 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4575 dc->cc_op = CC_OP_LOGIC;
4576 }
4577 break;
4578 case 0xb: /* smul */
4579 CHECK_IU_FEATURE(dc, MUL);
4580 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4581 if (xop & 0x10) {
4582 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4583 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4584 dc->cc_op = CC_OP_LOGIC;
4585 }
4586 break;
4587 case 0xc: /* subx, V9 subc */
4588 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4589 (xop & 0x10));
4590 break;
4591 #ifdef TARGET_SPARC64
4592 case 0xd: /* V9 udivx */
4593 gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
4594 break;
4595 #endif
4596 case 0xe: /* udiv */
4597 CHECK_IU_FEATURE(dc, DIV);
4598 if (xop & 0x10) {
4599 gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1,
4600 cpu_src2);
4601 dc->cc_op = CC_OP_DIV;
4602 } else {
4603 gen_helper_udiv(cpu_dst, tcg_env, cpu_src1,
4604 cpu_src2);
4605 }
4606 break;
4607 case 0xf: /* sdiv */
4608 CHECK_IU_FEATURE(dc, DIV);
4609 if (xop & 0x10) {
4610 gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1,
4611 cpu_src2);
4612 dc->cc_op = CC_OP_DIV;
4613 } else {
4614 gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1,
4615 cpu_src2);
4616 }
4617 break;
4618 default:
4619 goto illegal_insn;
4620 }
4621 gen_store_gpr(dc, rd, cpu_dst);
4622 } else {
4623 cpu_src1 = get_src1(dc, insn);
4624 cpu_src2 = get_src2(dc, insn);
4625 switch (xop) {
4626 case 0x20: /* taddcc */
4627 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4628 gen_store_gpr(dc, rd, cpu_dst);
4629 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4630 dc->cc_op = CC_OP_TADD;
4631 break;
4632 case 0x21: /* tsubcc */
4633 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4634 gen_store_gpr(dc, rd, cpu_dst);
4635 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4636 dc->cc_op = CC_OP_TSUB;
4637 break;
4638 case 0x22: /* taddcctv */
4639 gen_helper_taddcctv(cpu_dst, tcg_env,
4640 cpu_src1, cpu_src2);
4641 gen_store_gpr(dc, rd, cpu_dst);
4642 dc->cc_op = CC_OP_TADDTV;
4643 break;
4644 case 0x23: /* tsubcctv */
4645 gen_helper_tsubcctv(cpu_dst, tcg_env,
4646 cpu_src1, cpu_src2);
4647 gen_store_gpr(dc, rd, cpu_dst);
4648 dc->cc_op = CC_OP_TSUBTV;
4649 break;
4650 case 0x24: /* mulscc */
4651 update_psr(dc);
4652 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4653 gen_store_gpr(dc, rd, cpu_dst);
4654 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4655 dc->cc_op = CC_OP_ADD;
4656 break;
4657 #ifndef TARGET_SPARC64
4658 case 0x25: /* sll */
4659 if (IS_IMM) { /* immediate */
4660 simm = GET_FIELDs(insn, 20, 31);
4661 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4662 } else { /* register */
4663 cpu_tmp0 = tcg_temp_new();
4664 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4665 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4666 }
4667 gen_store_gpr(dc, rd, cpu_dst);
4668 break;
4669 case 0x26: /* srl */
4670 if (IS_IMM) { /* immediate */
4671 simm = GET_FIELDs(insn, 20, 31);
4672 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4673 } else { /* register */
4674 cpu_tmp0 = tcg_temp_new();
4675 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4676 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4677 }
4678 gen_store_gpr(dc, rd, cpu_dst);
4679 break;
4680 case 0x27: /* sra */
4681 if (IS_IMM) { /* immediate */
4682 simm = GET_FIELDs(insn, 20, 31);
4683 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4684 } else { /* register */
4685 cpu_tmp0 = tcg_temp_new();
4686 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4687 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4688 }
4689 gen_store_gpr(dc, rd, cpu_dst);
4690 break;
4691 #endif
4692 case 0x30:
4693 goto illegal_insn; /* WRASR in decodetree */
4694 case 0x32:
4695 goto illegal_insn; /* WRPR in decodetree */
4696 case 0x33: /* wrtbr, UA2005 wrhpr */
4697 goto illegal_insn; /* WRTBR, WRHPR in decodetree */
4698 #ifdef TARGET_SPARC64
4699 case 0x2c: /* V9 movcc */
4700 {
4701 int cc = GET_FIELD_SP(insn, 11, 12);
4702 int cond = GET_FIELD_SP(insn, 14, 17);
4703 DisasCompare cmp;
4704 TCGv dst;
4705
4706 if (insn & (1 << 18)) {
4707 if (cc == 0) {
4708 gen_compare(&cmp, 0, cond, dc);
4709 } else if (cc == 2) {
4710 gen_compare(&cmp, 1, cond, dc);
4711 } else {
4712 goto illegal_insn;
4713 }
4714 } else {
4715 gen_fcompare(&cmp, cc, cond);
4716 }
4717
4718 /* The get_src2 above loaded the normal 13-bit
4719 immediate field, not the 11-bit field we have
4720 in movcc. But it did handle the reg case. */
4721 if (IS_IMM) {
4722 simm = GET_FIELD_SPs(insn, 0, 10);
4723 tcg_gen_movi_tl(cpu_src2, simm);
4724 }
4725
4726 dst = gen_load_gpr(dc, rd);
4727 tcg_gen_movcond_tl(cmp.cond, dst,
4728 cmp.c1, cmp.c2,
4729 cpu_src2, dst);
4730 gen_store_gpr(dc, rd, dst);
4731 break;
4732 }
4733 case 0x2d: /* V9 sdivx */
4734 gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
4735 gen_store_gpr(dc, rd, cpu_dst);
4736 break;
4737 case 0x2e: /* V9 popc */
4738 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4739 gen_store_gpr(dc, rd, cpu_dst);
4740 break;
4741 case 0x2f: /* V9 movr */
4742 {
4743 int cond = GET_FIELD_SP(insn, 10, 12);
4744 DisasCompare cmp;
4745 TCGv dst;
4746
4747 gen_compare_reg(&cmp, cond, cpu_src1);
4748
4749 /* The get_src2 above loaded the normal 13-bit
4750 immediate field, not the 10-bit field we have
4751 in movr. But it did handle the reg case. */
4752 if (IS_IMM) {
4753 simm = GET_FIELD_SPs(insn, 0, 9);
4754 tcg_gen_movi_tl(cpu_src2, simm);
4755 }
4756
4757 dst = gen_load_gpr(dc, rd);
4758 tcg_gen_movcond_tl(cmp.cond, dst,
4759 cmp.c1, cmp.c2,
4760 cpu_src2, dst);
4761 gen_store_gpr(dc, rd, dst);
4762 break;
4763 }
4764 #endif
4765 default:
4766 goto illegal_insn;
4767 }
4768 }
4769 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4770 #ifdef TARGET_SPARC64
4771 int opf = GET_FIELD_SP(insn, 5, 13);
4772 rs1 = GET_FIELD(insn, 13, 17);
4773 rs2 = GET_FIELD(insn, 27, 31);
4774 if (gen_trap_ifnofpu(dc)) {
4775 goto jmp_insn;
4776 }
4777
4778 switch (opf) {
4779 case 0x000: /* VIS I edge8cc */
4780 CHECK_FPU_FEATURE(dc, VIS1);
4781 cpu_src1 = gen_load_gpr(dc, rs1);
4782 cpu_src2 = gen_load_gpr(dc, rs2);
4783 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4784 gen_store_gpr(dc, rd, cpu_dst);
4785 break;
4786 case 0x001: /* VIS II edge8n */
4787 CHECK_FPU_FEATURE(dc, VIS2);
4788 cpu_src1 = gen_load_gpr(dc, rs1);
4789 cpu_src2 = gen_load_gpr(dc, rs2);
4790 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4791 gen_store_gpr(dc, rd, cpu_dst);
4792 break;
4793 case 0x002: /* VIS I edge8lcc */
4794 CHECK_FPU_FEATURE(dc, VIS1);
4795 cpu_src1 = gen_load_gpr(dc, rs1);
4796 cpu_src2 = gen_load_gpr(dc, rs2);
4797 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4798 gen_store_gpr(dc, rd, cpu_dst);
4799 break;
4800 case 0x003: /* VIS II edge8ln */
4801 CHECK_FPU_FEATURE(dc, VIS2);
4802 cpu_src1 = gen_load_gpr(dc, rs1);
4803 cpu_src2 = gen_load_gpr(dc, rs2);
4804 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4805 gen_store_gpr(dc, rd, cpu_dst);
4806 break;
4807 case 0x004: /* VIS I edge16cc */
4808 CHECK_FPU_FEATURE(dc, VIS1);
4809 cpu_src1 = gen_load_gpr(dc, rs1);
4810 cpu_src2 = gen_load_gpr(dc, rs2);
4811 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4812 gen_store_gpr(dc, rd, cpu_dst);
4813 break;
4814 case 0x005: /* VIS II edge16n */
4815 CHECK_FPU_FEATURE(dc, VIS2);
4816 cpu_src1 = gen_load_gpr(dc, rs1);
4817 cpu_src2 = gen_load_gpr(dc, rs2);
4818 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4819 gen_store_gpr(dc, rd, cpu_dst);
4820 break;
4821 case 0x006: /* VIS I edge16lcc */
4822 CHECK_FPU_FEATURE(dc, VIS1);
4823 cpu_src1 = gen_load_gpr(dc, rs1);
4824 cpu_src2 = gen_load_gpr(dc, rs2);
4825 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4826 gen_store_gpr(dc, rd, cpu_dst);
4827 break;
4828 case 0x007: /* VIS II edge16ln */
4829 CHECK_FPU_FEATURE(dc, VIS2);
4830 cpu_src1 = gen_load_gpr(dc, rs1);
4831 cpu_src2 = gen_load_gpr(dc, rs2);
4832 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4833 gen_store_gpr(dc, rd, cpu_dst);
4834 break;
4835 case 0x008: /* VIS I edge32cc */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 cpu_src1 = gen_load_gpr(dc, rs1);
4838 cpu_src2 = gen_load_gpr(dc, rs2);
4839 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4840 gen_store_gpr(dc, rd, cpu_dst);
4841 break;
4842 case 0x009: /* VIS II edge32n */
4843 CHECK_FPU_FEATURE(dc, VIS2);
4844 cpu_src1 = gen_load_gpr(dc, rs1);
4845 cpu_src2 = gen_load_gpr(dc, rs2);
4846 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4847 gen_store_gpr(dc, rd, cpu_dst);
4848 break;
4849 case 0x00a: /* VIS I edge32lcc */
4850 CHECK_FPU_FEATURE(dc, VIS1);
4851 cpu_src1 = gen_load_gpr(dc, rs1);
4852 cpu_src2 = gen_load_gpr(dc, rs2);
4853 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4854 gen_store_gpr(dc, rd, cpu_dst);
4855 break;
4856 case 0x00b: /* VIS II edge32ln */
4857 CHECK_FPU_FEATURE(dc, VIS2);
4858 cpu_src1 = gen_load_gpr(dc, rs1);
4859 cpu_src2 = gen_load_gpr(dc, rs2);
4860 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4861 gen_store_gpr(dc, rd, cpu_dst);
4862 break;
4863 case 0x010: /* VIS I array8 */
4864 CHECK_FPU_FEATURE(dc, VIS1);
4865 cpu_src1 = gen_load_gpr(dc, rs1);
4866 cpu_src2 = gen_load_gpr(dc, rs2);
4867 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4868 gen_store_gpr(dc, rd, cpu_dst);
4869 break;
4870 case 0x012: /* VIS I array16 */
4871 CHECK_FPU_FEATURE(dc, VIS1);
4872 cpu_src1 = gen_load_gpr(dc, rs1);
4873 cpu_src2 = gen_load_gpr(dc, rs2);
4874 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4875 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4876 gen_store_gpr(dc, rd, cpu_dst);
4877 break;
4878 case 0x014: /* VIS I array32 */
4879 CHECK_FPU_FEATURE(dc, VIS1);
4880 cpu_src1 = gen_load_gpr(dc, rs1);
4881 cpu_src2 = gen_load_gpr(dc, rs2);
4882 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4883 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4884 gen_store_gpr(dc, rd, cpu_dst);
4885 break;
4886 case 0x018: /* VIS I alignaddr */
4887 CHECK_FPU_FEATURE(dc, VIS1);
4888 cpu_src1 = gen_load_gpr(dc, rs1);
4889 cpu_src2 = gen_load_gpr(dc, rs2);
4890 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4891 gen_store_gpr(dc, rd, cpu_dst);
4892 break;
4893 case 0x01a: /* VIS I alignaddrl */
4894 CHECK_FPU_FEATURE(dc, VIS1);
4895 cpu_src1 = gen_load_gpr(dc, rs1);
4896 cpu_src2 = gen_load_gpr(dc, rs2);
4897 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4898 gen_store_gpr(dc, rd, cpu_dst);
4899 break;
4900 case 0x019: /* VIS II bmask */
4901 CHECK_FPU_FEATURE(dc, VIS2);
4902 cpu_src1 = gen_load_gpr(dc, rs1);
4903 cpu_src2 = gen_load_gpr(dc, rs2);
4904 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4905 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4906 gen_store_gpr(dc, rd, cpu_dst);
4907 break;
4908 case 0x020: /* VIS I fcmple16 */
4909 CHECK_FPU_FEATURE(dc, VIS1);
4910 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4911 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4912 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4913 gen_store_gpr(dc, rd, cpu_dst);
4914 break;
4915 case 0x022: /* VIS I fcmpne16 */
4916 CHECK_FPU_FEATURE(dc, VIS1);
4917 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4918 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4919 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4920 gen_store_gpr(dc, rd, cpu_dst);
4921 break;
4922 case 0x024: /* VIS I fcmple32 */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4925 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4926 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4927 gen_store_gpr(dc, rd, cpu_dst);
4928 break;
4929 case 0x026: /* VIS I fcmpne32 */
4930 CHECK_FPU_FEATURE(dc, VIS1);
4931 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4932 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4933 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4934 gen_store_gpr(dc, rd, cpu_dst);
4935 break;
4936 case 0x028: /* VIS I fcmpgt16 */
4937 CHECK_FPU_FEATURE(dc, VIS1);
4938 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4939 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4940 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4941 gen_store_gpr(dc, rd, cpu_dst);
4942 break;
4943 case 0x02a: /* VIS I fcmpeq16 */
4944 CHECK_FPU_FEATURE(dc, VIS1);
4945 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4946 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4947 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4948 gen_store_gpr(dc, rd, cpu_dst);
4949 break;
4950 case 0x02c: /* VIS I fcmpgt32 */
4951 CHECK_FPU_FEATURE(dc, VIS1);
4952 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4953 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4954 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4955 gen_store_gpr(dc, rd, cpu_dst);
4956 break;
4957 case 0x02e: /* VIS I fcmpeq32 */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4960 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4961 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4962 gen_store_gpr(dc, rd, cpu_dst);
4963 break;
4964 case 0x031: /* VIS I fmul8x16 */
4965 CHECK_FPU_FEATURE(dc, VIS1);
4966 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4967 break;
4968 case 0x033: /* VIS I fmul8x16au */
4969 CHECK_FPU_FEATURE(dc, VIS1);
4970 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4971 break;
4972 case 0x035: /* VIS I fmul8x16al */
4973 CHECK_FPU_FEATURE(dc, VIS1);
4974 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4975 break;
4976 case 0x036: /* VIS I fmul8sux16 */
4977 CHECK_FPU_FEATURE(dc, VIS1);
4978 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4979 break;
4980 case 0x037: /* VIS I fmul8ulx16 */
4981 CHECK_FPU_FEATURE(dc, VIS1);
4982 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4983 break;
4984 case 0x038: /* VIS I fmuld8sux16 */
4985 CHECK_FPU_FEATURE(dc, VIS1);
4986 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4987 break;
4988 case 0x039: /* VIS I fmuld8ulx16 */
4989 CHECK_FPU_FEATURE(dc, VIS1);
4990 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4991 break;
4992 case 0x03a: /* VIS I fpack32 */
4993 CHECK_FPU_FEATURE(dc, VIS1);
4994 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4995 break;
4996 case 0x03b: /* VIS I fpack16 */
4997 CHECK_FPU_FEATURE(dc, VIS1);
4998 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4999 cpu_dst_32 = gen_dest_fpr_F(dc);
5000 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5001 gen_store_fpr_F(dc, rd, cpu_dst_32);
5002 break;
5003 case 0x03d: /* VIS I fpackfix */
5004 CHECK_FPU_FEATURE(dc, VIS1);
5005 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5006 cpu_dst_32 = gen_dest_fpr_F(dc);
5007 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5008 gen_store_fpr_F(dc, rd, cpu_dst_32);
5009 break;
5010 case 0x03e: /* VIS I pdist */
5011 CHECK_FPU_FEATURE(dc, VIS1);
5012 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
5013 break;
5014 case 0x048: /* VIS I faligndata */
5015 CHECK_FPU_FEATURE(dc, VIS1);
5016 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
5017 break;
5018 case 0x04b: /* VIS I fpmerge */
5019 CHECK_FPU_FEATURE(dc, VIS1);
5020 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
5021 break;
5022 case 0x04c: /* VIS II bshuffle */
5023 CHECK_FPU_FEATURE(dc, VIS2);
5024 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
5025 break;
5026 case 0x04d: /* VIS I fexpand */
5027 CHECK_FPU_FEATURE(dc, VIS1);
5028 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5029 break;
5030 case 0x050: /* VIS I fpadd16 */
5031 CHECK_FPU_FEATURE(dc, VIS1);
5032 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5033 break;
5034 case 0x051: /* VIS I fpadd16s */
5035 CHECK_FPU_FEATURE(dc, VIS1);
5036 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5037 break;
5038 case 0x052: /* VIS I fpadd32 */
5039 CHECK_FPU_FEATURE(dc, VIS1);
5040 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5041 break;
5042 case 0x053: /* VIS I fpadd32s */
5043 CHECK_FPU_FEATURE(dc, VIS1);
5044 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5045 break;
5046 case 0x054: /* VIS I fpsub16 */
5047 CHECK_FPU_FEATURE(dc, VIS1);
5048 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5049 break;
5050 case 0x055: /* VIS I fpsub16s */
5051 CHECK_FPU_FEATURE(dc, VIS1);
5052 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5053 break;
5054 case 0x056: /* VIS I fpsub32 */
5055 CHECK_FPU_FEATURE(dc, VIS1);
5056 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5057 break;
5058 case 0x057: /* VIS I fpsub32s */
5059 CHECK_FPU_FEATURE(dc, VIS1);
5060 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5061 break;
5062 case 0x060: /* VIS I fzero */
5063 CHECK_FPU_FEATURE(dc, VIS1);
5064 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5065 tcg_gen_movi_i64(cpu_dst_64, 0);
5066 gen_store_fpr_D(dc, rd, cpu_dst_64);
5067 break;
5068 case 0x061: /* VIS I fzeros */
5069 CHECK_FPU_FEATURE(dc, VIS1);
5070 cpu_dst_32 = gen_dest_fpr_F(dc);
5071 tcg_gen_movi_i32(cpu_dst_32, 0);
5072 gen_store_fpr_F(dc, rd, cpu_dst_32);
5073 break;
5074 case 0x062: /* VIS I fnor */
5075 CHECK_FPU_FEATURE(dc, VIS1);
5076 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5077 break;
5078 case 0x063: /* VIS I fnors */
5079 CHECK_FPU_FEATURE(dc, VIS1);
5080 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5081 break;
5082 case 0x064: /* VIS I fandnot2 */
5083 CHECK_FPU_FEATURE(dc, VIS1);
5084 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5085 break;
5086 case 0x065: /* VIS I fandnot2s */
5087 CHECK_FPU_FEATURE(dc, VIS1);
5088 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5089 break;
5090 case 0x066: /* VIS I fnot2 */
5091 CHECK_FPU_FEATURE(dc, VIS1);
5092 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5093 break;
5094 case 0x067: /* VIS I fnot2s */
5095 CHECK_FPU_FEATURE(dc, VIS1);
5096 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5097 break;
5098 case 0x068: /* VIS I fandnot1 */
5099 CHECK_FPU_FEATURE(dc, VIS1);
5100 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5101 break;
5102 case 0x069: /* VIS I fandnot1s */
5103 CHECK_FPU_FEATURE(dc, VIS1);
5104 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5105 break;
5106 case 0x06a: /* VIS I fnot1 */
5107 CHECK_FPU_FEATURE(dc, VIS1);
5108 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5109 break;
5110 case 0x06b: /* VIS I fnot1s */
5111 CHECK_FPU_FEATURE(dc, VIS1);
5112 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5113 break;
5114 case 0x06c: /* VIS I fxor */
5115 CHECK_FPU_FEATURE(dc, VIS1);
5116 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5117 break;
5118 case 0x06d: /* VIS I fxors */
5119 CHECK_FPU_FEATURE(dc, VIS1);
5120 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5121 break;
5122 case 0x06e: /* VIS I fnand */
5123 CHECK_FPU_FEATURE(dc, VIS1);
5124 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5125 break;
5126 case 0x06f: /* VIS I fnands */
5127 CHECK_FPU_FEATURE(dc, VIS1);
5128 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5129 break;
5130 case 0x070: /* VIS I fand */
5131 CHECK_FPU_FEATURE(dc, VIS1);
5132 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5133 break;
5134 case 0x071: /* VIS I fands */
5135 CHECK_FPU_FEATURE(dc, VIS1);
5136 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5137 break;
5138 case 0x072: /* VIS I fxnor */
5139 CHECK_FPU_FEATURE(dc, VIS1);
5140 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5141 break;
5142 case 0x073: /* VIS I fxnors */
5143 CHECK_FPU_FEATURE(dc, VIS1);
5144 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5145 break;
5146 case 0x074: /* VIS I fsrc1 */
5147 CHECK_FPU_FEATURE(dc, VIS1);
5148 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5149 gen_store_fpr_D(dc, rd, cpu_src1_64);
5150 break;
5151 case 0x075: /* VIS I fsrc1s */
5152 CHECK_FPU_FEATURE(dc, VIS1);
5153 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5154 gen_store_fpr_F(dc, rd, cpu_src1_32);
5155 break;
5156 case 0x076: /* VIS I fornot2 */
5157 CHECK_FPU_FEATURE(dc, VIS1);
5158 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5159 break;
5160 case 0x077: /* VIS I fornot2s */
5161 CHECK_FPU_FEATURE(dc, VIS1);
5162 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5163 break;
5164 case 0x078: /* VIS I fsrc2 */
5165 CHECK_FPU_FEATURE(dc, VIS1);
5166 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5167 gen_store_fpr_D(dc, rd, cpu_src1_64);
5168 break;
5169 case 0x079: /* VIS I fsrc2s */
5170 CHECK_FPU_FEATURE(dc, VIS1);
5171 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5172 gen_store_fpr_F(dc, rd, cpu_src1_32);
5173 break;
5174 case 0x07a: /* VIS I fornot1 */
5175 CHECK_FPU_FEATURE(dc, VIS1);
5176 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5177 break;
5178 case 0x07b: /* VIS I fornot1s */
5179 CHECK_FPU_FEATURE(dc, VIS1);
5180 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5181 break;
5182 case 0x07c: /* VIS I for */
5183 CHECK_FPU_FEATURE(dc, VIS1);
5184 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5185 break;
5186 case 0x07d: /* VIS I fors */
5187 CHECK_FPU_FEATURE(dc, VIS1);
5188 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5189 break;
5190 case 0x07e: /* VIS I fone */
5191 CHECK_FPU_FEATURE(dc, VIS1);
5192 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5193 tcg_gen_movi_i64(cpu_dst_64, -1);
5194 gen_store_fpr_D(dc, rd, cpu_dst_64);
5195 break;
5196 case 0x07f: /* VIS I fones */
5197 CHECK_FPU_FEATURE(dc, VIS1);
5198 cpu_dst_32 = gen_dest_fpr_F(dc);
5199 tcg_gen_movi_i32(cpu_dst_32, -1);
5200 gen_store_fpr_F(dc, rd, cpu_dst_32);
5201 break;
5202 case 0x080: /* VIS I shutdown */
5203 case 0x081: /* VIS II siam */
5204 // XXX
5205 goto illegal_insn;
5206 default:
5207 goto illegal_insn;
5208 }
5209 #else
5210 goto ncp_insn;
5211 #endif
5212 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5213 #ifdef TARGET_SPARC64
5214 goto illegal_insn;
5215 #else
5216 goto ncp_insn;
5217 #endif
5218 #ifdef TARGET_SPARC64
5219 } else if (xop == 0x39) { /* V9 return */
5220 save_state(dc);
5221 cpu_src1 = get_src1(dc, insn);
5222 cpu_tmp0 = tcg_temp_new();
5223 if (IS_IMM) { /* immediate */
5224 simm = GET_FIELDs(insn, 19, 31);
5225 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5226 } else { /* register */
5227 rs2 = GET_FIELD(insn, 27, 31);
5228 if (rs2) {
5229 cpu_src2 = gen_load_gpr(dc, rs2);
5230 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5231 } else {
5232 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5233 }
5234 }
5235 gen_check_align(dc, cpu_tmp0, 3);
5236 gen_helper_restore(tcg_env);
5237 gen_mov_pc_npc(dc);
5238 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5239 dc->npc = DYNAMIC_PC_LOOKUP;
5240 goto jmp_insn;
5241 #endif
5242 } else {
5243 cpu_src1 = get_src1(dc, insn);
5244 cpu_tmp0 = tcg_temp_new();
5245 if (IS_IMM) { /* immediate */
5246 simm = GET_FIELDs(insn, 19, 31);
5247 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5248 } else { /* register */
5249 rs2 = GET_FIELD(insn, 27, 31);
5250 if (rs2) {
5251 cpu_src2 = gen_load_gpr(dc, rs2);
5252 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5253 } else {
5254 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5255 }
5256 }
5257 switch (xop) {
5258 case 0x38: /* jmpl */
5259 {
5260 gen_check_align(dc, cpu_tmp0, 3);
5261 gen_store_gpr(dc, rd, tcg_constant_tl(dc->pc));
5262 gen_mov_pc_npc(dc);
5263 gen_address_mask(dc, cpu_tmp0);
5264 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5265 dc->npc = DYNAMIC_PC_LOOKUP;
5266 }
5267 goto jmp_insn;
5268 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5269 case 0x39: /* rett, V9 return */
5270 {
5271 if (!supervisor(dc))
5272 goto priv_insn;
5273 gen_check_align(dc, cpu_tmp0, 3);
5274 gen_mov_pc_npc(dc);
5275 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5276 dc->npc = DYNAMIC_PC;
5277 gen_helper_rett(tcg_env);
5278 }
5279 goto jmp_insn;
5280 #endif
5281 case 0x3b: /* flush */
5282 /* nop */
5283 break;
5284 case 0x3c: /* save */
5285 gen_helper_save(tcg_env);
5286 gen_store_gpr(dc, rd, cpu_tmp0);
5287 break;
5288 case 0x3d: /* restore */
5289 gen_helper_restore(tcg_env);
5290 gen_store_gpr(dc, rd, cpu_tmp0);
5291 break;
5292 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5293 case 0x3e: /* V9 done/retry */
5294 {
5295 switch (rd) {
5296 case 0:
5297 if (!supervisor(dc))
5298 goto priv_insn;
5299 dc->npc = DYNAMIC_PC;
5300 dc->pc = DYNAMIC_PC;
5301 translator_io_start(&dc->base);
5302 gen_helper_done(tcg_env);
5303 goto jmp_insn;
5304 case 1:
5305 if (!supervisor(dc))
5306 goto priv_insn;
5307 dc->npc = DYNAMIC_PC;
5308 dc->pc = DYNAMIC_PC;
5309 translator_io_start(&dc->base);
5310 gen_helper_retry(tcg_env);
5311 goto jmp_insn;
5312 default:
5313 goto illegal_insn;
5314 }
5315 }
5316 break;
5317 #endif
5318 default:
5319 goto illegal_insn;
5320 }
5321 }
5322 break;
5323 }
5324 break;
5325 case 3: /* load/store instructions */
5326 {
5327 unsigned int xop = GET_FIELD(insn, 7, 12);
5328 /* ??? gen_address_mask prevents us from using a source
5329 register directly. Always generate a temporary. */
5330 TCGv cpu_addr = tcg_temp_new();
5331
5332 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5333 if (xop == 0x3c || xop == 0x3e) {
5334 /* V9 casa/casxa : no offset */
5335 } else if (IS_IMM) { /* immediate */
5336 simm = GET_FIELDs(insn, 19, 31);
5337 if (simm != 0) {
5338 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5339 }
5340 } else { /* register */
5341 rs2 = GET_FIELD(insn, 27, 31);
5342 if (rs2 != 0) {
5343 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5344 }
5345 }
5346 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5347 (xop > 0x17 && xop <= 0x1d ) ||
5348 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5349 TCGv cpu_val = gen_dest_gpr(dc, rd);
5350
5351 switch (xop) {
5352 case 0x0: /* ld, V9 lduw, load unsigned word */
5353 gen_address_mask(dc, cpu_addr);
5354 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5355 dc->mem_idx, MO_TEUL | MO_ALIGN);
5356 break;
5357 case 0x1: /* ldub, load unsigned byte */
5358 gen_address_mask(dc, cpu_addr);
5359 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5360 dc->mem_idx, MO_UB);
5361 break;
5362 case 0x2: /* lduh, load unsigned halfword */
5363 gen_address_mask(dc, cpu_addr);
5364 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5365 dc->mem_idx, MO_TEUW | MO_ALIGN);
5366 break;
5367 case 0x3: /* ldd, load double word */
5368 if (rd & 1)
5369 goto illegal_insn;
5370 else {
5371 TCGv_i64 t64;
5372
5373 gen_address_mask(dc, cpu_addr);
5374 t64 = tcg_temp_new_i64();
5375 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5376 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5377 tcg_gen_trunc_i64_tl(cpu_val, t64);
5378 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5379 gen_store_gpr(dc, rd + 1, cpu_val);
5380 tcg_gen_shri_i64(t64, t64, 32);
5381 tcg_gen_trunc_i64_tl(cpu_val, t64);
5382 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5383 }
5384 break;
5385 case 0x9: /* ldsb, load signed byte */
5386 gen_address_mask(dc, cpu_addr);
5387 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
5388 break;
5389 case 0xa: /* ldsh, load signed halfword */
5390 gen_address_mask(dc, cpu_addr);
5391 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5392 dc->mem_idx, MO_TESW | MO_ALIGN);
5393 break;
5394 case 0xd: /* ldstub */
5395 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5396 break;
5397 case 0x0f:
5398 /* swap, swap register with memory. Also atomically */
5399 cpu_src1 = gen_load_gpr(dc, rd);
5400 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5401 dc->mem_idx, MO_TEUL);
5402 break;
5403 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5404 case 0x10: /* lda, V9 lduwa, load word alternate */
5405 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5406 break;
5407 case 0x11: /* lduba, load unsigned byte alternate */
5408 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5409 break;
5410 case 0x12: /* lduha, load unsigned halfword alternate */
5411 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5412 break;
5413 case 0x13: /* ldda, load double word alternate */
5414 if (rd & 1) {
5415 goto illegal_insn;
5416 }
5417 gen_ldda_asi(dc, cpu_addr, insn, rd);
5418 goto skip_move;
5419 case 0x19: /* ldsba, load signed byte alternate */
5420 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5421 break;
5422 case 0x1a: /* ldsha, load signed halfword alternate */
5423 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5424 break;
5425 case 0x1d: /* ldstuba -- XXX: should be atomically */
5426 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5427 break;
5428 case 0x1f: /* swapa, swap reg with alt. memory. Also
5429 atomically */
5430 cpu_src1 = gen_load_gpr(dc, rd);
5431 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5432 break;
5433
5434 #ifndef TARGET_SPARC64
5435 case 0x30: /* ldc */
5436 case 0x31: /* ldcsr */
5437 case 0x33: /* lddc */
5438 goto ncp_insn;
5439 #endif
5440 #endif
5441 #ifdef TARGET_SPARC64
5442 case 0x08: /* V9 ldsw */
5443 gen_address_mask(dc, cpu_addr);
5444 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5445 dc->mem_idx, MO_TESL | MO_ALIGN);
5446 break;
5447 case 0x0b: /* V9 ldx */
5448 gen_address_mask(dc, cpu_addr);
5449 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5450 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5451 break;
5452 case 0x18: /* V9 ldswa */
5453 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5454 break;
5455 case 0x1b: /* V9 ldxa */
5456 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5457 break;
5458 case 0x2d: /* V9 prefetch, no effect */
5459 goto skip_move;
5460 case 0x30: /* V9 ldfa */
5461 if (gen_trap_ifnofpu(dc)) {
5462 goto jmp_insn;
5463 }
5464 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5465 gen_update_fprs_dirty(dc, rd);
5466 goto skip_move;
5467 case 0x33: /* V9 lddfa */
5468 if (gen_trap_ifnofpu(dc)) {
5469 goto jmp_insn;
5470 }
5471 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5472 gen_update_fprs_dirty(dc, DFPREG(rd));
5473 goto skip_move;
5474 case 0x3d: /* V9 prefetcha, no effect */
5475 goto skip_move;
5476 case 0x32: /* V9 ldqfa */
5477 CHECK_FPU_FEATURE(dc, FLOAT128);
5478 if (gen_trap_ifnofpu(dc)) {
5479 goto jmp_insn;
5480 }
5481 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5482 gen_update_fprs_dirty(dc, QFPREG(rd));
5483 goto skip_move;
5484 #endif
5485 default:
5486 goto illegal_insn;
5487 }
5488 gen_store_gpr(dc, rd, cpu_val);
5489 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5490 skip_move: ;
5491 #endif
5492 } else if (xop >= 0x20 && xop < 0x24) {
5493 if (gen_trap_ifnofpu(dc)) {
5494 goto jmp_insn;
5495 }
5496 switch (xop) {
5497 case 0x20: /* ldf, load fpreg */
5498 gen_address_mask(dc, cpu_addr);
5499 cpu_dst_32 = gen_dest_fpr_F(dc);
5500 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5501 dc->mem_idx, MO_TEUL | MO_ALIGN);
5502 gen_store_fpr_F(dc, rd, cpu_dst_32);
5503 break;
5504 case 0x21: /* ldfsr, V9 ldxfsr */
5505 #ifdef TARGET_SPARC64
5506 gen_address_mask(dc, cpu_addr);
5507 if (rd == 1) {
5508 TCGv_i64 t64 = tcg_temp_new_i64();
5509 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5510 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5511 gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
5512 break;
5513 }
5514 #endif
5515 cpu_dst_32 = tcg_temp_new_i32();
5516 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5517 dc->mem_idx, MO_TEUL | MO_ALIGN);
5518 gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
5519 break;
5520 case 0x22: /* ldqf, load quad fpreg */
5521 CHECK_FPU_FEATURE(dc, FLOAT128);
5522 gen_address_mask(dc, cpu_addr);
5523 cpu_src1_64 = tcg_temp_new_i64();
5524 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5525 MO_TEUQ | MO_ALIGN_4);
5526 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5527 cpu_src2_64 = tcg_temp_new_i64();
5528 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5529 MO_TEUQ | MO_ALIGN_4);
5530 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5531 break;
5532 case 0x23: /* lddf, load double fpreg */
5533 gen_address_mask(dc, cpu_addr);
5534 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5535 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5536 MO_TEUQ | MO_ALIGN_4);
5537 gen_store_fpr_D(dc, rd, cpu_dst_64);
5538 break;
5539 default:
5540 goto illegal_insn;
5541 }
5542 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5543 xop == 0xe || xop == 0x1e) {
5544 TCGv cpu_val = gen_load_gpr(dc, rd);
5545
5546 switch (xop) {
5547 case 0x4: /* st, store word */
5548 gen_address_mask(dc, cpu_addr);
5549 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5550 dc->mem_idx, MO_TEUL | MO_ALIGN);
5551 break;
5552 case 0x5: /* stb, store byte */
5553 gen_address_mask(dc, cpu_addr);
5554 tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
5555 break;
5556 case 0x6: /* sth, store halfword */
5557 gen_address_mask(dc, cpu_addr);
5558 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5559 dc->mem_idx, MO_TEUW | MO_ALIGN);
5560 break;
5561 case 0x7: /* std, store double word */
5562 if (rd & 1)
5563 goto illegal_insn;
5564 else {
5565 TCGv_i64 t64;
5566 TCGv lo;
5567
5568 gen_address_mask(dc, cpu_addr);
5569 lo = gen_load_gpr(dc, rd + 1);
5570 t64 = tcg_temp_new_i64();
5571 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5572 tcg_gen_qemu_st_i64(t64, cpu_addr,
5573 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5574 }
5575 break;
5576 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5577 case 0x14: /* sta, V9 stwa, store word alternate */
5578 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5579 break;
5580 case 0x15: /* stba, store byte alternate */
5581 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5582 break;
5583 case 0x16: /* stha, store halfword alternate */
5584 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5585 break;
5586 case 0x17: /* stda, store double word alternate */
5587 if (rd & 1) {
5588 goto illegal_insn;
5589 }
5590 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5591 break;
5592 #endif
5593 #ifdef TARGET_SPARC64
5594 case 0x0e: /* V9 stx */
5595 gen_address_mask(dc, cpu_addr);
5596 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5597 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5598 break;
5599 case 0x1e: /* V9 stxa */
5600 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5601 break;
5602 #endif
5603 default:
5604 goto illegal_insn;
5605 }
5606 } else if (xop > 0x23 && xop < 0x28) {
5607 if (gen_trap_ifnofpu(dc)) {
5608 goto jmp_insn;
5609 }
5610 switch (xop) {
5611 case 0x24: /* stf, store fpreg */
5612 gen_address_mask(dc, cpu_addr);
5613 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5614 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5615 dc->mem_idx, MO_TEUL | MO_ALIGN);
5616 break;
5617 case 0x25: /* stfsr, V9 stxfsr */
5618 {
5619 #ifdef TARGET_SPARC64
5620 gen_address_mask(dc, cpu_addr);
5621 if (rd == 1) {
5622 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5623 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5624 break;
5625 }
5626 #endif
5627 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5628 dc->mem_idx, MO_TEUL | MO_ALIGN);
5629 }
5630 break;
5631 case 0x26:
5632 #ifdef TARGET_SPARC64
5633 /* V9 stqf, store quad fpreg */
5634 CHECK_FPU_FEATURE(dc, FLOAT128);
5635 gen_address_mask(dc, cpu_addr);
5636 /* ??? While stqf only requires 4-byte alignment, it is
5637 legal for the cpu to signal the unaligned exception.
5638 The OS trap handler is then required to fix it up.
5639 For qemu, this avoids having to probe the second page
5640 before performing the first write. */
5641 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5642 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5643 dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
5644 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5645 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5646 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5647 dc->mem_idx, MO_TEUQ);
5648 break;
5649 #else /* !TARGET_SPARC64 */
5650 /* stdfq, store floating point queue */
5651 #if defined(CONFIG_USER_ONLY)
5652 goto illegal_insn;
5653 #else
5654 if (!supervisor(dc))
5655 goto priv_insn;
5656 if (gen_trap_ifnofpu(dc)) {
5657 goto jmp_insn;
5658 }
5659 goto nfq_insn;
5660 #endif
5661 #endif
5662 case 0x27: /* stdf, store double fpreg */
5663 gen_address_mask(dc, cpu_addr);
5664 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5665 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5666 MO_TEUQ | MO_ALIGN_4);
5667 break;
5668 default:
5669 goto illegal_insn;
5670 }
5671 } else if (xop > 0x33 && xop < 0x3f) {
5672 switch (xop) {
5673 #ifdef TARGET_SPARC64
5674 case 0x34: /* V9 stfa */
5675 if (gen_trap_ifnofpu(dc)) {
5676 goto jmp_insn;
5677 }
5678 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5679 break;
5680 case 0x36: /* V9 stqfa */
5681 {
5682 CHECK_FPU_FEATURE(dc, FLOAT128);
5683 if (gen_trap_ifnofpu(dc)) {
5684 goto jmp_insn;
5685 }
5686 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5687 }
5688 break;
5689 case 0x37: /* V9 stdfa */
5690 if (gen_trap_ifnofpu(dc)) {
5691 goto jmp_insn;
5692 }
5693 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5694 break;
5695 case 0x3e: /* V9 casxa */
5696 rs2 = GET_FIELD(insn, 27, 31);
5697 cpu_src2 = gen_load_gpr(dc, rs2);
5698 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5699 break;
5700 #else
5701 case 0x34: /* stc */
5702 case 0x35: /* stcsr */
5703 case 0x36: /* stdcq */
5704 case 0x37: /* stdc */
5705 goto ncp_insn;
5706 #endif
5707 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5708 case 0x3c: /* V9 or LEON3 casa */
5709 #ifndef TARGET_SPARC64
5710 CHECK_IU_FEATURE(dc, CASA);
5711 #endif
5712 rs2 = GET_FIELD(insn, 27, 31);
5713 cpu_src2 = gen_load_gpr(dc, rs2);
5714 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5715 break;
5716 #endif
5717 default:
5718 goto illegal_insn;
5719 }
5720 } else {
5721 goto illegal_insn;
5722 }
5723 }
5724 break;
5725 }
5726 advance_pc(dc);
5727 jmp_insn:
5728 return;
5729 illegal_insn:
5730 gen_exception(dc, TT_ILL_INSN);
5731 return;
5732 #if !defined(CONFIG_USER_ONLY)
5733 priv_insn:
5734 gen_exception(dc, TT_PRIV_INSN);
5735 return;
5736 #endif
5737 nfpu_insn:
5738 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5739 return;
5740 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5741 nfq_insn:
5742 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5743 return;
5744 #endif
5745 #ifndef TARGET_SPARC64
5746 ncp_insn:
5747 gen_exception(dc, TT_NCP_INSN);
5748 return;
5749 #endif
5750 }
5751
/*
 * Initialize per-TB translation state from the CPU state and the
 * flags/cs_base fields baked into the TranslationBlock.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC is carried in the TB's cs_base field.  */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    /* Condition codes are in an unknown state at TB entry.  */
    dc->cc_op = CC_OP_DYNAMIC;
    /* MMU index is encoded in the low bits of tb->flags.  */
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5782
/* No per-TB setup is required beyond init_disas_context.  */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5786
5787 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5788 {
5789 DisasContext *dc = container_of(dcbase, DisasContext, base);
5790 target_ulong npc = dc->npc;
5791
5792 if (npc & 3) {
5793 switch (npc) {
5794 case JUMP_PC:
5795 assert(dc->jump_pc[1] == dc->pc + 4);
5796 npc = dc->jump_pc[0] | JUMP_PC;
5797 break;
5798 case DYNAMIC_PC:
5799 case DYNAMIC_PC_LOOKUP:
5800 npc = DYNAMIC_PC;
5801 break;
5802 default:
5803 g_assert_not_reached();
5804 }
5805 }
5806 tcg_gen_insn_start(dc->pc, npc);
5807 }
5808
5809 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5810 {
5811 DisasContext *dc = container_of(dcbase, DisasContext, base);
5812 CPUSPARCState *env = cpu_env(cs);
5813 unsigned int insn;
5814
5815 insn = translator_ldl(env, &dc->base, dc->pc);
5816 dc->base.pc_next += 4;
5817
5818 if (!decode(dc, insn)) {
5819 disas_sparc_legacy(dc, insn);
5820 }
5821
5822 if (dc->base.is_jmp == DISAS_NORETURN) {
5823 return;
5824 }
5825 if (dc->pc != dc->base.pc_next) {
5826 dc->base.is_jmp = DISAS_TOO_MANY;
5827 }
5828 }
5829
/*
 * Emit the TB epilogue: resolve the final (pc, npc) pair — which may be
 * static addresses or the DYNAMIC_PC/DYNAMIC_PC_LOOKUP/JUMP_PC tag
 * values — choose the appropriate TB exit, then emit the code for any
 * exceptions that were deferred to the end of the TB.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Assume a TB lookup is possible until a DYNAMIC_PC forbids it.  */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                /* cpu_pc already holds the runtime value.  */
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static PC: store it explicitly.  */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Conditional branch: select between the two targets.  */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                /* cpu_npc already holds the runtime value.  */
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static NPC: store it explicitly.  */
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit code for exceptions deferred during translation, freeing the
       list entries as we go.  */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* An npc with low bits set is a tag, not an address: skip it.  */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5909
5910 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5911 CPUState *cpu, FILE *logfile)
5912 {
5913 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5914 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5915 }
5916
/* Hooks invoked by the generic translator_loop.  */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5925
/* Translate one guest TB into TCG ops via the generic translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5933
/*
 * One-time registration of the TCG globals that mirror CPUSPARCState
 * fields: condition codes, control registers, general registers (g0-g7
 * direct, the windowed o/l/i registers via regwptr) and the FP
 * register file viewed as 64-bit pairs.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* Names of the even-numbered halves of each 64-bit FP pair.  */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals and their CPUSPARCState offsets.  */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* target_ulong-sized globals and their CPUSPARCState offsets.  */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is always zero; it never gets a backing global.  */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers are addressed indirectly through regwptr.  */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
6006
6007 void sparc_restore_state_to_opc(CPUState *cs,
6008 const TranslationBlock *tb,
6009 const uint64_t *data)
6010 {
6011 SPARCCPU *cpu = SPARC_CPU(cs);
6012 CPUSPARCState *env = &cpu->env;
6013 target_ulong pc = data[0];
6014 target_ulong npc = data[1];
6015
6016 env->pc = pc;
6017 if (npc == DYNAMIC_PC) {
6018 /* dynamic NPC: already stored */
6019 } else if (npc & JUMP_PC) {
6020 /* jump PC: use 'cond' and the jump targets of the translation */
6021 if (env->cond) {
6022 env->npc = npc & ~3;
6023 } else {
6024 env->npc = pc + 4;
6025 }
6026 } else {
6027 env->npc = npc;
6028 }
6029 }