/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_done(E) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
# define FSR_LDXFSR_MASK 0
# define FSR_LDXFSR_OLDMASK 0
# define MAXTL_MASK 0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X) offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X) env_field_offsetof(X)
#else
# define env32_field_offsetof(X) env_field_offsetof(X)
# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b) sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b) sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
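/*
 * For example (illustration only, not used below): in a SPARC instruction
 * word, GET_FIELD(insn, 0, 1) extracts the two-bit "op" field from bits
 * 31:30, and GET_FIELD_SP(insn, 30, 31) names the same field in the
 * manuals' bit order.
 */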

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
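/*
 * V9 encodes bit 5 of a double/quad FP register number in the low bit
 * of the 5-bit register field (e.g. %d32 is encoded as rd = 1); the
 * macros above recover the real register number.  Pre-V9 parts only
 * have %f0-%f31, so the low bit is simply masked off.
 */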

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
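/* Each 64-bit element of cpu_fpr[] backs a pair of 32-bit registers:
   the even-numbered single lives in the high half and the odd-numbered
   single in the low half. */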
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc) false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc) true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc) false
#else
# define AM_CHECK(dc) ((dc)->address_mask_32bit)
#endif
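/* On V9, setting PSTATE.AM truncates all generated addresses to 32 bits.
   The cases above fold that test at translation time whenever the build
   configuration makes it constant. */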

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

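/* %g0 reads as zero and discards writes.  The accessors below implement
   this by returning a zeroed temporary on load, skipping the move on
   store, and handing out a scratch temporary as the destination. */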
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

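/* Fallback for when the previous CC operation is unknown at translate
   time: have the helper recompute the icc.C bit from the saved
   condition-code state before folding it into the result. */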
static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}

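/* MULScc: one step of the V8 multiply.  src2 is added in only when the
   low bit of %y is set; %y shifts right taking in the low bit of src1;
   and src1 shifts right taking N ^ V from the current flags into its
   top bit. */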
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

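/* V9 POPC counts the bits of rs2 only; the architecture requires the
   rs1 field to be zero, so src1 is ignored here. */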
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

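/* Resolve a JUMP_PC npc: select between the two recorded branch targets
   according to the condition value previously saved in cpu_cond. */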
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

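/* Branch to a delayed TT_UNALIGNED exception if ADDR has any of the
   bits in MASK set. */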
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

// Inverted logic
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

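/*
 * Generator helpers for FP operations, named by operand shape: F, D and
 * Q stand for 32-, 64- and 128-bit operands.  A "ne_" prefix marks
 * variants that skip the IEEE exception check after the operation.
 */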
static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

/* asi moves */
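/* Classification of an ASI access: either an exception has already been
   raised (EXCP), the access maps onto an inline qemu_ld/st or one of its
   specialized forms (DIRECT, DTWINX, BLOCK, SHORT, BCOPY, BFILL), or it
   must go through the out-of-line helper (HELPER). */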
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
1968 in the restricted [0x30, 0x7f] range, and the check will be
1969 done properly in the helper. */
1970 if (!supervisor(dc) && asi < 0x80) {
1971 gen_exception(dc, TT_PRIV_ACT);
1972 type = GET_ASI_EXCP;
1973 } else {
1974 switch (asi) {
1975 case ASI_REAL: /* Bypass */
1976 case ASI_REAL_IO: /* Bypass, non-cacheable */
1977 case ASI_REAL_L: /* Bypass LE */
1978 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1979 case ASI_TWINX_REAL: /* Real address, twinx */
1980 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1981 case ASI_QUAD_LDD_PHYS:
1982 case ASI_QUAD_LDD_PHYS_L:
1983 mem_idx = MMU_PHYS_IDX;
1984 break;
1985 case ASI_N: /* Nucleus */
1986 case ASI_NL: /* Nucleus LE */
1987 case ASI_TWINX_N:
1988 case ASI_TWINX_NL:
1989 case ASI_NUCLEUS_QUAD_LDD:
1990 case ASI_NUCLEUS_QUAD_LDD_L:
1991 if (hypervisor(dc)) {
1992 mem_idx = MMU_PHYS_IDX;
1993 } else {
1994 mem_idx = MMU_NUCLEUS_IDX;
1995 }
1996 break;
1997 case ASI_AIUP: /* As if user primary */
1998 case ASI_AIUPL: /* As if user primary LE */
1999 case ASI_TWINX_AIUP:
2000 case ASI_TWINX_AIUP_L:
2001 case ASI_BLK_AIUP_4V:
2002 case ASI_BLK_AIUP_L_4V:
2003 case ASI_BLK_AIUP:
2004 case ASI_BLK_AIUPL:
2005 mem_idx = MMU_USER_IDX;
2006 break;
2007 case ASI_AIUS: /* As if user secondary */
2008 case ASI_AIUSL: /* As if user secondary LE */
2009 case ASI_TWINX_AIUS:
2010 case ASI_TWINX_AIUS_L:
2011 case ASI_BLK_AIUS_4V:
2012 case ASI_BLK_AIUS_L_4V:
2013 case ASI_BLK_AIUS:
2014 case ASI_BLK_AIUSL:
2015 mem_idx = MMU_USER_SECONDARY_IDX;
2016 break;
2017 case ASI_S: /* Secondary */
2018 case ASI_SL: /* Secondary LE */
2019 case ASI_TWINX_S:
2020 case ASI_TWINX_SL:
2021 case ASI_BLK_COMMIT_S:
2022 case ASI_BLK_S:
2023 case ASI_BLK_SL:
2024 case ASI_FL8_S:
2025 case ASI_FL8_SL:
2026 case ASI_FL16_S:
2027 case ASI_FL16_SL:
2028 if (mem_idx == MMU_USER_IDX) {
2029 mem_idx = MMU_USER_SECONDARY_IDX;
2030 } else if (mem_idx == MMU_KERNEL_IDX) {
2031 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2032 }
2033 break;
2034 case ASI_P: /* Primary */
2035 case ASI_PL: /* Primary LE */
2036 case ASI_TWINX_P:
2037 case ASI_TWINX_PL:
2038 case ASI_BLK_COMMIT_P:
2039 case ASI_BLK_P:
2040 case ASI_BLK_PL:
2041 case ASI_FL8_P:
2042 case ASI_FL8_PL:
2043 case ASI_FL16_P:
2044 case ASI_FL16_PL:
2045 break;
2046 }
2047 switch (asi) {
2048 case ASI_REAL:
2049 case ASI_REAL_IO:
2050 case ASI_REAL_L:
2051 case ASI_REAL_IO_L:
2052 case ASI_N:
2053 case ASI_NL:
2054 case ASI_AIUP:
2055 case ASI_AIUPL:
2056 case ASI_AIUS:
2057 case ASI_AIUSL:
2058 case ASI_S:
2059 case ASI_SL:
2060 case ASI_P:
2061 case ASI_PL:
2062 type = GET_ASI_DIRECT;
2063 break;
2064 case ASI_TWINX_REAL:
2065 case ASI_TWINX_REAL_L:
2066 case ASI_TWINX_N:
2067 case ASI_TWINX_NL:
2068 case ASI_TWINX_AIUP:
2069 case ASI_TWINX_AIUP_L:
2070 case ASI_TWINX_AIUS:
2071 case ASI_TWINX_AIUS_L:
2072 case ASI_TWINX_P:
2073 case ASI_TWINX_PL:
2074 case ASI_TWINX_S:
2075 case ASI_TWINX_SL:
2076 case ASI_QUAD_LDD_PHYS:
2077 case ASI_QUAD_LDD_PHYS_L:
2078 case ASI_NUCLEUS_QUAD_LDD:
2079 case ASI_NUCLEUS_QUAD_LDD_L:
2080 type = GET_ASI_DTWINX;
2081 break;
2082 case ASI_BLK_COMMIT_P:
2083 case ASI_BLK_COMMIT_S:
2084 case ASI_BLK_AIUP_4V:
2085 case ASI_BLK_AIUP_L_4V:
2086 case ASI_BLK_AIUP:
2087 case ASI_BLK_AIUPL:
2088 case ASI_BLK_AIUS_4V:
2089 case ASI_BLK_AIUS_L_4V:
2090 case ASI_BLK_AIUS:
2091 case ASI_BLK_AIUSL:
2092 case ASI_BLK_S:
2093 case ASI_BLK_SL:
2094 case ASI_BLK_P:
2095 case ASI_BLK_PL:
2096 type = GET_ASI_BLOCK;
2097 break;
2098 case ASI_FL8_S:
2099 case ASI_FL8_SL:
2100 case ASI_FL8_P:
2101 case ASI_FL8_PL:
2102 memop = MO_UB;
2103 type = GET_ASI_SHORT;
2104 break;
2105 case ASI_FL16_S:
2106 case ASI_FL16_SL:
2107 case ASI_FL16_P:
2108 case ASI_FL16_PL:
2109 memop = MO_TEUW;
2110 type = GET_ASI_SHORT;
2111 break;
2112 }
2113 /* The little-endian asis all have bit 3 set. */
2114 if (asi & 8) {
2115 memop ^= MO_BSWAP;
2116 }
2117 }
2118 #endif
2119
2120 done:
2121 return (DisasASI){ type, asi, mem_idx, memop };
2122 }
2123
2124 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2125 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
2126 TCGv_i32 asi, TCGv_i32 mop)
2127 {
2128 g_assert_not_reached();
2129 }
2130
2131 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
2132 TCGv_i32 asi, TCGv_i32 mop)
2133 {
2134 g_assert_not_reached();
2135 }
2136 #endif
2137
2138 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
2139 {
2140 switch (da->type) {
2141 case GET_ASI_EXCP:
2142 break;
2143 case GET_ASI_DTWINX: /* Reserved for ldda. */
2144 gen_exception(dc, TT_ILL_INSN);
2145 break;
2146 case GET_ASI_DIRECT:
2147 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
2148 break;
2149 default:
2150 {
2151 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2152 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
2153
2154 save_state(dc);
2155 #ifdef TARGET_SPARC64
2156 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
2157 #else
2158 {
2159 TCGv_i64 t64 = tcg_temp_new_i64();
2160 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2161 tcg_gen_trunc_i64_tl(dst, t64);
2162 }
2163 #endif
2164 }
2165 break;
2166 }
2167 }
2168
2169 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
2170 {
2171 switch (da->type) {
2172 case GET_ASI_EXCP:
2173 break;
2174
2175 case GET_ASI_DTWINX: /* Reserved for stda. */
2176 if (TARGET_LONG_BITS == 32) {
2177 gen_exception(dc, TT_ILL_INSN);
2178 break;
2179 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2180 /* Pre-OpenSPARC CPUs don't have these. */
2181 gen_exception(dc, TT_ILL_INSN);
2182 break;
2183 }
2184 /* On OpenSPARC T1+ CPUs, TWINX ASIs used with stores are the ST_BLKINIT_ ASIs. */
2185 /* fall through */
2186
2187 case GET_ASI_DIRECT:
2188 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
2189 break;
2190
2191 case GET_ASI_BCOPY:
2192 assert(TARGET_LONG_BITS == 32);
2193 /* Copy 32 bytes from the address in SRC to ADDR. */
2194 /* ??? The original qemu code suggests 4-byte alignment, dropping
2195 the low bits, but the only place I can see this used is in the
2196 Linux kernel with 32-byte alignment, which would make more sense
2197 as a cacheline-style operation. */
2198 {
2199 TCGv saddr = tcg_temp_new();
2200 TCGv daddr = tcg_temp_new();
2201 TCGv four = tcg_constant_tl(4);
2202 TCGv_i32 tmp = tcg_temp_new_i32();
2203 int i;
2204
2205 tcg_gen_andi_tl(saddr, src, -4);
2206 tcg_gen_andi_tl(daddr, addr, -4);
2207 for (i = 0; i < 32; i += 4) {
2208 /* Since the loads and stores are paired, allow the
2209 copy to happen in the host endianness. */
2210 tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
2211 tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
2212 tcg_gen_add_tl(saddr, saddr, four);
2213 tcg_gen_add_tl(daddr, daddr, four);
2214 }
2215 }
2216 break;
2217
2218 default:
2219 {
2220 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2221 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
2222
2223 save_state(dc);
2224 #ifdef TARGET_SPARC64
2225 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
2226 #else
2227 {
2228 TCGv_i64 t64 = tcg_temp_new_i64();
2229 tcg_gen_extu_tl_i64(t64, src);
2230 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2231 }
2232 #endif
2233
2234 /* A write to a TLB register may alter page maps. End the TB. */
2235 dc->npc = DYNAMIC_PC;
2236 }
2237 break;
2238 }
2239 }
2240
2241 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
2242 TCGv dst, TCGv src, TCGv addr)
2243 {
2244 switch (da->type) {
2245 case GET_ASI_EXCP:
2246 break;
2247 case GET_ASI_DIRECT:
2248 tcg_gen_atomic_xchg_tl(dst, addr, src,
2249 da->mem_idx, da->memop | MO_ALIGN);
2250 break;
2251 default:
2252 /* ??? Should be DAE_invalid_asi. */
2253 gen_exception(dc, TT_DATA_ACCESS);
2254 break;
2255 }
2256 }
2257
2258 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
2259 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
2260 {
2261 switch (da->type) {
2262 case GET_ASI_EXCP:
2263 return;
2264 case GET_ASI_DIRECT:
2265 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
2266 da->mem_idx, da->memop | MO_ALIGN);
2267 break;
2268 default:
2269 /* ??? Should be DAE_invalid_asi. */
2270 gen_exception(dc, TT_DATA_ACCESS);
2271 break;
2272 }
2273 }
2274
2275 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
2276 {
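/* LDSTUB atomically loads the addressed byte and stores 0xff to it;
   it is the classic SPARC spin-lock primitive. */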
2277 switch (da->type) {
2278 case GET_ASI_EXCP:
2279 break;
2280 case GET_ASI_DIRECT:
2281 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
2282 da->mem_idx, MO_UB);
2283 break;
2284 default:
2285 /* ??? In theory, this should raise DAE_invalid_asi.
2286 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2287 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2288 gen_helper_exit_atomic(tcg_env);
2289 } else {
2290 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2291 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
2292 TCGv_i64 s64, t64;
2293
2294 save_state(dc);
2295 t64 = tcg_temp_new_i64();
2296 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2297
2298 s64 = tcg_constant_i64(0xff);
2299 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
2300
2301 tcg_gen_trunc_i64_tl(dst, t64);
2302
2303 /* End the TB. */
2304 dc->npc = DYNAMIC_PC;
2305 }
2306 break;
2307 }
2308 }
2309
2310 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2311 TCGv addr, int rd)
2312 {
2313 MemOp memop = da->memop;
2314 MemOp size = memop & MO_SIZE;
2315 TCGv_i32 d32;
2316 TCGv_i64 d64;
2317 TCGv addr_tmp;
2318
2319 /* TODO: Use 128-bit load/store below. */
2320 if (size == MO_128) {
2321 memop = (memop & ~MO_SIZE) | MO_64;
2322 }
2323
2324 switch (da->type) {
2325 case GET_ASI_EXCP:
2326 break;
2327
2328 case GET_ASI_DIRECT:
2329 memop |= MO_ALIGN_4;
2330 switch (size) {
2331 case MO_32:
2332 d32 = gen_dest_fpr_F(dc);
2333 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
2334 gen_store_fpr_F(dc, rd, d32);
2335 break;
2336
2337 case MO_64:
2338 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
2339 break;
2340
2341 case MO_128:
2342 d64 = tcg_temp_new_i64();
2343 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2344 addr_tmp = tcg_temp_new();
2345 tcg_gen_addi_tl(addr_tmp, addr, 8);
2346 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2347 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2348 break;
2349 default:
2350 g_assert_not_reached();
2351 }
2352 break;
2353
2354 case GET_ASI_BLOCK:
2355 /* Valid for lddfa on aligned registers only. */
2356 if (orig_size == MO_64 && (rd & 7) == 0) {
2357 /* The first operation checks required alignment. */
2358 addr_tmp = tcg_temp_new();
2359 for (int i = 0; ; ++i) {
2360 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2361 memop | (i == 0 ? MO_ALIGN_64 : 0));
2362 if (i == 7) {
2363 break;
2364 }
2365 tcg_gen_addi_tl(addr_tmp, addr, 8);
2366 addr = addr_tmp;
2367 }
2368 } else {
2369 gen_exception(dc, TT_ILL_INSN);
2370 }
2371 break;
2372
2373 case GET_ASI_SHORT:
2374 /* Valid for lddfa only. */
2375 if (orig_size == MO_64) {
2376 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2377 memop | MO_ALIGN);
2378 } else {
2379 gen_exception(dc, TT_ILL_INSN);
2380 }
2381 break;
2382
2383 default:
2384 {
2385 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2386 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2387
2388 save_state(dc);
2389 /* According to the table in the UA2011 manual, the only
2390 other asis that are valid for ldfa/lddfa/ldqfa are
2391 the NO_FAULT asis. We still need a helper for these,
2392 but we can just use the integer asi helper for them. */
2393 switch (size) {
2394 case MO_32:
2395 d64 = tcg_temp_new_i64();
2396 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2397 d32 = gen_dest_fpr_F(dc);
2398 tcg_gen_extrl_i64_i32(d32, d64);
2399 gen_store_fpr_F(dc, rd, d32);
2400 break;
2401 case MO_64:
2402 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
2403 r_asi, r_mop);
2404 break;
2405 case MO_128:
2406 d64 = tcg_temp_new_i64();
2407 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2408 addr_tmp = tcg_temp_new();
2409 tcg_gen_addi_tl(addr_tmp, addr, 8);
2410 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
2411 r_asi, r_mop);
2412 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2413 break;
2414 default:
2415 g_assert_not_reached();
2416 }
2417 }
2418 break;
2419 }
2420 }
2421
2422 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2423 TCGv addr, int rd)
2424 {
2425 MemOp memop = da->memop;
2426 MemOp size = memop & MO_SIZE;
2427 TCGv_i32 d32;
2428 TCGv addr_tmp;
2429
2430 /* TODO: Use 128-bit load/store below. */
2431 if (size == MO_128) {
2432 memop = (memop & ~MO_SIZE) | MO_64;
2433 }
2434
2435 switch (da->type) {
2436 case GET_ASI_EXCP:
2437 break;
2438
2439 case GET_ASI_DIRECT:
2440 memop |= MO_ALIGN_4;
2441 switch (size) {
2442 case MO_32:
2443 d32 = gen_load_fpr_F(dc, rd);
2444 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2445 break;
2446 case MO_64:
2447 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2448 memop | MO_ALIGN_4);
2449 break;
2450 case MO_128:
2451 /* Only 4-byte alignment required. However, it is legal for the
2452 cpu to signal the alignment fault, and the OS trap handler is
2453 required to fix it up. Requiring 16-byte alignment here avoids
2454 having to probe the second page before performing the first
2455 write. */
2456 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2457 memop | MO_ALIGN_16);
2458 addr_tmp = tcg_temp_new();
2459 tcg_gen_addi_tl(addr_tmp, addr, 8);
2460 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2461 break;
2462 default:
2463 g_assert_not_reached();
2464 }
2465 break;
2466
2467 case GET_ASI_BLOCK:
2468 /* Valid for stdfa on aligned registers only. */
2469 if (orig_size == MO_64 && (rd & 7) == 0) {
2470 /* The first operation checks required alignment. */
2471 addr_tmp = tcg_temp_new();
2472 for (int i = 0; ; ++i) {
2473 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2474 memop | (i == 0 ? MO_ALIGN_64 : 0));
2475 if (i == 7) {
2476 break;
2477 }
2478 tcg_gen_addi_tl(addr_tmp, addr, 8);
2479 addr = addr_tmp;
2480 }
2481 } else {
2482 gen_exception(dc, TT_ILL_INSN);
2483 }
2484 break;
2485
2486 case GET_ASI_SHORT:
2487 /* Valid for stdfa only. */
2488 if (orig_size == MO_64) {
2489 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2490 memop | MO_ALIGN);
2491 } else {
2492 gen_exception(dc, TT_ILL_INSN);
2493 }
2494 break;
2495
2496 default:
2497 /* According to the table in the UA2011 manual, the only
2498 other asis that are valid for stfa/stdfa/stqfa are
2499 the PST* asis, which aren't currently handled. */
2500 gen_exception(dc, TT_ILL_INSN);
2501 break;
2502 }
2503 }
2504
2505 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2506 {
2507 TCGv hi = gen_dest_gpr(dc, rd);
2508 TCGv lo = gen_dest_gpr(dc, rd + 1);
2509
2510 switch (da->type) {
2511 case GET_ASI_EXCP:
2512 return;
2513
2514 case GET_ASI_DTWINX:
2515 #ifdef TARGET_SPARC64
2516 {
2517 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2518 TCGv_i128 t = tcg_temp_new_i128();
2519
2520 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2521 /*
2522 * Note that LE twinx acts as if each 64-bit register result is
2523 * byte swapped. We perform one 128-bit LE load, so must swap
2524 * the order of the writebacks.
2525 */
2526 if ((mop & MO_BSWAP) == MO_TE) {
2527 tcg_gen_extr_i128_i64(lo, hi, t);
2528 } else {
2529 tcg_gen_extr_i128_i64(hi, lo, t);
2530 }
2531 }
2532 break;
2533 #else
2534 g_assert_not_reached();
2535 #endif
2536
2537 case GET_ASI_DIRECT:
2538 {
2539 TCGv_i64 tmp = tcg_temp_new_i64();
2540
2541 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2542
2543 /* Note that LE ldda acts as if each 32-bit register
2544 result is byte swapped. Having just performed one
2545 64-bit bswap, we now need to swap the writebacks. */
2546 if ((da->memop & MO_BSWAP) == MO_TE) {
2547 tcg_gen_extr_i64_tl(lo, hi, tmp);
2548 } else {
2549 tcg_gen_extr_i64_tl(hi, lo, tmp);
2550 }
2551 }
2552 break;
2553
2554 default:
2555 /* ??? In theory we've handled all of the ASIs that are valid
2556 for ldda, and this should raise DAE_invalid_asi. However,
2557 real hardware allows others. This can be seen with e.g.
2558 FreeBSD 10.3 wrt ASI_IC_TAG. */
2559 {
2560 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2561 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2562 TCGv_i64 tmp = tcg_temp_new_i64();
2563
2564 save_state(dc);
2565 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2566
2567 /* See above. */
2568 if ((da->memop & MO_BSWAP) == MO_TE) {
2569 tcg_gen_extr_i64_tl(lo, hi, tmp);
2570 } else {
2571 tcg_gen_extr_i64_tl(hi, lo, tmp);
2572 }
2573 }
2574 break;
2575 }
2576
2577 gen_store_gpr(dc, rd, hi);
2578 gen_store_gpr(dc, rd + 1, lo);
2579 }
2580
2581 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2582 {
2583 TCGv hi = gen_load_gpr(dc, rd);
2584 TCGv lo = gen_load_gpr(dc, rd + 1);
2585
2586 switch (da->type) {
2587 case GET_ASI_EXCP:
2588 break;
2589
2590 case GET_ASI_DTWINX:
2591 #ifdef TARGET_SPARC64
2592 {
2593 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2594 TCGv_i128 t = tcg_temp_new_i128();
2595
2596 /*
2597 * Note that LE twinx acts as if each 64-bit register result is
2598 * byte swapped. We perform one 128-bit LE store, so must swap
2599 * the order of the construction.
2600 */
2601 if ((mop & MO_BSWAP) == MO_TE) {
2602 tcg_gen_concat_i64_i128(t, lo, hi);
2603 } else {
2604 tcg_gen_concat_i64_i128(t, hi, lo);
2605 }
2606 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2607 }
2608 break;
2609 #else
2610 g_assert_not_reached();
2611 #endif
2612
2613 case GET_ASI_DIRECT:
2614 {
2615 TCGv_i64 t64 = tcg_temp_new_i64();
2616
2617 /* Note that LE stda acts as if each 32-bit register result is
2618 byte swapped. We will perform one 64-bit LE store, so now
2619 we must swap the order of the construction. */
2620 if ((da->memop & MO_BSWAP) == MO_TE) {
2621 tcg_gen_concat_tl_i64(t64, lo, hi);
2622 } else {
2623 tcg_gen_concat_tl_i64(t64, hi, lo);
2624 }
2625 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2626 }
2627 break;
2628
2629 case GET_ASI_BFILL:
2630 assert(TARGET_LONG_BITS == 32);
2631 /* Store 32 bytes of T64 to ADDR. */
2632 /* ??? The original qemu code suggests 8-byte alignment, dropping
2633 the low bits, but the only place I can see this used is in the
2634 Linux kernel with 32-byte alignment, which would make more sense
2635 as a cacheline-style operation. */
2636 {
2637 TCGv_i64 t64 = tcg_temp_new_i64();
2638 TCGv d_addr = tcg_temp_new();
2639 TCGv eight = tcg_constant_tl(8);
2640 int i;
2641
2642 tcg_gen_concat_tl_i64(t64, lo, hi);
2643 tcg_gen_andi_tl(d_addr, addr, -8);
2644 for (i = 0; i < 32; i += 8) {
2645 tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
2646 tcg_gen_add_tl(d_addr, d_addr, eight);
2647 }
2648 }
2649 break;
2650
2651 default:
2652 /* ??? In theory we've handled all of the ASIs that are valid
2653 for stda, and this should raise DAE_invalid_asi. */
2654 {
2655 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2656 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2657 TCGv_i64 t64 = tcg_temp_new_i64();
2658
2659 /* See above. */
2660 if ((da->memop & MO_BSWAP) == MO_TE) {
2661 tcg_gen_concat_tl_i64(t64, lo, hi);
2662 } else {
2663 tcg_gen_concat_tl_i64(t64, hi, lo);
2664 }
2665
2666 save_state(dc);
2667 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2668 }
2669 break;
2670 }
2671 }
2672
2673 #ifdef TARGET_SPARC64
2674 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2675 {
2676 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2677 return gen_load_gpr(dc, rs1);
2678 }
2679
2680 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2681 {
2682 TCGv_i32 c32, zero, dst, s1, s2;
2683
2684 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2685 or fold the comparison down to 32 bits and use movcond_i32. Choose
2686 the latter. */
2687 c32 = tcg_temp_new_i32();
2688 if (cmp->is_bool) {
2689 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2690 } else {
2691 TCGv_i64 c64 = tcg_temp_new_i64();
2692 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2693 tcg_gen_extrl_i64_i32(c32, c64);
2694 }
2695
2696 s1 = gen_load_fpr_F(dc, rs);
2697 s2 = gen_load_fpr_F(dc, rd);
2698 dst = gen_dest_fpr_F(dc);
2699 zero = tcg_constant_i32(0);
2700
2701 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2702
2703 gen_store_fpr_F(dc, rd, dst);
2704 }
2705
2706 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2707 {
2708 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2709 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2710 gen_load_fpr_D(dc, rs),
2711 gen_load_fpr_D(dc, rd));
2712 gen_store_fpr_D(dc, rd, dst);
2713 }
2714
2715 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2716 {
2717 int qd = QFPREG(rd);
2718 int qs = QFPREG(rs);
2719
2720 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2721 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2722 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2723 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2724
2725 gen_update_fprs_dirty(dc, qd);
2726 }
2727
2728 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2729 {
2730 TCGv_i32 r_tl = tcg_temp_new_i32();
2731
2732 /* load env->tl into r_tl */
2733 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2734
2735 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2736 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2737
2738 /* calculate offset to current trap state from env->ts, reuse r_tl */
2739 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2740 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2741
2742 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2743 {
2744 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2745 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2746 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2747 }
2748 }
2749
2750 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2751 {
2752 TCGv t1, t2, shift;
2753
2754 t1 = tcg_temp_new();
2755 t2 = tcg_temp_new();
2756 shift = tcg_temp_new();
2757
2758 tcg_gen_andi_tl(shift, gsr, 7);
2759 tcg_gen_shli_tl(shift, shift, 3);
2760 tcg_gen_shl_tl(t1, s1, shift);
2761
2762 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2763 shift of (up to 63) followed by a constant shift of 1. */
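/* E.g. GSR.align == 3 gives shift == 24, so the result below is
   (s1 << 24) | ((s2 >> 39) >> 1) == (s1 << 24) | (s2 >> 40):
   the 8-byte window at byte offset 3 of the 16-byte value s1:s2. */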
2764 tcg_gen_xori_tl(shift, shift, 63);
2765 tcg_gen_shr_tl(t2, s2, shift);
2766 tcg_gen_shri_tl(t2, t2, 1);
2767
2768 tcg_gen_or_tl(dst, t1, t2);
2769 }
2770 #endif
2771
2772 static int extract_dfpreg(DisasContext *dc, int x)
2773 {
2774 return DFPREG(x);
2775 }
2776
2777 static int extract_qfpreg(DisasContext *dc, int x)
2778 {
2779 return QFPREG(x);
2780 }
2781
2782 /* Include the auto-generated decoder. */
2783 #include "decode-insns.c.inc"
2784
2785 #define TRANS(NAME, AVAIL, FUNC, ...) \
2786 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2787 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
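/*
 * E.g. TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc) expands to a
 * trans_RDPC() that fails on 32-bit cpus and otherwise forwards to
 * do_rd_special(dc, true, a->rd, do_rdpc).
 */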
2788
2789 #define avail_ALL(C) true
2790 #ifdef TARGET_SPARC64
2791 # define avail_32(C) false
2792 # define avail_ASR17(C) false
2793 # define avail_CASA(C) true
2794 # define avail_DIV(C) true
2795 # define avail_MUL(C) true
2796 # define avail_POWERDOWN(C) false
2797 # define avail_64(C) true
2798 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2799 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2800 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2801 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2802 #else
2803 # define avail_32(C) true
2804 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2805 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2806 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2807 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2808 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2809 # define avail_64(C) false
2810 # define avail_GL(C) false
2811 # define avail_HYPV(C) false
2812 # define avail_VIS1(C) false
2813 # define avail_VIS2(C) false
2814 #endif
2815
2816 /* Default case for non-jump instructions. */
2817 static bool advance_pc(DisasContext *dc)
2818 {
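/* Real npc values are always 4-byte aligned; nonzero low bits flag the
   symbolic DYNAMIC_PC, JUMP_PC and DYNAMIC_PC_LOOKUP markers. */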
2819 if (dc->npc & 3) {
2820 switch (dc->npc) {
2821 case DYNAMIC_PC:
2822 case DYNAMIC_PC_LOOKUP:
2823 dc->pc = dc->npc;
2824 gen_op_next_insn();
2825 break;
2826 case JUMP_PC:
2827 /* we can do a static jump */
2828 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
2829 dc->base.is_jmp = DISAS_NORETURN;
2830 break;
2831 default:
2832 g_assert_not_reached();
2833 }
2834 } else {
2835 dc->pc = dc->npc;
2836 dc->npc = dc->npc + 4;
2837 }
2838 return true;
2839 }
2840
2841 /*
2842 * Major opcodes 00 and 01 -- branches, call, and sethi
2843 */
2844
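/*
 * The annul bit squashes the delay slot: ba,a and bn,a skip the delay
 * instruction entirely, while an annulled conditional branch executes
 * it only when the branch is taken.
 */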
2845 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2846 {
2847 if (annul) {
2848 dc->pc = dc->npc + 4;
2849 dc->npc = dc->pc + 4;
2850 } else {
2851 dc->pc = dc->npc;
2852 dc->npc = dc->pc + 4;
2853 }
2854 return true;
2855 }
2856
2857 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2858 target_ulong dest)
2859 {
2860 if (annul) {
2861 dc->pc = dest;
2862 dc->npc = dest + 4;
2863 } else {
2864 dc->pc = dc->npc;
2865 dc->npc = dest;
2866 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2867 }
2868 return true;
2869 }
2870
2871 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2872 bool annul, target_ulong dest)
2873 {
2874 target_ulong npc = dc->npc;
2875
2876 if (annul) {
2877 TCGLabel *l1 = gen_new_label();
2878
2879 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2880 gen_goto_tb(dc, 0, npc, dest);
2881 gen_set_label(l1);
2882 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2883
2884 dc->base.is_jmp = DISAS_NORETURN;
2885 } else {
2886 if (npc & 3) {
2887 switch (npc) {
2888 case DYNAMIC_PC:
2889 case DYNAMIC_PC_LOOKUP:
2890 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2891 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2892 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2893 cmp->c1, cmp->c2,
2894 tcg_constant_tl(dest), cpu_npc);
2895 dc->pc = npc;
2896 break;
2897 default:
2898 g_assert_not_reached();
2899 }
2900 } else {
2901 dc->pc = npc;
2902 dc->jump_pc[0] = dest;
2903 dc->jump_pc[1] = npc + 4;
2904 dc->npc = JUMP_PC;
2905 if (cmp->is_bool) {
2906 tcg_gen_mov_tl(cpu_cond, cmp->c1);
2907 } else {
2908 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2909 }
2910 }
2911 }
2912 return true;
2913 }
2914
2915 static bool raise_priv(DisasContext *dc)
2916 {
2917 gen_exception(dc, TT_PRIV_INSN);
2918 return true;
2919 }
2920
2921 static bool raise_unimpfpop(DisasContext *dc)
2922 {
2923 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2924 return true;
2925 }
2926
2927 static bool gen_trap_float128(DisasContext *dc)
2928 {
2929 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2930 return false;
2931 }
2932 return raise_unimpfpop(dc);
2933 }
2934
2935 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2936 {
2937 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2938 DisasCompare cmp;
2939
2940 switch (a->cond) {
2941 case 0x0:
2942 return advance_jump_uncond_never(dc, a->a);
2943 case 0x8:
2944 return advance_jump_uncond_always(dc, a->a, target);
2945 default:
2946 flush_cond(dc);
2947
2948 gen_compare(&cmp, a->cc, a->cond, dc);
2949 return advance_jump_cond(dc, &cmp, a->a, target);
2950 }
2951 }
2952
2953 TRANS(Bicc, ALL, do_bpcc, a)
2954 TRANS(BPcc, 64, do_bpcc, a)
2955
2956 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2957 {
2958 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2959 DisasCompare cmp;
2960
2961 if (gen_trap_ifnofpu(dc)) {
2962 return true;
2963 }
2964 switch (a->cond) {
2965 case 0x0:
2966 return advance_jump_uncond_never(dc, a->a);
2967 case 0x8:
2968 return advance_jump_uncond_always(dc, a->a, target);
2969 default:
2970 flush_cond(dc);
2971
2972 gen_fcompare(&cmp, a->cc, a->cond);
2973 return advance_jump_cond(dc, &cmp, a->a, target);
2974 }
2975 }
2976
2977 TRANS(FBPfcc, 64, do_fbpfcc, a)
2978 TRANS(FBfcc, ALL, do_fbpfcc, a)
2979
2980 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2981 {
2982 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2983 DisasCompare cmp;
2984
2985 if (!avail_64(dc)) {
2986 return false;
2987 }
2988 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2989 return false;
2990 }
2991
2992 flush_cond(dc);
2993 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2994 return advance_jump_cond(dc, &cmp, a->a, target);
2995 }
2996
2997 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2998 {
2999 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3000
3001 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
3002 gen_mov_pc_npc(dc);
3003 dc->npc = target;
3004 return true;
3005 }
3006
3007 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
3008 {
3009 /*
3010 * For sparc32, always generate the no-coprocessor exception.
3011 * For sparc64, always generate illegal instruction.
3012 */
3013 #ifdef TARGET_SPARC64
3014 return false;
3015 #else
3016 gen_exception(dc, TT_NCP_INSN);
3017 return true;
3018 #endif
3019 }
3020
3021 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
3022 {
3023 /* Special-case %g0 because that's the canonical nop. */
3024 if (a->rd) {
3025 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
3026 }
3027 return advance_pc(dc);
3028 }
3029
3030 /*
3031 * Major opcode 10 -- integer, floating-point, VIS, and system insns.
3032 */
3033
3034 static bool do_tcc(DisasContext *dc, int cond, int cc,
3035 int rs1, bool imm, int rs2_or_imm)
3036 {
3037 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3038 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3039 DisasCompare cmp;
3040 TCGLabel *lab;
3041 TCGv_i32 trap;
3042
3043 /* Trap never. */
3044 if (cond == 0) {
3045 return advance_pc(dc);
3046 }
3047
3048 /*
3049 * Immediate traps are the most common case. Since this value is
3050 * live across the branch, it really pays to evaluate the constant.
3051 */
3052 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
3053 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
3054 } else {
3055 trap = tcg_temp_new_i32();
3056 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
3057 if (imm) {
3058 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
3059 } else {
3060 TCGv_i32 t2 = tcg_temp_new_i32();
3061 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
3062 tcg_gen_add_i32(trap, trap, t2);
3063 }
3064 tcg_gen_andi_i32(trap, trap, mask);
3065 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3066 }
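/* E.g. the conventional sparc32 Linux syscall trap "ta 0x10" takes the
   constant path above: rs1 is %g0, so the trap number folds to the
   constant (0x10 & mask) + TT_TRAP. */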
3067
3068 /* Trap always. */
3069 if (cond == 8) {
3070 save_state(dc);
3071 gen_helper_raise_exception(tcg_env, trap);
3072 dc->base.is_jmp = DISAS_NORETURN;
3073 return true;
3074 }
3075
3076 /* Conditional trap. */
3077 flush_cond(dc);
3078 lab = delay_exceptionv(dc, trap);
3079 gen_compare(&cmp, cc, cond, dc);
3080 tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
3081
3082 return advance_pc(dc);
3083 }
3084
3085 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
3086 {
3087 if (avail_32(dc) && a->cc) {
3088 return false;
3089 }
3090 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
3091 }
3092
3093 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
3094 {
3095 if (avail_64(dc)) {
3096 return false;
3097 }
3098 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
3099 }
3100
3101 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
3102 {
3103 if (avail_32(dc)) {
3104 return false;
3105 }
3106 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
3107 }
3108
3109 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
3110 {
3111 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
3112 return advance_pc(dc);
3113 }
3114
3115 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
3116 {
3117 if (avail_32(dc)) {
3118 return false;
3119 }
3120 if (a->mmask) {
3121 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
3122 tcg_gen_mb(a->mmask | TCG_BAR_SC);
3123 }
3124 if (a->cmask) {
3125 /* For #Sync, etc., end the TB to recognize interrupts. */
3126 dc->base.is_jmp = DISAS_EXIT;
3127 }
3128 return advance_pc(dc);
3129 }
3130
3131 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
3132 TCGv (*func)(DisasContext *, TCGv))
3133 {
3134 if (!priv) {
3135 return raise_priv(dc);
3136 }
3137 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
3138 return advance_pc(dc);
3139 }
3140
3141 static TCGv do_rdy(DisasContext *dc, TCGv dst)
3142 {
3143 return cpu_y;
3144 }
3145
3146 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
3147 {
3148 /*
3149 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
3150 * 32-bit cpus like sparcv7, which ignores the rs1 field.
3151 * This pattern matches after all other ASRs, so Leon3 %asr17 is handled first.
3152 */
3153 if (avail_64(dc) && a->rs1 != 0) {
3154 return false;
3155 }
3156 return do_rd_special(dc, true, a->rd, do_rdy);
3157 }
3158
3159 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
3160 {
3161 uint32_t val;
3162
3163 /*
3164 * TODO: There are many more fields to be filled,
3165 * some of which are writable.
3166 */
3167 val = dc->def->nwindows - 1; /* [4:0] NWIN */
3168 val |= 1 << 8; /* [8] V8 */
3169
3170 return tcg_constant_tl(val);
3171 }
3172
3173 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
3174
3175 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
3176 {
3177 update_psr(dc);
3178 gen_helper_rdccr(dst, tcg_env);
3179 return dst;
3180 }
3181
3182 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
3183
3184 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
3185 {
3186 #ifdef TARGET_SPARC64
3187 return tcg_constant_tl(dc->asi);
3188 #else
3189 qemu_build_not_reached();
3190 #endif
3191 }
3192
3193 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
3194
3195 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
3196 {
3197 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3198
3199 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3200 if (translator_io_start(&dc->base)) {
3201 dc->base.is_jmp = DISAS_EXIT;
3202 }
3203 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3204 tcg_constant_i32(dc->mem_idx));
3205 return dst;
3206 }
3207
3208 /* TODO: non-priv access only allowed when enabled. */
3209 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
3210
3211 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
3212 {
3213 return tcg_constant_tl(address_mask_i(dc, dc->pc));
3214 }
3215
3216 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3217
3218 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
3219 {
3220 tcg_gen_ext_i32_tl(dst, cpu_fprs);
3221 return dst;
3222 }
3223
3224 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
3225
3226 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
3227 {
3228 gen_trap_ifnofpu(dc);
3229 return cpu_gsr;
3230 }
3231
3232 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
3233
3234 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
3235 {
3236 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
3237 return dst;
3238 }
3239
3240 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
3241
3242 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
3243 {
3244 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
3245 return dst;
3246 }
3247
3248 /* TODO: non-priv access only allowed when enabled. */
3249 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3250
3251 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
3252 {
3253 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3254
3255 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3256 if (translator_io_start(&dc->base)) {
3257 dc->base.is_jmp = DISAS_EXIT;
3258 }
3259 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3260 tcg_constant_i32(dc->mem_idx));
3261 return dst;
3262 }
3263
3264 /* TODO: non-priv access only allowed when enabled. */
3265 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3266
3267 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3268 {
3269 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3270 return dst;
3271 }
3272
3273 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3274 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3275
3276 /*
3277 * UltraSPARC-T1 Strand status.
3278 * The HYPV check may not be enough: UA2005 and UA2007 describe
3279 * this ASR as implementation dependent.
3280 */
3281 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3282 {
3283 return tcg_constant_tl(1);
3284 }
3285
3286 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3287
3288 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3289 {
3290 update_psr(dc);
3291 gen_helper_rdpsr(dst, tcg_env);
3292 return dst;
3293 }
3294
3295 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3296
3297 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3298 {
3299 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3300 return dst;
3301 }
3302
3303 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3304
3305 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3306 {
3307 TCGv_i32 tl = tcg_temp_new_i32();
3308 TCGv_ptr tp = tcg_temp_new_ptr();
3309
3310 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3311 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
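/* Each htstate entry is a uint64_t, hence the scale by 8 below. */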
3312 tcg_gen_shli_i32(tl, tl, 3);
3313 tcg_gen_ext_i32_ptr(tp, tl);
3314 tcg_gen_add_ptr(tp, tp, tcg_env);
3315
3316 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3317 return dst;
3318 }
3319
3320 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3321
3322 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3323 {
3324 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3325 return dst;
3326 }
3327
3328 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3329
3330 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3331 {
3332 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3333 return dst;
3334 }
3335
3336 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3337
3338 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3339 {
3340 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3341 return dst;
3342 }
3343
3344 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3345
3346 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3347 {
3348 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3349 return dst;
3350 }
3351
3352 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3353 do_rdhstick_cmpr)
3354
3355 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3356 {
3357 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3358 return dst;
3359 }
3360
3361 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3362
3363 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3364 {
3365 #ifdef TARGET_SPARC64
3366 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3367
3368 gen_load_trap_state_at_tl(r_tsptr);
3369 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3370 return dst;
3371 #else
3372 qemu_build_not_reached();
3373 #endif
3374 }
3375
3376 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3377
3378 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3379 {
3380 #ifdef TARGET_SPARC64
3381 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3382
3383 gen_load_trap_state_at_tl(r_tsptr);
3384 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3385 return dst;
3386 #else
3387 qemu_build_not_reached();
3388 #endif
3389 }
3390
3391 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3392
3393 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3394 {
3395 #ifdef TARGET_SPARC64
3396 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3397
3398 gen_load_trap_state_at_tl(r_tsptr);
3399 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3400 return dst;
3401 #else
3402 qemu_build_not_reached();
3403 #endif
3404 }
3405
3406 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3407
3408 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3409 {
3410 #ifdef TARGET_SPARC64
3411 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3412
3413 gen_load_trap_state_at_tl(r_tsptr);
3414 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3415 return dst;
3416 #else
3417 qemu_build_not_reached();
3418 #endif
3419 }
3420
3421 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3422 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3423
3424 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3425 {
3426 return cpu_tbr;
3427 }
3428
3429 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3430 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3431
3432 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3433 {
3434 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3435 return dst;
3436 }
3437
3438 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3439
3440 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3441 {
3442 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3443 return dst;
3444 }
3445
3446 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3447
3448 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3449 {
3450 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3451 return dst;
3452 }
3453
3454 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3455
3456 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3457 {
3458 gen_helper_rdcwp(dst, tcg_env);
3459 return dst;
3460 }
3461
3462 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3463
3464 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3465 {
3466 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3467 return dst;
3468 }
3469
3470 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3471
3472 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3473 {
3474 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3475 return dst;
3476 }
3477
3478 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3479 do_rdcanrestore)
3480
3481 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3482 {
3483 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3484 return dst;
3485 }
3486
3487 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3488
3489 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3490 {
3491 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3492 return dst;
3493 }
3494
3495 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3496
3497 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3498 {
3499 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3500 return dst;
3501 }
3502
3503 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3504
3505 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3506 {
3507 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3508 return dst;
3509 }
3510
3511 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3512
3513 /* UA2005 strand status */
3514 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3515 {
3516 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3517 return dst;
3518 }
3519
3520 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3521
3522 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3523 {
3524 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3525 return dst;
3526 }
3527
3528 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3529
3530 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3531 {
3532 if (avail_64(dc)) {
3533 gen_helper_flushw(tcg_env);
3534 return advance_pc(dc);
3535 }
3536 return false;
3537 }
3538
3539 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3540 void (*func)(DisasContext *, TCGv))
3541 {
3542 TCGv src;
3543
3544 /* For simplicity, we under-decoded the rs2 form. */
3545 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3546 return false;
3547 }
3548 if (!priv) {
3549 return raise_priv(dc);
3550 }
3551
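/* The architected wr source operand is r[rs1] XOR r[rs2] (or r[rs1] XOR
   sign_ext(simm13)); evaluate it here, folding constants when possible. */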
3552 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3553 src = tcg_constant_tl(a->rs2_or_imm);
3554 } else {
3555 TCGv src1 = gen_load_gpr(dc, a->rs1);
3556 if (a->rs2_or_imm == 0) {
3557 src = src1;
3558 } else {
3559 src = tcg_temp_new();
3560 if (a->imm) {
3561 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3562 } else {
3563 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3564 }
3565 }
3566 }
3567 func(dc, src);
3568 return advance_pc(dc);
3569 }
3570
3571 static void do_wry(DisasContext *dc, TCGv src)
3572 {
3573 tcg_gen_ext32u_tl(cpu_y, src);
3574 }
3575
3576 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3577
3578 static void do_wrccr(DisasContext *dc, TCGv src)
3579 {
3580 gen_helper_wrccr(tcg_env, src);
3581 }
3582
3583 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3584
3585 static void do_wrasi(DisasContext *dc, TCGv src)
3586 {
3587 TCGv tmp = tcg_temp_new();
3588
3589 tcg_gen_ext8u_tl(tmp, src);
3590 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3591 /* End TB to notice changed ASI. */
3592 dc->base.is_jmp = DISAS_EXIT;
3593 }
3594
3595 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3596
3597 static void do_wrfprs(DisasContext *dc, TCGv src)
3598 {
3599 #ifdef TARGET_SPARC64
3600 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3601 dc->fprs_dirty = 0;
3602 dc->base.is_jmp = DISAS_EXIT;
3603 #else
3604 qemu_build_not_reached();
3605 #endif
3606 }
3607
3608 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3609
3610 static void do_wrgsr(DisasContext *dc, TCGv src)
3611 {
3612 gen_trap_ifnofpu(dc);
3613 tcg_gen_mov_tl(cpu_gsr, src);
3614 }
3615
3616 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3617
3618 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3619 {
3620 gen_helper_set_softint(tcg_env, src);
3621 }
3622
3623 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3624
3625 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3626 {
3627 gen_helper_clear_softint(tcg_env, src);
3628 }
3629
3630 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3631
3632 static void do_wrsoftint(DisasContext *dc, TCGv src)
3633 {
3634 gen_helper_write_softint(tcg_env, src);
3635 }
3636
3637 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3638
3639 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3640 {
3641 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3642
3643 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3644 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3645 translator_io_start(&dc->base);
3646 gen_helper_tick_set_limit(r_tickptr, src);
3647 /* End TB to handle timer interrupt */
3648 dc->base.is_jmp = DISAS_EXIT;
3649 }
3650
3651 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3652
3653 static void do_wrstick(DisasContext *dc, TCGv src)
3654 {
3655 #ifdef TARGET_SPARC64
3656 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3657
3658 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3659 translator_io_start(&dc->base);
3660 gen_helper_tick_set_count(r_tickptr, src);
3661 /* End TB to handle timer interrupt */
3662 dc->base.is_jmp = DISAS_EXIT;
3663 #else
3664 qemu_build_not_reached();
3665 #endif
3666 }
3667
3668 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3669
3670 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3671 {
3672 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3673
3674 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3675 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3676 translator_io_start(&dc->base);
3677 gen_helper_tick_set_limit(r_tickptr, src);
3678 /* End TB to handle timer interrupt */
3679 dc->base.is_jmp = DISAS_EXIT;
3680 }
3681
3682 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3683
3684 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3685 {
3686 save_state(dc);
3687 gen_helper_power_down(tcg_env);
3688 }
3689
3690 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3691
3692 static void do_wrpsr(DisasContext *dc, TCGv src)
3693 {
3694 gen_helper_wrpsr(tcg_env, src);
3695 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3696 dc->cc_op = CC_OP_FLAGS;
3697 dc->base.is_jmp = DISAS_EXIT;
3698 }
3699
3700 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3701
3702 static void do_wrwim(DisasContext *dc, TCGv src)
3703 {
3704 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
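/* E.g. with 8 register windows the mask is 0xff; WIM bits for
   unimplemented windows must read as zero. */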
3705 TCGv tmp = tcg_temp_new();
3706
3707 tcg_gen_andi_tl(tmp, src, mask);
3708 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3709 }
3710
3711 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3712
3713 static void do_wrtpc(DisasContext *dc, TCGv src)
3714 {
3715 #ifdef TARGET_SPARC64
3716 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3717
3718 gen_load_trap_state_at_tl(r_tsptr);
3719 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3720 #else
3721 qemu_build_not_reached();
3722 #endif
3723 }
3724
3725 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3726
3727 static void do_wrtnpc(DisasContext *dc, TCGv src)
3728 {
3729 #ifdef TARGET_SPARC64
3730 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3731
3732 gen_load_trap_state_at_tl(r_tsptr);
3733 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3734 #else
3735 qemu_build_not_reached();
3736 #endif
3737 }
3738
3739 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3740
3741 static void do_wrtstate(DisasContext *dc, TCGv src)
3742 {
3743 #ifdef TARGET_SPARC64
3744 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3745
3746 gen_load_trap_state_at_tl(r_tsptr);
3747 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3748 #else
3749 qemu_build_not_reached();
3750 #endif
3751 }
3752
3753 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3754
3755 static void do_wrtt(DisasContext *dc, TCGv src)
3756 {
3757 #ifdef TARGET_SPARC64
3758 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3759
3760 gen_load_trap_state_at_tl(r_tsptr);
3761 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3762 #else
3763 qemu_build_not_reached();
3764 #endif
3765 }
3766
3767 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3768
3769 static void do_wrtick(DisasContext *dc, TCGv src)
3770 {
3771 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3772
3773 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3774 translator_io_start(&dc->base);
3775 gen_helper_tick_set_count(r_tickptr, src);
3776 /* End TB to handle timer interrupt */
3777 dc->base.is_jmp = DISAS_EXIT;
3778 }
3779
3780 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3781
3782 static void do_wrtba(DisasContext *dc, TCGv src)
3783 {
3784 tcg_gen_mov_tl(cpu_tbr, src);
3785 }
3786
3787 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3788
3789 static void do_wrpstate(DisasContext *dc, TCGv src)
3790 {
3791 save_state(dc);
3792 if (translator_io_start(&dc->base)) {
3793 dc->base.is_jmp = DISAS_EXIT;
3794 }
3795 gen_helper_wrpstate(tcg_env, src);
3796 dc->npc = DYNAMIC_PC;
3797 }
3798
3799 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3800
3801 static void do_wrtl(DisasContext *dc, TCGv src)
3802 {
3803 save_state(dc);
3804 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3805 dc->npc = DYNAMIC_PC;
3806 }
3807
3808 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3809
3810 static void do_wrpil(DisasContext *dc, TCGv src)
3811 {
3812 if (translator_io_start(&dc->base)) {
3813 dc->base.is_jmp = DISAS_EXIT;
3814 }
3815 gen_helper_wrpil(tcg_env, src);
3816 }
3817
3818 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3819
3820 static void do_wrcwp(DisasContext *dc, TCGv src)
3821 {
3822 gen_helper_wrcwp(tcg_env, src);
3823 }
3824
3825 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3826
3827 static void do_wrcansave(DisasContext *dc, TCGv src)
3828 {
3829 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3830 }
3831
3832 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3833
3834 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3835 {
3836 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3837 }
3838
3839 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3840
3841 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3842 {
3843 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3844 }
3845
3846 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3847
3848 static void do_wrotherwin(DisasContext *dc, TCGv src)
3849 {
3850 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3851 }
3852
3853 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3854
3855 static void do_wrwstate(DisasContext *dc, TCGv src)
3856 {
3857 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3858 }
3859
3860 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3861
3862 static void do_wrgl(DisasContext *dc, TCGv src)
3863 {
3864 gen_helper_wrgl(tcg_env, src);
3865 }
3866
3867 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3868
3869 /* UA2005 strand status */
3870 static void do_wrssr(DisasContext *dc, TCGv src)
3871 {
3872 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3873 }
3874
3875 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3876
3877 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3878
3879 static void do_wrhpstate(DisasContext *dc, TCGv src)
3880 {
3881 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3882 dc->base.is_jmp = DISAS_EXIT;
3883 }
3884
3885 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3886
3887 static void do_wrhtstate(DisasContext *dc, TCGv src)
3888 {
3889 TCGv_i32 tl = tcg_temp_new_i32();
3890 TCGv_ptr tp = tcg_temp_new_ptr();
3891
3892 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3893 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3894 tcg_gen_shli_i32(tl, tl, 3);
3895 tcg_gen_ext_i32_ptr(tp, tl);
3896 tcg_gen_add_ptr(tp, tp, tcg_env);
3897
3898 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3899 }
3900
3901 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3902
3903 static void do_wrhintp(DisasContext *dc, TCGv src)
3904 {
3905 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3906 }
3907
3908 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3909
3910 static void do_wrhtba(DisasContext *dc, TCGv src)
3911 {
3912 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3913 }
3914
3915 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3916
3917 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3918 {
3919 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3920
3921 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3922 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3923 translator_io_start(&dc->base);
3924 gen_helper_tick_set_limit(r_tickptr, src);
3925 /* End TB to handle timer interrupt */
3926 dc->base.is_jmp = DISAS_EXIT;
3927 }
3928
3929 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3930 do_wrhstick_cmpr)
3931
3932 static bool do_saved_restored(DisasContext *dc, bool saved)
3933 {
3934 if (!supervisor(dc)) {
3935 return raise_priv(dc);
3936 }
3937 if (saved) {
3938 gen_helper_saved(tcg_env);
3939 } else {
3940 gen_helper_restored(tcg_env);
3941 }
3942 return advance_pc(dc);
3943 }
3944
3945 TRANS(SAVED, 64, do_saved_restored, true)
3946 TRANS(RESTORED, 64, do_saved_restored, false)
3947
3948 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3949 {
3950 return advance_pc(dc);
3951 }
3952
3953 /*
3954 * TODO: Need a feature bit for sparcv8.
3955 * In the meantime, treat all 32-bit cpus like sparcv7.
3956 */
3957 TRANS(NOP_v7, 32, trans_NOP, a)
3958 TRANS(NOP_v9, 64, trans_NOP, a)
3959
3960 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3961 void (*func)(TCGv, TCGv, TCGv),
3962 void (*funci)(TCGv, TCGv, target_long))
3963 {
3964 TCGv dst, src1;
3965
3966 /* For simplicity, we under-decoded the rs2 form. */
3967 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3968 return false;
3969 }
3970
3971 if (a->cc) {
3972 dst = cpu_cc_dst;
3973 } else {
3974 dst = gen_dest_gpr(dc, a->rd);
3975 }
3976 src1 = gen_load_gpr(dc, a->rs1);
3977
3978 if (a->imm || a->rs2_or_imm == 0) {
3979 if (funci) {
3980 funci(dst, src1, a->rs2_or_imm);
3981 } else {
3982 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3983 }
3984 } else {
3985 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3986 }
3987 gen_store_gpr(dc, a->rd, dst);
3988
3989 if (a->cc) {
3990 tcg_gen_movi_i32(cpu_cc_op, cc_op);
3991 dc->cc_op = cc_op;
3992 }
3993 return advance_pc(dc);
3994 }
3995
3996 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3997 void (*func)(TCGv, TCGv, TCGv),
3998 void (*funci)(TCGv, TCGv, target_long),
3999 void (*func_cc)(TCGv, TCGv, TCGv))
4000 {
4001 if (a->cc) {
4002 assert(cc_op >= 0);
4003 return do_arith_int(dc, a, cc_op, func_cc, NULL);
4004 }
4005 return do_arith_int(dc, a, cc_op, func, funci);
4006 }
4007
4008 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
4009 void (*func)(TCGv, TCGv, TCGv),
4010 void (*funci)(TCGv, TCGv, target_long))
4011 {
4012 return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
4013 }
4014
4015 TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
4016 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
4017 TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
4018 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
4019
4020 TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
4021 TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
4022 TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
4023 TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
4024
4025 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
4026 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
4027 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
4028 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
4029 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
4030
4031 TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
4032 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
4033 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
4034
4035 TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
4036 TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
4037 TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
4038 TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
4039
4040 /* TODO: Should have a feature bit -- comes in with UltraSPARC T2. */
4041 TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
4042
4043 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
4044 {
4045 /* OR with %g0 is the canonical alias for MOV. */
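/* E.g. "mov %i0, %o0" assembles as "or %g0, %i0, %o0". */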
4046 if (!a->cc && a->rs1 == 0) {
4047 if (a->imm || a->rs2_or_imm == 0) {
4048 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
4049 } else if (a->rs2_or_imm & ~0x1f) {
4050 /* For simplicity, we under-decoded the rs2 form. */
4051 return false;
4052 } else {
4053 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
4054 }
4055 return advance_pc(dc);
4056 }
4057 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
4058 }
4059
4060 static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
4061 {
4062 switch (dc->cc_op) {
4063 case CC_OP_DIV:
4064 case CC_OP_LOGIC:
4065 /* Carry is known to be zero. Fall back to plain ADD. */
4066 return do_arith(dc, a, CC_OP_ADD,
4067 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
4068 case CC_OP_ADD:
4069 case CC_OP_TADD:
4070 case CC_OP_TADDTV:
4071 return do_arith(dc, a, CC_OP_ADDX,
4072 gen_op_addc_add, NULL, gen_op_addccc_add);
4073 case CC_OP_SUB:
4074 case CC_OP_TSUB:
4075 case CC_OP_TSUBTV:
4076 return do_arith(dc, a, CC_OP_ADDX,
4077 gen_op_addc_sub, NULL, gen_op_addccc_sub);
4078 default:
4079 return do_arith(dc, a, CC_OP_ADDX,
4080 gen_op_addc_generic, NULL, gen_op_addccc_generic);
4081 }
4082 }
4083
4084 static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
4085 {
4086 switch (dc->cc_op) {
4087 case CC_OP_DIV:
4088 case CC_OP_LOGIC:
4089 /* Carry is known to be zero. Fall back to plain SUB. */
4090 return do_arith(dc, a, CC_OP_SUB,
4091 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
4092 case CC_OP_ADD:
4093 case CC_OP_TADD:
4094 case CC_OP_TADDTV:
4095 return do_arith(dc, a, CC_OP_SUBX,
4096 gen_op_subc_add, NULL, gen_op_subccc_add);
4097 case CC_OP_SUB:
4098 case CC_OP_TSUB:
4099 case CC_OP_TSUBTV:
4100 return do_arith(dc, a, CC_OP_SUBX,
4101 gen_op_subc_sub, NULL, gen_op_subccc_sub);
4102 default:
4103 return do_arith(dc, a, CC_OP_SUBX,
4104 gen_op_subc_generic, NULL, gen_op_subccc_generic);
4105 }
4106 }
4107
4108 static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
4109 {
4110 update_psr(dc);
4111 return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
4112 }
4113
4114 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
4115 int width, bool cc, bool left)
4116 {
4117 TCGv dst, s1, s2, lo1, lo2;
4118 uint64_t amask, tabl, tabr;
4119 int shift, imask, omask;
4120
4121 dst = gen_dest_gpr(dc, a->rd);
4122 s1 = gen_load_gpr(dc, a->rs1);
4123 s2 = gen_load_gpr(dc, a->rs2);
4124
4125 if (cc) {
4126 tcg_gen_mov_tl(cpu_cc_src, s1);
4127 tcg_gen_mov_tl(cpu_cc_src2, s2);
4128 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
4129 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4130 dc->cc_op = CC_OP_SUB;
4131 }
4132
4133 /*
4134 * Theory of operation: there are two tables, left and right (not to
4135 * be confused with the left and right versions of the opcode). These
4136 * are indexed by the low 3 bits of the inputs. To make things "easy",
4137 * these tables are loaded into two constants, TABL and TABR below.
4138 * The operation index = (input & imask) << shift calculates the index
4139 * into the constant, while val = (table >> index) & omask calculates
4140 * the value we're looking for.
4141 */
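/*
 * For example, EDGE8 (left) with an address whose low bits are 3:
 * index = (3 & 0x7) << 3 = 24, and (TABL >> 24) & 0xff = 0xf8,
 * i.e. five of the eight byte lanes are enabled.
 */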
4142 switch (width) {
4143 case 8:
4144 imask = 0x7;
4145 shift = 3;
4146 omask = 0xff;
4147 if (left) {
4148 tabl = 0x80c0e0f0f8fcfeffULL;
4149 tabr = 0xff7f3f1f0f070301ULL;
4150 } else {
4151 tabl = 0x0103070f1f3f7fffULL;
4152 tabr = 0xfffefcf8f0e0c080ULL;
4153 }
4154 break;
4155 case 16:
4156 imask = 0x6;
4157 shift = 1;
4158 omask = 0xf;
4159 if (left) {
4160 tabl = 0x8cef;
4161 tabr = 0xf731;
4162 } else {
4163 tabl = 0x137f;
4164 tabr = 0xfec8;
4165 }
4166 break;
4167 case 32:
4168 imask = 0x4;
4169 shift = 0;
4170 omask = 0x3;
4171 if (left) {
4172 tabl = (2 << 2) | 3;
4173 tabr = (3 << 2) | 1;
4174 } else {
4175 tabl = (1 << 2) | 3;
4176 tabr = (3 << 2) | 2;
4177 }
4178 break;
4179 default:
4180 g_assert_not_reached();
4181 }
4182
4183 lo1 = tcg_temp_new();
4184 lo2 = tcg_temp_new();
4185 tcg_gen_andi_tl(lo1, s1, imask);
4186 tcg_gen_andi_tl(lo2, s2, imask);
4187 tcg_gen_shli_tl(lo1, lo1, shift);
4188 tcg_gen_shli_tl(lo2, lo2, shift);
4189
4190 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
4191 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
4192 tcg_gen_andi_tl(lo1, lo1, omask);
4193 tcg_gen_andi_tl(lo2, lo2, omask);
4194
4195 amask = address_mask_i(dc, -8);
4196 tcg_gen_andi_tl(s1, s1, amask);
4197 tcg_gen_andi_tl(s2, s2, amask);
4198
4199 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
4200 tcg_gen_and_tl(lo2, lo2, lo1);
4201 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
4202
4203 gen_store_gpr(dc, a->rd, dst);
4204 return advance_pc(dc);
4205 }
4206
4207 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
4208 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
4209 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
4210 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
4211 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
4212 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
4213
4214 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
4215 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
4216 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
4217 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
4218 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
4219 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4220
4221 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4222 void (*func)(TCGv, TCGv, TCGv))
4223 {
4224 TCGv dst = gen_dest_gpr(dc, a->rd);
4225 TCGv src1 = gen_load_gpr(dc, a->rs1);
4226 TCGv src2 = gen_load_gpr(dc, a->rs2);
4227
4228 func(dst, src1, src2);
4229 gen_store_gpr(dc, a->rd, dst);
4230 return advance_pc(dc);
4231 }
4232
4233 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4234 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4235 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4236
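/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the discarded low three
 * bits (negated for the _LITTLE variant) saved in GSR.align for a
 * subsequent FALIGNDATA.
 */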
4237 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
4238 {
4239 #ifdef TARGET_SPARC64
4240 TCGv tmp = tcg_temp_new();
4241
4242 tcg_gen_add_tl(tmp, s1, s2);
4243 tcg_gen_andi_tl(dst, tmp, -8);
4244 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4245 #else
4246 g_assert_not_reached();
4247 #endif
4248 }
4249
4250 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4251 {
4252 #ifdef TARGET_SPARC64
4253 TCGv tmp = tcg_temp_new();
4254
4255 tcg_gen_add_tl(tmp, s1, s2);
4256 tcg_gen_andi_tl(dst, tmp, -8);
4257 tcg_gen_neg_tl(tmp, tmp);
4258 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4259 #else
4260 g_assert_not_reached();
4261 #endif
4262 }
4263
4264 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4265 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4266
4267 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4268 {
4269 TCGv dst, src1, src2;
4270
4271 /* Reject 64-bit shifts for sparc32. */
4272 if (avail_32(dc) && a->x) {
4273 return false;
4274 }
4275
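/* The shift count is truncated to 5 (32-bit) or 6 (64-bit) bits, as on hardware. */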
4276 src2 = tcg_temp_new();
4277 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4278 src1 = gen_load_gpr(dc, a->rs1);
4279 dst = gen_dest_gpr(dc, a->rd);
4280
4281 if (l) {
4282 tcg_gen_shl_tl(dst, src1, src2);
4283 if (!a->x) {
4284 tcg_gen_ext32u_tl(dst, dst);
4285 }
4286 } else if (u) {
4287 if (!a->x) {
4288 tcg_gen_ext32u_tl(dst, src1);
4289 src1 = dst;
4290 }
4291 tcg_gen_shr_tl(dst, src1, src2);
4292 } else {
4293 if (!a->x) {
4294 tcg_gen_ext32s_tl(dst, src1);
4295 src1 = dst;
4296 }
4297 tcg_gen_sar_tl(dst, src1, src2);
4298 }
4299 gen_store_gpr(dc, a->rd, dst);
4300 return advance_pc(dc);
4301 }
4302
4303 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4304 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4305 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4306
4307 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4308 {
4309 TCGv dst, src1;
4310
4311 /* Reject 64-bit shifts for sparc32. */
4312 if (avail_32(dc) && (a->x || a->i >= 32)) {
4313 return false;
4314 }
4315
4316 src1 = gen_load_gpr(dc, a->rs1);
4317 dst = gen_dest_gpr(dc, a->rd);
4318
4319 if (avail_32(dc) || a->x) {
4320 if (l) {
4321 tcg_gen_shli_tl(dst, src1, a->i);
4322 } else if (u) {
4323 tcg_gen_shri_tl(dst, src1, a->i);
4324 } else {
4325 tcg_gen_sari_tl(dst, src1, a->i);
4326 }
4327 } else {
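/* 32-bit shift on a 64-bit cpu: fold the shift and the 32-bit extension into one deposit/extract. */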
4328 if (l) {
4329 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4330 } else if (u) {
4331 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4332 } else {
4333 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4334 }
4335 }
4336 gen_store_gpr(dc, a->rd, dst);
4337 return advance_pc(dc);
4338 }
4339
4340 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4341 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4342 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4343
4344 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4345 {
4346 /* For simplicity, we under-decoded the rs2 form. */
4347 if (!imm && rs2_or_imm & ~0x1f) {
4348 return NULL;
4349 }
4350 if (imm || rs2_or_imm == 0) {
4351 return tcg_constant_tl(rs2_or_imm);
4352 } else {
4353 return cpu_regs[rs2_or_imm];
4354 }
4355 }
4356
4357 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4358 {
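/* Load the current value of rd, so that a false condition leaves it unchanged through the movcond. */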
4359 TCGv dst = gen_load_gpr(dc, rd);
4360
4361 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4362 gen_store_gpr(dc, rd, dst);
4363 return advance_pc(dc);
4364 }
4365
4366 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4367 {
4368 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4369 DisasCompare cmp;
4370
4371 if (src2 == NULL) {
4372 return false;
4373 }
4374 gen_compare(&cmp, a->cc, a->cond, dc);
4375 return do_mov_cond(dc, &cmp, a->rd, src2);
4376 }
4377
4378 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4379 {
4380 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4381 DisasCompare cmp;
4382
4383 if (src2 == NULL) {
4384 return false;
4385 }
4386 gen_fcompare(&cmp, a->cc, a->cond);
4387 return do_mov_cond(dc, &cmp, a->rd, src2);
4388 }
4389
4390 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4391 {
4392 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4393 DisasCompare cmp;
4394
4395 if (src2 == NULL) {
4396 return false;
4397 }
4398 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4399 return do_mov_cond(dc, &cmp, a->rd, src2);
4400 }
4401
4402 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4403 bool (*func)(DisasContext *dc, int rd, TCGv src))
4404 {
4405 TCGv src1, sum;
4406
4407 /* For simplicity, we under-decoded the rs2 form. */
4408 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4409 return false;
4410 }
4411
4412 /*
4413 * Always load the sum into a new temporary.
4414 * This is required to capture the value across a window change,
4415 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4416 */
4417 sum = tcg_temp_new();
4418 src1 = gen_load_gpr(dc, a->rs1);
4419 if (a->imm || a->rs2_or_imm == 0) {
4420 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4421 } else {
4422 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4423 }
4424 return func(dc, a->rd, sum);
4425 }
4426
4427 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4428 {
4429 /*
4430 * Preserve pc across advance, so that we can delay
4431 * the writeback to rd until after src is consumed.
4432 */
4433 target_ulong cur_pc = dc->pc;
4434
4435 gen_check_align(dc, src, 3);
4436
4437 gen_mov_pc_npc(dc);
4438 tcg_gen_mov_tl(cpu_npc, src);
4439 gen_address_mask(dc, cpu_npc);
4440 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4441
4442 dc->npc = DYNAMIC_PC_LOOKUP;
4443 return true;
4444 }
4445
4446 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4447
4448 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4449 {
4450 if (!supervisor(dc)) {
4451 return raise_priv(dc);
4452 }
4453
4454 gen_check_align(dc, src, 3);
4455
4456 gen_mov_pc_npc(dc);
4457 tcg_gen_mov_tl(cpu_npc, src);
4458 gen_helper_rett(tcg_env);
4459
4460 dc->npc = DYNAMIC_PC;
4461 return true;
4462 }
4463
4464 TRANS(RETT, 32, do_add_special, a, do_rett)
4465
4466 static bool do_return(DisasContext *dc, int rd, TCGv src)
4467 {
4468 gen_check_align(dc, src, 3);
4469
4470 gen_mov_pc_npc(dc);
4471 tcg_gen_mov_tl(cpu_npc, src);
4472 gen_address_mask(dc, cpu_npc);
4473
4474 gen_helper_restore(tcg_env);
4475 dc->npc = DYNAMIC_PC_LOOKUP;
4476 return true;
4477 }
4478
4479 TRANS(RETURN, 64, do_add_special, a, do_return)
4480
4481 static bool do_save(DisasContext *dc, int rd, TCGv src)
4482 {
4483 gen_helper_save(tcg_env);
4484 gen_store_gpr(dc, rd, src);
4485 return advance_pc(dc);
4486 }
4487
4488 TRANS(SAVE, ALL, do_add_special, a, do_save)
4489
4490 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4491 {
4492 gen_helper_restore(tcg_env);
4493 gen_store_gpr(dc, rd, src);
4494 return advance_pc(dc);
4495 }
4496
4497 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4498
4499 static bool do_done_retry(DisasContext *dc, bool done)
4500 {
4501 if (!supervisor(dc)) {
4502 return raise_priv(dc);
4503 }
4504 dc->npc = DYNAMIC_PC;
4505 dc->pc = DYNAMIC_PC;
4506 translator_io_start(&dc->base);
4507 if (done) {
4508 gen_helper_done(tcg_env);
4509 } else {
4510 gen_helper_retry(tcg_env);
4511 }
4512 return true;
4513 }
4514
4515 TRANS(DONE, 64, do_done_retry, true)
4516 TRANS(RETRY, 64, do_done_retry, false)
4517
4518 /*
4519 * Major opcode 11 -- load and store instructions
4520 */
4521
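/*
 * Compute the effective address r[rs1] + (imm ? simm13 : r[rs2]),
 * truncated to 32 bits when the address mask is in effect.
 * Returns NULL when the under-decoded rs2 form is invalid.
 */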
4522 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4523 {
4524 TCGv addr, tmp = NULL;
4525
4526 /* For simplicity, we under-decoded the rs2 form. */
4527 if (!imm && rs2_or_imm & ~0x1f) {
4528 return NULL;
4529 }
4530
4531 addr = gen_load_gpr(dc, rs1);
4532 if (rs2_or_imm) {
4533 tmp = tcg_temp_new();
4534 if (imm) {
4535 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4536 } else {
4537 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4538 }
4539 addr = tmp;
4540 }
4541 if (AM_CHECK(dc)) {
4542 if (!tmp) {
4543 tmp = tcg_temp_new();
4544 }
4545 tcg_gen_ext32u_tl(tmp, addr);
4546 addr = tmp;
4547 }
4548 return addr;
4549 }
4550
4551 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4552 {
4553 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4554 DisasASI da;
4555
4556 if (addr == NULL) {
4557 return false;
4558 }
4559 da = resolve_asi(dc, a->asi, mop);
4560
4561 reg = gen_dest_gpr(dc, a->rd);
4562 gen_ld_asi(dc, &da, reg, addr);
4563 gen_store_gpr(dc, a->rd, reg);
4564 return advance_pc(dc);
4565 }
4566
4567 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4568 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4569 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4570 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4571 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4572 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4573 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4574
4575 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4576 {
4577 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4578 DisasASI da;
4579
4580 if (addr == NULL) {
4581 return false;
4582 }
4583 da = resolve_asi(dc, a->asi, mop);
4584
4585 reg = gen_load_gpr(dc, a->rd);
4586 gen_st_asi(dc, &da, reg, addr);
4587 return advance_pc(dc);
4588 }
4589
4590 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4591 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4592 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4593 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4594
4595 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4596 {
4597 TCGv addr;
4598 DisasASI da;
4599
4600 if (a->rd & 1) {
4601 return false;
4602 }
4603 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4604 if (addr == NULL) {
4605 return false;
4606 }
4607 da = resolve_asi(dc, a->asi, MO_TEUQ);
4608 gen_ldda_asi(dc, &da, addr, a->rd);
4609 return advance_pc(dc);
4610 }
4611
4612 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4613 {
4614 TCGv addr;
4615 DisasASI da;
4616
4617 if (a->rd & 1) {
4618 return false;
4619 }
4620 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4621 if (addr == NULL) {
4622 return false;
4623 }
4624 da = resolve_asi(dc, a->asi, MO_TEUQ);
4625 gen_stda_asi(dc, &da, addr, a->rd);
4626 return advance_pc(dc);
4627 }
4628
4629 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4630 {
4631 TCGv addr, reg;
4632 DisasASI da;
4633
4634 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4635 if (addr == NULL) {
4636 return false;
4637 }
4638 da = resolve_asi(dc, a->asi, MO_UB);
4639
4640 reg = gen_dest_gpr(dc, a->rd);
4641 gen_ldstub_asi(dc, &da, reg, addr);
4642 gen_store_gpr(dc, a->rd, reg);
4643 return advance_pc(dc);
4644 }
4645
4646 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4647 {
4648 TCGv addr, dst, src;
4649 DisasASI da;
4650
4651 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4652 if (addr == NULL) {
4653 return false;
4654 }
4655 da = resolve_asi(dc, a->asi, MO_TEUL);
4656
4657 dst = gen_dest_gpr(dc, a->rd);
4658 src = gen_load_gpr(dc, a->rd);
4659 gen_swap_asi(dc, &da, dst, src, addr);
4660 gen_store_gpr(dc, a->rd, dst);
4661 return advance_pc(dc);
4662 }
4663
4664 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4665 {
4666 TCGv addr, o, n, c;
4667 DisasASI da;
4668
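/* The CAS address comes from rs1 alone; rs2 supplies the comparison value. */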
4669 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4670 if (addr == NULL) {
4671 return false;
4672 }
4673 da = resolve_asi(dc, a->asi, mop);
4674
4675 o = gen_dest_gpr(dc, a->rd);
4676 n = gen_load_gpr(dc, a->rd);
4677 c = gen_load_gpr(dc, a->rs2_or_imm);
4678 gen_cas_asi(dc, &da, o, n, c, addr);
4679 gen_store_gpr(dc, a->rd, o);
4680 return advance_pc(dc);
4681 }
4682
4683 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4684 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4685
4686 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4687 {
4688 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4689 DisasASI da;
4690
4691 if (addr == NULL) {
4692 return false;
4693 }
4694 if (gen_trap_ifnofpu(dc)) {
4695 return true;
4696 }
4697 if (sz == MO_128 && gen_trap_float128(dc)) {
4698 return true;
4699 }
4700 da = resolve_asi(dc, a->asi, MO_TE | sz);
4701 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4702 gen_update_fprs_dirty(dc, a->rd);
4703 return advance_pc(dc);
4704 }
4705
4706 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4707 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4708 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4709
4710 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4711 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4712 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4713
4714 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4715 {
4716 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4717 DisasASI da;
4718
4719 if (addr == NULL) {
4720 return false;
4721 }
4722 if (gen_trap_ifnofpu(dc)) {
4723 return true;
4724 }
4725 if (sz == MO_128 && gen_trap_float128(dc)) {
4726 return true;
4727 }
4728 da = resolve_asi(dc, a->asi, MO_TE | sz);
4729 gen_stf_asi(dc, &da, sz, addr, a->rd);
4730 return advance_pc(dc);
4731 }
4732
4733 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4734 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4735 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4736
4737 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4738 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4739 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4740
4741 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4742 {
4743 if (!avail_32(dc)) {
4744 return false;
4745 }
4746 if (!supervisor(dc)) {
4747 return raise_priv(dc);
4748 }
4749 if (gen_trap_ifnofpu(dc)) {
4750 return true;
4751 }
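/* With no floating-point queue modeled, STDFQ always signals a sequence error. */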
4752 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4753 return true;
4754 }
4755
4756 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4757 target_ulong new_mask, target_ulong old_mask)
4758 {
4759 TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4760 if (addr == NULL) {
4761 return false;
4762 }
4763 if (gen_trap_ifnofpu(dc)) {
4764 return true;
4765 }
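/* Keep the old_mask bits of FSR and replace the new_mask bits with the loaded value. */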
4766 tmp = tcg_temp_new();
4767 tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
4768 tcg_gen_andi_tl(tmp, tmp, new_mask);
4769 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
4770 tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
4771 gen_helper_set_fsr(tcg_env, cpu_fsr);
4772 return advance_pc(dc);
4773 }
4774
4775 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4776 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4777
4778 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4779 {
4780 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4781 if (addr == NULL) {
4782 return false;
4783 }
4784 if (gen_trap_ifnofpu(dc)) {
4785 return true;
4786 }
4787 tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4788 return advance_pc(dc);
4789 }
4790
4791 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4792 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4793
4794 #define CHECK_IU_FEATURE(dc, FEATURE) \
4795 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4796 goto illegal_insn;
4797 #define CHECK_FPU_FEATURE(dc, FEATURE) \
4798 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4799 goto nfpu_insn;
4800
4801 /* before an instruction, dc->pc must be static */
4802 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
4803 {
4804 unsigned int opc, rs1, rs2, rd;
4805 TCGv cpu_src1 __attribute__((unused));
4806 TCGv cpu_src2 __attribute__((unused));
4807 TCGv_i32 cpu_src1_32, cpu_src2_32;
4808 TCGv_i64 cpu_src1_64, cpu_src2_64;
4809 TCGv_i32 cpu_dst_32 __attribute__((unused));
4810 TCGv_i64 cpu_dst_64 __attribute__((unused));
4811
4812 opc = GET_FIELD(insn, 0, 1);
4813 rd = GET_FIELD(insn, 2, 6);
4814
4815 switch (opc) {
4816 case 0:
4817 goto illegal_insn; /* in decodetree */
4818 case 1:
4819 g_assert_not_reached(); /* in decodetree */
4820 case 2: /* FPU & Logical Operations */
4821 {
4822 unsigned int xop = GET_FIELD(insn, 7, 12);
4823 TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
4824
4825 if (xop == 0x34) { /* FPU arithmetic, moves & conversions */
4826 if (gen_trap_ifnofpu(dc)) {
4827 goto jmp_insn;
4828 }
4829 gen_op_clear_ieee_excp_and_FTT();
4830 rs1 = GET_FIELD(insn, 13, 17);
4831 rs2 = GET_FIELD(insn, 27, 31);
4832 xop = GET_FIELD(insn, 18, 26);
4833
4834 switch (xop) {
4835 case 0x1: /* fmovs */
4836 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4837 gen_store_fpr_F(dc, rd, cpu_src1_32);
4838 break;
4839 case 0x5: /* fnegs */
4840 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
4841 break;
4842 case 0x9: /* fabss */
4843 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
4844 break;
4845 case 0x29: /* fsqrts */
4846 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
4847 break;
4848 case 0x2a: /* fsqrtd */
4849 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
4850 break;
4851 case 0x2b: /* fsqrtq */
4852 CHECK_FPU_FEATURE(dc, FLOAT128);
4853 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
4854 break;
4855 case 0x41: /* fadds */
4856 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
4857 break;
4858 case 0x42: /* faddd */
4859 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
4860 break;
4861 case 0x43: /* faddq */
4862 CHECK_FPU_FEATURE(dc, FLOAT128);
4863 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
4864 break;
4865 case 0x45: /* fsubs */
4866 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
4867 break;
4868 case 0x46: /* fsubd */
4869 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
4870 break;
4871 case 0x47: /* fsubq */
4872 CHECK_FPU_FEATURE(dc, FLOAT128);
4873 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
4874 break;
4875 case 0x49: /* fmuls */
4876 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
4877 break;
4878 case 0x4a: /* fmuld */
4879 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
4880 break;
4881 case 0x4b: /* fmulq */
4882 CHECK_FPU_FEATURE(dc, FLOAT128);
4883 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
4884 break;
4885 case 0x4d: /* fdivs */
4886 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
4887 break;
4888 case 0x4e: /* fdivd */
4889 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
4890 break;
4891 case 0x4f: /* fdivq */
4892 CHECK_FPU_FEATURE(dc, FLOAT128);
4893 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
4894 break;
4895 case 0x69: /* fsmuld */
4896 CHECK_FPU_FEATURE(dc, FSMULD);
4897 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
4898 break;
4899 case 0x6e: /* fdmulq */
4900 CHECK_FPU_FEATURE(dc, FLOAT128);
4901 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
4902 break;
4903 case 0xc4: /* fitos */
4904 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
4905 break;
4906 case 0xc6: /* fdtos */
4907 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
4908 break;
4909 case 0xc7: /* fqtos */
4910 CHECK_FPU_FEATURE(dc, FLOAT128);
4911 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
4912 break;
4913 case 0xc8: /* fitod */
4914 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
4915 break;
4916 case 0xc9: /* fstod */
4917 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
4918 break;
4919 case 0xcb: /* fqtod */
4920 CHECK_FPU_FEATURE(dc, FLOAT128);
4921 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
4922 break;
4923 case 0xcc: /* fitoq */
4924 CHECK_FPU_FEATURE(dc, FLOAT128);
4925 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
4926 break;
4927 case 0xcd: /* fstoq */
4928 CHECK_FPU_FEATURE(dc, FLOAT128);
4929 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
4930 break;
4931 case 0xce: /* fdtoq */
4932 CHECK_FPU_FEATURE(dc, FLOAT128);
4933 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
4934 break;
4935 case 0xd1: /* fstoi */
4936 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
4937 break;
4938 case 0xd2: /* fdtoi */
4939 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
4940 break;
4941 case 0xd3: /* fqtoi */
4942 CHECK_FPU_FEATURE(dc, FLOAT128);
4943 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
4944 break;
4945 #ifdef TARGET_SPARC64
4946 case 0x2: /* V9 fmovd */
4947 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4948 gen_store_fpr_D(dc, rd, cpu_src1_64);
4949 break;
4950 case 0x3: /* V9 fmovq */
4951 CHECK_FPU_FEATURE(dc, FLOAT128);
4952 gen_move_Q(dc, rd, rs2);
4953 break;
4954 case 0x6: /* V9 fnegd */
4955 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
4956 break;
4957 case 0x7: /* V9 fnegq */
4958 CHECK_FPU_FEATURE(dc, FLOAT128);
4959 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
4960 break;
4961 case 0xa: /* V9 fabsd */
4962 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
4963 break;
4964 case 0xb: /* V9 fabsq */
4965 CHECK_FPU_FEATURE(dc, FLOAT128);
4966 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
4967 break;
4968 case 0x81: /* V9 fstox */
4969 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
4970 break;
4971 case 0x82: /* V9 fdtox */
4972 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
4973 break;
4974 case 0x83: /* V9 fqtox */
4975 CHECK_FPU_FEATURE(dc, FLOAT128);
4976 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
4977 break;
4978 case 0x84: /* V9 fxtos */
4979 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
4980 break;
4981 case 0x88: /* V9 fxtod */
4982 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
4983 break;
4984 case 0x8c: /* V9 fxtoq */
4985 CHECK_FPU_FEATURE(dc, FLOAT128);
4986 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
4987 break;
4988 #endif
4989 default:
4990 goto illegal_insn;
4991 }
4992 } else if (xop == 0x35) { /* FPU conditional moves & compares */
4993 #ifdef TARGET_SPARC64
4994 int cond;
4995 #endif
4996 if (gen_trap_ifnofpu(dc)) {
4997 goto jmp_insn;
4998 }
4999 gen_op_clear_ieee_excp_and_FTT();
5000 rs1 = GET_FIELD(insn, 13, 17);
5001 rs2 = GET_FIELD(insn, 27, 31);
5002 xop = GET_FIELD(insn, 18, 26);
5003
5004 #ifdef TARGET_SPARC64
5005 #define FMOVR(sz) \
5006 do { \
5007 DisasCompare cmp; \
5008 cond = GET_FIELD_SP(insn, 10, 12); \
5009 cpu_src1 = get_src1(dc, insn); \
5010 gen_compare_reg(&cmp, cond, cpu_src1); \
5011 gen_fmov##sz(dc, &cmp, rd, rs2); \
5012 } while (0)
5013
5014 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
5015 FMOVR(s);
5016 break;
5017 } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */
5018 FMOVR(d);
5019 break;
5020 } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
5021 CHECK_FPU_FEATURE(dc, FLOAT128);
5022 FMOVR(q);
5023 break;
5024 }
5025 #undef FMOVR
5026 #endif
5027 switch (xop) {
5028 #ifdef TARGET_SPARC64
5029 #define FMOVCC(fcc, sz) \
5030 do { \
5031 DisasCompare cmp; \
5032 cond = GET_FIELD_SP(insn, 14, 17); \
5033 gen_fcompare(&cmp, fcc, cond); \
5034 gen_fmov##sz(dc, &cmp, rd, rs2); \
5035 } while (0)
5036
5037 case 0x001: /* V9 fmovscc %fcc0 */
5038 FMOVCC(0, s);
5039 break;
5040 case 0x002: /* V9 fmovdcc %fcc0 */
5041 FMOVCC(0, d);
5042 break;
5043 case 0x003: /* V9 fmovqcc %fcc0 */
5044 CHECK_FPU_FEATURE(dc, FLOAT128);
5045 FMOVCC(0, q);
5046 break;
5047 case 0x041: /* V9 fmovscc %fcc1 */
5048 FMOVCC(1, s);
5049 break;
5050 case 0x042: /* V9 fmovdcc %fcc1 */
5051 FMOVCC(1, d);
5052 break;
5053 case 0x043: /* V9 fmovqcc %fcc1 */
5054 CHECK_FPU_FEATURE(dc, FLOAT128);
5055 FMOVCC(1, q);
5056 break;
5057 case 0x081: /* V9 fmovscc %fcc2 */
5058 FMOVCC(2, s);
5059 break;
5060 case 0x082: /* V9 fmovdcc %fcc2 */
5061 FMOVCC(2, d);
5062 break;
5063 case 0x083: /* V9 fmovqcc %fcc2 */
5064 CHECK_FPU_FEATURE(dc, FLOAT128);
5065 FMOVCC(2, q);
5066 break;
5067 case 0x0c1: /* V9 fmovscc %fcc3 */
5068 FMOVCC(3, s);
5069 break;
5070 case 0x0c2: /* V9 fmovdcc %fcc3 */
5071 FMOVCC(3, d);
5072 break;
5073 case 0x0c3: /* V9 fmovqcc %fcc3 */
5074 CHECK_FPU_FEATURE(dc, FLOAT128);
5075 FMOVCC(3, q);
5076 break;
5077 #undef FMOVCC
5078 #define FMOVCC(xcc, sz) \
5079 do { \
5080 DisasCompare cmp; \
5081 cond = GET_FIELD_SP(insn, 14, 17); \
5082 gen_compare(&cmp, xcc, cond, dc); \
5083 gen_fmov##sz(dc, &cmp, rd, rs2); \
5084 } while (0)
5085
5086 case 0x101: /* V9 fmovscc %icc */
5087 FMOVCC(0, s);
5088 break;
5089 case 0x102: /* V9 fmovdcc %icc */
5090 FMOVCC(0, d);
5091 break;
5092 case 0x103: /* V9 fmovqcc %icc */
5093 CHECK_FPU_FEATURE(dc, FLOAT128);
5094 FMOVCC(0, q);
5095 break;
5096 case 0x181: /* V9 fmovscc %xcc */
5097 FMOVCC(1, s);
5098 break;
5099 case 0x182: /* V9 fmovdcc %xcc */
5100 FMOVCC(1, d);
5101 break;
5102 case 0x183: /* V9 fmovqcc %xcc */
5103 CHECK_FPU_FEATURE(dc, FLOAT128);
5104 FMOVCC(1, q);
5105 break;
5106 #undef FMOVCC
5107 #endif
5108 case 0x51: /* fcmps, V9 %fcc */
5109 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5110 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
5111 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
5112 break;
5113 case 0x52: /* fcmpd, V9 %fcc */
5114 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5115 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5116 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
5117 break;
5118 case 0x53: /* fcmpq, V9 %fcc */
5119 CHECK_FPU_FEATURE(dc, FLOAT128);
5120 gen_op_load_fpr_QT0(QFPREG(rs1));
5121 gen_op_load_fpr_QT1(QFPREG(rs2));
5122 gen_op_fcmpq(rd & 3);
5123 break;
5124 case 0x55: /* fcmpes, V9 %fcc */
5125 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5126 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
5127 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
5128 break;
5129 case 0x56: /* fcmped, V9 %fcc */
5130 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5131 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5132 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
5133 break;
5134 case 0x57: /* fcmpeq, V9 %fcc */
5135 CHECK_FPU_FEATURE(dc, FLOAT128);
5136 gen_op_load_fpr_QT0(QFPREG(rs1));
5137 gen_op_load_fpr_QT1(QFPREG(rs2));
5138 gen_op_fcmpeq(rd & 3);
5139 break;
5140 default:
5141 goto illegal_insn;
5142 }
5143 } else if (xop == 0x36) {
5144 #ifdef TARGET_SPARC64
5145 /* VIS */
5146 int opf = GET_FIELD_SP(insn, 5, 13);
5147 rs1 = GET_FIELD(insn, 13, 17);
5148 rs2 = GET_FIELD(insn, 27, 31);
5149 if (gen_trap_ifnofpu(dc)) {
5150 goto jmp_insn;
5151 }
5152
5153 switch (opf) {
5154 case 0x000: /* VIS I edge8cc */
5155 case 0x001: /* VIS II edge8n */
5156 case 0x002: /* VIS I edge8lcc */
5157 case 0x003: /* VIS II edge8ln */
5158 case 0x004: /* VIS I edge16cc */
5159 case 0x005: /* VIS II edge16n */
5160 case 0x006: /* VIS I edge16lcc */
5161 case 0x007: /* VIS II edge16ln */
5162 case 0x008: /* VIS I edge32cc */
5163 case 0x009: /* VIS II edge32n */
5164 case 0x00a: /* VIS I edge32lcc */
5165 case 0x00b: /* VIS II edge32ln */
5166 case 0x010: /* VIS I array8 */
5167 case 0x012: /* VIS I array16 */
5168 case 0x014: /* VIS I array32 */
5169 case 0x018: /* VIS I alignaddr */
5170 case 0x01a: /* VIS I alignaddrl */
5171 g_assert_not_reached(); /* in decodetree */
5172 case 0x019: /* VIS II bmask */
5173 CHECK_FPU_FEATURE(dc, VIS2);
5174 cpu_src1 = gen_load_gpr(dc, rs1);
5175 cpu_src2 = gen_load_gpr(dc, rs2);
5176 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
5177 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
5178 gen_store_gpr(dc, rd, cpu_dst);
5179 break;
5180 case 0x020: /* VIS I fcmple16 */
5181 CHECK_FPU_FEATURE(dc, VIS1);
5182 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5183 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5184 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
5185 gen_store_gpr(dc, rd, cpu_dst);
5186 break;
5187 case 0x022: /* VIS I fcmpne16 */
5188 CHECK_FPU_FEATURE(dc, VIS1);
5189 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5190 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5191 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
5192 gen_store_gpr(dc, rd, cpu_dst);
5193 break;
5194 case 0x024: /* VIS I fcmple32 */
5195 CHECK_FPU_FEATURE(dc, VIS1);
5196 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5197 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5198 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
5199 gen_store_gpr(dc, rd, cpu_dst);
5200 break;
5201 case 0x026: /* VIS I fcmpne32 */
5202 CHECK_FPU_FEATURE(dc, VIS1);
5203 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5204 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5205 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
5206 gen_store_gpr(dc, rd, cpu_dst);
5207 break;
5208 case 0x028: /* VIS I fcmpgt16 */
5209 CHECK_FPU_FEATURE(dc, VIS1);
5210 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5211 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5212 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
5213 gen_store_gpr(dc, rd, cpu_dst);
5214 break;
5215 case 0x02a: /* VIS I fcmpeq16 */
5216 CHECK_FPU_FEATURE(dc, VIS1);
5217 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5218 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5219 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
5220 gen_store_gpr(dc, rd, cpu_dst);
5221 break;
5222 case 0x02c: /* VIS I fcmpgt32 */
5223 CHECK_FPU_FEATURE(dc, VIS1);
5224 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5225 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5226 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
5227 gen_store_gpr(dc, rd, cpu_dst);
5228 break;
5229 case 0x02e: /* VIS I fcmpeq32 */
5230 CHECK_FPU_FEATURE(dc, VIS1);
5231 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5232 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5233 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
5234 gen_store_gpr(dc, rd, cpu_dst);
5235 break;
5236 case 0x031: /* VIS I fmul8x16 */
5237 CHECK_FPU_FEATURE(dc, VIS1);
5238 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
5239 break;
5240 case 0x033: /* VIS I fmul8x16au */
5241 CHECK_FPU_FEATURE(dc, VIS1);
5242 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
5243 break;
5244 case 0x035: /* VIS I fmul8x16al */
5245 CHECK_FPU_FEATURE(dc, VIS1);
5246 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
5247 break;
5248 case 0x036: /* VIS I fmul8sux16 */
5249 CHECK_FPU_FEATURE(dc, VIS1);
5250 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
5251 break;
5252 case 0x037: /* VIS I fmul8ulx16 */
5253 CHECK_FPU_FEATURE(dc, VIS1);
5254 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
5255 break;
5256 case 0x038: /* VIS I fmuld8sux16 */
5257 CHECK_FPU_FEATURE(dc, VIS1);
5258 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
5259 break;
5260 case 0x039: /* VIS I fmuld8ulx16 */
5261 CHECK_FPU_FEATURE(dc, VIS1);
5262 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
5263 break;
5264 case 0x03a: /* VIS I fpack32 */
5265 CHECK_FPU_FEATURE(dc, VIS1);
5266 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
5267 break;
5268 case 0x03b: /* VIS I fpack16 */
5269 CHECK_FPU_FEATURE(dc, VIS1);
5270 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5271 cpu_dst_32 = gen_dest_fpr_F(dc);
5272 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5273 gen_store_fpr_F(dc, rd, cpu_dst_32);
5274 break;
5275 case 0x03d: /* VIS I fpackfix */
5276 CHECK_FPU_FEATURE(dc, VIS1);
5277 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5278 cpu_dst_32 = gen_dest_fpr_F(dc);
5279 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5280 gen_store_fpr_F(dc, rd, cpu_dst_32);
5281 break;
5282 case 0x03e: /* VIS I pdist */
5283 CHECK_FPU_FEATURE(dc, VIS1);
5284 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
5285 break;
5286 case 0x048: /* VIS I faligndata */
5287 CHECK_FPU_FEATURE(dc, VIS1);
5288 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
5289 break;
5290 case 0x04b: /* VIS I fpmerge */
5291 CHECK_FPU_FEATURE(dc, VIS1);
5292 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
5293 break;
5294 case 0x04c: /* VIS II bshuffle */
5295 CHECK_FPU_FEATURE(dc, VIS2);
5296 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
5297 break;
5298 case 0x04d: /* VIS I fexpand */
5299 CHECK_FPU_FEATURE(dc, VIS1);
5300 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5301 break;
5302 case 0x050: /* VIS I fpadd16 */
5303 CHECK_FPU_FEATURE(dc, VIS1);
5304 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5305 break;
5306 case 0x051: /* VIS I fpadd16s */
5307 CHECK_FPU_FEATURE(dc, VIS1);
5308 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5309 break;
5310 case 0x052: /* VIS I fpadd32 */
5311 CHECK_FPU_FEATURE(dc, VIS1);
5312 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5313 break;
5314 case 0x053: /* VIS I fpadd32s */
5315 CHECK_FPU_FEATURE(dc, VIS1);
5316 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5317 break;
5318 case 0x054: /* VIS I fpsub16 */
5319 CHECK_FPU_FEATURE(dc, VIS1);
5320 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5321 break;
5322 case 0x055: /* VIS I fpsub16s */
5323 CHECK_FPU_FEATURE(dc, VIS1);
5324 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5325 break;
5326 case 0x056: /* VIS I fpsub32 */
5327 CHECK_FPU_FEATURE(dc, VIS1);
5328 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5329 break;
5330 case 0x057: /* VIS I fpsub32s */
5331 CHECK_FPU_FEATURE(dc, VIS1);
5332 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5333 break;
5334 case 0x060: /* VIS I fzero */
5335 CHECK_FPU_FEATURE(dc, VIS1);
5336 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5337 tcg_gen_movi_i64(cpu_dst_64, 0);
5338 gen_store_fpr_D(dc, rd, cpu_dst_64);
5339 break;
5340 case 0x061: /* VIS I fzeros */
5341 CHECK_FPU_FEATURE(dc, VIS1);
5342 cpu_dst_32 = gen_dest_fpr_F(dc);
5343 tcg_gen_movi_i32(cpu_dst_32, 0);
5344 gen_store_fpr_F(dc, rd, cpu_dst_32);
5345 break;
5346 case 0x062: /* VIS I fnor */
5347 CHECK_FPU_FEATURE(dc, VIS1);
5348 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5349 break;
5350 case 0x063: /* VIS I fnors */
5351 CHECK_FPU_FEATURE(dc, VIS1);
5352 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5353 break;
5354 case 0x064: /* VIS I fandnot2 */
5355 CHECK_FPU_FEATURE(dc, VIS1);
5356 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5357 break;
5358 case 0x065: /* VIS I fandnot2s */
5359 CHECK_FPU_FEATURE(dc, VIS1);
5360 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5361 break;
5362 case 0x066: /* VIS I fnot2 */
5363 CHECK_FPU_FEATURE(dc, VIS1);
5364 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5365 break;
5366 case 0x067: /* VIS I fnot2s */
5367 CHECK_FPU_FEATURE(dc, VIS1);
5368 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5369 break;
5370 case 0x068: /* VIS I fandnot1 */
5371 CHECK_FPU_FEATURE(dc, VIS1);
5372 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5373 break;
5374 case 0x069: /* VIS I fandnot1s */
5375 CHECK_FPU_FEATURE(dc, VIS1);
5376 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5377 break;
5378 case 0x06a: /* VIS I fnot1 */
5379 CHECK_FPU_FEATURE(dc, VIS1);
5380 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5381 break;
5382 case 0x06b: /* VIS I fnot1s */
5383 CHECK_FPU_FEATURE(dc, VIS1);
5384 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5385 break;
5386 case 0x06c: /* VIS I fxor */
5387 CHECK_FPU_FEATURE(dc, VIS1);
5388 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5389 break;
5390 case 0x06d: /* VIS I fxors */
5391 CHECK_FPU_FEATURE(dc, VIS1);
5392 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5393 break;
5394 case 0x06e: /* VIS I fnand */
5395 CHECK_FPU_FEATURE(dc, VIS1);
5396 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5397 break;
5398 case 0x06f: /* VIS I fnands */
5399 CHECK_FPU_FEATURE(dc, VIS1);
5400 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5401 break;
5402 case 0x070: /* VIS I fand */
5403 CHECK_FPU_FEATURE(dc, VIS1);
5404 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5405 break;
5406 case 0x071: /* VIS I fands */
5407 CHECK_FPU_FEATURE(dc, VIS1);
5408 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5409 break;
5410 case 0x072: /* VIS I fxnor */
5411 CHECK_FPU_FEATURE(dc, VIS1);
5412 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5413 break;
5414 case 0x073: /* VIS I fxnors */
5415 CHECK_FPU_FEATURE(dc, VIS1);
5416 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5417 break;
5418 case 0x074: /* VIS I fsrc1 */
5419 CHECK_FPU_FEATURE(dc, VIS1);
5420 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5421 gen_store_fpr_D(dc, rd, cpu_src1_64);
5422 break;
5423 case 0x075: /* VIS I fsrc1s */
5424 CHECK_FPU_FEATURE(dc, VIS1);
5425 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5426 gen_store_fpr_F(dc, rd, cpu_src1_32);
5427 break;
5428 case 0x076: /* VIS I fornot2 */
5429 CHECK_FPU_FEATURE(dc, VIS1);
5430 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5431 break;
5432 case 0x077: /* VIS I fornot2s */
5433 CHECK_FPU_FEATURE(dc, VIS1);
5434 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5435 break;
5436 case 0x078: /* VIS I fsrc2 */
5437 CHECK_FPU_FEATURE(dc, VIS1);
5438 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5439 gen_store_fpr_D(dc, rd, cpu_src1_64);
5440 break;
5441 case 0x079: /* VIS I fsrc2s */
5442 CHECK_FPU_FEATURE(dc, VIS1);
5443 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5444 gen_store_fpr_F(dc, rd, cpu_src1_32);
5445 break;
5446 case 0x07a: /* VIS I fornot1 */
5447 CHECK_FPU_FEATURE(dc, VIS1);
5448 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5449 break;
5450 case 0x07b: /* VIS I fornot1s */
5451 CHECK_FPU_FEATURE(dc, VIS1);
5452 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5453 break;
5454 case 0x07c: /* VIS I for */
5455 CHECK_FPU_FEATURE(dc, VIS1);
5456 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5457 break;
5458 case 0x07d: /* VIS I fors */
5459 CHECK_FPU_FEATURE(dc, VIS1);
5460 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5461 break;
5462 case 0x07e: /* VIS I fone */
5463 CHECK_FPU_FEATURE(dc, VIS1);
5464 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5465 tcg_gen_movi_i64(cpu_dst_64, -1);
5466 gen_store_fpr_D(dc, rd, cpu_dst_64);
5467 break;
5468 case 0x07f: /* VIS I fones */
5469 CHECK_FPU_FEATURE(dc, VIS1);
5470 cpu_dst_32 = gen_dest_fpr_F(dc);
5471 tcg_gen_movi_i32(cpu_dst_32, -1);
5472 gen_store_fpr_F(dc, rd, cpu_dst_32);
5473 break;
5474 case 0x080: /* VIS I shutdown */
5475 case 0x081: /* VIS II siam */
5476 /* XXX: not implemented */
5477 goto illegal_insn;
5478 default:
5479 goto illegal_insn;
5480 }
5481 #endif
5482 } else {
5483 goto illegal_insn; /* in decodetree */
5484 }
5485 }
5486 break;
5487 case 3: /* load/store instructions */
5488 goto illegal_insn; /* in decodetree */
5489 }
5490 advance_pc(dc);
5491 jmp_insn:
5492 return;
5493 illegal_insn:
5494 gen_exception(dc, TT_ILL_INSN);
5495 return;
5496 nfpu_insn:
5497 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5498 return;
5499 }
5500
5501 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5502 {
5503 DisasContext *dc = container_of(dcbase, DisasContext, base);
5504 CPUSPARCState *env = cpu_env(cs);
5505 int bound;
5506
5507 dc->pc = dc->base.pc_first;
5508 dc->npc = (target_ulong)dc->base.tb->cs_base;
5509 dc->cc_op = CC_OP_DYNAMIC;
5510 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5511 dc->def = &env->def;
5512 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5513 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5514 #ifndef CONFIG_USER_ONLY
5515 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5516 #endif
5517 #ifdef TARGET_SPARC64
5518 dc->fprs_dirty = 0;
5519 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5520 #ifndef CONFIG_USER_ONLY
5521 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5522 #endif
5523 #endif
5524 /*
5525 * if we reach a page boundary, we stop generation so that the
5526 * PC of a TT_TFAULT exception is always in the right page
5527 */
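/* -(pc | TARGET_PAGE_MASK) is the number of bytes left on this page. */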
5528 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5529 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5530 }
5531
5532 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5533 {
5534 }
5535
5536 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5537 {
5538 DisasContext *dc = container_of(dcbase, DisasContext, base);
5539 target_ulong npc = dc->npc;
5540
5541 if (npc & 3) {
5542 switch (npc) {
5543 case JUMP_PC:
5544 assert(dc->jump_pc[1] == dc->pc + 4);
5545 npc = dc->jump_pc[0] | JUMP_PC;
5546 break;
5547 case DYNAMIC_PC:
5548 case DYNAMIC_PC_LOOKUP:
5549 npc = DYNAMIC_PC;
5550 break;
5551 default:
5552 g_assert_not_reached();
5553 }
5554 }
5555 tcg_gen_insn_start(dc->pc, npc);
5556 }
5557
5558 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5559 {
5560 DisasContext *dc = container_of(dcbase, DisasContext, base);
5561 CPUSPARCState *env = cpu_env(cs);
5562 unsigned int insn;
5563
5564 insn = translator_ldl(env, &dc->base, dc->pc);
5565 dc->base.pc_next += 4;
5566
5567 if (!decode(dc, insn)) {
5568 disas_sparc_legacy(dc, insn);
5569 }
5570
5571 if (dc->base.is_jmp == DISAS_NORETURN) {
5572 return;
5573 }
5574 if (dc->pc != dc->base.pc_next) {
5575 dc->base.is_jmp = DISAS_TOO_MANY;
5576 }
5577 }
5578
5579 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5580 {
5581 DisasContext *dc = container_of(dcbase, DisasContext, base);
5582 DisasDelayException *e, *e_next;
5583 bool may_lookup;
5584
5585 switch (dc->base.is_jmp) {
5586 case DISAS_NEXT:
5587 case DISAS_TOO_MANY:
5588 if (((dc->pc | dc->npc) & 3) == 0) {
5589 /* static PC and NPC: we can use direct chaining */
5590 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5591 break;
5592 }
5593
5594 may_lookup = true;
5595 if (dc->pc & 3) {
5596 switch (dc->pc) {
5597 case DYNAMIC_PC_LOOKUP:
5598 break;
5599 case DYNAMIC_PC:
5600 may_lookup = false;
5601 break;
5602 default:
5603 g_assert_not_reached();
5604 }
5605 } else {
5606 tcg_gen_movi_tl(cpu_pc, dc->pc);
5607 }
5608
5609 if (dc->npc & 3) {
5610 switch (dc->npc) {
5611 case JUMP_PC:
5612 gen_generic_branch(dc);
5613 break;
5614 case DYNAMIC_PC:
5615 may_lookup = false;
5616 break;
5617 case DYNAMIC_PC_LOOKUP:
5618 break;
5619 default:
5620 g_assert_not_reached();
5621 }
5622 } else {
5623 tcg_gen_movi_tl(cpu_npc, dc->npc);
5624 }
5625 if (may_lookup) {
5626 tcg_gen_lookup_and_goto_ptr();
5627 } else {
5628 tcg_gen_exit_tb(NULL, 0);
5629 }
5630 break;
5631
5632 case DISAS_NORETURN:
5633 break;
5634
5635 case DISAS_EXIT:
5636 /* Exit TB */
5637 save_state(dc);
5638 tcg_gen_exit_tb(NULL, 0);
5639 break;
5640
5641 default:
5642 g_assert_not_reached();
5643 }
5644
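/* Emit the out-of-line exception paths queued during translation. */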
5645 for (e = dc->delay_excp_list; e ; e = e_next) {
5646 gen_set_label(e->lab);
5647
5648 tcg_gen_movi_tl(cpu_pc, e->pc);
5649 if (e->npc % 4 == 0) {
5650 tcg_gen_movi_tl(cpu_npc, e->npc);
5651 }
5652 gen_helper_raise_exception(tcg_env, e->excp);
5653
5654 e_next = e->next;
5655 g_free(e);
5656 }
5657 }
5658
5659 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5660 CPUState *cpu, FILE *logfile)
5661 {
5662 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5663 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5664 }
5665
5666 static const TranslatorOps sparc_tr_ops = {
5667 .init_disas_context = sparc_tr_init_disas_context,
5668 .tb_start = sparc_tr_tb_start,
5669 .insn_start = sparc_tr_insn_start,
5670 .translate_insn = sparc_tr_translate_insn,
5671 .tb_stop = sparc_tr_tb_stop,
5672 .disas_log = sparc_tr_disas_log,
5673 };
5674
5675 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5676 target_ulong pc, void *host_pc)
5677 {
5678 DisasContext dc = {};
5679
5680 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5681 }
5682
5683 void sparc_tcg_init(void)
5684 {
5685 static const char gregnames[32][4] = {
5686 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5687 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5688 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5689 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5690 };
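/* Only even register names: each 64-bit FP global covers a double-register pair. */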
5691 static const char fregnames[32][4] = {
5692 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5693 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5694 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5695 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5696 };
5697
5698 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5699 #ifdef TARGET_SPARC64
5700 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5701 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5702 #endif
5703 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5704 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5705 };
5706
5707 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5708 #ifdef TARGET_SPARC64
5709 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5710 #endif
5711 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5712 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5713 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5714 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5715 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5716 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5717 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5718 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5719 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5720 };
5721
5722 unsigned int i;
5723
5724 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5725 offsetof(CPUSPARCState, regwptr),
5726 "regwptr");
5727
5728 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5729 *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5730 }
5731
5732 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5733 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5734 }
5735
5736 cpu_regs[0] = NULL;
5737 for (i = 1; i < 8; ++i) {
5738 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5739 offsetof(CPUSPARCState, gregs[i]),
5740 gregnames[i]);
5741 }
5742
5743 for (i = 8; i < 32; ++i) {
5744 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5745 (i - 8) * sizeof(target_ulong),
5746 gregnames[i]);
5747 }
5748
5749 for (i = 0; i < TARGET_DPREGS; i++) {
5750 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5751 offsetof(CPUSPARCState, fpr[i]),
5752 fregnames[i]);
5753 }
5754 }
5755
5756 void sparc_restore_state_to_opc(CPUState *cs,
5757 const TranslationBlock *tb,
5758 const uint64_t *data)
5759 {
5760 SPARCCPU *cpu = SPARC_CPU(cs);
5761 CPUSPARCState *env = &cpu->env;
5762 target_ulong pc = data[0];
5763 target_ulong npc = data[1];
5764
5765 env->pc = pc;
5766 if (npc == DYNAMIC_PC) {
5767 /* dynamic NPC: already stored */
5768 } else if (npc & JUMP_PC) {
5769 /* jump PC: use 'cond' and the jump targets of the translation */
5770 if (env->cond) {
5771 env->npc = npc & ~3;
5772 } else {
5773 env->npc = pc + 4;
5774 }
5775 } else {
5776 env->npc = npc;
5777 }
5778 }