/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define FSR_LDXFSR_MASK                        0
# define FSR_LDXFSR_OLDMASK                     0
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b)     sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b)  sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
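
/*
 * Worked example: with the big-endian numbering above, GET_FIELD(insn, 0, 5)
 * extracts bits 31..26 of the instruction word, while the equivalent
 * GET_FIELD_SP(insn, 26, 31) selects the same bits using the manuals'
 * numbering, where bit 0 is 2^0.
 */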

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
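/*
 * On SPARC64 the 5-bit register field encodes the double and quad FP
 * registers with the low bit of the field supplying bit 5 of the register
 * number: e.g. a field value of 1 selects register 32 (DFPREG(1) == 32).
 */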

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
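/*
 * cpu_fpr[] holds the single-precision registers packed as i64 pairs:
 * the even-numbered single sits in the high 32 bits of each element and
 * the odd-numbered single in the low 32 bits, as the extract/deposit
 * operations below show.
 */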
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

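/* When the v9 address mask (PSTATE.AM) is in effect, effective addresses
   are truncated to 32 bits, as the two helpers below implement. */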
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
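    /*
     * Concretely, tcg_gen_add2_tl(rl, rh, al, ah, bl, bh) computes the
     * double-word sum {ah:al} + {bh:bl}.  Below, the low halves replay
     * the previous ADDcc (cpu_cc_src + cpu_cc_src2) so that its carry-out
     * feeds the high-half addition src1 + src2, which is the part we keep.
     */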
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
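    /*
     * As with ADD2 above: the low halves replay the previous SUBcc
     * (cpu_cc_src - cpu_cc_src2) so that its borrow-out feeds the
     * high-half subtraction src1 - src2.
     */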
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

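/* umul/smul: full 32x32 -> 64-bit multiply; %y receives the high 32 bits. */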
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

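/* V9 POPC encodes only rs2 (rs1 must be %g0), so src1 is ignored here. */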
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

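/*
 * VIS faligndata: treat s1:s2 as one 16-byte value and extract the eight
 * bytes starting at byte offset GSR.align (0..7), implemented below as a
 * pair of variable shifts ORed together.
 */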
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
 */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

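/* Resolve a pending conditional branch: select the delay-slot npc from
   jump_pc[] according to cpu_cond (nonzero means taken). */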
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

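/*
 * Queue an exception to be raised at the current pc/npc: the caller
 * branches to the returned label when the faulting condition holds,
 * and the raise itself is emitted when the TB is finalized.
 */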
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

// Inverted logic
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}

static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
2244 break;
2245
2246 case GET_ASI_DIRECT:
2247 memop |= MO_ALIGN_4;
2248 switch (size) {
2249 case MO_32:
2250 d32 = gen_load_fpr_F(dc, rd);
2251 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2252 break;
2253 case MO_64:
2254 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2255 memop | MO_ALIGN_4);
2256 break;
2257 case MO_128:
2258 /* Only 4-byte alignment required. However, it is legal for the
2259 cpu to signal the alignment fault, and the OS trap handler is
2260 required to fix it up. Requiring 16-byte alignment here avoids
2261 having to probe the second page before performing the first
2262 write. */
2263 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2264 memop | MO_ALIGN_16);
2265 addr_tmp = tcg_temp_new();
2266 tcg_gen_addi_tl(addr_tmp, addr, 8);
2267 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2268 break;
2269 default:
2270 g_assert_not_reached();
2271 }
2272 break;
2273
2274 case GET_ASI_BLOCK:
2275 /* Valid for stdfa on aligned registers only. */
2276 if (orig_size == MO_64 && (rd & 7) == 0) {
2277 /* The first operation checks required alignment. */
2278 addr_tmp = tcg_temp_new();
2279 for (int i = 0; ; ++i) {
2280 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2281 memop | (i == 0 ? MO_ALIGN_64 : 0));
2282 if (i == 7) {
2283 break;
2284 }
2285 tcg_gen_addi_tl(addr_tmp, addr, 8);
2286 addr = addr_tmp;
2287 }
2288 } else {
2289 gen_exception(dc, TT_ILL_INSN);
2290 }
2291 break;
2292
2293 case GET_ASI_SHORT:
2294 /* Valid for stdfa only. */
2295 if (orig_size == MO_64) {
2296 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2297 memop | MO_ALIGN);
2298 } else {
2299 gen_exception(dc, TT_ILL_INSN);
2300 }
2301 break;
2302
2303 default:
2304         /* According to the table in the UA2011 manual, the only
2305            other asis that are valid for stfa/stdfa/stqfa are
2306            the PST* asis, which aren't currently handled. */
2307 gen_exception(dc, TT_ILL_INSN);
2308 break;
2309 }
2310 }
2311
2312 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2313 {
2314 TCGv hi = gen_dest_gpr(dc, rd);
2315 TCGv lo = gen_dest_gpr(dc, rd + 1);
2316
2317 switch (da->type) {
2318 case GET_ASI_EXCP:
2319 return;
2320
2321 case GET_ASI_DTWINX:
2322 #ifdef TARGET_SPARC64
2323 {
2324 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2325 TCGv_i128 t = tcg_temp_new_i128();
2326
2327 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2328 /*
2329 * Note that LE twinx acts as if each 64-bit register result is
2330 * byte swapped. We perform one 128-bit LE load, so must swap
2331 * the order of the writebacks.
2332 */
2333 if ((mop & MO_BSWAP) == MO_TE) {
2334 tcg_gen_extr_i128_i64(lo, hi, t);
2335 } else {
2336 tcg_gen_extr_i128_i64(hi, lo, t);
2337 }
2338 }
2339 break;
2340 #else
2341 g_assert_not_reached();
2342 #endif
2343
2344 case GET_ASI_DIRECT:
2345 {
2346 TCGv_i64 tmp = tcg_temp_new_i64();
2347
2348 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2349
2350 /* Note that LE ldda acts as if each 32-bit register
2351 result is byte swapped. Having just performed one
2352            64-bit bswap, we now need to swap the order of the writebacks. */
2353 if ((da->memop & MO_BSWAP) == MO_TE) {
2354 tcg_gen_extr_i64_tl(lo, hi, tmp);
2355 } else {
2356 tcg_gen_extr_i64_tl(hi, lo, tmp);
2357 }
2358 }
2359 break;
2360
2361 default:
2362 /* ??? In theory we've handled all of the ASIs that are valid
2363 for ldda, and this should raise DAE_invalid_asi. However,
2364 real hardware allows others. This can be seen with e.g.
2365 FreeBSD 10.3 wrt ASI_IC_TAG. */
2366 {
2367 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2368 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2369 TCGv_i64 tmp = tcg_temp_new_i64();
2370
2371 save_state(dc);
2372 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2373
2374 /* See above. */
2375 if ((da->memop & MO_BSWAP) == MO_TE) {
2376 tcg_gen_extr_i64_tl(lo, hi, tmp);
2377 } else {
2378 tcg_gen_extr_i64_tl(hi, lo, tmp);
2379 }
2380 }
2381 break;
2382 }
2383
2384 gen_store_gpr(dc, rd, hi);
2385 gen_store_gpr(dc, rd + 1, lo);
2386 }
2387
2388 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2389 {
2390 TCGv hi = gen_load_gpr(dc, rd);
2391 TCGv lo = gen_load_gpr(dc, rd + 1);
2392
2393 switch (da->type) {
2394 case GET_ASI_EXCP:
2395 break;
2396
2397 case GET_ASI_DTWINX:
2398 #ifdef TARGET_SPARC64
2399 {
2400 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2401 TCGv_i128 t = tcg_temp_new_i128();
2402
2403 /*
2404 * Note that LE twinx acts as if each 64-bit register result is
2405 * byte swapped. We perform one 128-bit LE store, so must swap
2406 * the order of the construction.
2407 */
2408 if ((mop & MO_BSWAP) == MO_TE) {
2409 tcg_gen_concat_i64_i128(t, lo, hi);
2410 } else {
2411 tcg_gen_concat_i64_i128(t, hi, lo);
2412 }
2413 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2414 }
2415 break;
2416 #else
2417 g_assert_not_reached();
2418 #endif
2419
2420 case GET_ASI_DIRECT:
2421 {
2422 TCGv_i64 t64 = tcg_temp_new_i64();
2423
2424 /* Note that LE stda acts as if each 32-bit register result is
2425 byte swapped. We will perform one 64-bit LE store, so now
2426 we must swap the order of the construction. */
2427 if ((da->memop & MO_BSWAP) == MO_TE) {
2428 tcg_gen_concat_tl_i64(t64, lo, hi);
2429 } else {
2430 tcg_gen_concat_tl_i64(t64, hi, lo);
2431 }
2432 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2433 }
2434 break;
2435
2436 case GET_ASI_BFILL:
2437 assert(TARGET_LONG_BITS == 32);
2438 /* Store 32 bytes of T64 to ADDR. */
2439 /* ??? The original qemu code suggests 8-byte alignment, dropping
2440 the low bits, but the only place I can see this used is in the
2441            Linux kernel with 32-byte alignment, which would make more sense
2442 as a cacheline-style operation. */
2443 {
2444 TCGv_i64 t64 = tcg_temp_new_i64();
2445 TCGv d_addr = tcg_temp_new();
2446 TCGv eight = tcg_constant_tl(8);
2447 int i;
2448
2449 tcg_gen_concat_tl_i64(t64, lo, hi);
2450 tcg_gen_andi_tl(d_addr, addr, -8);
2451 for (i = 0; i < 32; i += 8) {
2452 tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
2453 tcg_gen_add_tl(d_addr, d_addr, eight);
2454 }
2455 }
2456 break;
2457
2458 default:
2459 /* ??? In theory we've handled all of the ASIs that are valid
2460 for stda, and this should raise DAE_invalid_asi. */
2461 {
2462 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2463 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2464 TCGv_i64 t64 = tcg_temp_new_i64();
2465
2466 /* See above. */
2467 if ((da->memop & MO_BSWAP) == MO_TE) {
2468 tcg_gen_concat_tl_i64(t64, lo, hi);
2469 } else {
2470 tcg_gen_concat_tl_i64(t64, hi, lo);
2471 }
2472
2473 save_state(dc);
2474 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2475 }
2476 break;
2477 }
2478 }
2479
2480 #ifdef TARGET_SPARC64
2481 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2482 {
2483 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2484 return gen_load_gpr(dc, rs1);
2485 }
2486
2487 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2488 {
2489 TCGv_i32 c32, zero, dst, s1, s2;
2490
2491     /* We have two choices here: extend the 32-bit data and use
2492        movcond_i64, or fold the comparison down to 32 bits and use
2493        movcond_i32.  Choose the latter. */
2494 c32 = tcg_temp_new_i32();
2495 if (cmp->is_bool) {
2496 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2497 } else {
2498 TCGv_i64 c64 = tcg_temp_new_i64();
2499 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2500 tcg_gen_extrl_i64_i32(c32, c64);
2501 }
2502
2503 s1 = gen_load_fpr_F(dc, rs);
2504 s2 = gen_load_fpr_F(dc, rd);
2505 dst = gen_dest_fpr_F(dc);
2506 zero = tcg_constant_i32(0);
2507
2508 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2509
2510 gen_store_fpr_F(dc, rd, dst);
2511 }
2512
2513 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2514 {
2515 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2516 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2517 gen_load_fpr_D(dc, rs),
2518 gen_load_fpr_D(dc, rd));
2519 gen_store_fpr_D(dc, rd, dst);
2520 }
2521
2522 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2523 {
2524 int qd = QFPREG(rd);
2525 int qs = QFPREG(rs);
2526
2527 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2528 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2529 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2530 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2531
2532 gen_update_fprs_dirty(dc, qd);
2533 }
2534
2535 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2536 {
2537 TCGv_i32 r_tl = tcg_temp_new_i32();
2538
2539 /* load env->tl into r_tl */
2540 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2541
2542     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 minus 1 */
2543 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2544
2545 /* calculate offset to current trap state from env->ts, reuse r_tl */
2546 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2547 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2548
2549 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2550 {
2551 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2552 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2553 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2554 }
2555 }
2556 #endif
2557
2558 static int extract_dfpreg(DisasContext *dc, int x)
2559 {
2560 return DFPREG(x);
2561 }
2562
2563 static int extract_qfpreg(DisasContext *dc, int x)
2564 {
2565 return QFPREG(x);
2566 }
2567
2568 /* Include the auto-generated decoder. */
2569 #include "decode-insns.c.inc"
2570
2571 #define TRANS(NAME, AVAIL, FUNC, ...) \
2572 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2573 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2574
2575 #define avail_ALL(C) true
2576 #ifdef TARGET_SPARC64
2577 # define avail_32(C) false
2578 # define avail_ASR17(C) false
2579 # define avail_CASA(C) true
2580 # define avail_DIV(C) true
2581 # define avail_MUL(C) true
2582 # define avail_POWERDOWN(C) false
2583 # define avail_64(C) true
2584 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2585 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2586 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2587 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2588 #else
2589 # define avail_32(C) true
2590 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2591 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2592 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2593 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2594 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2595 # define avail_64(C) false
2596 # define avail_GL(C) false
2597 # define avail_HYPV(C) false
2598 # define avail_VIS1(C) false
2599 # define avail_VIS2(C) false
2600 #endif
2601
2602 /* Default case for non-jump instructions. */
2603 static bool advance_pc(DisasContext *dc)
2604 {
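    /*
     * An npc with its low bits set is not a real address but one of
     * the markers DYNAMIC_PC, DYNAMIC_PC_LOOKUP or JUMP_PC; genuine
     * instruction addresses are always 4-byte aligned.
     */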
2605 if (dc->npc & 3) {
2606 switch (dc->npc) {
2607 case DYNAMIC_PC:
2608 case DYNAMIC_PC_LOOKUP:
2609 dc->pc = dc->npc;
2610 gen_op_next_insn();
2611 break;
2612 case JUMP_PC:
2613 /* we can do a static jump */
2614 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
2615 dc->base.is_jmp = DISAS_NORETURN;
2616 break;
2617 default:
2618 g_assert_not_reached();
2619 }
2620 } else {
2621 dc->pc = dc->npc;
2622 dc->npc = dc->npc + 4;
2623 }
2624 return true;
2625 }
2626
2627 /*
2628 * Major opcodes 00 and 01 -- branches, call, and sethi
2629 */
2630
2631 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2632 {
2633 if (annul) {
2634 dc->pc = dc->npc + 4;
2635 dc->npc = dc->pc + 4;
2636 } else {
2637 dc->pc = dc->npc;
2638 dc->npc = dc->pc + 4;
2639 }
2640 return true;
2641 }
2642
2643 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2644 target_ulong dest)
2645 {
2646 if (annul) {
2647 dc->pc = dest;
2648 dc->npc = dest + 4;
2649 } else {
2650 dc->pc = dc->npc;
2651 dc->npc = dest;
2652 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2653 }
2654 return true;
2655 }
2656
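/*
 * SPARC branches execute the instruction in the delay slot (at npc)
 * before control reaches the target.  The annul bit changes this:
 * an annulled conditional branch executes the delay slot only when
 * the branch is taken, while an annulled unconditional branch (and
 * branch-never) skips the delay slot entirely, as implemented by
 * the advance_jump_* helpers here.
 */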
2657 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2658 bool annul, target_ulong dest)
2659 {
2660 target_ulong npc = dc->npc;
2661
2662 if (annul) {
2663 TCGLabel *l1 = gen_new_label();
2664
2665 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2666 gen_goto_tb(dc, 0, npc, dest);
2667 gen_set_label(l1);
2668 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2669
2670 dc->base.is_jmp = DISAS_NORETURN;
2671 } else {
2672 if (npc & 3) {
2673 switch (npc) {
2674 case DYNAMIC_PC:
2675 case DYNAMIC_PC_LOOKUP:
2676 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2677 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2678 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2679 cmp->c1, cmp->c2,
2680 tcg_constant_tl(dest), cpu_npc);
2681 dc->pc = npc;
2682 break;
2683 default:
2684 g_assert_not_reached();
2685 }
2686 } else {
2687 dc->pc = npc;
2688 dc->jump_pc[0] = dest;
2689 dc->jump_pc[1] = npc + 4;
2690 dc->npc = JUMP_PC;
2691 if (cmp->is_bool) {
2692 tcg_gen_mov_tl(cpu_cond, cmp->c1);
2693 } else {
2694 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2695 }
2696 }
2697 }
2698 return true;
2699 }
2700
2701 static bool raise_priv(DisasContext *dc)
2702 {
2703 gen_exception(dc, TT_PRIV_INSN);
2704 return true;
2705 }
2706
2707 static bool raise_unimpfpop(DisasContext *dc)
2708 {
2709 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2710 return true;
2711 }
2712
2713 static bool gen_trap_float128(DisasContext *dc)
2714 {
2715 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2716 return false;
2717 }
2718 return raise_unimpfpop(dc);
2719 }
2720
2721 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2722 {
2723 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2724 DisasCompare cmp;
2725
2726 switch (a->cond) {
2727 case 0x0:
2728 return advance_jump_uncond_never(dc, a->a);
2729 case 0x8:
2730 return advance_jump_uncond_always(dc, a->a, target);
2731 default:
2732 flush_cond(dc);
2733
2734 gen_compare(&cmp, a->cc, a->cond, dc);
2735 return advance_jump_cond(dc, &cmp, a->a, target);
2736 }
2737 }
2738
2739 TRANS(Bicc, ALL, do_bpcc, a)
2740 TRANS(BPcc, 64, do_bpcc, a)
2741
2742 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2743 {
2744 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2745 DisasCompare cmp;
2746
2747 if (gen_trap_ifnofpu(dc)) {
2748 return true;
2749 }
2750 switch (a->cond) {
2751 case 0x0:
2752 return advance_jump_uncond_never(dc, a->a);
2753 case 0x8:
2754 return advance_jump_uncond_always(dc, a->a, target);
2755 default:
2756 flush_cond(dc);
2757
2758 gen_fcompare(&cmp, a->cc, a->cond);
2759 return advance_jump_cond(dc, &cmp, a->a, target);
2760 }
2761 }
2762
2763 TRANS(FBPfcc, 64, do_fbpfcc, a)
2764 TRANS(FBfcc, ALL, do_fbpfcc, a)
2765
2766 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2767 {
2768 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2769 DisasCompare cmp;
2770
2771 if (!avail_64(dc)) {
2772 return false;
2773 }
2774 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2775 return false;
2776 }
2777
2778 flush_cond(dc);
2779 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2780 return advance_jump_cond(dc, &cmp, a->a, target);
2781 }
2782
2783 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2784 {
2785 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2786
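    /* CALL deposits its own address into %o7 (r15). */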
2787 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2788 gen_mov_pc_npc(dc);
2789 dc->npc = target;
2790 return true;
2791 }
2792
2793 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2794 {
2795 /*
2796 * For sparc32, always generate the no-coprocessor exception.
2797 * For sparc64, always generate illegal instruction.
2798 */
2799 #ifdef TARGET_SPARC64
2800 return false;
2801 #else
2802 gen_exception(dc, TT_NCP_INSN);
2803 return true;
2804 #endif
2805 }
2806
2807 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2808 {
2809 /* Special-case %g0 because that's the canonical nop. */
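    /* SETHI places its 22-bit immediate into bits [31:10] of rd. */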
2810 if (a->rd) {
2811 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2812 }
2813 return advance_pc(dc);
2814 }
2815
2816 /*
2817 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2818 */
2819
2820 static bool do_tcc(DisasContext *dc, int cond, int cc,
2821 int rs1, bool imm, int rs2_or_imm)
2822 {
2823 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2824 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2825 DisasCompare cmp;
2826 TCGLabel *lab;
2827 TCGv_i32 trap;
2828
2829 /* Trap never. */
2830 if (cond == 0) {
2831 return advance_pc(dc);
2832 }
2833
2834 /*
2835 * Immediate traps are the most common case. Since this value is
2836 * live across the branch, it really pays to evaluate the constant.
2837 */
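    /* E.g. "ta 0x10" folds to the constant TT_TRAP + 0x10 here. */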
2838 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2839 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2840 } else {
2841 trap = tcg_temp_new_i32();
2842 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2843 if (imm) {
2844 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2845 } else {
2846 TCGv_i32 t2 = tcg_temp_new_i32();
2847 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2848 tcg_gen_add_i32(trap, trap, t2);
2849 }
2850 tcg_gen_andi_i32(trap, trap, mask);
2851 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2852 }
2853
2854 /* Trap always. */
2855 if (cond == 8) {
2856 save_state(dc);
2857 gen_helper_raise_exception(tcg_env, trap);
2858 dc->base.is_jmp = DISAS_NORETURN;
2859 return true;
2860 }
2861
2862 /* Conditional trap. */
2863 flush_cond(dc);
2864 lab = delay_exceptionv(dc, trap);
2865 gen_compare(&cmp, cc, cond, dc);
2866 tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2867
2868 return advance_pc(dc);
2869 }
2870
2871 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2872 {
2873 if (avail_32(dc) && a->cc) {
2874 return false;
2875 }
2876 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2877 }
2878
2879 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2880 {
2881 if (avail_64(dc)) {
2882 return false;
2883 }
2884 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2885 }
2886
2887 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2888 {
2889 if (avail_32(dc)) {
2890 return false;
2891 }
2892 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2893 }
2894
2895 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2896 {
2897 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2898 return advance_pc(dc);
2899 }
2900
2901 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2902 {
2903 if (avail_32(dc)) {
2904 return false;
2905 }
2906 if (a->mmask) {
2907 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2908 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2909 }
2910 if (a->cmask) {
2911         /* For #Sync, etc., end the TB to recognize interrupts. */
2912 dc->base.is_jmp = DISAS_EXIT;
2913 }
2914 return advance_pc(dc);
2915 }
2916
2917 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2918 TCGv (*func)(DisasContext *, TCGv))
2919 {
2920 if (!priv) {
2921 return raise_priv(dc);
2922 }
2923 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2924 return advance_pc(dc);
2925 }
2926
2927 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2928 {
2929 return cpu_y;
2930 }
2931
2932 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2933 {
2934 /*
2935 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2936 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2937  * This pattern matches after all other ASR reads, so Leon3 %asr17 is handled first.
2938 */
2939 if (avail_64(dc) && a->rs1 != 0) {
2940 return false;
2941 }
2942 return do_rd_special(dc, true, a->rd, do_rdy);
2943 }
2944
2945 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2946 {
2947 uint32_t val;
2948
2949 /*
2950 * TODO: There are many more fields to be filled,
2951 * some of which are writable.
2952 */
2953 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2954 val |= 1 << 8; /* [8] V8 */
2955
2956 return tcg_constant_tl(val);
2957 }
2958
2959 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2960
2961 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2962 {
2963 update_psr(dc);
2964 gen_helper_rdccr(dst, tcg_env);
2965 return dst;
2966 }
2967
2968 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2969
2970 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2971 {
2972 #ifdef TARGET_SPARC64
2973 return tcg_constant_tl(dc->asi);
2974 #else
2975 qemu_build_not_reached();
2976 #endif
2977 }
2978
2979 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2980
2981 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2982 {
2983 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2984
2985 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2986 if (translator_io_start(&dc->base)) {
2987 dc->base.is_jmp = DISAS_EXIT;
2988 }
2989 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2990 tcg_constant_i32(dc->mem_idx));
2991 return dst;
2992 }
2993
2994 /* TODO: non-priv access only allowed when enabled. */
2995 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2996
2997 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2998 {
2999 return tcg_constant_tl(address_mask_i(dc, dc->pc));
3000 }
3001
3002 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3003
3004 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
3005 {
3006 tcg_gen_ext_i32_tl(dst, cpu_fprs);
3007 return dst;
3008 }
3009
3010 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
3011
3012 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
3013 {
3014 gen_trap_ifnofpu(dc);
3015 return cpu_gsr;
3016 }
3017
3018 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
3019
3020 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
3021 {
3022 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
3023 return dst;
3024 }
3025
3026 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
3027
3028 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
3029 {
3030 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
3031 return dst;
3032 }
3033
3034 /* TODO: non-priv access only allowed when enabled. */
3035 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3036
3037 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
3038 {
3039 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3040
3041 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3042 if (translator_io_start(&dc->base)) {
3043 dc->base.is_jmp = DISAS_EXIT;
3044 }
3045 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3046 tcg_constant_i32(dc->mem_idx));
3047 return dst;
3048 }
3049
3050 /* TODO: non-priv access only allowed when enabled. */
3051 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3052
3053 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3054 {
3055 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3056 return dst;
3057 }
3058
3059 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3060 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3061
3062 /*
3063 * UltraSPARC-T1 Strand status.
3064  * The HYPV check may not be enough: UA2005 & UA2007 describe
3065  * this ASR as implementation dependent.
3066 */
3067 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3068 {
3069 return tcg_constant_tl(1);
3070 }
3071
3072 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3073
3074 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3075 {
3076 update_psr(dc);
3077 gen_helper_rdpsr(dst, tcg_env);
3078 return dst;
3079 }
3080
3081 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3082
3083 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3084 {
3085 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3086 return dst;
3087 }
3088
3089 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3090
3091 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3092 {
3093 TCGv_i32 tl = tcg_temp_new_i32();
3094 TCGv_ptr tp = tcg_temp_new_ptr();
3095
3096 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3097 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3098 tcg_gen_shli_i32(tl, tl, 3);
3099 tcg_gen_ext_i32_ptr(tp, tl);
3100 tcg_gen_add_ptr(tp, tp, tcg_env);
3101
3102 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3103 return dst;
3104 }
3105
3106 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3107
3108 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3109 {
3110 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3111 return dst;
3112 }
3113
3114 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3115
3116 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3117 {
3118 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3119 return dst;
3120 }
3121
3122 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3123
3124 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3125 {
3126 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3127 return dst;
3128 }
3129
3130 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3131
3132 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3133 {
3134 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3135 return dst;
3136 }
3137
3138 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3139 do_rdhstick_cmpr)
3140
3141 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3142 {
3143 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3144 return dst;
3145 }
3146
3147 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3148
3149 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3150 {
3151 #ifdef TARGET_SPARC64
3152 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3153
3154 gen_load_trap_state_at_tl(r_tsptr);
3155 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3156 return dst;
3157 #else
3158 qemu_build_not_reached();
3159 #endif
3160 }
3161
3162 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3163
3164 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3165 {
3166 #ifdef TARGET_SPARC64
3167 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3168
3169 gen_load_trap_state_at_tl(r_tsptr);
3170 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3171 return dst;
3172 #else
3173 qemu_build_not_reached();
3174 #endif
3175 }
3176
3177 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3178
3179 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3180 {
3181 #ifdef TARGET_SPARC64
3182 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3183
3184 gen_load_trap_state_at_tl(r_tsptr);
3185 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3186 return dst;
3187 #else
3188 qemu_build_not_reached();
3189 #endif
3190 }
3191
3192 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3193
3194 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3195 {
3196 #ifdef TARGET_SPARC64
3197 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3198
3199 gen_load_trap_state_at_tl(r_tsptr);
3200 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3201 return dst;
3202 #else
3203 qemu_build_not_reached();
3204 #endif
3205 }
3206
3207 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3208 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3209
3210 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3211 {
3212 return cpu_tbr;
3213 }
3214
3215 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3216 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3217
3218 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3219 {
3220 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3221 return dst;
3222 }
3223
3224 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3225
3226 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3227 {
3228 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3229 return dst;
3230 }
3231
3232 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3233
3234 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3235 {
3236 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3237 return dst;
3238 }
3239
3240 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3241
3242 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3243 {
3244 gen_helper_rdcwp(dst, tcg_env);
3245 return dst;
3246 }
3247
3248 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3249
3250 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3251 {
3252 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3253 return dst;
3254 }
3255
3256 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3257
3258 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3259 {
3260 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3261 return dst;
3262 }
3263
3264 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3265 do_rdcanrestore)
3266
3267 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3268 {
3269 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3270 return dst;
3271 }
3272
3273 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3274
3275 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3276 {
3277 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3278 return dst;
3279 }
3280
3281 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3282
3283 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3284 {
3285 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3286 return dst;
3287 }
3288
3289 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3290
3291 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3292 {
3293 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3294 return dst;
3295 }
3296
3297 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3298
3299 /* UA2005 strand status */
3300 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3301 {
3302 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3303 return dst;
3304 }
3305
3306 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3307
3308 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3309 {
3310 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3311 return dst;
3312 }
3313
3314 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3315
3316 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3317 {
3318 if (avail_64(dc)) {
3319 gen_helper_flushw(tcg_env);
3320 return advance_pc(dc);
3321 }
3322 return false;
3323 }
3324
3325 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3326 void (*func)(DisasContext *, TCGv))
3327 {
3328 TCGv src;
3329
3330 /* For simplicity, we under-decoded the rs2 form. */
3331 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3332 return false;
3333 }
3334 if (!priv) {
3335 return raise_priv(dc);
3336 }
3337
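    /*
     * Per the WRASR definition, the value written is rs1 XOR (rs2 or
     * simm13); fold the XOR away when rs1 is %g0 or the operand is 0.
     */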
3338 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3339 src = tcg_constant_tl(a->rs2_or_imm);
3340 } else {
3341 TCGv src1 = gen_load_gpr(dc, a->rs1);
3342 if (a->rs2_or_imm == 0) {
3343 src = src1;
3344 } else {
3345 src = tcg_temp_new();
3346 if (a->imm) {
3347 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3348 } else {
3349 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3350 }
3351 }
3352 }
3353 func(dc, src);
3354 return advance_pc(dc);
3355 }
3356
3357 static void do_wry(DisasContext *dc, TCGv src)
3358 {
3359 tcg_gen_ext32u_tl(cpu_y, src);
3360 }
3361
3362 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3363
3364 static void do_wrccr(DisasContext *dc, TCGv src)
3365 {
3366 gen_helper_wrccr(tcg_env, src);
3367 }
3368
3369 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3370
3371 static void do_wrasi(DisasContext *dc, TCGv src)
3372 {
3373 TCGv tmp = tcg_temp_new();
3374
3375 tcg_gen_ext8u_tl(tmp, src);
3376 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3377 /* End TB to notice changed ASI. */
3378 dc->base.is_jmp = DISAS_EXIT;
3379 }
3380
3381 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3382
3383 static void do_wrfprs(DisasContext *dc, TCGv src)
3384 {
3385 #ifdef TARGET_SPARC64
3386 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3387 dc->fprs_dirty = 0;
3388 dc->base.is_jmp = DISAS_EXIT;
3389 #else
3390 qemu_build_not_reached();
3391 #endif
3392 }
3393
3394 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3395
3396 static void do_wrgsr(DisasContext *dc, TCGv src)
3397 {
3398 gen_trap_ifnofpu(dc);
3399 tcg_gen_mov_tl(cpu_gsr, src);
3400 }
3401
3402 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3403
3404 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3405 {
3406 gen_helper_set_softint(tcg_env, src);
3407 }
3408
3409 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3410
3411 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3412 {
3413 gen_helper_clear_softint(tcg_env, src);
3414 }
3415
3416 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3417
3418 static void do_wrsoftint(DisasContext *dc, TCGv src)
3419 {
3420 gen_helper_write_softint(tcg_env, src);
3421 }
3422
3423 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3424
3425 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3426 {
3427 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3428
3429 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3430 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3431 translator_io_start(&dc->base);
3432 gen_helper_tick_set_limit(r_tickptr, src);
3433 /* End TB to handle timer interrupt */
3434 dc->base.is_jmp = DISAS_EXIT;
3435 }
3436
3437 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3438
3439 static void do_wrstick(DisasContext *dc, TCGv src)
3440 {
3441 #ifdef TARGET_SPARC64
3442 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3443
3444 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3445 translator_io_start(&dc->base);
3446 gen_helper_tick_set_count(r_tickptr, src);
3447 /* End TB to handle timer interrupt */
3448 dc->base.is_jmp = DISAS_EXIT;
3449 #else
3450 qemu_build_not_reached();
3451 #endif
3452 }
3453
3454 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3455
3456 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3457 {
3458 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3459
3460 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3461 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3462 translator_io_start(&dc->base);
3463 gen_helper_tick_set_limit(r_tickptr, src);
3464 /* End TB to handle timer interrupt */
3465 dc->base.is_jmp = DISAS_EXIT;
3466 }
3467
3468 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3469
3470 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3471 {
3472 save_state(dc);
3473 gen_helper_power_down(tcg_env);
3474 }
3475
3476 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3477
3478 static void do_wrpsr(DisasContext *dc, TCGv src)
3479 {
3480 gen_helper_wrpsr(tcg_env, src);
3481 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3482 dc->cc_op = CC_OP_FLAGS;
3483 dc->base.is_jmp = DISAS_EXIT;
3484 }
3485
3486 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3487
3488 static void do_wrwim(DisasContext *dc, TCGv src)
3489 {
3490 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3491 TCGv tmp = tcg_temp_new();
3492
3493 tcg_gen_andi_tl(tmp, src, mask);
3494 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3495 }
3496
3497 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3498
3499 static void do_wrtpc(DisasContext *dc, TCGv src)
3500 {
3501 #ifdef TARGET_SPARC64
3502 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3503
3504 gen_load_trap_state_at_tl(r_tsptr);
3505 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3506 #else
3507 qemu_build_not_reached();
3508 #endif
3509 }
3510
3511 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3512
3513 static void do_wrtnpc(DisasContext *dc, TCGv src)
3514 {
3515 #ifdef TARGET_SPARC64
3516 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3517
3518 gen_load_trap_state_at_tl(r_tsptr);
3519 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3520 #else
3521 qemu_build_not_reached();
3522 #endif
3523 }
3524
3525 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3526
3527 static void do_wrtstate(DisasContext *dc, TCGv src)
3528 {
3529 #ifdef TARGET_SPARC64
3530 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3531
3532 gen_load_trap_state_at_tl(r_tsptr);
3533 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3534 #else
3535 qemu_build_not_reached();
3536 #endif
3537 }
3538
3539 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3540
3541 static void do_wrtt(DisasContext *dc, TCGv src)
3542 {
3543 #ifdef TARGET_SPARC64
3544 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3545
3546 gen_load_trap_state_at_tl(r_tsptr);
3547 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3548 #else
3549 qemu_build_not_reached();
3550 #endif
3551 }
3552
3553 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3554
3555 static void do_wrtick(DisasContext *dc, TCGv src)
3556 {
3557 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3558
3559 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3560 translator_io_start(&dc->base);
3561 gen_helper_tick_set_count(r_tickptr, src);
3562 /* End TB to handle timer interrupt */
3563 dc->base.is_jmp = DISAS_EXIT;
3564 }
3565
3566 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3567
3568 static void do_wrtba(DisasContext *dc, TCGv src)
3569 {
3570 tcg_gen_mov_tl(cpu_tbr, src);
3571 }
3572
3573 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3574
3575 static void do_wrpstate(DisasContext *dc, TCGv src)
3576 {
3577 save_state(dc);
3578 if (translator_io_start(&dc->base)) {
3579 dc->base.is_jmp = DISAS_EXIT;
3580 }
3581 gen_helper_wrpstate(tcg_env, src);
3582 dc->npc = DYNAMIC_PC;
3583 }
3584
3585 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3586
3587 static void do_wrtl(DisasContext *dc, TCGv src)
3588 {
3589 save_state(dc);
3590 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3591 dc->npc = DYNAMIC_PC;
3592 }
3593
3594 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3595
3596 static void do_wrpil(DisasContext *dc, TCGv src)
3597 {
3598 if (translator_io_start(&dc->base)) {
3599 dc->base.is_jmp = DISAS_EXIT;
3600 }
3601 gen_helper_wrpil(tcg_env, src);
3602 }
3603
3604 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3605
3606 static void do_wrcwp(DisasContext *dc, TCGv src)
3607 {
3608 gen_helper_wrcwp(tcg_env, src);
3609 }
3610
3611 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3612
3613 static void do_wrcansave(DisasContext *dc, TCGv src)
3614 {
3615 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3616 }
3617
3618 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3619
3620 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3621 {
3622 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3623 }
3624
3625 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3626
3627 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3628 {
3629 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3630 }
3631
3632 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3633
3634 static void do_wrotherwin(DisasContext *dc, TCGv src)
3635 {
3636 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3637 }
3638
3639 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3640
3641 static void do_wrwstate(DisasContext *dc, TCGv src)
3642 {
3643 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3644 }
3645
3646 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3647
3648 static void do_wrgl(DisasContext *dc, TCGv src)
3649 {
3650 gen_helper_wrgl(tcg_env, src);
3651 }
3652
3653 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3654
3655 /* UA2005 strand status */
3656 static void do_wrssr(DisasContext *dc, TCGv src)
3657 {
3658 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3659 }
3660
3661 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3662
3663 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3664
3665 static void do_wrhpstate(DisasContext *dc, TCGv src)
3666 {
3667 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3668 dc->base.is_jmp = DISAS_EXIT;
3669 }
3670
3671 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3672
3673 static void do_wrhtstate(DisasContext *dc, TCGv src)
3674 {
3675 TCGv_i32 tl = tcg_temp_new_i32();
3676 TCGv_ptr tp = tcg_temp_new_ptr();
3677
3678 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3679 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3680 tcg_gen_shli_i32(tl, tl, 3);
3681 tcg_gen_ext_i32_ptr(tp, tl);
3682 tcg_gen_add_ptr(tp, tp, tcg_env);
3683
3684 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3685 }
3686
3687 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3688
3689 static void do_wrhintp(DisasContext *dc, TCGv src)
3690 {
3691 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3692 }
3693
3694 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3695
3696 static void do_wrhtba(DisasContext *dc, TCGv src)
3697 {
3698 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3699 }
3700
3701 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3702
3703 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3704 {
3705 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3706
3707 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3708 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3709 translator_io_start(&dc->base);
3710 gen_helper_tick_set_limit(r_tickptr, src);
3711 /* End TB to handle timer interrupt */
3712 dc->base.is_jmp = DISAS_EXIT;
3713 }
3714
3715 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3716 do_wrhstick_cmpr)
3717
3718 static bool do_saved_restored(DisasContext *dc, bool saved)
3719 {
3720 if (!supervisor(dc)) {
3721 return raise_priv(dc);
3722 }
3723 if (saved) {
3724 gen_helper_saved(tcg_env);
3725 } else {
3726 gen_helper_restored(tcg_env);
3727 }
3728 return advance_pc(dc);
3729 }
3730
3731 TRANS(SAVED, 64, do_saved_restored, true)
3732 TRANS(RESTORED, 64, do_saved_restored, false)
3733
3734 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3735 {
3736 return advance_pc(dc);
3737 }
3738
3739 /*
3740 * TODO: Need a feature bit for sparcv8.
3741 * In the meantime, treat all 32-bit cpus like sparcv7.
3742 */
3743 TRANS(NOP_v7, 32, trans_NOP, a)
3744 TRANS(NOP_v9, 64, trans_NOP, a)
3745
3746 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3747 void (*func)(TCGv, TCGv, TCGv),
3748 void (*funci)(TCGv, TCGv, target_long))
3749 {
3750 TCGv dst, src1;
3751
3752 /* For simplicity, we under-decoded the rs2 form. */
3753 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3754 return false;
3755 }
3756
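    /*
     * When the insn sets the condition codes, compute into cpu_cc_dst
     * so that the deferred flags calculation can find the result.
     */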
3757 if (a->cc) {
3758 dst = cpu_cc_dst;
3759 } else {
3760 dst = gen_dest_gpr(dc, a->rd);
3761 }
3762 src1 = gen_load_gpr(dc, a->rs1);
3763
3764 if (a->imm || a->rs2_or_imm == 0) {
3765 if (funci) {
3766 funci(dst, src1, a->rs2_or_imm);
3767 } else {
3768 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3769 }
3770 } else {
3771 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3772 }
3773 gen_store_gpr(dc, a->rd, dst);
3774
3775 if (a->cc) {
3776 tcg_gen_movi_i32(cpu_cc_op, cc_op);
3777 dc->cc_op = cc_op;
3778 }
3779 return advance_pc(dc);
3780 }
3781
3782 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3783 void (*func)(TCGv, TCGv, TCGv),
3784 void (*funci)(TCGv, TCGv, target_long),
3785 void (*func_cc)(TCGv, TCGv, TCGv))
3786 {
3787 if (a->cc) {
3788 assert(cc_op >= 0);
3789 return do_arith_int(dc, a, cc_op, func_cc, NULL);
3790 }
3791 return do_arith_int(dc, a, cc_op, func, funci);
3792 }
3793
3794 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3795 void (*func)(TCGv, TCGv, TCGv),
3796 void (*funci)(TCGv, TCGv, target_long))
3797 {
3798 return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
3799 }
3800
3801 TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
3802 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
3803 TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
3804 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
3805
3806 TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
3807 TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
3808 TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
3809 TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
3810
3811 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3812 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3813 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3814 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3815 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3816
3817 TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3818 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3819 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3820
3821 TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
3822 TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
3823 TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
3824 TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
3825
3826 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3827 TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
3828
3829 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3830 {
3831 /* OR with %g0 is the canonical alias for MOV. */
3832 if (!a->cc && a->rs1 == 0) {
3833 if (a->imm || a->rs2_or_imm == 0) {
3834 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3835 } else if (a->rs2_or_imm & ~0x1f) {
3836 /* For simplicity, we under-decoded the rs2 form. */
3837 return false;
3838 } else {
3839 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3840 }
3841 return advance_pc(dc);
3842 }
3843 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3844 }
3845
3846 static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
3847 {
3848 switch (dc->cc_op) {
3849 case CC_OP_DIV:
3850 case CC_OP_LOGIC:
3851 /* Carry is known to be zero. Fall back to plain ADD. */
3852 return do_arith(dc, a, CC_OP_ADD,
3853 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
3854 case CC_OP_ADD:
3855 case CC_OP_TADD:
3856 case CC_OP_TADDTV:
3857 return do_arith(dc, a, CC_OP_ADDX,
3858 gen_op_addc_add, NULL, gen_op_addccc_add);
3859 case CC_OP_SUB:
3860 case CC_OP_TSUB:
3861 case CC_OP_TSUBTV:
3862 return do_arith(dc, a, CC_OP_ADDX,
3863 gen_op_addc_sub, NULL, gen_op_addccc_sub);
3864 default:
3865 return do_arith(dc, a, CC_OP_ADDX,
3866 gen_op_addc_generic, NULL, gen_op_addccc_generic);
3867 }
3868 }
3869
3870 static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
3871 {
3872 switch (dc->cc_op) {
3873 case CC_OP_DIV:
3874 case CC_OP_LOGIC:
3875 /* Carry is known to be zero. Fall back to plain SUB. */
3876 return do_arith(dc, a, CC_OP_SUB,
3877 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
3878 case CC_OP_ADD:
3879 case CC_OP_TADD:
3880 case CC_OP_TADDTV:
3881 return do_arith(dc, a, CC_OP_SUBX,
3882 gen_op_subc_add, NULL, gen_op_subccc_add);
3883 case CC_OP_SUB:
3884 case CC_OP_TSUB:
3885 case CC_OP_TSUBTV:
3886 return do_arith(dc, a, CC_OP_SUBX,
3887 gen_op_subc_sub, NULL, gen_op_subccc_sub);
3888 default:
3889 return do_arith(dc, a, CC_OP_SUBX,
3890 gen_op_subc_generic, NULL, gen_op_subccc_generic);
3891 }
3892 }
3893
3894 static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
3895 {
3896 update_psr(dc);
3897 return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
3898 }
3899
3900 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3901 int width, bool cc, bool left)
3902 {
3903 TCGv dst, s1, s2, lo1, lo2;
3904 uint64_t amask, tabl, tabr;
3905 int shift, imask, omask;
3906
3907 dst = gen_dest_gpr(dc, a->rd);
3908 s1 = gen_load_gpr(dc, a->rs1);
3909 s2 = gen_load_gpr(dc, a->rs2);
3910
3911 if (cc) {
3912 tcg_gen_mov_tl(cpu_cc_src, s1);
3913 tcg_gen_mov_tl(cpu_cc_src2, s2);
3914 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3915 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3916 dc->cc_op = CC_OP_SUB;
3917 }
3918
3919 /*
3920 * Theory of operation: there are two tables, left and right (not to
3921 * be confused with the left and right versions of the opcode). These
3922 * are indexed by the low 3 bits of the inputs. To make things "easy",
3923 * these tables are loaded into two constants, TABL and TABR below.
3924 * The operation index = (input & imask) << shift calculates the index
3925 * into the constant, while val = (table >> index) & omask calculates
3926 * the value we're looking for.
3927 */
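    /*
     * For example, EDGE8 (width 8, !left) with (s1 & 7) == 2 gives
     * index = 2 << 3 = 16, so val = (0x0103070f1f3f7fff >> 16) & 0xff
     * = 0x3f: the byte mask with the two leftmost lanes cleared.
     */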
3928 switch (width) {
3929 case 8:
3930 imask = 0x7;
3931 shift = 3;
3932 omask = 0xff;
3933 if (left) {
3934 tabl = 0x80c0e0f0f8fcfeffULL;
3935 tabr = 0xff7f3f1f0f070301ULL;
3936 } else {
3937 tabl = 0x0103070f1f3f7fffULL;
3938 tabr = 0xfffefcf8f0e0c080ULL;
3939 }
3940 break;
3941 case 16:
3942 imask = 0x6;
3943 shift = 1;
3944 omask = 0xf;
3945 if (left) {
3946 tabl = 0x8cef;
3947 tabr = 0xf731;
3948 } else {
3949 tabl = 0x137f;
3950 tabr = 0xfec8;
3951 }
3952 break;
3953 case 32:
3954 imask = 0x4;
3955 shift = 0;
3956 omask = 0x3;
3957 if (left) {
3958 tabl = (2 << 2) | 3;
3959 tabr = (3 << 2) | 1;
3960 } else {
3961 tabl = (1 << 2) | 3;
3962 tabr = (3 << 2) | 2;
3963 }
3964 break;
3965 default:
3966         g_assert_not_reached();
3967 }
3968
3969 lo1 = tcg_temp_new();
3970 lo2 = tcg_temp_new();
3971 tcg_gen_andi_tl(lo1, s1, imask);
3972 tcg_gen_andi_tl(lo2, s2, imask);
3973 tcg_gen_shli_tl(lo1, lo1, shift);
3974 tcg_gen_shli_tl(lo2, lo2, shift);
3975
3976 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3977 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3978 tcg_gen_andi_tl(lo1, lo1, omask);
3979 tcg_gen_andi_tl(lo2, lo2, omask);
3980
3981 amask = address_mask_i(dc, -8);
3982 tcg_gen_andi_tl(s1, s1, amask);
3983 tcg_gen_andi_tl(s2, s2, amask);
3984
3985 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3986 tcg_gen_and_tl(lo2, lo2, lo1);
3987 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3988
3989 gen_store_gpr(dc, a->rd, dst);
3990 return advance_pc(dc);
3991 }
3992
3993 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3994 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3995 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3996 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3997 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3998 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3999
4000 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
4001 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
4002 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
4003 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
4004 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
4005 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4006
4007 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4008 void (*func)(TCGv, TCGv, TCGv))
4009 {
4010 TCGv dst = gen_dest_gpr(dc, a->rd);
4011 TCGv src1 = gen_load_gpr(dc, a->rs1);
4012 TCGv src2 = gen_load_gpr(dc, a->rs2);
4013
4014 func(dst, src1, src2);
4015 gen_store_gpr(dc, a->rd, dst);
4016 return advance_pc(dc);
4017 }
4018
4019 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4020 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4021 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4022
4023 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
4024 {
4025 #ifdef TARGET_SPARC64
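    /*
     * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low three bits of
     * the sum latched into GSR.align for a subsequent FALIGNDATA.
     */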
4026 TCGv tmp = tcg_temp_new();
4027
4028 tcg_gen_add_tl(tmp, s1, s2);
4029 tcg_gen_andi_tl(dst, tmp, -8);
4030 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4031 #else
4032 g_assert_not_reached();
4033 #endif
4034 }
4035
4036 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4037 {
4038 #ifdef TARGET_SPARC64
4039 TCGv tmp = tcg_temp_new();
4040
4041 tcg_gen_add_tl(tmp, s1, s2);
4042 tcg_gen_andi_tl(dst, tmp, -8);
4043 tcg_gen_neg_tl(tmp, tmp);
4044 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4045 #else
4046 g_assert_not_reached();
4047 #endif
4048 }
4049
4050 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4051 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4052
4053 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4054 {
4055 #ifdef TARGET_SPARC64
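    /* BMASK also latches the sum into GSR.mask for use by BSHUFFLE. */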
4056 tcg_gen_add_tl(dst, s1, s2);
4057 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4058 #else
4059 g_assert_not_reached();
4060 #endif
4061 }
4062
4063 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4064
4065 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4066 {
4067 TCGv dst, src1, src2;
4068
4069 /* Reject 64-bit shifts for sparc32. */
4070 if (avail_32(dc) && a->x) {
4071 return false;
4072 }
4073
4074 src2 = tcg_temp_new();
4075 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4076 src1 = gen_load_gpr(dc, a->rs1);
4077 dst = gen_dest_gpr(dc, a->rd);
4078
4079 if (l) {
4080 tcg_gen_shl_tl(dst, src1, src2);
4081 if (!a->x) {
4082 tcg_gen_ext32u_tl(dst, dst);
4083 }
4084 } else if (u) {
4085 if (!a->x) {
4086 tcg_gen_ext32u_tl(dst, src1);
4087 src1 = dst;
4088 }
4089 tcg_gen_shr_tl(dst, src1, src2);
4090 } else {
4091 if (!a->x) {
4092 tcg_gen_ext32s_tl(dst, src1);
4093 src1 = dst;
4094 }
4095 tcg_gen_sar_tl(dst, src1, src2);
4096 }
4097 gen_store_gpr(dc, a->rd, dst);
4098 return advance_pc(dc);
4099 }
4100
4101 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4102 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4103 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4104
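/*
 * For illustration: with a->x clear the count is masked to 5 bits and
 * the 32-bit result is re-extended, so e.g. "srl %g1, %g0, %g2" on a
 * 64-bit CPU still clears the upper 32 bits of %g2, matching the V9
 * behaviour of the 32-bit shift forms.
 */
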
4105 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4106 {
4107 TCGv dst, src1;
4108
4109 /* Reject 64-bit shifts for sparc32. */
4110 if (avail_32(dc) && (a->x || a->i >= 32)) {
4111 return false;
4112 }
4113
4114 src1 = gen_load_gpr(dc, a->rs1);
4115 dst = gen_dest_gpr(dc, a->rd);
4116
4117 if (avail_32(dc) || a->x) {
4118 if (l) {
4119 tcg_gen_shli_tl(dst, src1, a->i);
4120 } else if (u) {
4121 tcg_gen_shri_tl(dst, src1, a->i);
4122 } else {
4123 tcg_gen_sari_tl(dst, src1, a->i);
4124 }
4125 } else {
4126 if (l) {
4127 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4128 } else if (u) {
4129 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4130 } else {
4131 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4132 }
4133 }
4134 gen_store_gpr(dc, a->rd, dst);
4135 return advance_pc(dc);
4136 }
4137
4138 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4139 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4140 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4141
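/*
 * Worked example (sparc64, a->x clear): for SRL_i with i == 8, the
 * extract above reads bits [8, 31] of src1 zero-extended, which is
 * exactly (uint32_t)src1 >> 8; likewise deposit_z and sextract give
 * the zero-extended left shift and the sign-extended arithmetic shift
 * in a single TCG op instead of a shift plus an extension.
 */
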
4142 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4143 {
4144 /* For simplicity, we under-decoded the rs2 form. */
4145 if (!imm && rs2_or_imm & ~0x1f) {
4146 return NULL;
4147 }
4148 if (imm || rs2_or_imm == 0) {
4149 return tcg_constant_tl(rs2_or_imm);
4150 } else {
4151 return cpu_regs[rs2_or_imm];
4152 }
4153 }
4154
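/*
 * Note: %g0 reads as zero, so rs2 == 0 can share the constant path
 * with immediates; a NULL return flags a reserved encoding (rs2 form
 * with nonzero bits above the 5-bit register number) for the caller
 * to reject.
 */
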
4155 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4156 {
4157 TCGv dst = gen_load_gpr(dc, rd);
4158
4159 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4160 gen_store_gpr(dc, rd, dst);
4161 return advance_pc(dc);
4162 }
4163
4164 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4165 {
4166 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4167 DisasCompare cmp;
4168
4169 if (src2 == NULL) {
4170 return false;
4171 }
4172 gen_compare(&cmp, a->cc, a->cond, dc);
4173 return do_mov_cond(dc, &cmp, a->rd, src2);
4174 }
4175
4176 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4177 {
4178 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4179 DisasCompare cmp;
4180
4181 if (src2 == NULL) {
4182 return false;
4183 }
4184 gen_fcompare(&cmp, a->cc, a->cond);
4185 return do_mov_cond(dc, &cmp, a->rd, src2);
4186 }
4187
4188 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4189 {
4190 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4191 DisasCompare cmp;
4192
4193 if (src2 == NULL) {
4194 return false;
4195 }
4196 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4197 return do_mov_cond(dc, &cmp, a->rd, src2);
4198 }
4199
4200 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4201 bool (*func)(DisasContext *dc, int rd, TCGv src))
4202 {
4203 TCGv src1, sum;
4204
4205 /* For simplicity, we under-decoded the rs2 form. */
4206 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4207 return false;
4208 }
4209
4210 /*
4211 * Always load the sum into a new temporary.
4212 * This is required to capture the value across a window change,
4213 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4214 */
4215 sum = tcg_temp_new();
4216 src1 = gen_load_gpr(dc, a->rs1);
4217 if (a->imm || a->rs2_or_imm == 0) {
4218 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4219 } else {
4220 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4221 }
4222 return func(dc, a->rd, sum);
4223 }
4224
4225 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4226 {
4227 /*
4228 * Preserve pc across advance, so that we can delay
4229 * the writeback to rd until after src is consumed.
4230 */
4231 target_ulong cur_pc = dc->pc;
4232
4233 gen_check_align(dc, src, 3);
4234
4235 gen_mov_pc_npc(dc);
4236 tcg_gen_mov_tl(cpu_npc, src);
4237 gen_address_mask(dc, cpu_npc);
4238 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4239
4240 dc->npc = DYNAMIC_PC_LOOKUP;
4241 return true;
4242 }
4243
4244 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4245
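/*
 * Linkage note: cur_pc is latched before gen_mov_pc_npc() advances the
 * translator state, so rd receives the address of the JMPL instruction
 * itself; e.g. "jmpl %g1, %o7" leaves the return address in %o7 just
 * as a CALL would.
 */
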
4246 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4247 {
4248 if (!supervisor(dc)) {
4249 return raise_priv(dc);
4250 }
4251
4252 gen_check_align(dc, src, 3);
4253
4254 gen_mov_pc_npc(dc);
4255 tcg_gen_mov_tl(cpu_npc, src);
4256 gen_helper_rett(tcg_env);
4257
4258 dc->npc = DYNAMIC_PC;
4259 return true;
4260 }
4261
4262 TRANS(RETT, 32, do_add_special, a, do_rett)
4263
4264 static bool do_return(DisasContext *dc, int rd, TCGv src)
4265 {
4266 gen_check_align(dc, src, 3);
4267
4268 gen_mov_pc_npc(dc);
4269 tcg_gen_mov_tl(cpu_npc, src);
4270 gen_address_mask(dc, cpu_npc);
4271
4272 gen_helper_restore(tcg_env);
4273 dc->npc = DYNAMIC_PC_LOOKUP;
4274 return true;
4275 }
4276
4277 TRANS(RETURN, 64, do_add_special, a, do_return)
4278
4279 static bool do_save(DisasContext *dc, int rd, TCGv src)
4280 {
4281 gen_helper_save(tcg_env);
4282 gen_store_gpr(dc, rd, src);
4283 return advance_pc(dc);
4284 }
4285
4286 TRANS(SAVE, ALL, do_add_special, a, do_save)
4287
4288 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4289 {
4290 gen_helper_restore(tcg_env);
4291 gen_store_gpr(dc, rd, src);
4292 return advance_pc(dc);
4293 }
4294
4295 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4296
4297 static bool do_done_retry(DisasContext *dc, bool done)
4298 {
4299 if (!supervisor(dc)) {
4300 return raise_priv(dc);
4301 }
4302 dc->npc = DYNAMIC_PC;
4303 dc->pc = DYNAMIC_PC;
4304 translator_io_start(&dc->base);
4305 if (done) {
4306 gen_helper_done(tcg_env);
4307 } else {
4308 gen_helper_retry(tcg_env);
4309 }
4310 return true;
4311 }
4312
4313 TRANS(DONE, 64, do_done_retry, true)
4314 TRANS(RETRY, 64, do_done_retry, false)
4315
4316 /*
4317 * Major opcode 11 -- load and store instructions
4318 */
4319
4320 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4321 {
4322 TCGv addr, tmp = NULL;
4323
4324 /* For simplicity, we under-decoded the rs2 form. */
4325 if (!imm && rs2_or_imm & ~0x1f) {
4326 return NULL;
4327 }
4328
4329 addr = gen_load_gpr(dc, rs1);
4330 if (rs2_or_imm) {
4331 tmp = tcg_temp_new();
4332 if (imm) {
4333 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4334 } else {
4335 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4336 }
4337 addr = tmp;
4338 }
4339 if (AM_CHECK(dc)) {
4340 if (!tmp) {
4341 tmp = tcg_temp_new();
4342 }
4343 tcg_gen_ext32u_tl(tmp, addr);
4344 addr = tmp;
4345 }
4346 return addr;
4347 }
4348
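/*
 * Effective-address sketch (mirrors the code above, hypothetical C):
 *
 *     ea = gpr[rs1] + (imm ? simm13 : gpr[rs2]);
 *     if (AM_CHECK(dc))           // 32-bit address masking active
 *         ea = (uint32_t)ea;
 *
 * The rs2_or_imm == 0 shortcut reuses gpr[rs1] directly, since adding
 * zero (or %g0) is a no-op.
 */
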
4349 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4350 {
4351 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4352 DisasASI da;
4353
4354 if (addr == NULL) {
4355 return false;
4356 }
4357 da = resolve_asi(dc, a->asi, mop);
4358
4359 reg = gen_dest_gpr(dc, a->rd);
4360 gen_ld_asi(dc, &da, reg, addr);
4361 gen_store_gpr(dc, a->rd, reg);
4362 return advance_pc(dc);
4363 }
4364
4365 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4366 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4367 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4368 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4369 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4370 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4371 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4372
4373 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4374 {
4375 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4376 DisasASI da;
4377
4378 if (addr == NULL) {
4379 return false;
4380 }
4381 da = resolve_asi(dc, a->asi, mop);
4382
4383 reg = gen_load_gpr(dc, a->rd);
4384 gen_st_asi(dc, &da, reg, addr);
4385 return advance_pc(dc);
4386 }
4387
4388 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4389 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4390 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4391 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4392
4393 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4394 {
4395 TCGv addr;
4396 DisasASI da;
4397
4398 if (a->rd & 1) {
4399 return false;
4400 }
4401 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4402 if (addr == NULL) {
4403 return false;
4404 }
4405 da = resolve_asi(dc, a->asi, MO_TEUQ);
4406 gen_ldda_asi(dc, &da, addr, a->rd);
4407 return advance_pc(dc);
4408 }
4409
4410 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4411 {
4412 TCGv addr;
4413 DisasASI da;
4414
4415 if (a->rd & 1) {
4416 return false;
4417 }
4418 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4419 if (addr == NULL) {
4420 return false;
4421 }
4422 da = resolve_asi(dc, a->asi, MO_TEUQ);
4423 gen_stda_asi(dc, &da, addr, a->rd);
4424 return advance_pc(dc);
4425 }
4426
4427 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4428 {
4429 TCGv addr, reg;
4430 DisasASI da;
4431
4432 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4433 if (addr == NULL) {
4434 return false;
4435 }
4436 da = resolve_asi(dc, a->asi, MO_UB);
4437
4438 reg = gen_dest_gpr(dc, a->rd);
4439 gen_ldstub_asi(dc, &da, reg, addr);
4440 gen_store_gpr(dc, a->rd, reg);
4441 return advance_pc(dc);
4442 }
4443
4444 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4445 {
4446 TCGv addr, dst, src;
4447 DisasASI da;
4448
4449 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4450 if (addr == NULL) {
4451 return false;
4452 }
4453 da = resolve_asi(dc, a->asi, MO_TEUL);
4454
4455 dst = gen_dest_gpr(dc, a->rd);
4456 src = gen_load_gpr(dc, a->rd);
4457 gen_swap_asi(dc, &da, dst, src, addr);
4458 gen_store_gpr(dc, a->rd, dst);
4459 return advance_pc(dc);
4460 }
4461
4462 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4463 {
4464 TCGv addr, o, n, c;
4465 DisasASI da;
4466
4467 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4468 if (addr == NULL) {
4469 return false;
4470 }
4471 da = resolve_asi(dc, a->asi, mop);
4472
4473 o = gen_dest_gpr(dc, a->rd);
4474 n = gen_load_gpr(dc, a->rd);
4475 c = gen_load_gpr(dc, a->rs2_or_imm);
4476 gen_cas_asi(dc, &da, o, n, c, addr);
4477 gen_store_gpr(dc, a->rd, o);
4478 return advance_pc(dc);
4479 }
4480
4481 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4482 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4483
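/*
 * Operand naming in do_casa, per the V9 CASA definition: c = gpr[rs2]
 * is the comparand, n = gpr[rd] the candidate new value, and o receives
 * the old memory word, i.e. roughly
 *
 *     o = *addr;
 *     if (o == c)
 *         *addr = n;      // performed atomically by gen_cas_asi
 *     gpr[rd] = o;
 */
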
4484 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4485 {
4486 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4487 DisasASI da;
4488
4489 if (addr == NULL) {
4490 return false;
4491 }
4492 if (gen_trap_ifnofpu(dc)) {
4493 return true;
4494 }
4495 if (sz == MO_128 && gen_trap_float128(dc)) {
4496 return true;
4497 }
4498 da = resolve_asi(dc, a->asi, MO_TE | sz);
4499 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4500 gen_update_fprs_dirty(dc, a->rd);
4501 return advance_pc(dc);
4502 }
4503
4504 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4505 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4506 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4507
4508 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4509 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4510 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4511
4512 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4513 {
4514 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4515 DisasASI da;
4516
4517 if (addr == NULL) {
4518 return false;
4519 }
4520 if (gen_trap_ifnofpu(dc)) {
4521 return true;
4522 }
4523 if (sz == MO_128 && gen_trap_float128(dc)) {
4524 return true;
4525 }
4526 da = resolve_asi(dc, a->asi, MO_TE | sz);
4527 gen_stf_asi(dc, &da, sz, addr, a->rd);
4528 return advance_pc(dc);
4529 }
4530
4531 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4532 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4533 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4534
4535 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4536 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4537 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4538
4539 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4540 {
4541 if (!avail_32(dc)) {
4542 return false;
4543 }
4544 if (!supervisor(dc)) {
4545 return raise_priv(dc);
4546 }
4547 if (gen_trap_ifnofpu(dc)) {
4548 return true;
4549 }
4550 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4551 return true;
4552 }
4553
4554 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4555 target_ulong new_mask, target_ulong old_mask)
4556 {
4557 TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4558 if (addr == NULL) {
4559 return false;
4560 }
4561 if (gen_trap_ifnofpu(dc)) {
4562 return true;
4563 }
4564 tmp = tcg_temp_new();
4565 tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
4566 tcg_gen_andi_tl(tmp, tmp, new_mask);
4567 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
4568 tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
4569 gen_helper_set_fsr(tcg_env, cpu_fsr);
4570 return advance_pc(dc);
4571 }
4572
4573 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4574 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4575
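/*
 * The two masks merge the loaded word into %fsr field-wise, roughly
 *
 *     fsr = (mem & new_mask) | (fsr & old_mask);
 *
 * so e.g. the 32-bit LDFSR form can leave the upper V9-only fields
 * (fcc1..fcc3) of a 64-bit %fsr untouched.
 */
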
4576 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4577 {
4578 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4579 if (addr == NULL) {
4580 return false;
4581 }
4582 if (gen_trap_ifnofpu(dc)) {
4583 return true;
4584 }
4585 tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4586 return advance_pc(dc);
4587 }
4588
4589 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4590 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4591
4592 static bool do_ff(DisasContext *dc, arg_r_r *a,
4593 void (*func)(TCGv_i32, TCGv_i32))
4594 {
4595 TCGv_i32 tmp;
4596
4597 if (gen_trap_ifnofpu(dc)) {
4598 return true;
4599 }
4600
4601 tmp = gen_load_fpr_F(dc, a->rs);
4602 func(tmp, tmp);
4603 gen_store_fpr_F(dc, a->rd, tmp);
4604 return advance_pc(dc);
4605 }
4606
4607 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4608 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4609 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4610 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4611 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4612
4613 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4614 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4615 {
4616 TCGv_i32 tmp;
4617
4618 if (gen_trap_ifnofpu(dc)) {
4619 return true;
4620 }
4621
4622 gen_op_clear_ieee_excp_and_FTT();
4623 tmp = gen_load_fpr_F(dc, a->rs);
4624 func(tmp, tcg_env, tmp);
4625 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4626 gen_store_fpr_F(dc, a->rd, tmp);
4627 return advance_pc(dc);
4628 }
4629
4630 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4631 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4632 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4633
4634 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4635 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4636 {
4637 TCGv_i32 dst;
4638 TCGv_i64 src;
4639
4640 if (gen_trap_ifnofpu(dc)) {
4641 return true;
4642 }
4643
4644 gen_op_clear_ieee_excp_and_FTT();
4645 dst = gen_dest_fpr_F(dc);
4646 src = gen_load_fpr_D(dc, a->rs);
4647 func(dst, tcg_env, src);
4648 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4649 gen_store_fpr_F(dc, a->rd, dst);
4650 return advance_pc(dc);
4651 }
4652
4653 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4654 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4655 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4656
4657 static bool do_dd(DisasContext *dc, arg_r_r *a,
4658 void (*func)(TCGv_i64, TCGv_i64))
4659 {
4660 TCGv_i64 dst, src;
4661
4662 if (gen_trap_ifnofpu(dc)) {
4663 return true;
4664 }
4665
4666 dst = gen_dest_fpr_D(dc, a->rd);
4667 src = gen_load_fpr_D(dc, a->rs);
4668 func(dst, src);
4669 gen_store_fpr_D(dc, a->rd, dst);
4670 return advance_pc(dc);
4671 }
4672
4673 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4674 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4675 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4676 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4677 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4678
4679 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4680 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4681 {
4682 TCGv_i64 dst, src;
4683
4684 if (gen_trap_ifnofpu(dc)) {
4685 return true;
4686 }
4687
4688 gen_op_clear_ieee_excp_and_FTT();
4689 dst = gen_dest_fpr_D(dc, a->rd);
4690 src = gen_load_fpr_D(dc, a->rs);
4691 func(dst, tcg_env, src);
4692 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4693 gen_store_fpr_D(dc, a->rd, dst);
4694 return advance_pc(dc);
4695 }
4696
4697 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4698 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4699 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4700
4701 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4702 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4703 {
4704 TCGv_i64 dst;
4705 TCGv_i32 src;
4706
4707 if (gen_trap_ifnofpu(dc)) {
4708 return true;
4709 }
4710
4711 gen_op_clear_ieee_excp_and_FTT();
4712 dst = gen_dest_fpr_D(dc, a->rd);
4713 src = gen_load_fpr_F(dc, a->rs);
4714 func(dst, tcg_env, src);
4715 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4716 gen_store_fpr_D(dc, a->rd, dst);
4717 return advance_pc(dc);
4718 }
4719
4720 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4721 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4722 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4723
4724 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4725 void (*func)(TCGv_env))
4726 {
4727 if (gen_trap_ifnofpu(dc)) {
4728 return true;
4729 }
4730 if (gen_trap_float128(dc)) {
4731 return true;
4732 }
4733
4734 gen_op_clear_ieee_excp_and_FTT();
4735 gen_op_load_fpr_QT1(QFPREG(a->rs));
4736 func(tcg_env);
4737 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4738 gen_op_store_QT0_fpr(QFPREG(a->rd));
4739 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4740 return advance_pc(dc);
4741 }
4742
4743 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4744
4745 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4746 void (*func)(TCGv_i32, TCGv_env))
4747 {
4748 TCGv_i32 dst;
4749
4750 if (gen_trap_ifnofpu(dc)) {
4751 return true;
4752 }
4753 if (gen_trap_float128(dc)) {
4754 return true;
4755 }
4756
4757 gen_op_clear_ieee_excp_and_FTT();
4758 gen_op_load_fpr_QT1(QFPREG(a->rs));
4759 dst = gen_dest_fpr_F(dc);
4760 func(dst, tcg_env);
4761 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4762 gen_store_fpr_F(dc, a->rd, dst);
4763 return advance_pc(dc);
4764 }
4765
4766 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4767 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4768
4769 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4770 void (*func)(TCGv_i64, TCGv_env))
4771 {
4772 TCGv_i64 dst;
4773
4774 if (gen_trap_ifnofpu(dc)) {
4775 return true;
4776 }
4777 if (gen_trap_float128(dc)) {
4778 return true;
4779 }
4780
4781 gen_op_clear_ieee_excp_and_FTT();
4782 gen_op_load_fpr_QT1(QFPREG(a->rs));
4783 dst = gen_dest_fpr_D(dc, a->rd);
4784 func(dst, tcg_env);
4785 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4786 gen_store_fpr_D(dc, a->rd, dst);
4787 return advance_pc(dc);
4788 }
4789
4790 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4791 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4792
4793 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4794 void (*func)(TCGv_env, TCGv_i32))
4795 {
4796 TCGv_i32 src;
4797
4798 if (gen_trap_ifnofpu(dc)) {
4799 return true;
4800 }
4801 if (gen_trap_float128(dc)) {
4802 return true;
4803 }
4804
4805 gen_op_clear_ieee_excp_and_FTT();
4806 src = gen_load_fpr_F(dc, a->rs);
4807 func(tcg_env, src);
4808 gen_op_store_QT0_fpr(QFPREG(a->rd));
4809 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4810 return advance_pc(dc);
4811 }
4812
4813 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4814 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4815
4816 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4817 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4818 {
4819 TCGv_i32 src1, src2;
4820
4821 if (gen_trap_ifnofpu(dc)) {
4822 return true;
4823 }
4824
4825 src1 = gen_load_fpr_F(dc, a->rs1);
4826 src2 = gen_load_fpr_F(dc, a->rs2);
4827 func(src1, src1, src2);
4828 gen_store_fpr_F(dc, a->rd, src1);
4829 return advance_pc(dc);
4830 }
4831
4832 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4833 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4834 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4835 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4836 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4837 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4838 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4839 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4840 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4841 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4842 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4843 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4844
4845 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4846 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4847 {
4848 TCGv_i32 src1, src2;
4849
4850 if (gen_trap_ifnofpu(dc)) {
4851 return true;
4852 }
4853
4854 gen_op_clear_ieee_excp_and_FTT();
4855 src1 = gen_load_fpr_F(dc, a->rs1);
4856 src2 = gen_load_fpr_F(dc, a->rs2);
4857 func(src1, tcg_env, src1, src2);
4858 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4859 gen_store_fpr_F(dc, a->rd, src1);
4860 return advance_pc(dc);
4861 }
4862
4863 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4864 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4865 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4866 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4867
4868 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4869 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4870 {
4871 TCGv_i64 dst, src1, src2;
4872
4873 if (gen_trap_ifnofpu(dc)) {
4874 return true;
4875 }
4876
4877 dst = gen_dest_fpr_D(dc, a->rd);
4878 src1 = gen_load_fpr_D(dc, a->rs1);
4879 src2 = gen_load_fpr_D(dc, a->rs2);
4880 func(dst, src1, src2);
4881 gen_store_fpr_D(dc, a->rd, dst);
4882 return advance_pc(dc);
4883 }
4884
4885 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4886 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4887 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4888 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4889 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4890 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4891 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4892 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4893 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4894
4895 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4896 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4897 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4898 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4899 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4900 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4901 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4902 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4903 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4904 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4905 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4906 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4907
4908 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4909 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4910 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4911
4912 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4913 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4914 {
4915 TCGv_i64 dst, src1, src2;
4916
4917 if (gen_trap_ifnofpu(dc)) {
4918 return true;
4919 }
4920
4921 gen_op_clear_ieee_excp_and_FTT();
4922 dst = gen_dest_fpr_D(dc, a->rd);
4923 src1 = gen_load_fpr_D(dc, a->rs1);
4924 src2 = gen_load_fpr_D(dc, a->rs2);
4925 func(dst, tcg_env, src1, src2);
4926 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4927 gen_store_fpr_D(dc, a->rd, dst);
4928 return advance_pc(dc);
4929 }
4930
4931 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4932 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4933 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4934 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4935
4936 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4937 {
4938 TCGv_i64 dst;
4939 TCGv_i32 src1, src2;
4940
4941 if (gen_trap_ifnofpu(dc)) {
4942 return true;
4943 }
4944 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4945 return raise_unimpfpop(dc);
4946 }
4947
4948 gen_op_clear_ieee_excp_and_FTT();
4949 dst = gen_dest_fpr_D(dc, a->rd);
4950 src1 = gen_load_fpr_F(dc, a->rs1);
4951 src2 = gen_load_fpr_F(dc, a->rs2);
4952 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4953 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4954 gen_store_fpr_D(dc, a->rd, dst);
4955 return advance_pc(dc);
4956 }
4957
4958 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4959 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4960 {
4961 TCGv_i64 dst, src0, src1, src2;
4962
4963 if (gen_trap_ifnofpu(dc)) {
4964 return true;
4965 }
4966
4967 dst = gen_dest_fpr_D(dc, a->rd);
4968 src0 = gen_load_fpr_D(dc, a->rd);
4969 src1 = gen_load_fpr_D(dc, a->rs1);
4970 src2 = gen_load_fpr_D(dc, a->rs2);
4971 func(dst, src0, src1, src2);
4972 gen_store_fpr_D(dc, a->rd, dst);
4973 return advance_pc(dc);
4974 }
4975
4976 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4977
4978 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4979 void (*func)(TCGv_env))
4980 {
4981 if (gen_trap_ifnofpu(dc)) {
4982 return true;
4983 }
4984 if (gen_trap_float128(dc)) {
4985 return true;
4986 }
4987
4988 gen_op_clear_ieee_excp_and_FTT();
4989 gen_op_load_fpr_QT0(QFPREG(a->rs1));
4990 gen_op_load_fpr_QT1(QFPREG(a->rs2));
4991 func(tcg_env);
4992 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4993 gen_op_store_QT0_fpr(QFPREG(a->rd));
4994 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4995 return advance_pc(dc);
4996 }
4997
4998 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4999 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5000 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5001 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5002
5003 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5004 {
5005 TCGv_i64 src1, src2;
5006
5007 if (gen_trap_ifnofpu(dc)) {
5008 return true;
5009 }
5010 if (gen_trap_float128(dc)) {
5011 return true;
5012 }
5013
5014 gen_op_clear_ieee_excp_and_FTT();
5015 src1 = gen_load_fpr_D(dc, a->rs1);
5016 src2 = gen_load_fpr_D(dc, a->rs2);
5017 gen_helper_fdmulq(tcg_env, src1, src2);
5018 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5019 gen_op_store_QT0_fpr(QFPREG(a->rd));
5020 gen_update_fprs_dirty(dc, QFPREG(a->rd));
5021 return advance_pc(dc);
5022 }
5023
5024 #define CHECK_IU_FEATURE(dc, FEATURE) \
5025 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5026 goto illegal_insn;
5027 #define CHECK_FPU_FEATURE(dc, FEATURE) \
5028 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
5029 goto nfpu_insn;
5030
5031 /* Before each instruction, dc->pc must be static. */
5032 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
5033 {
5034 unsigned int opc, rs1, rs2, rd;
5035 TCGv cpu_src1 __attribute__((unused));
5036 TCGv_i32 cpu_src1_32, cpu_src2_32;
5037 TCGv_i64 cpu_src1_64, cpu_src2_64;
5038 TCGv_i32 cpu_dst_32 __attribute__((unused));
5039 TCGv_i64 cpu_dst_64 __attribute__((unused));
5040
5041 opc = GET_FIELD(insn, 0, 1);
5042 rd = GET_FIELD(insn, 2, 6);
5043
5044 switch (opc) {
5045 case 0:
5046 goto illegal_insn; /* in decodetree */
5047 case 1:
5048 g_assert_not_reached(); /* in decodetree */
5049 case 2: /* FPU & Logical Operations */
5050 {
5051 unsigned int xop = GET_FIELD(insn, 7, 12);
5052 TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
5053
5054 if (xop == 0x34) { /* FPU Operations */
5055 if (gen_trap_ifnofpu(dc)) {
5056 goto jmp_insn;
5057 }
5058 gen_op_clear_ieee_excp_and_FTT();
5059 rs1 = GET_FIELD(insn, 13, 17);
5060 rs2 = GET_FIELD(insn, 27, 31);
5061 xop = GET_FIELD(insn, 18, 26);
5062
5063 switch (xop) {
5064 case 0x1: /* fmovs */
5065 case 0x5: /* fnegs */
5066 case 0x9: /* fabss */
5067 case 0x2: /* V9 fmovd */
5068 case 0x6: /* V9 fnegd */
5069 case 0xa: /* V9 fabsd */
5070 case 0x29: /* fsqrts */
5071 case 0xc4: /* fitos */
5072 case 0xd1: /* fstoi */
5073 case 0x2a: /* fsqrtd */
5074 case 0x82: /* V9 fdtox */
5075 case 0x88: /* V9 fxtod */
5076 case 0x2b: /* fsqrtq */
5077 case 0x41: /* fadds */
5078 case 0x45: /* fsubs */
5079 case 0x49: /* fmuls */
5080 case 0x4d: /* fdivs */
5081 case 0x42: /* faddd */
5082 case 0x46: /* fsubd */
5083 case 0x4a: /* fmuld */
5084 case 0x4e: /* fdivd */
5085 case 0x43: /* faddq */
5086 case 0x47: /* fsubq */
5087 case 0x4b: /* fmulq */
5088 case 0x4f: /* fdivq */
5089 case 0x69: /* fsmuld */
5090 case 0x6e: /* fdmulq */
5091 case 0xc6: /* fdtos */
5092 case 0xd2: /* fdtoi */
5093 case 0x84: /* V9 fxtos */
5094 case 0xc8: /* fitod */
5095 case 0xc9: /* fstod */
5096 case 0x81: /* V9 fstox */
5097 case 0xc7: /* fqtos */
5098 case 0xd3: /* fqtoi */
5099 case 0xcb: /* fqtod */
5100 case 0x83: /* V9 fqtox */
5101 case 0xcc: /* fitoq */
5102 case 0xcd: /* fstoq */
5103 g_assert_not_reached(); /* in decodetree */
5104 case 0xce: /* fdtoq */
5105 CHECK_FPU_FEATURE(dc, FLOAT128);
5106 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
5107 break;
5108 #ifdef TARGET_SPARC64
5109 case 0x3: /* V9 fmovq */
5110 CHECK_FPU_FEATURE(dc, FLOAT128);
5111 gen_move_Q(dc, rd, rs2);
5112 break;
5113 case 0x7: /* V9 fnegq */
5114 CHECK_FPU_FEATURE(dc, FLOAT128);
5115 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
5116 break;
5117 case 0xb: /* V9 fabsq */
5118 CHECK_FPU_FEATURE(dc, FLOAT128);
5119 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
5120 break;
5121 case 0x8c: /* V9 fxtoq */
5122 CHECK_FPU_FEATURE(dc, FLOAT128);
5123 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
5124 break;
5125 #endif
5126 default:
5127 goto illegal_insn;
5128 }
5129 } else if (xop == 0x35) { /* FPU Operations */
5130 #ifdef TARGET_SPARC64
5131 int cond;
5132 #endif
5133 if (gen_trap_ifnofpu(dc)) {
5134 goto jmp_insn;
5135 }
5136 gen_op_clear_ieee_excp_and_FTT();
5137 rs1 = GET_FIELD(insn, 13, 17);
5138 rs2 = GET_FIELD(insn, 27, 31);
5139 xop = GET_FIELD(insn, 18, 26);
5140
5141 #ifdef TARGET_SPARC64
5142 #define FMOVR(sz) \
5143 do { \
5144 DisasCompare cmp; \
5145 cond = GET_FIELD_SP(insn, 10, 12); \
5146 cpu_src1 = get_src1(dc, insn); \
5147 gen_compare_reg(&cmp, cond, cpu_src1); \
5148 gen_fmov##sz(dc, &cmp, rd, rs2); \
5149 } while (0)
5150
5151 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
5152 FMOVR(s);
5153 break;
5154 } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */
5155 FMOVR(d);
5156 break;
5157 } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
5158 CHECK_FPU_FEATURE(dc, FLOAT128);
5159 FMOVR(q);
5160 break;
5161 }
5162 #undef FMOVR
5163 #endif
5164 switch (xop) {
5165 #ifdef TARGET_SPARC64
5166 #define FMOVCC(fcc, sz) \
5167 do { \
5168 DisasCompare cmp; \
5169 cond = GET_FIELD_SP(insn, 14, 17); \
5170 gen_fcompare(&cmp, fcc, cond); \
5171 gen_fmov##sz(dc, &cmp, rd, rs2); \
5172 } while (0)
5173
5174 case 0x001: /* V9 fmovscc %fcc0 */
5175 FMOVCC(0, s);
5176 break;
5177 case 0x002: /* V9 fmovdcc %fcc0 */
5178 FMOVCC(0, d);
5179 break;
5180 case 0x003: /* V9 fmovqcc %fcc0 */
5181 CHECK_FPU_FEATURE(dc, FLOAT128);
5182 FMOVCC(0, q);
5183 break;
5184 case 0x041: /* V9 fmovscc %fcc1 */
5185 FMOVCC(1, s);
5186 break;
5187 case 0x042: /* V9 fmovdcc %fcc1 */
5188 FMOVCC(1, d);
5189 break;
5190 case 0x043: /* V9 fmovqcc %fcc1 */
5191 CHECK_FPU_FEATURE(dc, FLOAT128);
5192 FMOVCC(1, q);
5193 break;
5194 case 0x081: /* V9 fmovscc %fcc2 */
5195 FMOVCC(2, s);
5196 break;
5197 case 0x082: /* V9 fmovdcc %fcc2 */
5198 FMOVCC(2, d);
5199 break;
5200 case 0x083: /* V9 fmovqcc %fcc2 */
5201 CHECK_FPU_FEATURE(dc, FLOAT128);
5202 FMOVCC(2, q);
5203 break;
5204 case 0x0c1: /* V9 fmovscc %fcc3 */
5205 FMOVCC(3, s);
5206 break;
5207 case 0x0c2: /* V9 fmovdcc %fcc3 */
5208 FMOVCC(3, d);
5209 break;
5210 case 0x0c3: /* V9 fmovqcc %fcc3 */
5211 CHECK_FPU_FEATURE(dc, FLOAT128);
5212 FMOVCC(3, q);
5213 break;
5214 #undef FMOVCC
5215 #define FMOVCC(xcc, sz) \
5216 do { \
5217 DisasCompare cmp; \
5218 cond = GET_FIELD_SP(insn, 14, 17); \
5219 gen_compare(&cmp, xcc, cond, dc); \
5220 gen_fmov##sz(dc, &cmp, rd, rs2); \
5221 } while (0)
5222
5223 case 0x101: /* V9 fmovscc %icc */
5224 FMOVCC(0, s);
5225 break;
5226 case 0x102: /* V9 fmovdcc %icc */
5227 FMOVCC(0, d);
5228 break;
5229 case 0x103: /* V9 fmovqcc %icc */
5230 CHECK_FPU_FEATURE(dc, FLOAT128);
5231 FMOVCC(0, q);
5232 break;
5233 case 0x181: /* V9 fmovscc %xcc */
5234 FMOVCC(1, s);
5235 break;
5236 case 0x182: /* V9 fmovdcc %xcc */
5237 FMOVCC(1, d);
5238 break;
5239 case 0x183: /* V9 fmovqcc %xcc */
5240 CHECK_FPU_FEATURE(dc, FLOAT128);
5241 FMOVCC(1, q);
5242 break;
5243 #undef FMOVCC
5244 #endif
5245 case 0x51: /* fcmps, V9 %fcc */
5246 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5247 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
5248 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
5249 break;
5250 case 0x52: /* fcmpd, V9 %fcc */
5251 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5252 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5253 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
5254 break;
5255 case 0x53: /* fcmpq, V9 %fcc */
5256 CHECK_FPU_FEATURE(dc, FLOAT128);
5257 gen_op_load_fpr_QT0(QFPREG(rs1));
5258 gen_op_load_fpr_QT1(QFPREG(rs2));
5259 gen_op_fcmpq(rd & 3);
5260 break;
5261 case 0x55: /* fcmpes, V9 %fcc */
5262 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5263 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
5264 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
5265 break;
5266 case 0x56: /* fcmped, V9 %fcc */
5267 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5268 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5269 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
5270 break;
5271 case 0x57: /* fcmpeq, V9 %fcc */
5272 CHECK_FPU_FEATURE(dc, FLOAT128);
5273 gen_op_load_fpr_QT0(QFPREG(rs1));
5274 gen_op_load_fpr_QT1(QFPREG(rs2));
5275 gen_op_fcmpeq(rd & 3);
5276 break;
5277 default:
5278 goto illegal_insn;
5279 }
5280 } else if (xop == 0x36) {
5281 #ifdef TARGET_SPARC64
5282 /* VIS */
5283 int opf = GET_FIELD_SP(insn, 5, 13);
5284 rs1 = GET_FIELD(insn, 13, 17);
5285 rs2 = GET_FIELD(insn, 27, 31);
5286 if (gen_trap_ifnofpu(dc)) {
5287 goto jmp_insn;
5288 }
5289
5290 switch (opf) {
5291 case 0x000: /* VIS I edge8cc */
5292 case 0x001: /* VIS II edge8n */
5293 case 0x002: /* VIS I edge8lcc */
5294 case 0x003: /* VIS II edge8ln */
5295 case 0x004: /* VIS I edge16cc */
5296 case 0x005: /* VIS II edge16n */
5297 case 0x006: /* VIS I edge16lcc */
5298 case 0x007: /* VIS II edge16ln */
5299 case 0x008: /* VIS I edge32cc */
5300 case 0x009: /* VIS II edge32n */
5301 case 0x00a: /* VIS I edge32lcc */
5302 case 0x00b: /* VIS II edge32ln */
5303 case 0x010: /* VIS I array8 */
5304 case 0x012: /* VIS I array16 */
5305 case 0x014: /* VIS I array32 */
5306 case 0x018: /* VIS I alignaddr */
5307 case 0x01a: /* VIS I alignaddrl */
5308 case 0x019: /* VIS II bmask */
5309 case 0x067: /* VIS I fnot2s */
5310 case 0x06b: /* VIS I fnot1s */
5311 case 0x075: /* VIS I fsrc1s */
5312 case 0x079: /* VIS I fsrc2s */
5313 case 0x066: /* VIS I fnot2 */
5314 case 0x06a: /* VIS I fnot1 */
5315 case 0x074: /* VIS I fsrc1 */
5316 case 0x078: /* VIS I fsrc2 */
5317 case 0x051: /* VIS I fpadd16s */
5318 case 0x053: /* VIS I fpadd32s */
5319 case 0x055: /* VIS I fpsub16s */
5320 case 0x057: /* VIS I fpsub32s */
5321 case 0x063: /* VIS I fnors */
5322 case 0x065: /* VIS I fandnot2s */
5323 case 0x069: /* VIS I fandnot1s */
5324 case 0x06d: /* VIS I fxors */
5325 case 0x06f: /* VIS I fnands */
5326 case 0x071: /* VIS I fands */
5327 case 0x073: /* VIS I fxnors */
5328 case 0x077: /* VIS I fornot2s */
5329 case 0x07b: /* VIS I fornot1s */
5330 case 0x07d: /* VIS I fors */
5331 case 0x050: /* VIS I fpadd16 */
5332 case 0x052: /* VIS I fpadd32 */
5333 case 0x054: /* VIS I fpsub16 */
5334 case 0x056: /* VIS I fpsub32 */
5335 case 0x062: /* VIS I fnor */
5336 case 0x064: /* VIS I fandnot2 */
5337 case 0x068: /* VIS I fandnot1 */
5338 case 0x06c: /* VIS I fxor */
5339 case 0x06e: /* VIS I fnand */
5340 case 0x070: /* VIS I fand */
5341 case 0x072: /* VIS I fxnor */
5342 case 0x076: /* VIS I fornot2 */
5343 case 0x07a: /* VIS I fornot1 */
5344 case 0x07c: /* VIS I for */
5345 case 0x031: /* VIS I fmul8x16 */
5346 case 0x033: /* VIS I fmul8x16au */
5347 case 0x035: /* VIS I fmul8x16al */
5348 case 0x036: /* VIS I fmul8sux16 */
5349 case 0x037: /* VIS I fmul8ulx16 */
5350 case 0x038: /* VIS I fmuld8sux16 */
5351 case 0x039: /* VIS I fmuld8ulx16 */
5352 case 0x04b: /* VIS I fpmerge */
5353 case 0x04d: /* VIS I fexpand */
5354 case 0x03e: /* VIS I pdist */
5355 case 0x03a: /* VIS I fpack32 */
5356 case 0x048: /* VIS I faligndata */
5357 case 0x04c: /* VIS II bshuffle */
5358 g_assert_not_reached(); /* in decodetree */
5359 case 0x020: /* VIS I fcmple16 */
5360 CHECK_FPU_FEATURE(dc, VIS1);
5361 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5362 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5363 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
5364 gen_store_gpr(dc, rd, cpu_dst);
5365 break;
5366 case 0x022: /* VIS I fcmpne16 */
5367 CHECK_FPU_FEATURE(dc, VIS1);
5368 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5369 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5370 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
5371 gen_store_gpr(dc, rd, cpu_dst);
5372 break;
5373 case 0x024: /* VIS I fcmple32 */
5374 CHECK_FPU_FEATURE(dc, VIS1);
5375 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5376 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5377 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
5378 gen_store_gpr(dc, rd, cpu_dst);
5379 break;
5380 case 0x026: /* VIS I fcmpne32 */
5381 CHECK_FPU_FEATURE(dc, VIS1);
5382 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5383 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5384 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
5385 gen_store_gpr(dc, rd, cpu_dst);
5386 break;
5387 case 0x028: /* VIS I fcmpgt16 */
5388 CHECK_FPU_FEATURE(dc, VIS1);
5389 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5390 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5391 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
5392 gen_store_gpr(dc, rd, cpu_dst);
5393 break;
5394 case 0x02a: /* VIS I fcmpeq16 */
5395 CHECK_FPU_FEATURE(dc, VIS1);
5396 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5397 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5398 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
5399 gen_store_gpr(dc, rd, cpu_dst);
5400 break;
5401 case 0x02c: /* VIS I fcmpgt32 */
5402 CHECK_FPU_FEATURE(dc, VIS1);
5403 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5404 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5405 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
5406 gen_store_gpr(dc, rd, cpu_dst);
5407 break;
5408 case 0x02e: /* VIS I fcmpeq32 */
5409 CHECK_FPU_FEATURE(dc, VIS1);
5410 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5411 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
5412 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
5413 gen_store_gpr(dc, rd, cpu_dst);
5414 break;
5415 case 0x03b: /* VIS I fpack16 */
5416 CHECK_FPU_FEATURE(dc, VIS1);
5417 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5418 cpu_dst_32 = gen_dest_fpr_F(dc);
5419 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5420 gen_store_fpr_F(dc, rd, cpu_dst_32);
5421 break;
5422 case 0x03d: /* VIS I fpackfix */
5423 CHECK_FPU_FEATURE(dc, VIS1);
5424 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5425 cpu_dst_32 = gen_dest_fpr_F(dc);
5426 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5427 gen_store_fpr_F(dc, rd, cpu_dst_32);
5428 break;
5429 case 0x060: /* VIS I fzero */
5430 CHECK_FPU_FEATURE(dc, VIS1);
5431 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5432 tcg_gen_movi_i64(cpu_dst_64, 0);
5433 gen_store_fpr_D(dc, rd, cpu_dst_64);
5434 break;
5435 case 0x061: /* VIS I fzeros */
5436 CHECK_FPU_FEATURE(dc, VIS1);
5437 cpu_dst_32 = gen_dest_fpr_F(dc);
5438 tcg_gen_movi_i32(cpu_dst_32, 0);
5439 gen_store_fpr_F(dc, rd, cpu_dst_32);
5440 break;
5441 case 0x07e: /* VIS I fone */
5442 CHECK_FPU_FEATURE(dc, VIS1);
5443 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5444 tcg_gen_movi_i64(cpu_dst_64, -1);
5445 gen_store_fpr_D(dc, rd, cpu_dst_64);
5446 break;
5447 case 0x07f: /* VIS I fones */
5448 CHECK_FPU_FEATURE(dc, VIS1);
5449 cpu_dst_32 = gen_dest_fpr_F(dc);
5450 tcg_gen_movi_i32(cpu_dst_32, -1);
5451 gen_store_fpr_F(dc, rd, cpu_dst_32);
5452 break;
5453 case 0x080: /* VIS I shutdown */
5454 case 0x081: /* VIS II siam */
5455 /* XXX: not implemented */
5456 goto illegal_insn;
5457 default:
5458 goto illegal_insn;
5459 }
5460 #endif
5461 } else {
5462 goto illegal_insn; /* in decodetree */
5463 }
5464 }
5465 break;
5466 case 3: /* load/store instructions */
5467 goto illegal_insn; /* in decodetree */
5468 }
5469 advance_pc(dc);
5470 jmp_insn:
5471 return;
5472 illegal_insn:
5473 gen_exception(dc, TT_ILL_INSN);
5474 return;
5475 nfpu_insn:
5476 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5477 return;
5478 }
5479
5480 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5481 {
5482 DisasContext *dc = container_of(dcbase, DisasContext, base);
5483 CPUSPARCState *env = cpu_env(cs);
5484 int bound;
5485
5486 dc->pc = dc->base.pc_first;
5487 dc->npc = (target_ulong)dc->base.tb->cs_base;
5488 dc->cc_op = CC_OP_DYNAMIC;
5489 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5490 dc->def = &env->def;
5491 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5492 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5493 #ifndef CONFIG_USER_ONLY
5494 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5495 #endif
5496 #ifdef TARGET_SPARC64
5497 dc->fprs_dirty = 0;
5498 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5499 #ifndef CONFIG_USER_ONLY
5500 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5501 #endif
5502 #endif
5503 /*
5504 * If we reach a page boundary, stop generation so that the
5505 * PC of a TT_TFAULT exception is always in the correct page.
5506 */
5507 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5508 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5509 }
5510
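/*
 * Worked example of the bound above, assuming 4 KiB pages: with
 * pc_first == 0x1ff8, (pc_first | TARGET_PAGE_MASK) is -8, so bound
 * is 2 and at most two more 4-byte insns are translated before the
 * page boundary.
 */
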
5511 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5512 {
5513 }
5514
5515 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5516 {
5517 DisasContext *dc = container_of(dcbase, DisasContext, base);
5518 target_ulong npc = dc->npc;
5519
5520 if (npc & 3) {
5521 switch (npc) {
5522 case JUMP_PC:
5523 assert(dc->jump_pc[1] == dc->pc + 4);
5524 npc = dc->jump_pc[0] | JUMP_PC;
5525 break;
5526 case DYNAMIC_PC:
5527 case DYNAMIC_PC_LOOKUP:
5528 npc = DYNAMIC_PC;
5529 break;
5530 default:
5531 g_assert_not_reached();
5532 }
5533 }
5534 tcg_gen_insn_start(dc->pc, npc);
5535 }
5536
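/*
 * Reminder, following from how the constants are used above: real npc
 * values are 4-aligned, so the low two bits are free to encode the
 * symbolic states; JUMP_PC additionally carries the taken branch
 * target in the upper bits, which is why the static jump_pc[0] can be
 * or'ed in directly.
 */
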
5537 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5538 {
5539 DisasContext *dc = container_of(dcbase, DisasContext, base);
5540 CPUSPARCState *env = cpu_env(cs);
5541 unsigned int insn;
5542
5543 insn = translator_ldl(env, &dc->base, dc->pc);
5544 dc->base.pc_next += 4;
5545
5546 if (!decode(dc, insn)) {
5547 disas_sparc_legacy(dc, insn);
5548 }
5549
5550 if (dc->base.is_jmp == DISAS_NORETURN) {
5551 return;
5552 }
5553 if (dc->pc != dc->base.pc_next) {
5554 dc->base.is_jmp = DISAS_TOO_MANY;
5555 }
5556 }
5557
5558 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5559 {
5560 DisasContext *dc = container_of(dcbase, DisasContext, base);
5561 DisasDelayException *e, *e_next;
5562 bool may_lookup;
5563
5564 switch (dc->base.is_jmp) {
5565 case DISAS_NEXT:
5566 case DISAS_TOO_MANY:
5567 if (((dc->pc | dc->npc) & 3) == 0) {
5568 /* static PC and NPC: we can use direct chaining */
5569 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5570 break;
5571 }
5572
5573 may_lookup = true;
5574 if (dc->pc & 3) {
5575 switch (dc->pc) {
5576 case DYNAMIC_PC_LOOKUP:
5577 break;
5578 case DYNAMIC_PC:
5579 may_lookup = false;
5580 break;
5581 default:
5582 g_assert_not_reached();
5583 }
5584 } else {
5585 tcg_gen_movi_tl(cpu_pc, dc->pc);
5586 }
5587
5588 if (dc->npc & 3) {
5589 switch (dc->npc) {
5590 case JUMP_PC:
5591 gen_generic_branch(dc);
5592 break;
5593 case DYNAMIC_PC:
5594 may_lookup = false;
5595 break;
5596 case DYNAMIC_PC_LOOKUP:
5597 break;
5598 default:
5599 g_assert_not_reached();
5600 }
5601 } else {
5602 tcg_gen_movi_tl(cpu_npc, dc->npc);
5603 }
5604 if (may_lookup) {
5605 tcg_gen_lookup_and_goto_ptr();
5606 } else {
5607 tcg_gen_exit_tb(NULL, 0);
5608 }
5609 break;
5610
5611 case DISAS_NORETURN:
5612 break;
5613
5614 case DISAS_EXIT:
5615 /* Exit TB */
5616 save_state(dc);
5617 tcg_gen_exit_tb(NULL, 0);
5618 break;
5619
5620 default:
5621 g_assert_not_reached();
5622 }
5623
5624 for (e = dc->delay_excp_list; e ; e = e_next) {
5625 gen_set_label(e->lab);
5626
5627 tcg_gen_movi_tl(cpu_pc, e->pc);
5628 if (e->npc % 4 == 0) {
5629 tcg_gen_movi_tl(cpu_npc, e->npc);
5630 }
5631 gen_helper_raise_exception(tcg_env, e->excp);
5632
5633 e_next = e->next;
5634 g_free(e);
5635 }
5636 }
5637
5638 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5639 CPUState *cpu, FILE *logfile)
5640 {
5641 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5642 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5643 }
5644
5645 static const TranslatorOps sparc_tr_ops = {
5646 .init_disas_context = sparc_tr_init_disas_context,
5647 .tb_start = sparc_tr_tb_start,
5648 .insn_start = sparc_tr_insn_start,
5649 .translate_insn = sparc_tr_translate_insn,
5650 .tb_stop = sparc_tr_tb_stop,
5651 .disas_log = sparc_tr_disas_log,
5652 };
5653
5654 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5655 target_ulong pc, void *host_pc)
5656 {
5657 DisasContext dc = {};
5658
5659 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5660 }
5661
5662 void sparc_tcg_init(void)
5663 {
5664 static const char gregnames[32][4] = {
5665 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5666 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5667 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5668 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5669 };
5670 static const char fregnames[32][4] = {
5671 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5672 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5673 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5674 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5675 };
5676
5677 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5678 #ifdef TARGET_SPARC64
5679 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5680 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5681 #endif
5682 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5683 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5684 };
5685
5686 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5687 #ifdef TARGET_SPARC64
5688 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5689 #endif
5690 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5691 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5692 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5693 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5694 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5695 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5696 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5697 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5698 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5699 };
5700
5701 unsigned int i;
5702
5703 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5704 offsetof(CPUSPARCState, regwptr),
5705 "regwptr");
5706
5707 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5708 *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5709 }
5710
5711 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5712 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5713 }
5714
5715 cpu_regs[0] = NULL;
5716 for (i = 1; i < 8; ++i) {
5717 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5718 offsetof(CPUSPARCState, gregs[i]),
5719 gregnames[i]);
5720 }
5721
5722 for (i = 8; i < 32; ++i) {
5723 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5724 (i - 8) * sizeof(target_ulong),
5725 gregnames[i]);
5726 }
5727
5728 for (i = 0; i < TARGET_DPREGS; i++) {
5729 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5730 offsetof(CPUSPARCState, fpr[i]),
5731 fregnames[i]);
5732 }
5733 }
5734
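/*
 * Design note: %g0..%g7 live directly in CPUSPARCState, while the
 * windowed registers %o0..%i7 are reached through regwptr, so a window
 * change (SAVE/RESTORE) only has to retarget that pointer rather than
 * copy 24 registers.
 */
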
5735 void sparc_restore_state_to_opc(CPUState *cs,
5736 const TranslationBlock *tb,
5737 const uint64_t *data)
5738 {
5739 SPARCCPU *cpu = SPARC_CPU(cs);
5740 CPUSPARCState *env = &cpu->env;
5741 target_ulong pc = data[0];
5742 target_ulong npc = data[1];
5743
5744 env->pc = pc;
5745 if (npc == DYNAMIC_PC) {
5746 /* dynamic NPC: already stored */
5747 } else if (npc & JUMP_PC) {
5748 /* jump PC: use 'cond' and the jump targets of the translation */
5749 if (env->cond) {
5750 env->npc = npc & ~3;
5751 } else {
5752 env->npc = pc + 4;
5753 }
5754 } else {
5755 env->npc = npc;
5756 }
5757 }