target/sparc: Split cexc and ftt from env->fsr
[thirdparty/qemu.git] / target/sparc/translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef HELPER_H
37
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_flushw(E) qemu_build_not_reached()
47 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
48 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
49 # define gen_helper_restored(E) qemu_build_not_reached()
50 # define gen_helper_retry(E) qemu_build_not_reached()
51 # define gen_helper_saved(E) qemu_build_not_reached()
52 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
53 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
54 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
55 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
56 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
57 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
58 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
59 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
60 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
61 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
62 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
63 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
64 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
86 # define FSR_LDXFSR_MASK 0
87 # define FSR_LDXFSR_OLDMASK 0
88 # define MAXTL_MASK 0
89 #endif
90
91 /* Dynamic PC, must exit to main loop. */
92 #define DYNAMIC_PC 1
93 /* Dynamic PC, one of two values according to jump_pc[T2]. */
94 #define JUMP_PC 2
95 /* Dynamic PC, may lookup next TB. */
96 #define DYNAMIC_PC_LOOKUP 3
97
98 #define DISAS_EXIT DISAS_TARGET_0
99
100 /* global register indexes */
101 static TCGv_ptr cpu_regwptr;
102 static TCGv cpu_fsr, cpu_pc, cpu_npc;
103 static TCGv cpu_regs[32];
104 static TCGv cpu_y;
105 static TCGv cpu_tbr;
106 static TCGv cpu_cond;
107 static TCGv cpu_cc_N;
108 static TCGv cpu_cc_V;
109 static TCGv cpu_icc_Z;
110 static TCGv cpu_icc_C;
111 #ifdef TARGET_SPARC64
112 static TCGv cpu_xcc_Z;
113 static TCGv cpu_xcc_C;
114 static TCGv_i32 cpu_fprs;
115 static TCGv cpu_gsr;
116 #else
117 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
118 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
119 #endif
120
121 #ifdef TARGET_SPARC64
122 #define cpu_cc_Z cpu_xcc_Z
123 #define cpu_cc_C cpu_xcc_C
124 #else
125 #define cpu_cc_Z cpu_icc_Z
126 #define cpu_cc_C cpu_icc_C
127 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
128 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
129 #endif
130
131 /* Floating point registers */
132 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
133
134 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
135 #ifdef TARGET_SPARC64
136 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
137 # define env64_field_offsetof(X) env_field_offsetof(X)
138 #else
139 # define env32_field_offsetof(X) env_field_offsetof(X)
140 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
141 #endif
142
143 typedef struct DisasCompare {
144 TCGCond cond;
145 TCGv c1;
146 int c2;
147 } DisasCompare;
148
149 typedef struct DisasDelayException {
150 struct DisasDelayException *next;
151 TCGLabel *lab;
152 TCGv_i32 excp;
153 /* Saved state at parent insn. */
154 target_ulong pc;
155 target_ulong npc;
156 } DisasDelayException;
157
158 typedef struct DisasContext {
159 DisasContextBase base;
160 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
161 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
162
163 /* Used when JUMP_PC value is used. */
164 DisasCompare jump;
165 target_ulong jump_pc[2];
166
167 int mem_idx;
168 bool cpu_cond_live;
169 bool fpu_enabled;
170 bool address_mask_32bit;
171 #ifndef CONFIG_USER_ONLY
172 bool supervisor;
173 #ifdef TARGET_SPARC64
174 bool hypervisor;
175 #endif
176 #endif
177
178 sparc_def_t *def;
179 #ifdef TARGET_SPARC64
180 int fprs_dirty;
181 int asi;
182 #endif
183 DisasDelayException *delay_excp_list;
184 } DisasContext;
185
186 // This macro uses non-native bit order
187 #define GET_FIELD(X, FROM, TO) \
188 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
189
190 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
191 #define GET_FIELD_SP(X, FROM, TO) \
192 GET_FIELD(X, 31 - (TO), 31 - (FROM))
193
194 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
195 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
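/*
 * Worked example (editor's illustration, not in the original source):
 * for insn = 0x40000000 (a SPARC CALL opcode), GET_FIELD(insn, 0, 1)
 * computes (insn >> 30) & 3 = 1, i.e. bits 31:30 in the non-native
 * (manual, MSB-first) numbering, while GET_FIELD_SP(insn, 30, 31)
 * extracts the same two bits using LSB-first numbering.
 */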
196
197 #ifdef TARGET_SPARC64
198 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
199 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
200 #else
201 #define DFPREG(r) (r & 0x1e)
202 #define QFPREG(r) (r & 0x1c)
203 #endif
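/*
 * Editor's note: on SPARC64 the MSB of a double/quad FP register number
 * is encoded in bit 0 of the instruction field, so for example
 * DFPREG(3) = ((3 & 1) << 5) | (3 & 0x1e) = 34, selecting %f34.
 */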
204
205 #define UA2005_HTRAP_MASK 0xff
206 #define V8_TRAP_MASK 0x7f
207
208 #define IS_IMM (insn & (1<<13))
209
210 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
211 {
212 #if defined(TARGET_SPARC64)
213 int bit = (rd < 32) ? 1 : 2;
214 /* If we know we've already set this bit within the TB,
215 we can avoid setting it again. */
216 if (!(dc->fprs_dirty & bit)) {
217 dc->fprs_dirty |= bit;
218 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
219 }
220 #endif
221 }
222
223 /* floating point registers moves */
224 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
225 {
226 TCGv_i32 ret = tcg_temp_new_i32();
227 if (src & 1) {
228 tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
229 } else {
230 tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
231 }
232 return ret;
233 }
234
235 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
236 {
237 TCGv_i64 t = tcg_temp_new_i64();
238
239 tcg_gen_extu_i32_i64(t, v);
240 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
241 (dst & 1 ? 0 : 32), 32);
242 gen_update_fprs_dirty(dc, dst);
243 }
244
245 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
246 {
247 src = DFPREG(src);
248 return cpu_fpr[src / 2];
249 }
250
251 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
252 {
253 dst = DFPREG(dst);
254 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
255 gen_update_fprs_dirty(dc, dst);
256 }
257
258 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
259 {
260 return cpu_fpr[DFPREG(dst) / 2];
261 }
262
263 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
264 {
265 TCGv_i128 ret = tcg_temp_new_i128();
266
267 src = QFPREG(src);
268 tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
269 return ret;
270 }
271
272 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
273 {
274 dst = DFPREG(dst);
275 tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
276 gen_update_fprs_dirty(dc, dst);
277 }
278
279 /* moves */
280 #ifdef CONFIG_USER_ONLY
281 #define supervisor(dc) 0
282 #define hypervisor(dc) 0
283 #else
284 #ifdef TARGET_SPARC64
285 #define hypervisor(dc) (dc->hypervisor)
286 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
287 #else
288 #define supervisor(dc) (dc->supervisor)
289 #define hypervisor(dc) 0
290 #endif
291 #endif
292
293 #if !defined(TARGET_SPARC64)
294 # define AM_CHECK(dc) false
295 #elif defined(TARGET_ABI32)
296 # define AM_CHECK(dc) true
297 #elif defined(CONFIG_USER_ONLY)
298 # define AM_CHECK(dc) false
299 #else
300 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
301 #endif
302
303 static void gen_address_mask(DisasContext *dc, TCGv addr)
304 {
305 if (AM_CHECK(dc)) {
306 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
307 }
308 }
309
310 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
311 {
312 return AM_CHECK(dc) ? (uint32_t)addr : addr;
313 }
314
315 static TCGv gen_load_gpr(DisasContext *dc, int reg)
316 {
317 if (reg > 0) {
318 assert(reg < 32);
319 return cpu_regs[reg];
320 } else {
321 TCGv t = tcg_temp_new();
322 tcg_gen_movi_tl(t, 0);
323 return t;
324 }
325 }
326
327 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
328 {
329 if (reg > 0) {
330 assert(reg < 32);
331 tcg_gen_mov_tl(cpu_regs[reg], v);
332 }
333 }
334
335 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
336 {
337 if (reg > 0) {
338 assert(reg < 32);
339 return cpu_regs[reg];
340 } else {
341 return tcg_temp_new();
342 }
343 }
344
345 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
346 {
347 return translator_use_goto_tb(&s->base, pc) &&
348 translator_use_goto_tb(&s->base, npc);
349 }
350
351 static void gen_goto_tb(DisasContext *s, int tb_num,
352 target_ulong pc, target_ulong npc)
353 {
354 if (use_goto_tb(s, pc, npc)) {
355 /* jump to same page: we can use a direct jump */
356 tcg_gen_goto_tb(tb_num);
357 tcg_gen_movi_tl(cpu_pc, pc);
358 tcg_gen_movi_tl(cpu_npc, npc);
359 tcg_gen_exit_tb(s->base.tb, tb_num);
360 } else {
361 /* jump to another page: we can use an indirect jump */
362 tcg_gen_movi_tl(cpu_pc, pc);
363 tcg_gen_movi_tl(cpu_npc, npc);
364 tcg_gen_lookup_and_goto_ptr();
365 }
366 }
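/*
 * Editor's note: tcg_gen_goto_tb/tcg_gen_exit_tb chain the two TBs
 * directly when both pc and npc are acceptable per translator_use_goto_tb,
 * while tcg_gen_lookup_and_goto_ptr falls back to a hash lookup of the
 * next TB without bouncing through the main execution loop.
 */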
367
368 static TCGv gen_carry32(void)
369 {
370 if (TARGET_LONG_BITS == 64) {
371 TCGv t = tcg_temp_new();
372 tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
373 return t;
374 }
375 return cpu_icc_C;
376 }
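/*
 * Editor's note: on a 64-bit target the 32-bit (icc) carry lives in
 * bit 32 of cpu_icc_C (the carry-out position of a 32-bit addition
 * performed in a 64-bit register), so it must be extracted rather
 * than used directly.
 */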
377
378 static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
379 {
380 TCGv z = tcg_constant_tl(0);
381
382 if (cin) {
383 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
384 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
385 } else {
386 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
387 }
388 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
389 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
390 tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
391 if (TARGET_LONG_BITS == 64) {
392 /*
393 * Carry-in to bit 32 is result ^ src1 ^ src2.
394 * We already have the src xor term in Z, from computation of V.
395 */
396 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
397 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
398 }
399 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
400 tcg_gen_mov_tl(dst, cpu_cc_N);
401 }
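/*
 * Derivation sketch (editor's note): in a binary adder each result bit
 * satisfies result = src1 ^ src2 ^ carry_in, hence
 * carry_in = result ^ src1 ^ src2.  Bit 32 of that expression is the
 * carry out of bit 31, which is exactly the icc.C value stored above.
 */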
402
403 static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
404 {
405 gen_op_addcc_int(dst, src1, src2, NULL);
406 }
407
408 static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
409 {
410 TCGv t = tcg_temp_new();
411
412 /* Save the tag bits around modification of dst. */
413 tcg_gen_or_tl(t, src1, src2);
414
415 gen_op_addcc(dst, src1, src2);
416
417 /* Incorporate tag bits into icc.V */
418 tcg_gen_andi_tl(t, t, 3);
419 tcg_gen_neg_tl(t, t);
420 tcg_gen_ext32u_tl(t, t);
421 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
422 }
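/*
 * Editor's note: SPARC tagged arithmetic treats the low two bits of
 * each operand as a type tag.  A nonzero tag must force icc.V: the
 * negation above turns any nonzero tag into a 32-bit value with the
 * sign bit set, which the final or propagates into cpu_cc_V.
 */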
423
424 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
425 {
426 tcg_gen_add_tl(dst, src1, src2);
427 tcg_gen_add_tl(dst, dst, gen_carry32());
428 }
429
430 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
431 {
432 gen_op_addcc_int(dst, src1, src2, gen_carry32());
433 }
434
435 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
436 {
437 TCGv z = tcg_constant_tl(0);
438
439 if (cin) {
440 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
441 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
442 } else {
443 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
444 }
445 tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
446 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
447 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
448 tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
449 #ifdef TARGET_SPARC64
450 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
451 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
452 #endif
453 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
454 tcg_gen_mov_tl(dst, cpu_cc_N);
455 }
456
457 static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
458 {
459 gen_op_subcc_int(dst, src1, src2, NULL);
460 }
461
462 static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
463 {
464 TCGv t = tcg_temp_new();
465
466 /* Save the tag bits around modification of dst. */
467 tcg_gen_or_tl(t, src1, src2);
468
469 gen_op_subcc(dst, src1, src2);
470
471 /* Incorporate tag bits into icc.V */
472 tcg_gen_andi_tl(t, t, 3);
473 tcg_gen_neg_tl(t, t);
474 tcg_gen_ext32u_tl(t, t);
475 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
476 }
477
478 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
479 {
480 tcg_gen_sub_tl(dst, src1, src2);
481 tcg_gen_sub_tl(dst, dst, gen_carry32());
482 }
483
484 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
485 {
486 gen_op_subcc_int(dst, src1, src2, gen_carry32());
487 }
488
489 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
490 {
491 TCGv zero = tcg_constant_tl(0);
492 TCGv t_src1 = tcg_temp_new();
493 TCGv t_src2 = tcg_temp_new();
494 TCGv t0 = tcg_temp_new();
495
496 tcg_gen_ext32u_tl(t_src1, src1);
497 tcg_gen_ext32u_tl(t_src2, src2);
498
499 /*
500 * if (!(env->y & 1))
501 * src2 = 0;
502 */
503 tcg_gen_andi_tl(t0, cpu_y, 0x1);
504 tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
505
506 /*
507 * b2 = src1 & 1;
508 * y = (b2 << 31) | (y >> 1);
509 */
510 tcg_gen_extract_tl(t0, cpu_y, 1, 31);
511 tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
512
513 // b1 = N ^ V;
514 tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
515
516 /*
517 * src1 = (b1 << 31) | (src1 >> 1)
518 */
519 tcg_gen_andi_tl(t0, t0, 1u << 31);
520 tcg_gen_shri_tl(t_src1, t_src1, 1);
521 tcg_gen_or_tl(t_src1, t_src1, t0);
522
523 gen_op_addcc(dst, t_src1, t_src2);
524 }
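/*
 * Usage note (editor's addition): MULScc performs one step of a
 * shift-and-add multiply; roughly 32 consecutive MULScc instructions,
 * with the multiplier preloaded into %y, accumulate a 64-bit product
 * split between the destination register and %y.
 */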
525
526 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
527 {
528 #if TARGET_LONG_BITS == 32
529 if (sign_ext) {
530 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
531 } else {
532 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
533 }
534 #else
535 TCGv t0 = tcg_temp_new_i64();
536 TCGv t1 = tcg_temp_new_i64();
537
538 if (sign_ext) {
539 tcg_gen_ext32s_i64(t0, src1);
540 tcg_gen_ext32s_i64(t1, src2);
541 } else {
542 tcg_gen_ext32u_i64(t0, src1);
543 tcg_gen_ext32u_i64(t1, src2);
544 }
545
546 tcg_gen_mul_i64(dst, t0, t1);
547 tcg_gen_shri_i64(cpu_y, dst, 32);
548 #endif
549 }
550
551 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
552 {
553 /* zero-extend truncated operands before multiplication */
554 gen_op_multiply(dst, src1, src2, 0);
555 }
556
557 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
558 {
559 /* sign-extend truncated operands before multiplication */
560 gen_op_multiply(dst, src1, src2, 1);
561 }
562
563 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
564 {
565 #ifdef TARGET_SPARC64
566 gen_helper_sdiv(dst, tcg_env, src1, src2);
567 tcg_gen_ext32s_tl(dst, dst);
568 #else
569 TCGv_i64 t64 = tcg_temp_new_i64();
570 gen_helper_sdiv(t64, tcg_env, src1, src2);
571 tcg_gen_trunc_i64_tl(dst, t64);
572 #endif
573 }
574
575 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
576 {
577 TCGv_i64 t64;
578
579 #ifdef TARGET_SPARC64
580 t64 = cpu_cc_V;
581 #else
582 t64 = tcg_temp_new_i64();
583 #endif
584
585 gen_helper_udiv(t64, tcg_env, src1, src2);
586
587 #ifdef TARGET_SPARC64
588 tcg_gen_ext32u_tl(cpu_cc_N, t64);
589 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
590 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
591 tcg_gen_movi_tl(cpu_icc_C, 0);
592 #else
593 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
594 #endif
595 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
596 tcg_gen_movi_tl(cpu_cc_C, 0);
597 tcg_gen_mov_tl(dst, cpu_cc_N);
598 }
599
600 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
601 {
602 TCGv_i64 t64;
603
604 #ifdef TARGET_SPARC64
605 t64 = cpu_cc_V;
606 #else
607 t64 = tcg_temp_new_i64();
608 #endif
609
610 gen_helper_sdiv(t64, tcg_env, src1, src2);
611
612 #ifdef TARGET_SPARC64
613 tcg_gen_ext32s_tl(cpu_cc_N, t64);
614 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
615 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
616 tcg_gen_movi_tl(cpu_icc_C, 0);
617 #else
618 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
619 #endif
620 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
621 tcg_gen_movi_tl(cpu_cc_C, 0);
622 tcg_gen_mov_tl(dst, cpu_cc_N);
623 }
624
625 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
626 {
627 gen_helper_taddcctv(dst, tcg_env, src1, src2);
628 }
629
630 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
631 {
632 gen_helper_tsubcctv(dst, tcg_env, src1, src2);
633 }
634
635 static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
636 {
637 tcg_gen_ctpop_tl(dst, src2);
638 }
639
640 #ifndef TARGET_SPARC64
641 static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
642 {
643 g_assert_not_reached();
644 }
645 #endif
646
647 static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
648 {
649 gen_helper_array8(dst, src1, src2);
650 tcg_gen_shli_tl(dst, dst, 1);
651 }
652
653 static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
654 {
655 gen_helper_array8(dst, src1, src2);
656 tcg_gen_shli_tl(dst, dst, 2);
657 }
658
659 static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
660 {
661 #ifdef TARGET_SPARC64
662 gen_helper_fpack16(dst, cpu_gsr, src);
663 #else
664 g_assert_not_reached();
665 #endif
666 }
667
668 static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
669 {
670 #ifdef TARGET_SPARC64
671 gen_helper_fpackfix(dst, cpu_gsr, src);
672 #else
673 g_assert_not_reached();
674 #endif
675 }
676
677 static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
678 {
679 #ifdef TARGET_SPARC64
680 gen_helper_fpack32(dst, cpu_gsr, src1, src2);
681 #else
682 g_assert_not_reached();
683 #endif
684 }
685
686 static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
687 {
688 #ifdef TARGET_SPARC64
689 TCGv t1, t2, shift;
690
691 t1 = tcg_temp_new();
692 t2 = tcg_temp_new();
693 shift = tcg_temp_new();
694
695 tcg_gen_andi_tl(shift, cpu_gsr, 7);
696 tcg_gen_shli_tl(shift, shift, 3);
697 tcg_gen_shl_tl(t1, s1, shift);
698
699 /*
700 * A shift of 64 does not produce 0 in TCG. Divide this into a
701 * shift of (up to 63) followed by a constant shift of 1.
702 */
703 tcg_gen_xori_tl(shift, shift, 63);
704 tcg_gen_shr_tl(t2, s2, shift);
705 tcg_gen_shri_tl(t2, t2, 1);
706
707 tcg_gen_or_tl(dst, t1, t2);
708 #else
709 g_assert_not_reached();
710 #endif
711 }
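/*
 * Worked example (editor's illustration): with GSR.align == 0 the left
 * shift is 0, and the decomposed right shift is (0 ^ 63) + 1 = 64, so
 * s2 contributes nothing and dst == s1, matching the architected
 * faligndata result without relying on an undefined 64-bit TCG shift.
 */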
712
713 static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
714 {
715 #ifdef TARGET_SPARC64
716 gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
717 #else
718 g_assert_not_reached();
719 #endif
720 }
721
722 // 1
723 static void gen_op_eval_ba(TCGv dst)
724 {
725 tcg_gen_movi_tl(dst, 1);
726 }
727
728 // 0
729 static void gen_op_eval_bn(TCGv dst)
730 {
731 tcg_gen_movi_tl(dst, 0);
732 }
733
734 /*
735 FSR bit field FCC1 | FCC0:
736 0 =
737 1 <
738 2 >
739 3 unordered
740 */
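/*
 * Example (editor's note): if an fcmps finds its operands unordered,
 * the selected fcc field holds 3 (FCC1 = FCC0 = 1); gen_op_eval_fbu
 * below then computes FCC0 & FCC1 = 1, so an FBU branch is taken.
 */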
741 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
742 unsigned int fcc_offset)
743 {
744 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
745 tcg_gen_andi_tl(reg, reg, 0x1);
746 }
747
748 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
749 {
750 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
751 tcg_gen_andi_tl(reg, reg, 0x1);
752 }
753
754 // !0: FCC0 | FCC1
755 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
756 {
757 TCGv t0 = tcg_temp_new();
758 gen_mov_reg_FCC0(dst, src, fcc_offset);
759 gen_mov_reg_FCC1(t0, src, fcc_offset);
760 tcg_gen_or_tl(dst, dst, t0);
761 }
762
763 // 1 or 2: FCC0 ^ FCC1
764 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
765 {
766 TCGv t0 = tcg_temp_new();
767 gen_mov_reg_FCC0(dst, src, fcc_offset);
768 gen_mov_reg_FCC1(t0, src, fcc_offset);
769 tcg_gen_xor_tl(dst, dst, t0);
770 }
771
772 // 1 or 3: FCC0
773 static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
774 {
775 gen_mov_reg_FCC0(dst, src, fcc_offset);
776 }
777
778 // 1: FCC0 & !FCC1
779 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
780 {
781 TCGv t0 = tcg_temp_new();
782 gen_mov_reg_FCC0(dst, src, fcc_offset);
783 gen_mov_reg_FCC1(t0, src, fcc_offset);
784 tcg_gen_andc_tl(dst, dst, t0);
785 }
786
787 // 2 or 3: FCC1
788 static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
789 {
790 gen_mov_reg_FCC1(dst, src, fcc_offset);
791 }
792
793 // 2: !FCC0 & FCC1
794 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
795 {
796 TCGv t0 = tcg_temp_new();
797 gen_mov_reg_FCC0(dst, src, fcc_offset);
798 gen_mov_reg_FCC1(t0, src, fcc_offset);
799 tcg_gen_andc_tl(dst, t0, dst);
800 }
801
802 // 3: FCC0 & FCC1
803 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
804 {
805 TCGv t0 = tcg_temp_new();
806 gen_mov_reg_FCC0(dst, src, fcc_offset);
807 gen_mov_reg_FCC1(t0, src, fcc_offset);
808 tcg_gen_and_tl(dst, dst, t0);
809 }
810
811 // 0: !(FCC0 | FCC1)
812 static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
813 {
814 TCGv t0 = tcg_temp_new();
815 gen_mov_reg_FCC0(dst, src, fcc_offset);
816 gen_mov_reg_FCC1(t0, src, fcc_offset);
817 tcg_gen_or_tl(dst, dst, t0);
818 tcg_gen_xori_tl(dst, dst, 0x1);
819 }
820
821 // 0 or 3: !(FCC0 ^ FCC1)
822 static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
823 {
824 TCGv t0 = tcg_temp_new();
825 gen_mov_reg_FCC0(dst, src, fcc_offset);
826 gen_mov_reg_FCC1(t0, src, fcc_offset);
827 tcg_gen_xor_tl(dst, dst, t0);
828 tcg_gen_xori_tl(dst, dst, 0x1);
829 }
830
831 // 0 or 2: !FCC0
832 static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
833 {
834 gen_mov_reg_FCC0(dst, src, fcc_offset);
835 tcg_gen_xori_tl(dst, dst, 0x1);
836 }
837
838 // !1: !(FCC0 & !FCC1)
839 static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
840 {
841 TCGv t0 = tcg_temp_new();
842 gen_mov_reg_FCC0(dst, src, fcc_offset);
843 gen_mov_reg_FCC1(t0, src, fcc_offset);
844 tcg_gen_andc_tl(dst, dst, t0);
845 tcg_gen_xori_tl(dst, dst, 0x1);
846 }
847
848 // 0 or 1: !FCC1
849 static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
850 {
851 gen_mov_reg_FCC1(dst, src, fcc_offset);
852 tcg_gen_xori_tl(dst, dst, 0x1);
853 }
854
855 // !2: !(!FCC0 & FCC1)
856 static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
857 {
858 TCGv t0 = tcg_temp_new();
859 gen_mov_reg_FCC0(dst, src, fcc_offset);
860 gen_mov_reg_FCC1(t0, src, fcc_offset);
861 tcg_gen_andc_tl(dst, t0, dst);
862 tcg_gen_xori_tl(dst, dst, 0x1);
863 }
864
865 // !3: !(FCC0 & FCC1)
866 static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
867 {
868 TCGv t0 = tcg_temp_new();
869 gen_mov_reg_FCC0(dst, src, fcc_offset);
870 gen_mov_reg_FCC1(t0, src, fcc_offset);
871 tcg_gen_and_tl(dst, dst, t0);
872 tcg_gen_xori_tl(dst, dst, 0x1);
873 }
874
875 static void finishing_insn(DisasContext *dc)
876 {
877 /*
878 * From here, there is no future path through an unwinding exception.
879 * If the current insn cannot raise an exception, the computation of
880 * cpu_cond may be elided.
881 */
882 if (dc->cpu_cond_live) {
883 tcg_gen_discard_tl(cpu_cond);
884 dc->cpu_cond_live = false;
885 }
886 }
887
888 static void gen_generic_branch(DisasContext *dc)
889 {
890 TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
891 TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
892 TCGv c2 = tcg_constant_tl(dc->jump.c2);
893
894 tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
895 }
896
897 /* Call this function before using the condition register, as it may
898 have been set for a jump. */
899 static void flush_cond(DisasContext *dc)
900 {
901 if (dc->npc == JUMP_PC) {
902 gen_generic_branch(dc);
903 dc->npc = DYNAMIC_PC_LOOKUP;
904 }
905 }
906
907 static void save_npc(DisasContext *dc)
908 {
909 if (dc->npc & 3) {
910 switch (dc->npc) {
911 case JUMP_PC:
912 gen_generic_branch(dc);
913 dc->npc = DYNAMIC_PC_LOOKUP;
914 break;
915 case DYNAMIC_PC:
916 case DYNAMIC_PC_LOOKUP:
917 break;
918 default:
919 g_assert_not_reached();
920 }
921 } else {
922 tcg_gen_movi_tl(cpu_npc, dc->npc);
923 }
924 }
925
926 static void save_state(DisasContext *dc)
927 {
928 tcg_gen_movi_tl(cpu_pc, dc->pc);
929 save_npc(dc);
930 }
931
932 static void gen_exception(DisasContext *dc, int which)
933 {
934 finishing_insn(dc);
935 save_state(dc);
936 gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
937 dc->base.is_jmp = DISAS_NORETURN;
938 }
939
940 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
941 {
942 DisasDelayException *e = g_new0(DisasDelayException, 1);
943
944 e->next = dc->delay_excp_list;
945 dc->delay_excp_list = e;
946
947 e->lab = gen_new_label();
948 e->excp = excp;
949 e->pc = dc->pc;
950 /* Caller must have used flush_cond before branch. */
951 assert(e->npc != JUMP_PC);
952 e->npc = dc->npc;
953
954 return e->lab;
955 }
956
957 static TCGLabel *delay_exception(DisasContext *dc, int excp)
958 {
959 return delay_exceptionv(dc, tcg_constant_i32(excp));
960 }
961
962 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
963 {
964 TCGv t = tcg_temp_new();
965 TCGLabel *lab;
966
967 tcg_gen_andi_tl(t, addr, mask);
968
969 flush_cond(dc);
970 lab = delay_exception(dc, TT_UNALIGNED);
971 tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
972 }
973
974 static void gen_mov_pc_npc(DisasContext *dc)
975 {
976 finishing_insn(dc);
977
978 if (dc->npc & 3) {
979 switch (dc->npc) {
980 case JUMP_PC:
981 gen_generic_branch(dc);
982 tcg_gen_mov_tl(cpu_pc, cpu_npc);
983 dc->pc = DYNAMIC_PC_LOOKUP;
984 break;
985 case DYNAMIC_PC:
986 case DYNAMIC_PC_LOOKUP:
987 tcg_gen_mov_tl(cpu_pc, cpu_npc);
988 dc->pc = dc->npc;
989 break;
990 default:
991 g_assert_not_reached();
992 }
993 } else {
994 dc->pc = dc->npc;
995 }
996 }
997
998 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
999 DisasContext *dc)
1000 {
1001 TCGv t1;
1002
1003 cmp->c1 = t1 = tcg_temp_new();
1004 cmp->c2 = 0;
1005
1006 switch (cond & 7) {
1007 case 0x0: /* never */
1008 cmp->cond = TCG_COND_NEVER;
1009 cmp->c1 = tcg_constant_tl(0);
1010 break;
1011
1012 case 0x1: /* eq: Z */
1013 cmp->cond = TCG_COND_EQ;
1014 if (TARGET_LONG_BITS == 32 || xcc) {
1015 tcg_gen_mov_tl(t1, cpu_cc_Z);
1016 } else {
1017 tcg_gen_ext32u_tl(t1, cpu_icc_Z);
1018 }
1019 break;
1020
1021 case 0x2: /* le: Z | (N ^ V) */
1022 /*
1023 * Simplify:
1024 * cc_Z || (N ^ V) < 0 NE
1025 * cc_Z && !((N ^ V) < 0) EQ
1026 * cc_Z & ~((N ^ V) >> TLB) EQ
1027 */
1028 cmp->cond = TCG_COND_EQ;
1029 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1030 tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
1031 tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
1032 if (TARGET_LONG_BITS == 64 && !xcc) {
1033 tcg_gen_ext32u_tl(t1, t1);
1034 }
1035 break;
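/*
 * Derivation sketch (editor's note): "le" is taken when Z is set
 * (cc_Z == 0) or N ^ V is negative.  Replicating the sign of N ^ V
 * across the word and and-not-ing it into cc_Z folds both tests
 * into a single compare of t1 against zero.
 */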
1036
1037 case 0x3: /* lt: N ^ V */
1038 cmp->cond = TCG_COND_LT;
1039 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1040 if (TARGET_LONG_BITS == 64 && !xcc) {
1041 tcg_gen_ext32s_tl(t1, t1);
1042 }
1043 break;
1044
1045 case 0x4: /* leu: Z | C */
1046 /*
1047 * Simplify:
1048 * cc_Z == 0 || cc_C != 0 NE
1049 * cc_Z != 0 && cc_C == 0 EQ
1050 * cc_Z & (cc_C ? 0 : -1) EQ
1051 * cc_Z & (cc_C - 1) EQ
1052 */
1053 cmp->cond = TCG_COND_EQ;
1054 if (TARGET_LONG_BITS == 32 || xcc) {
1055 tcg_gen_subi_tl(t1, cpu_cc_C, 1);
1056 tcg_gen_and_tl(t1, t1, cpu_cc_Z);
1057 } else {
1058 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1059 tcg_gen_subi_tl(t1, t1, 1);
1060 tcg_gen_and_tl(t1, t1, cpu_icc_Z);
1061 tcg_gen_ext32u_tl(t1, t1);
1062 }
1063 break;
1064
1065 case 0x5: /* ltu: C */
1066 cmp->cond = TCG_COND_NE;
1067 if (TARGET_LONG_BITS == 32 || xcc) {
1068 tcg_gen_mov_tl(t1, cpu_cc_C);
1069 } else {
1070 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1071 }
1072 break;
1073
1074 case 0x6: /* neg: N */
1075 cmp->cond = TCG_COND_LT;
1076 if (TARGET_LONG_BITS == 32 || xcc) {
1077 tcg_gen_mov_tl(t1, cpu_cc_N);
1078 } else {
1079 tcg_gen_ext32s_tl(t1, cpu_cc_N);
1080 }
1081 break;
1082
1083 case 0x7: /* vs: V */
1084 cmp->cond = TCG_COND_LT;
1085 if (TARGET_LONG_BITS == 32 || xcc) {
1086 tcg_gen_mov_tl(t1, cpu_cc_V);
1087 } else {
1088 tcg_gen_ext32s_tl(t1, cpu_cc_V);
1089 }
1090 break;
1091 }
1092 if (cond & 8) {
1093 cmp->cond = tcg_invert_cond(cmp->cond);
1094 }
1095 }
1096
1097 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1098 {
1099 unsigned int offset;
1100 TCGv r_dst;
1101
1102 /* For now we still generate a straight boolean result. */
1103 cmp->cond = TCG_COND_NE;
1104 cmp->c1 = r_dst = tcg_temp_new();
1105 cmp->c2 = 0;
1106
1107 switch (cc) {
1108 default:
1109 case 0x0:
1110 offset = 0;
1111 break;
1112 case 0x1:
1113 offset = 32 - 10;
1114 break;
1115 case 0x2:
1116 offset = 34 - 10;
1117 break;
1118 case 0x3:
1119 offset = 36 - 10;
1120 break;
1121 }
1122
1123 switch (cond) {
1124 case 0x0:
1125 gen_op_eval_bn(r_dst);
1126 break;
1127 case 0x1:
1128 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1129 break;
1130 case 0x2:
1131 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1132 break;
1133 case 0x3:
1134 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1135 break;
1136 case 0x4:
1137 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1138 break;
1139 case 0x5:
1140 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1141 break;
1142 case 0x6:
1143 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1144 break;
1145 case 0x7:
1146 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1147 break;
1148 case 0x8:
1149 gen_op_eval_ba(r_dst);
1150 break;
1151 case 0x9:
1152 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1153 break;
1154 case 0xa:
1155 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1156 break;
1157 case 0xb:
1158 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1159 break;
1160 case 0xc:
1161 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1162 break;
1163 case 0xd:
1164 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1165 break;
1166 case 0xe:
1167 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1168 break;
1169 case 0xf:
1170 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1171 break;
1172 }
1173 }
1174
1175 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1176 {
1177 static const TCGCond cond_reg[4] = {
1178 TCG_COND_NEVER, /* reserved */
1179 TCG_COND_EQ,
1180 TCG_COND_LE,
1181 TCG_COND_LT,
1182 };
1183 TCGCond tcond;
1184
1185 if ((cond & 3) == 0) {
1186 return false;
1187 }
1188 tcond = cond_reg[cond & 3];
1189 if (cond & 4) {
1190 tcond = tcg_invert_cond(tcond);
1191 }
1192
1193 cmp->cond = tcond;
1194 cmp->c1 = tcg_temp_new();
1195 cmp->c2 = 0;
1196 tcg_gen_mov_tl(cmp->c1, r_src);
1197 return true;
1198 }
1199
1200 static void gen_op_clear_ieee_excp_and_FTT(void)
1201 {
1202 tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
1203 offsetof(CPUSPARCState, fsr_cexc_ftt));
1204 }
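/*
 * Editor's note: as this commit's subject says, the current exception
 * (cexc) and trap type (ftt) fields now live in a separate
 * fsr_cexc_ftt word instead of env->fsr, so clearing them is a single
 * 32-bit store rather than a read-modify-write of the whole FSR.
 */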
1205
1206 static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
1207 {
1208 gen_op_clear_ieee_excp_and_FTT();
1209 tcg_gen_mov_i32(dst, src);
1210 }
1211
1212 static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
1213 {
1214 gen_op_clear_ieee_excp_and_FTT();
1215 tcg_gen_xori_i32(dst, src, 1u << 31);
1216 }
1217
1218 static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
1219 {
1220 gen_op_clear_ieee_excp_and_FTT();
1221 tcg_gen_andi_i32(dst, src, ~(1u << 31));
1222 }
1223
1224 static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
1225 {
1226 gen_op_clear_ieee_excp_and_FTT();
1227 tcg_gen_mov_i64(dst, src);
1228 }
1229
1230 static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
1231 {
1232 gen_op_clear_ieee_excp_and_FTT();
1233 tcg_gen_xori_i64(dst, src, 1ull << 63);
1234 }
1235
1236 static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
1237 {
1238 gen_op_clear_ieee_excp_and_FTT();
1239 tcg_gen_andi_i64(dst, src, ~(1ull << 63));
1240 }
1241
1242 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1243 {
1244 TCGv_i64 l = tcg_temp_new_i64();
1245 TCGv_i64 h = tcg_temp_new_i64();
1246
1247 tcg_gen_extr_i128_i64(l, h, src);
1248 tcg_gen_xori_i64(h, h, 1ull << 63);
1249 tcg_gen_concat_i64_i128(dst, l, h);
1250 }
1251
1252 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1253 {
1254 TCGv_i64 l = tcg_temp_new_i64();
1255 TCGv_i64 h = tcg_temp_new_i64();
1256
1257 tcg_gen_extr_i128_i64(l, h, src);
1258 tcg_gen_andi_i64(h, h, ~(1ull << 63));
1259 tcg_gen_concat_i64_i128(dst, l, h);
1260 }
1261
1262 #ifdef TARGET_SPARC64
1263 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1264 {
1265 switch (fccno) {
1266 case 0:
1267 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1268 break;
1269 case 1:
1270 gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1271 break;
1272 case 2:
1273 gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1274 break;
1275 case 3:
1276 gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1277 break;
1278 }
1279 }
1280
1281 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1282 {
1283 switch (fccno) {
1284 case 0:
1285 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1286 break;
1287 case 1:
1288 gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1289 break;
1290 case 2:
1291 gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1292 break;
1293 case 3:
1294 gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1295 break;
1296 }
1297 }
1298
1299 static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1300 {
1301 switch (fccno) {
1302 case 0:
1303 gen_helper_fcmpq(cpu_fsr, tcg_env, r_rs1, r_rs2);
1304 break;
1305 case 1:
1306 gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1307 break;
1308 case 2:
1309 gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1310 break;
1311 case 3:
1312 gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1313 break;
1314 }
1315 }
1316
1317 static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1318 {
1319 switch (fccno) {
1320 case 0:
1321 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1322 break;
1323 case 1:
1324 gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1325 break;
1326 case 2:
1327 gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1328 break;
1329 case 3:
1330 gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1331 break;
1332 }
1333 }
1334
1335 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1336 {
1337 switch (fccno) {
1338 case 0:
1339 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1340 break;
1341 case 1:
1342 gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1343 break;
1344 case 2:
1345 gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1346 break;
1347 case 3:
1348 gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1349 break;
1350 }
1351 }
1352
1353 static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1354 {
1355 switch (fccno) {
1356 case 0:
1357 gen_helper_fcmpeq(cpu_fsr, tcg_env, r_rs1, r_rs2);
1358 break;
1359 case 1:
1360 gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1361 break;
1362 case 2:
1363 gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1364 break;
1365 case 3:
1366 gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1367 break;
1368 }
1369 }
1370
1371 #else
1372
1373 static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1374 {
1375 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1376 }
1377
1378 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1379 {
1380 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1381 }
1382
1383 static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1384 {
1385 gen_helper_fcmpq(cpu_fsr, tcg_env, r_rs1, r_rs2);
1386 }
1387
1388 static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1389 {
1390 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1391 }
1392
1393 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1394 {
1395 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1396 }
1397
1398 static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1399 {
1400 gen_helper_fcmpeq(cpu_fsr, tcg_env, r_rs1, r_rs2);
1401 }
1402 #endif
1403
1404 static void gen_op_fpexception_im(DisasContext *dc, int ftt)
1405 {
1406 /*
1407 * CEXC is only set when successfully completing an FPop,
1408 * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
1409 * Thus we can simply store FTT into this field.
1410 */
1411 tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
1412 offsetof(CPUSPARCState, fsr_cexc_ftt));
1413 gen_exception(dc, TT_FP_EXCP);
1414 }
1415
1416 static int gen_trap_ifnofpu(DisasContext *dc)
1417 {
1418 #if !defined(CONFIG_USER_ONLY)
1419 if (!dc->fpu_enabled) {
1420 gen_exception(dc, TT_NFPU_INSN);
1421 return 1;
1422 }
1423 #endif
1424 return 0;
1425 }
1426
1427 /* asi moves */
1428 typedef enum {
1429 GET_ASI_HELPER,
1430 GET_ASI_EXCP,
1431 GET_ASI_DIRECT,
1432 GET_ASI_DTWINX,
1433 GET_ASI_BLOCK,
1434 GET_ASI_SHORT,
1435 GET_ASI_BCOPY,
1436 GET_ASI_BFILL,
1437 } ASIType;
1438
1439 typedef struct {
1440 ASIType type;
1441 int asi;
1442 int mem_idx;
1443 MemOp memop;
1444 } DisasASI;
1445
1446 /*
1447 * Build DisasASI.
1448 * For asi == -1, treat as non-asi.
1449 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1450 */
1451 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1452 {
1453 ASIType type = GET_ASI_HELPER;
1454 int mem_idx = dc->mem_idx;
1455
1456 if (asi == -1) {
1457 /* Artificial "non-asi" case. */
1458 type = GET_ASI_DIRECT;
1459 goto done;
1460 }
1461
1462 #ifndef TARGET_SPARC64
1463 /* Before v9, all asis are immediate and privileged. */
1464 if (asi < 0) {
1465 gen_exception(dc, TT_ILL_INSN);
1466 type = GET_ASI_EXCP;
1467 } else if (supervisor(dc)
1468 /* Note that LEON accepts ASI_USERDATA in user mode, for
1469 use with CASA. Also note that previous versions of
1470 QEMU allowed (and old versions of gcc emitted) ASI_P
1471 for LEON, which is incorrect. */
1472 || (asi == ASI_USERDATA
1473 && (dc->def->features & CPU_FEATURE_CASA))) {
1474 switch (asi) {
1475 case ASI_USERDATA: /* User data access */
1476 mem_idx = MMU_USER_IDX;
1477 type = GET_ASI_DIRECT;
1478 break;
1479 case ASI_KERNELDATA: /* Supervisor data access */
1480 mem_idx = MMU_KERNEL_IDX;
1481 type = GET_ASI_DIRECT;
1482 break;
1483 case ASI_M_BYPASS: /* MMU passthrough */
1484 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1485 mem_idx = MMU_PHYS_IDX;
1486 type = GET_ASI_DIRECT;
1487 break;
1488 case ASI_M_BCOPY: /* Block copy, sta access */
1489 mem_idx = MMU_KERNEL_IDX;
1490 type = GET_ASI_BCOPY;
1491 break;
1492 case ASI_M_BFILL: /* Block fill, stda access */
1493 mem_idx = MMU_KERNEL_IDX;
1494 type = GET_ASI_BFILL;
1495 break;
1496 }
1497
1498 /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
1499 * permissions check in get_physical_address(..).
1500 */
1501 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1502 } else {
1503 gen_exception(dc, TT_PRIV_INSN);
1504 type = GET_ASI_EXCP;
1505 }
1506 #else
1507 if (asi < 0) {
1508 asi = dc->asi;
1509 }
1510 /* With v9, all asis below 0x80 are privileged. */
1511 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1512 down that bit into DisasContext. For the moment that's ok,
1513 since the direct implementations below don't have any ASIs
1514 in the restricted [0x30, 0x7f] range, and the check will be
1515 done properly in the helper. */
1516 if (!supervisor(dc) && asi < 0x80) {
1517 gen_exception(dc, TT_PRIV_ACT);
1518 type = GET_ASI_EXCP;
1519 } else {
1520 switch (asi) {
1521 case ASI_REAL: /* Bypass */
1522 case ASI_REAL_IO: /* Bypass, non-cacheable */
1523 case ASI_REAL_L: /* Bypass LE */
1524 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1525 case ASI_TWINX_REAL: /* Real address, twinx */
1526 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1527 case ASI_QUAD_LDD_PHYS:
1528 case ASI_QUAD_LDD_PHYS_L:
1529 mem_idx = MMU_PHYS_IDX;
1530 break;
1531 case ASI_N: /* Nucleus */
1532 case ASI_NL: /* Nucleus LE */
1533 case ASI_TWINX_N:
1534 case ASI_TWINX_NL:
1535 case ASI_NUCLEUS_QUAD_LDD:
1536 case ASI_NUCLEUS_QUAD_LDD_L:
1537 if (hypervisor(dc)) {
1538 mem_idx = MMU_PHYS_IDX;
1539 } else {
1540 mem_idx = MMU_NUCLEUS_IDX;
1541 }
1542 break;
1543 case ASI_AIUP: /* As if user primary */
1544 case ASI_AIUPL: /* As if user primary LE */
1545 case ASI_TWINX_AIUP:
1546 case ASI_TWINX_AIUP_L:
1547 case ASI_BLK_AIUP_4V:
1548 case ASI_BLK_AIUP_L_4V:
1549 case ASI_BLK_AIUP:
1550 case ASI_BLK_AIUPL:
1551 mem_idx = MMU_USER_IDX;
1552 break;
1553 case ASI_AIUS: /* As if user secondary */
1554 case ASI_AIUSL: /* As if user secondary LE */
1555 case ASI_TWINX_AIUS:
1556 case ASI_TWINX_AIUS_L:
1557 case ASI_BLK_AIUS_4V:
1558 case ASI_BLK_AIUS_L_4V:
1559 case ASI_BLK_AIUS:
1560 case ASI_BLK_AIUSL:
1561 mem_idx = MMU_USER_SECONDARY_IDX;
1562 break;
1563 case ASI_S: /* Secondary */
1564 case ASI_SL: /* Secondary LE */
1565 case ASI_TWINX_S:
1566 case ASI_TWINX_SL:
1567 case ASI_BLK_COMMIT_S:
1568 case ASI_BLK_S:
1569 case ASI_BLK_SL:
1570 case ASI_FL8_S:
1571 case ASI_FL8_SL:
1572 case ASI_FL16_S:
1573 case ASI_FL16_SL:
1574 if (mem_idx == MMU_USER_IDX) {
1575 mem_idx = MMU_USER_SECONDARY_IDX;
1576 } else if (mem_idx == MMU_KERNEL_IDX) {
1577 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1578 }
1579 break;
1580 case ASI_P: /* Primary */
1581 case ASI_PL: /* Primary LE */
1582 case ASI_TWINX_P:
1583 case ASI_TWINX_PL:
1584 case ASI_BLK_COMMIT_P:
1585 case ASI_BLK_P:
1586 case ASI_BLK_PL:
1587 case ASI_FL8_P:
1588 case ASI_FL8_PL:
1589 case ASI_FL16_P:
1590 case ASI_FL16_PL:
1591 break;
1592 }
1593 switch (asi) {
1594 case ASI_REAL:
1595 case ASI_REAL_IO:
1596 case ASI_REAL_L:
1597 case ASI_REAL_IO_L:
1598 case ASI_N:
1599 case ASI_NL:
1600 case ASI_AIUP:
1601 case ASI_AIUPL:
1602 case ASI_AIUS:
1603 case ASI_AIUSL:
1604 case ASI_S:
1605 case ASI_SL:
1606 case ASI_P:
1607 case ASI_PL:
1608 type = GET_ASI_DIRECT;
1609 break;
1610 case ASI_TWINX_REAL:
1611 case ASI_TWINX_REAL_L:
1612 case ASI_TWINX_N:
1613 case ASI_TWINX_NL:
1614 case ASI_TWINX_AIUP:
1615 case ASI_TWINX_AIUP_L:
1616 case ASI_TWINX_AIUS:
1617 case ASI_TWINX_AIUS_L:
1618 case ASI_TWINX_P:
1619 case ASI_TWINX_PL:
1620 case ASI_TWINX_S:
1621 case ASI_TWINX_SL:
1622 case ASI_QUAD_LDD_PHYS:
1623 case ASI_QUAD_LDD_PHYS_L:
1624 case ASI_NUCLEUS_QUAD_LDD:
1625 case ASI_NUCLEUS_QUAD_LDD_L:
1626 type = GET_ASI_DTWINX;
1627 break;
1628 case ASI_BLK_COMMIT_P:
1629 case ASI_BLK_COMMIT_S:
1630 case ASI_BLK_AIUP_4V:
1631 case ASI_BLK_AIUP_L_4V:
1632 case ASI_BLK_AIUP:
1633 case ASI_BLK_AIUPL:
1634 case ASI_BLK_AIUS_4V:
1635 case ASI_BLK_AIUS_L_4V:
1636 case ASI_BLK_AIUS:
1637 case ASI_BLK_AIUSL:
1638 case ASI_BLK_S:
1639 case ASI_BLK_SL:
1640 case ASI_BLK_P:
1641 case ASI_BLK_PL:
1642 type = GET_ASI_BLOCK;
1643 break;
1644 case ASI_FL8_S:
1645 case ASI_FL8_SL:
1646 case ASI_FL8_P:
1647 case ASI_FL8_PL:
1648 memop = MO_UB;
1649 type = GET_ASI_SHORT;
1650 break;
1651 case ASI_FL16_S:
1652 case ASI_FL16_SL:
1653 case ASI_FL16_P:
1654 case ASI_FL16_PL:
1655 memop = MO_TEUW;
1656 type = GET_ASI_SHORT;
1657 break;
1658 }
1659 /* The little-endian asis all have bit 3 set. */
1660 if (asi & 8) {
1661 memop ^= MO_BSWAP;
1662 }
1663 }
1664 #endif
1665
1666 done:
1667 return (DisasASI){ type, asi, mem_idx, memop };
1668 }
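/*
 * Usage sketch (editor's illustration): a plain "ldub [%o0], %o1"
 * reaches resolve_asi with asi == -1 and yields GET_ASI_DIRECT with
 * the current mem_idx; "lduba [%o0] 0x0a, %o1" in v8 supervisor mode
 * resolves ASI_USERDATA to GET_ASI_DIRECT with MMU_USER_IDX (0x0a is
 * assumed here to be ASI_USERDATA, per QEMU's asi.h).
 */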
1669
1670 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1671 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1672 TCGv_i32 asi, TCGv_i32 mop)
1673 {
1674 g_assert_not_reached();
1675 }
1676
1677 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1678 TCGv_i32 asi, TCGv_i32 mop)
1679 {
1680 g_assert_not_reached();
1681 }
1682 #endif
1683
1684 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1685 {
1686 switch (da->type) {
1687 case GET_ASI_EXCP:
1688 break;
1689 case GET_ASI_DTWINX: /* Reserved for ldda. */
1690 gen_exception(dc, TT_ILL_INSN);
1691 break;
1692 case GET_ASI_DIRECT:
1693 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1694 break;
1695 default:
1696 {
1697 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1698 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1699
1700 save_state(dc);
1701 #ifdef TARGET_SPARC64
1702 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1703 #else
1704 {
1705 TCGv_i64 t64 = tcg_temp_new_i64();
1706 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1707 tcg_gen_trunc_i64_tl(dst, t64);
1708 }
1709 #endif
1710 }
1711 break;
1712 }
1713 }
1714
1715 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1716 {
1717 switch (da->type) {
1718 case GET_ASI_EXCP:
1719 break;
1720
1721 case GET_ASI_DTWINX: /* Reserved for stda. */
1722 if (TARGET_LONG_BITS == 32) {
1723 gen_exception(dc, TT_ILL_INSN);
1724 break;
1725 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1726 /* Pre OpenSPARC CPUs don't have these */
1727 gen_exception(dc, TT_ILL_INSN);
1728 break;
1729 }
1730 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1731 /* fall through */
1732
1733 case GET_ASI_DIRECT:
1734 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1735 break;
1736
1737 case GET_ASI_BCOPY:
1738 assert(TARGET_LONG_BITS == 32);
1739 /*
1740 * Copy 32 bytes from the address in SRC to ADDR.
1741 *
1742 * From Ross RT625 hyperSPARC manual, section 4.6:
1743 * "Block Copy and Block Fill will work only on cache line boundaries."
1744 *
1745 * It does not specify whether an unaligned address is truncated or trapped.
1746 * Previous qemu behaviour was to truncate to 4-byte alignment, which
1747 * is obviously wrong. The only place I can see this used is in the
1748 * Linux kernel which begins with page alignment, advancing by 32,
1749 * so is always aligned. Assume truncation as the simpler option.
1750 *
1751 * Since the loads and stores are paired, allow the copy to happen
1752 * in the host endianness. The copy need not be atomic.
1753 */
1754 {
1755 MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1756 TCGv saddr = tcg_temp_new();
1757 TCGv daddr = tcg_temp_new();
1758 TCGv_i128 tmp = tcg_temp_new_i128();
1759
1760 tcg_gen_andi_tl(saddr, src, -32);
1761 tcg_gen_andi_tl(daddr, addr, -32);
1762 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1763 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1764 tcg_gen_addi_tl(saddr, saddr, 16);
1765 tcg_gen_addi_tl(daddr, daddr, 16);
1766 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1767 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1768 }
1769 break;
1770
1771 default:
1772 {
1773 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1774 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1775
1776 save_state(dc);
1777 #ifdef TARGET_SPARC64
1778 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1779 #else
1780 {
1781 TCGv_i64 t64 = tcg_temp_new_i64();
1782 tcg_gen_extu_tl_i64(t64, src);
1783 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1784 }
1785 #endif
1786
1787 /* A write to a TLB register may alter page maps. End the TB. */
1788 dc->npc = DYNAMIC_PC;
1789 }
1790 break;
1791 }
1792 }
1793
1794 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1795 TCGv dst, TCGv src, TCGv addr)
1796 {
1797 switch (da->type) {
1798 case GET_ASI_EXCP:
1799 break;
1800 case GET_ASI_DIRECT:
1801 tcg_gen_atomic_xchg_tl(dst, addr, src,
1802 da->mem_idx, da->memop | MO_ALIGN);
1803 break;
1804 default:
1805 /* ??? Should be DAE_invalid_asi. */
1806 gen_exception(dc, TT_DATA_ACCESS);
1807 break;
1808 }
1809 }
1810
1811 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1812 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1813 {
1814 switch (da->type) {
1815 case GET_ASI_EXCP:
1816 return;
1817 case GET_ASI_DIRECT:
1818 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1819 da->mem_idx, da->memop | MO_ALIGN);
1820 break;
1821 default:
1822 /* ??? Should be DAE_invalid_asi. */
1823 gen_exception(dc, TT_DATA_ACCESS);
1824 break;
1825 }
1826 }
1827
1828 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1829 {
1830 switch (da->type) {
1831 case GET_ASI_EXCP:
1832 break;
1833 case GET_ASI_DIRECT:
1834 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1835 da->mem_idx, MO_UB);
1836 break;
1837 default:
1838 /* ??? In theory, this should raise DAE_invalid_asi.
1839 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1840 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1841 gen_helper_exit_atomic(tcg_env);
1842 } else {
1843 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1844 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1845 TCGv_i64 s64, t64;
1846
1847 save_state(dc);
1848 t64 = tcg_temp_new_i64();
1849 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1850
1851 s64 = tcg_constant_i64(0xff);
1852 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1853
1854 tcg_gen_trunc_i64_tl(dst, t64);
1855
1856 /* End the TB. */
1857 dc->npc = DYNAMIC_PC;
1858 }
1859 break;
1860 }
1861 }
1862
1863 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1864 TCGv addr, int rd)
1865 {
1866 MemOp memop = da->memop;
1867 MemOp size = memop & MO_SIZE;
1868 TCGv_i32 d32;
1869 TCGv_i64 d64;
1870 TCGv addr_tmp;
1871
1872 /* TODO: Use 128-bit load/store below. */
1873 if (size == MO_128) {
1874 memop = (memop & ~MO_SIZE) | MO_64;
1875 }
1876
1877 switch (da->type) {
1878 case GET_ASI_EXCP:
1879 break;
1880
1881 case GET_ASI_DIRECT:
1882 memop |= MO_ALIGN_4;
1883 switch (size) {
1884 case MO_32:
1885 d32 = tcg_temp_new_i32();
1886 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1887 gen_store_fpr_F(dc, rd, d32);
1888 break;
1889
1890 case MO_64:
1891 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1892 break;
1893
1894 case MO_128:
1895 d64 = tcg_temp_new_i64();
1896 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1897 addr_tmp = tcg_temp_new();
1898 tcg_gen_addi_tl(addr_tmp, addr, 8);
1899 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1900 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1901 break;
1902 default:
1903 g_assert_not_reached();
1904 }
1905 break;
1906
1907 case GET_ASI_BLOCK:
1908 /* Valid for lddfa on aligned registers only. */
1909 if (orig_size == MO_64 && (rd & 7) == 0) {
1910 /* The first operation checks required alignment. */
1911 addr_tmp = tcg_temp_new();
1912 for (int i = 0; ; ++i) {
1913 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1914 memop | (i == 0 ? MO_ALIGN_64 : 0));
1915 if (i == 7) {
1916 break;
1917 }
1918 tcg_gen_addi_tl(addr_tmp, addr, 8);
1919 addr = addr_tmp;
1920 }
1921 } else {
1922 gen_exception(dc, TT_ILL_INSN);
1923 }
1924 break;
1925
1926 case GET_ASI_SHORT:
1927 /* Valid for lddfa only. */
1928 if (orig_size == MO_64) {
1929 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1930 memop | MO_ALIGN);
1931 } else {
1932 gen_exception(dc, TT_ILL_INSN);
1933 }
1934 break;
1935
1936 default:
1937 {
1938 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1939 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1940
1941 save_state(dc);
1942 /* According to the table in the UA2011 manual, the only
1943 other asis that are valid for ldfa/lddfa/ldqfa are
1944 the NO_FAULT asis. We still need a helper for these,
1945 but we can just use the integer asi helper for them. */
1946 switch (size) {
1947 case MO_32:
1948 d64 = tcg_temp_new_i64();
1949 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1950 d32 = tcg_temp_new_i32();
1951 tcg_gen_extrl_i64_i32(d32, d64);
1952 gen_store_fpr_F(dc, rd, d32);
1953 break;
1954 case MO_64:
1955 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1956 r_asi, r_mop);
1957 break;
1958 case MO_128:
1959 d64 = tcg_temp_new_i64();
1960 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1961 addr_tmp = tcg_temp_new();
1962 tcg_gen_addi_tl(addr_tmp, addr, 8);
1963 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1964 r_asi, r_mop);
1965 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1966 break;
1967 default:
1968 g_assert_not_reached();
1969 }
1970 }
1971 break;
1972 }
1973 }
1974
1975 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1976 TCGv addr, int rd)
1977 {
1978 MemOp memop = da->memop;
1979 MemOp size = memop & MO_SIZE;
1980 TCGv_i32 d32;
1981 TCGv addr_tmp;
1982
1983 /* TODO: Use 128-bit load/store below. */
1984 if (size == MO_128) {
1985 memop = (memop & ~MO_SIZE) | MO_64;
1986 }
1987
1988 switch (da->type) {
1989 case GET_ASI_EXCP:
1990 break;
1991
1992 case GET_ASI_DIRECT:
1993 memop |= MO_ALIGN_4;
1994 switch (size) {
1995 case MO_32:
1996 d32 = gen_load_fpr_F(dc, rd);
1997 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
1998 break;
1999 case MO_64:
2000 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2001 memop | MO_ALIGN_4);
2002 break;
2003 case MO_128:
2004 /* Only 4-byte alignment required. However, it is legal for the
2005 cpu to signal the alignment fault, and the OS trap handler is
2006 required to fix it up. Requiring 16-byte alignment here avoids
2007 having to probe the second page before performing the first
2008 write. */
2009 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2010 memop | MO_ALIGN_16);
2011 addr_tmp = tcg_temp_new();
2012 tcg_gen_addi_tl(addr_tmp, addr, 8);
2013 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2014 break;
2015 default:
2016 g_assert_not_reached();
2017 }
2018 break;
2019
2020 case GET_ASI_BLOCK:
2021 /* Valid for stdfa on aligned registers only. */
2022 if (orig_size == MO_64 && (rd & 7) == 0) {
2023 /* The first operation checks required alignment. */
2024 addr_tmp = tcg_temp_new();
2025 for (int i = 0; ; ++i) {
2026 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2027 memop | (i == 0 ? MO_ALIGN_64 : 0));
2028 if (i == 7) {
2029 break;
2030 }
2031 tcg_gen_addi_tl(addr_tmp, addr, 8);
2032 addr = addr_tmp;
2033 }
2034 } else {
2035 gen_exception(dc, TT_ILL_INSN);
2036 }
2037 break;
2038
2039 case GET_ASI_SHORT:
2040 /* Valid for stdfa only. */
2041 if (orig_size == MO_64) {
2042 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2043 memop | MO_ALIGN);
2044 } else {
2045 gen_exception(dc, TT_ILL_INSN);
2046 }
2047 break;
2048
2049 default:
2050 /* According to the table in the UA2011 manual, the only
2051 other asis that are valid for stfa/stdfa/stqfa are
2052 the PST* asis, which aren't currently handled. */
2053 gen_exception(dc, TT_ILL_INSN);
2054 break;
2055 }
2056 }
2057
2058 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2059 {
2060 TCGv hi = gen_dest_gpr(dc, rd);
2061 TCGv lo = gen_dest_gpr(dc, rd + 1);
2062
2063 switch (da->type) {
2064 case GET_ASI_EXCP:
2065 return;
2066
2067 case GET_ASI_DTWINX:
2068 #ifdef TARGET_SPARC64
2069 {
2070 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2071 TCGv_i128 t = tcg_temp_new_i128();
2072
2073 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2074 /*
2075 * Note that LE twinx acts as if each 64-bit register result is
2076 * byte swapped. We perform one 128-bit LE load, so must swap
2077 * the order of the writebacks.
2078 */
2079 if ((mop & MO_BSWAP) == MO_TE) {
2080 tcg_gen_extr_i128_i64(lo, hi, t);
2081 } else {
2082 tcg_gen_extr_i128_i64(hi, lo, t);
2083 }
2084 }
2085 break;
2086 #else
2087 g_assert_not_reached();
2088 #endif
2089
2090 case GET_ASI_DIRECT:
2091 {
2092 TCGv_i64 tmp = tcg_temp_new_i64();
2093
2094 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2095
2096             /* Note that LE ldda acts as if each 32-bit register
2097                result is byte swapped. Having just performed one
2098                64-bit bswap, we now need to swap the order of the writebacks. */
2099 if ((da->memop & MO_BSWAP) == MO_TE) {
2100 tcg_gen_extr_i64_tl(lo, hi, tmp);
2101 } else {
2102 tcg_gen_extr_i64_tl(hi, lo, tmp);
2103 }
2104 }
2105 break;
2106
2107 default:
2108 /* ??? In theory we've handled all of the ASIs that are valid
2109 for ldda, and this should raise DAE_invalid_asi. However,
2110 real hardware allows others. This can be seen with e.g.
2111 FreeBSD 10.3 wrt ASI_IC_TAG. */
2112 {
2113 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2114 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2115 TCGv_i64 tmp = tcg_temp_new_i64();
2116
2117 save_state(dc);
2118 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2119
2120 /* See above. */
2121 if ((da->memop & MO_BSWAP) == MO_TE) {
2122 tcg_gen_extr_i64_tl(lo, hi, tmp);
2123 } else {
2124 tcg_gen_extr_i64_tl(hi, lo, tmp);
2125 }
2126 }
2127 break;
2128 }
2129
2130 gen_store_gpr(dc, rd, hi);
2131 gen_store_gpr(dc, rd + 1, lo);
2132 }
2133
2134 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2135 {
2136 TCGv hi = gen_load_gpr(dc, rd);
2137 TCGv lo = gen_load_gpr(dc, rd + 1);
2138
2139 switch (da->type) {
2140 case GET_ASI_EXCP:
2141 break;
2142
2143 case GET_ASI_DTWINX:
2144 #ifdef TARGET_SPARC64
2145 {
2146 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2147 TCGv_i128 t = tcg_temp_new_i128();
2148
2149 /*
2150 * Note that LE twinx acts as if each 64-bit register result is
2151 * byte swapped. We perform one 128-bit LE store, so must swap
2152 * the order of the construction.
2153 */
2154 if ((mop & MO_BSWAP) == MO_TE) {
2155 tcg_gen_concat_i64_i128(t, lo, hi);
2156 } else {
2157 tcg_gen_concat_i64_i128(t, hi, lo);
2158 }
2159 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2160 }
2161 break;
2162 #else
2163 g_assert_not_reached();
2164 #endif
2165
2166 case GET_ASI_DIRECT:
2167 {
2168 TCGv_i64 t64 = tcg_temp_new_i64();
2169
2170 /* Note that LE stda acts as if each 32-bit register result is
2171 byte swapped. We will perform one 64-bit LE store, so now
2172 we must swap the order of the construction. */
2173 if ((da->memop & MO_BSWAP) == MO_TE) {
2174 tcg_gen_concat_tl_i64(t64, lo, hi);
2175 } else {
2176 tcg_gen_concat_tl_i64(t64, hi, lo);
2177 }
2178 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2179 }
2180 break;
2181
2182 case GET_ASI_BFILL:
2183 assert(TARGET_LONG_BITS == 32);
2184 /*
2185          * Store 32 bytes, replicating the 8-byte value [rd:rd+1], to ADDR aligned down to 32 bytes.
2186 * See comments for GET_ASI_COPY above.
2187 */
2188 {
2189 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2190 TCGv_i64 t8 = tcg_temp_new_i64();
2191 TCGv_i128 t16 = tcg_temp_new_i128();
2192 TCGv daddr = tcg_temp_new();
2193
2194 tcg_gen_concat_tl_i64(t8, lo, hi);
2195 tcg_gen_concat_i64_i128(t16, t8, t8);
2196 tcg_gen_andi_tl(daddr, addr, -32);
2197 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2198 tcg_gen_addi_tl(daddr, daddr, 16);
2199 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2200 }
2201 break;
2202
2203 default:
2204 /* ??? In theory we've handled all of the ASIs that are valid
2205 for stda, and this should raise DAE_invalid_asi. */
2206 {
2207 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2208 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2209 TCGv_i64 t64 = tcg_temp_new_i64();
2210
2211 /* See above. */
2212 if ((da->memop & MO_BSWAP) == MO_TE) {
2213 tcg_gen_concat_tl_i64(t64, lo, hi);
2214 } else {
2215 tcg_gen_concat_tl_i64(t64, hi, lo);
2216 }
2217
2218 save_state(dc);
2219 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2220 }
2221 break;
2222 }
2223 }
2224
2225 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2226 {
2227 #ifdef TARGET_SPARC64
2228 TCGv_i32 c32, zero, dst, s1, s2;
2229 TCGv_i64 c64 = tcg_temp_new_i64();
2230
2231     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2232        or fold the comparison down to 32 bits and use movcond_i32. Choose
2233        the latter. */
2234 c32 = tcg_temp_new_i32();
2235 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2236 tcg_gen_extrl_i64_i32(c32, c64);
2237
2238 s1 = gen_load_fpr_F(dc, rs);
2239 s2 = gen_load_fpr_F(dc, rd);
2240 dst = tcg_temp_new_i32();
2241 zero = tcg_constant_i32(0);
2242
2243 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2244
2245 gen_store_fpr_F(dc, rd, dst);
2246 #else
2247 qemu_build_not_reached();
2248 #endif
2249 }
2250
2251 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2252 {
2253 #ifdef TARGET_SPARC64
2254 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2255 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2256 gen_load_fpr_D(dc, rs),
2257 gen_load_fpr_D(dc, rd));
2258 gen_store_fpr_D(dc, rd, dst);
2259 #else
2260 qemu_build_not_reached();
2261 #endif
2262 }
2263
2264 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2265 {
2266 #ifdef TARGET_SPARC64
2267 int qd = QFPREG(rd);
2268 int qs = QFPREG(rs);
2269 TCGv c2 = tcg_constant_tl(cmp->c2);
2270
2271 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2272 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2273 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2274 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2275
2276 gen_update_fprs_dirty(dc, qd);
2277 #else
2278 qemu_build_not_reached();
2279 #endif
2280 }
2281
2282 #ifdef TARGET_SPARC64
2283 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2284 {
2285 TCGv_i32 r_tl = tcg_temp_new_i32();
2286
2287 /* load env->tl into r_tl */
2288 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2289
2290     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2291 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2292
2293 /* calculate offset to current trap state from env->ts, reuse r_tl */
2294 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2295 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2296
2297 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2298 {
2299 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2300 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2301 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2302 }
2303 }
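/*
 * Editorial worked example: the function above computes
 *     tsptr = env->ts + (env->tl & MAXTL_MASK) * sizeof(trap_state),
 * so with env->tl == 2 it yields a pointer to env->ts[2], the trap
 * state for the current trap level.
 */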
2304 #endif
2305
2306 static int extract_dfpreg(DisasContext *dc, int x)
2307 {
2308 return DFPREG(x);
2309 }
2310
2311 static int extract_qfpreg(DisasContext *dc, int x)
2312 {
2313 return QFPREG(x);
2314 }
2315
2316 /* Include the auto-generated decoder. */
2317 #include "decode-insns.c.inc"
2318
2319 #define TRANS(NAME, AVAIL, FUNC, ...) \
2320 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2321 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
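/*
 * Editorial illustration: TRANS(RDCCR, 64, do_rd_special, true, a->rd,
 * do_rdccr) below expands to
 *
 *     static bool trans_RDCCR(DisasContext *dc, arg_RDCCR *a)
 *     { return avail_64(dc) && do_rd_special(dc, true, a->rd, do_rdccr); }
 *
 * i.e. the AVAIL predicate gates the shared decode helper.
 */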
2322
2323 #define avail_ALL(C) true
2324 #ifdef TARGET_SPARC64
2325 # define avail_32(C) false
2326 # define avail_ASR17(C) false
2327 # define avail_CASA(C) true
2328 # define avail_DIV(C) true
2329 # define avail_MUL(C) true
2330 # define avail_POWERDOWN(C) false
2331 # define avail_64(C) true
2332 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2333 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2334 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2335 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2336 #else
2337 # define avail_32(C) true
2338 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2339 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2340 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2341 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2342 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2343 # define avail_64(C) false
2344 # define avail_GL(C) false
2345 # define avail_HYPV(C) false
2346 # define avail_VIS1(C) false
2347 # define avail_VIS2(C) false
2348 #endif
2349
2350 /* Default case for non-jump instructions. */
2351 static bool advance_pc(DisasContext *dc)
2352 {
2353 TCGLabel *l1;
2354
2355 finishing_insn(dc);
2356
2357 if (dc->npc & 3) {
2358 switch (dc->npc) {
2359 case DYNAMIC_PC:
2360 case DYNAMIC_PC_LOOKUP:
2361 dc->pc = dc->npc;
2362 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2363 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2364 break;
2365
2366 case JUMP_PC:
2367 /* we can do a static jump */
2368 l1 = gen_new_label();
2369 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2370
2371 /* jump not taken */
2372 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2373
2374 /* jump taken */
2375 gen_set_label(l1);
2376 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2377
2378 dc->base.is_jmp = DISAS_NORETURN;
2379 break;
2380
2381 default:
2382 g_assert_not_reached();
2383 }
2384 } else {
2385 dc->pc = dc->npc;
2386 dc->npc = dc->npc + 4;
2387 }
2388 return true;
2389 }
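/*
 * Editorial note: real SPARC instruction addresses are 4-byte aligned, so
 * the low two bits of dc->npc are free to encode the sentinel values
 * DYNAMIC_PC, DYNAMIC_PC_LOOKUP and JUMP_PC (defined earlier in this
 * file); "dc->npc & 3" above is a cheap test for "sentinel rather than
 * concrete address".
 */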
2390
2391 /*
2392 * Major opcodes 00 and 01 -- branches, call, and sethi
2393 */
2394
2395 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2396 bool annul, int disp)
2397 {
2398 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2399 target_ulong npc;
2400
2401 finishing_insn(dc);
2402
2403 if (cmp->cond == TCG_COND_ALWAYS) {
2404 if (annul) {
2405 dc->pc = dest;
2406 dc->npc = dest + 4;
2407 } else {
2408 gen_mov_pc_npc(dc);
2409 dc->npc = dest;
2410 }
2411 return true;
2412 }
2413
2414 if (cmp->cond == TCG_COND_NEVER) {
2415 npc = dc->npc;
2416 if (npc & 3) {
2417 gen_mov_pc_npc(dc);
2418 if (annul) {
2419 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2420 }
2421 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2422 } else {
2423 dc->pc = npc + (annul ? 4 : 0);
2424 dc->npc = dc->pc + 4;
2425 }
2426 return true;
2427 }
2428
2429 flush_cond(dc);
2430 npc = dc->npc;
2431
2432 if (annul) {
2433 TCGLabel *l1 = gen_new_label();
2434
2435 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2436 gen_goto_tb(dc, 0, npc, dest);
2437 gen_set_label(l1);
2438 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2439
2440 dc->base.is_jmp = DISAS_NORETURN;
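        /*
         * Editorial note: for an annulling conditional branch the delay
         * slot executes only when the branch is taken, so the taken path
         * above chains to (npc, dest) while the not-taken path skips the
         * slot entirely via (npc + 4, npc + 8).
         */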
2441 } else {
2442 if (npc & 3) {
2443 switch (npc) {
2444 case DYNAMIC_PC:
2445 case DYNAMIC_PC_LOOKUP:
2446 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2447 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2448 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2449 cmp->c1, tcg_constant_tl(cmp->c2),
2450 tcg_constant_tl(dest), cpu_npc);
2451 dc->pc = npc;
2452 break;
2453 default:
2454 g_assert_not_reached();
2455 }
2456 } else {
2457 dc->pc = npc;
2458 dc->npc = JUMP_PC;
2459 dc->jump = *cmp;
2460 dc->jump_pc[0] = dest;
2461 dc->jump_pc[1] = npc + 4;
2462
2463 /* The condition for cpu_cond is always NE -- normalize. */
2464 if (cmp->cond == TCG_COND_NE) {
2465 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2466 } else {
2467 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2468 }
2469 dc->cpu_cond_live = true;
2470 }
2471 }
2472 return true;
2473 }
2474
2475 static bool raise_priv(DisasContext *dc)
2476 {
2477 gen_exception(dc, TT_PRIV_INSN);
2478 return true;
2479 }
2480
2481 static bool raise_unimpfpop(DisasContext *dc)
2482 {
2483 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2484 return true;
2485 }
2486
2487 static bool gen_trap_float128(DisasContext *dc)
2488 {
2489 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2490 return false;
2491 }
2492 return raise_unimpfpop(dc);
2493 }
2494
2495 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2496 {
2497 DisasCompare cmp;
2498
2499 gen_compare(&cmp, a->cc, a->cond, dc);
2500 return advance_jump_cond(dc, &cmp, a->a, a->i);
2501 }
2502
2503 TRANS(Bicc, ALL, do_bpcc, a)
2504 TRANS(BPcc, 64, do_bpcc, a)
2505
2506 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2507 {
2508 DisasCompare cmp;
2509
2510 if (gen_trap_ifnofpu(dc)) {
2511 return true;
2512 }
2513 gen_fcompare(&cmp, a->cc, a->cond);
2514 return advance_jump_cond(dc, &cmp, a->a, a->i);
2515 }
2516
2517 TRANS(FBPfcc, 64, do_fbpfcc, a)
2518 TRANS(FBfcc, ALL, do_fbpfcc, a)
2519
2520 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2521 {
2522 DisasCompare cmp;
2523
2524 if (!avail_64(dc)) {
2525 return false;
2526 }
2527 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2528 return false;
2529 }
2530 return advance_jump_cond(dc, &cmp, a->a, a->i);
2531 }
2532
2533 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2534 {
2535 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2536
2537 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2538 gen_mov_pc_npc(dc);
2539 dc->npc = target;
2540 return true;
2541 }
2542
2543 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2544 {
2545 /*
2546 * For sparc32, always generate the no-coprocessor exception.
2547 * For sparc64, always generate illegal instruction.
2548 */
2549 #ifdef TARGET_SPARC64
2550 return false;
2551 #else
2552 gen_exception(dc, TT_NCP_INSN);
2553 return true;
2554 #endif
2555 }
2556
2557 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2558 {
2559 /* Special-case %g0 because that's the canonical nop. */
2560 if (a->rd) {
2561 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2562 }
2563 return advance_pc(dc);
2564 }
2565
2566 /*
2567 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2568 */
2569
2570 static bool do_tcc(DisasContext *dc, int cond, int cc,
2571 int rs1, bool imm, int rs2_or_imm)
2572 {
2573 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2574 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2575 DisasCompare cmp;
2576 TCGLabel *lab;
2577 TCGv_i32 trap;
2578
2579 /* Trap never. */
2580 if (cond == 0) {
2581 return advance_pc(dc);
2582 }
2583
2584 /*
2585 * Immediate traps are the most common case. Since this value is
2586 * live across the branch, it really pays to evaluate the constant.
2587 */
2588 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2589 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2590 } else {
2591 trap = tcg_temp_new_i32();
2592 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2593 if (imm) {
2594 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2595 } else {
2596 TCGv_i32 t2 = tcg_temp_new_i32();
2597 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2598 tcg_gen_add_i32(trap, trap, t2);
2599 }
2600 tcg_gen_andi_i32(trap, trap, mask);
2601 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2602 }
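    /*
     * Editorial worked example: for "ta 0x10" (rs1 = %g0, immediate form),
     * the constant path above yields trap = (0x10 & mask) + TT_TRAP, i.e.
     * tt 0x90 -- software trap 0x10, the traditional SPARC Linux syscall
     * trap.
     */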
2603
2604 finishing_insn(dc);
2605
2606 /* Trap always. */
2607 if (cond == 8) {
2608 save_state(dc);
2609 gen_helper_raise_exception(tcg_env, trap);
2610 dc->base.is_jmp = DISAS_NORETURN;
2611 return true;
2612 }
2613
2614 /* Conditional trap. */
2615 flush_cond(dc);
2616 lab = delay_exceptionv(dc, trap);
2617 gen_compare(&cmp, cc, cond, dc);
2618 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2619
2620 return advance_pc(dc);
2621 }
2622
2623 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2624 {
2625 if (avail_32(dc) && a->cc) {
2626 return false;
2627 }
2628 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2629 }
2630
2631 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2632 {
2633 if (avail_64(dc)) {
2634 return false;
2635 }
2636 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2637 }
2638
2639 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2640 {
2641 if (avail_32(dc)) {
2642 return false;
2643 }
2644 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2645 }
2646
2647 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2648 {
2649 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2650 return advance_pc(dc);
2651 }
2652
2653 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2654 {
2655 if (avail_32(dc)) {
2656 return false;
2657 }
2658 if (a->mmask) {
2659 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2660 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2661 }
2662 if (a->cmask) {
2663 /* For #Sync, etc, end the TB to recognize interrupts. */
2664 dc->base.is_jmp = DISAS_EXIT;
2665 }
2666 return advance_pc(dc);
2667 }
2668
2669 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2670 TCGv (*func)(DisasContext *, TCGv))
2671 {
2672 if (!priv) {
2673 return raise_priv(dc);
2674 }
2675 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2676 return advance_pc(dc);
2677 }
2678
2679 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2680 {
2681 return cpu_y;
2682 }
2683
2684 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2685 {
2686 /*
2687 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2688 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2689 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2690 */
2691 if (avail_64(dc) && a->rs1 != 0) {
2692 return false;
2693 }
2694 return do_rd_special(dc, true, a->rd, do_rdy);
2695 }
2696
2697 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2698 {
2699 uint32_t val;
2700
2701 /*
2702 * TODO: There are many more fields to be filled,
2703 * some of which are writable.
2704 */
2705 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2706 val |= 1 << 8; /* [8] V8 */
2707
2708 return tcg_constant_tl(val);
2709 }
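/*
 * Editorial example: a LEON3 with 8 register windows reports
 * (8 - 1) | (1 << 8) == 0x107 in %asr17.
 */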
2710
2711 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2712
2713 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2714 {
2715 gen_helper_rdccr(dst, tcg_env);
2716 return dst;
2717 }
2718
2719 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2720
2721 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2722 {
2723 #ifdef TARGET_SPARC64
2724 return tcg_constant_tl(dc->asi);
2725 #else
2726 qemu_build_not_reached();
2727 #endif
2728 }
2729
2730 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2731
2732 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2733 {
2734 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2735
2736 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2737 if (translator_io_start(&dc->base)) {
2738 dc->base.is_jmp = DISAS_EXIT;
2739 }
2740 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2741 tcg_constant_i32(dc->mem_idx));
2742 return dst;
2743 }
2744
2745 /* TODO: non-priv access only allowed when enabled. */
2746 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2747
2748 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2749 {
2750 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2751 }
2752
2753 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2754
2755 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2756 {
2757 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2758 return dst;
2759 }
2760
2761 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2762
2763 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2764 {
2765 gen_trap_ifnofpu(dc);
2766 return cpu_gsr;
2767 }
2768
2769 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2770
2771 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2772 {
2773 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2774 return dst;
2775 }
2776
2777 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2778
2779 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2780 {
2781 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2782 return dst;
2783 }
2784
2785 /* TODO: non-priv access only allowed when enabled. */
2786 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2787
2788 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2789 {
2790 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2791
2792 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2793 if (translator_io_start(&dc->base)) {
2794 dc->base.is_jmp = DISAS_EXIT;
2795 }
2796 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2797 tcg_constant_i32(dc->mem_idx));
2798 return dst;
2799 }
2800
2801 /* TODO: non-priv access only allowed when enabled. */
2802 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2803
2804 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2805 {
2806 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2807 return dst;
2808 }
2809
2810 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2811 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2812
2813 /*
2814 * UltraSPARC-T1 Strand status.
2815  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2816  * this ASR as implementation dependent.
2817 */
2818 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2819 {
2820 return tcg_constant_tl(1);
2821 }
2822
2823 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2824
2825 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2826 {
2827 gen_helper_rdpsr(dst, tcg_env);
2828 return dst;
2829 }
2830
2831 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2832
2833 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2834 {
2835 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2836 return dst;
2837 }
2838
2839 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2840
2841 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2842 {
2843 TCGv_i32 tl = tcg_temp_new_i32();
2844 TCGv_ptr tp = tcg_temp_new_ptr();
2845
2846 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2847 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2848 tcg_gen_shli_i32(tl, tl, 3);
2849 tcg_gen_ext_i32_ptr(tp, tl);
2850 tcg_gen_add_ptr(tp, tp, tcg_env);
2851
2852 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2853 return dst;
2854 }
2855
2856 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2857
2858 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2859 {
2860 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2861 return dst;
2862 }
2863
2864 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2865
2866 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2867 {
2868 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2869 return dst;
2870 }
2871
2872 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2873
2874 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2875 {
2876 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2877 return dst;
2878 }
2879
2880 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2881
2882 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2883 {
2884 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2885 return dst;
2886 }
2887
2888 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2889 do_rdhstick_cmpr)
2890
2891 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2892 {
2893 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2894 return dst;
2895 }
2896
2897 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2898
2899 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2900 {
2901 #ifdef TARGET_SPARC64
2902 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2903
2904 gen_load_trap_state_at_tl(r_tsptr);
2905 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2906 return dst;
2907 #else
2908 qemu_build_not_reached();
2909 #endif
2910 }
2911
2912 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2913
2914 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2915 {
2916 #ifdef TARGET_SPARC64
2917 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2918
2919 gen_load_trap_state_at_tl(r_tsptr);
2920 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2921 return dst;
2922 #else
2923 qemu_build_not_reached();
2924 #endif
2925 }
2926
2927 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2928
2929 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2930 {
2931 #ifdef TARGET_SPARC64
2932 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2933
2934 gen_load_trap_state_at_tl(r_tsptr);
2935 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2936 return dst;
2937 #else
2938 qemu_build_not_reached();
2939 #endif
2940 }
2941
2942 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2943
2944 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2945 {
2946 #ifdef TARGET_SPARC64
2947 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2948
2949 gen_load_trap_state_at_tl(r_tsptr);
2950 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2951 return dst;
2952 #else
2953 qemu_build_not_reached();
2954 #endif
2955 }
2956
2957 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2958 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2959
2960 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2961 {
2962 return cpu_tbr;
2963 }
2964
2965 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2966 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2967
2968 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2969 {
2970 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2971 return dst;
2972 }
2973
2974 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2975
2976 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2977 {
2978 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2979 return dst;
2980 }
2981
2982 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2983
2984 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2985 {
2986 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2987 return dst;
2988 }
2989
2990 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2991
2992 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2993 {
2994 gen_helper_rdcwp(dst, tcg_env);
2995 return dst;
2996 }
2997
2998 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
2999
3000 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3001 {
3002 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3003 return dst;
3004 }
3005
3006 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3007
3008 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3009 {
3010 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3011 return dst;
3012 }
3013
3014 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3015 do_rdcanrestore)
3016
3017 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3018 {
3019 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3020 return dst;
3021 }
3022
3023 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3024
3025 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3026 {
3027 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3028 return dst;
3029 }
3030
3031 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3032
3033 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3034 {
3035 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3036 return dst;
3037 }
3038
3039 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3040
3041 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3042 {
3043 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3044 return dst;
3045 }
3046
3047 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3048
3049 /* UA2005 strand status */
3050 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3051 {
3052 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3053 return dst;
3054 }
3055
3056 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3057
3058 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3059 {
3060 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3061 return dst;
3062 }
3063
3064 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3065
3066 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3067 {
3068 if (avail_64(dc)) {
3069 gen_helper_flushw(tcg_env);
3070 return advance_pc(dc);
3071 }
3072 return false;
3073 }
3074
3075 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3076 void (*func)(DisasContext *, TCGv))
3077 {
3078 TCGv src;
3079
3080 /* For simplicity, we under-decoded the rs2 form. */
3081 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3082 return false;
3083 }
3084 if (!priv) {
3085 return raise_priv(dc);
3086 }
3087
3088 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3089 src = tcg_constant_tl(a->rs2_or_imm);
3090 } else {
3091 TCGv src1 = gen_load_gpr(dc, a->rs1);
3092 if (a->rs2_or_imm == 0) {
3093 src = src1;
3094 } else {
3095 src = tcg_temp_new();
3096 if (a->imm) {
3097 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3098 } else {
3099 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3100 }
3101 }
3102 }
3103 func(dc, src);
3104 return advance_pc(dc);
3105 }
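/*
 * Editorial note: the WR* state-register instructions define their operand
 * as rs1 XOR (rs2 or simm13), which is what the xor above implements; the
 * rs1 == %g0 path folds the whole operand to a constant.
 */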
3106
3107 static void do_wry(DisasContext *dc, TCGv src)
3108 {
3109 tcg_gen_ext32u_tl(cpu_y, src);
3110 }
3111
3112 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3113
3114 static void do_wrccr(DisasContext *dc, TCGv src)
3115 {
3116 gen_helper_wrccr(tcg_env, src);
3117 }
3118
3119 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3120
3121 static void do_wrasi(DisasContext *dc, TCGv src)
3122 {
3123 TCGv tmp = tcg_temp_new();
3124
3125 tcg_gen_ext8u_tl(tmp, src);
3126 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3127 /* End TB to notice changed ASI. */
3128 dc->base.is_jmp = DISAS_EXIT;
3129 }
3130
3131 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3132
3133 static void do_wrfprs(DisasContext *dc, TCGv src)
3134 {
3135 #ifdef TARGET_SPARC64
3136 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3137 dc->fprs_dirty = 0;
3138 dc->base.is_jmp = DISAS_EXIT;
3139 #else
3140 qemu_build_not_reached();
3141 #endif
3142 }
3143
3144 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3145
3146 static void do_wrgsr(DisasContext *dc, TCGv src)
3147 {
3148 gen_trap_ifnofpu(dc);
3149 tcg_gen_mov_tl(cpu_gsr, src);
3150 }
3151
3152 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3153
3154 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3155 {
3156 gen_helper_set_softint(tcg_env, src);
3157 }
3158
3159 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3160
3161 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3162 {
3163 gen_helper_clear_softint(tcg_env, src);
3164 }
3165
3166 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3167
3168 static void do_wrsoftint(DisasContext *dc, TCGv src)
3169 {
3170 gen_helper_write_softint(tcg_env, src);
3171 }
3172
3173 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3174
3175 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3176 {
3177 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3178
3179 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3180 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3181 translator_io_start(&dc->base);
3182 gen_helper_tick_set_limit(r_tickptr, src);
3183 /* End TB to handle timer interrupt */
3184 dc->base.is_jmp = DISAS_EXIT;
3185 }
3186
3187 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3188
3189 static void do_wrstick(DisasContext *dc, TCGv src)
3190 {
3191 #ifdef TARGET_SPARC64
3192 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3193
3194 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3195 translator_io_start(&dc->base);
3196 gen_helper_tick_set_count(r_tickptr, src);
3197 /* End TB to handle timer interrupt */
3198 dc->base.is_jmp = DISAS_EXIT;
3199 #else
3200 qemu_build_not_reached();
3201 #endif
3202 }
3203
3204 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3205
3206 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3207 {
3208 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3209
3210 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3211 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3212 translator_io_start(&dc->base);
3213 gen_helper_tick_set_limit(r_tickptr, src);
3214 /* End TB to handle timer interrupt */
3215 dc->base.is_jmp = DISAS_EXIT;
3216 }
3217
3218 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3219
3220 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3221 {
3222 finishing_insn(dc);
3223 save_state(dc);
3224 gen_helper_power_down(tcg_env);
3225 }
3226
3227 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3228
3229 static void do_wrpsr(DisasContext *dc, TCGv src)
3230 {
3231 gen_helper_wrpsr(tcg_env, src);
3232 dc->base.is_jmp = DISAS_EXIT;
3233 }
3234
3235 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3236
3237 static void do_wrwim(DisasContext *dc, TCGv src)
3238 {
3239 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3240 TCGv tmp = tcg_temp_new();
3241
3242 tcg_gen_andi_tl(tmp, src, mask);
3243 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3244 }
3245
3246 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3247
3248 static void do_wrtpc(DisasContext *dc, TCGv src)
3249 {
3250 #ifdef TARGET_SPARC64
3251 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3252
3253 gen_load_trap_state_at_tl(r_tsptr);
3254 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3255 #else
3256 qemu_build_not_reached();
3257 #endif
3258 }
3259
3260 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3261
3262 static void do_wrtnpc(DisasContext *dc, TCGv src)
3263 {
3264 #ifdef TARGET_SPARC64
3265 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3266
3267 gen_load_trap_state_at_tl(r_tsptr);
3268 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3269 #else
3270 qemu_build_not_reached();
3271 #endif
3272 }
3273
3274 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3275
3276 static void do_wrtstate(DisasContext *dc, TCGv src)
3277 {
3278 #ifdef TARGET_SPARC64
3279 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3280
3281 gen_load_trap_state_at_tl(r_tsptr);
3282 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3283 #else
3284 qemu_build_not_reached();
3285 #endif
3286 }
3287
3288 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3289
3290 static void do_wrtt(DisasContext *dc, TCGv src)
3291 {
3292 #ifdef TARGET_SPARC64
3293 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3294
3295 gen_load_trap_state_at_tl(r_tsptr);
3296 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3297 #else
3298 qemu_build_not_reached();
3299 #endif
3300 }
3301
3302 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3303
3304 static void do_wrtick(DisasContext *dc, TCGv src)
3305 {
3306 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3307
3308 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3309 translator_io_start(&dc->base);
3310 gen_helper_tick_set_count(r_tickptr, src);
3311 /* End TB to handle timer interrupt */
3312 dc->base.is_jmp = DISAS_EXIT;
3313 }
3314
3315 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3316
3317 static void do_wrtba(DisasContext *dc, TCGv src)
3318 {
3319 tcg_gen_mov_tl(cpu_tbr, src);
3320 }
3321
3322 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3323
3324 static void do_wrpstate(DisasContext *dc, TCGv src)
3325 {
3326 save_state(dc);
3327 if (translator_io_start(&dc->base)) {
3328 dc->base.is_jmp = DISAS_EXIT;
3329 }
3330 gen_helper_wrpstate(tcg_env, src);
3331 dc->npc = DYNAMIC_PC;
3332 }
3333
3334 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3335
3336 static void do_wrtl(DisasContext *dc, TCGv src)
3337 {
3338 save_state(dc);
3339 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3340 dc->npc = DYNAMIC_PC;
3341 }
3342
3343 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3344
3345 static void do_wrpil(DisasContext *dc, TCGv src)
3346 {
3347 if (translator_io_start(&dc->base)) {
3348 dc->base.is_jmp = DISAS_EXIT;
3349 }
3350 gen_helper_wrpil(tcg_env, src);
3351 }
3352
3353 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3354
3355 static void do_wrcwp(DisasContext *dc, TCGv src)
3356 {
3357 gen_helper_wrcwp(tcg_env, src);
3358 }
3359
3360 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3361
3362 static void do_wrcansave(DisasContext *dc, TCGv src)
3363 {
3364 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3365 }
3366
3367 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3368
3369 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3370 {
3371 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3372 }
3373
3374 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3375
3376 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3377 {
3378 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3379 }
3380
3381 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3382
3383 static void do_wrotherwin(DisasContext *dc, TCGv src)
3384 {
3385 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3386 }
3387
3388 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3389
3390 static void do_wrwstate(DisasContext *dc, TCGv src)
3391 {
3392 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3393 }
3394
3395 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3396
3397 static void do_wrgl(DisasContext *dc, TCGv src)
3398 {
3399 gen_helper_wrgl(tcg_env, src);
3400 }
3401
3402 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3403
3404 /* UA2005 strand status */
3405 static void do_wrssr(DisasContext *dc, TCGv src)
3406 {
3407 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3408 }
3409
3410 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3411
3412 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3413
3414 static void do_wrhpstate(DisasContext *dc, TCGv src)
3415 {
3416 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3417 dc->base.is_jmp = DISAS_EXIT;
3418 }
3419
3420 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3421
3422 static void do_wrhtstate(DisasContext *dc, TCGv src)
3423 {
3424 TCGv_i32 tl = tcg_temp_new_i32();
3425 TCGv_ptr tp = tcg_temp_new_ptr();
3426
3427 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3428 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3429 tcg_gen_shli_i32(tl, tl, 3);
3430 tcg_gen_ext_i32_ptr(tp, tl);
3431 tcg_gen_add_ptr(tp, tp, tcg_env);
3432
3433 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3434 }
3435
3436 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3437
3438 static void do_wrhintp(DisasContext *dc, TCGv src)
3439 {
3440 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3441 }
3442
3443 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3444
3445 static void do_wrhtba(DisasContext *dc, TCGv src)
3446 {
3447 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3448 }
3449
3450 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3451
3452 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3453 {
3454 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3455
3456 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3457 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3458 translator_io_start(&dc->base);
3459 gen_helper_tick_set_limit(r_tickptr, src);
3460 /* End TB to handle timer interrupt */
3461 dc->base.is_jmp = DISAS_EXIT;
3462 }
3463
3464 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3465 do_wrhstick_cmpr)
3466
3467 static bool do_saved_restored(DisasContext *dc, bool saved)
3468 {
3469 if (!supervisor(dc)) {
3470 return raise_priv(dc);
3471 }
3472 if (saved) {
3473 gen_helper_saved(tcg_env);
3474 } else {
3475 gen_helper_restored(tcg_env);
3476 }
3477 return advance_pc(dc);
3478 }
3479
3480 TRANS(SAVED, 64, do_saved_restored, true)
3481 TRANS(RESTORED, 64, do_saved_restored, false)
3482
3483 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3484 {
3485 return advance_pc(dc);
3486 }
3487
3488 /*
3489 * TODO: Need a feature bit for sparcv8.
3490 * In the meantime, treat all 32-bit cpus like sparcv7.
3491 */
3492 TRANS(NOP_v7, 32, trans_NOP, a)
3493 TRANS(NOP_v9, 64, trans_NOP, a)
3494
3495 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3496 void (*func)(TCGv, TCGv, TCGv),
3497 void (*funci)(TCGv, TCGv, target_long),
3498 bool logic_cc)
3499 {
3500 TCGv dst, src1;
3501
3502 /* For simplicity, we under-decoded the rs2 form. */
3503 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3504 return false;
3505 }
3506
3507 if (logic_cc) {
3508 dst = cpu_cc_N;
3509 } else {
3510 dst = gen_dest_gpr(dc, a->rd);
3511 }
3512 src1 = gen_load_gpr(dc, a->rs1);
3513
3514 if (a->imm || a->rs2_or_imm == 0) {
3515 if (funci) {
3516 funci(dst, src1, a->rs2_or_imm);
3517 } else {
3518 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3519 }
3520 } else {
3521 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3522 }
3523
3524 if (logic_cc) {
3525 if (TARGET_LONG_BITS == 64) {
3526 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3527 tcg_gen_movi_tl(cpu_icc_C, 0);
3528 }
3529 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3530 tcg_gen_movi_tl(cpu_cc_C, 0);
3531 tcg_gen_movi_tl(cpu_cc_V, 0);
3532 }
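    /*
     * Editorial note: for logical ops the flags derive entirely from the
     * result: dst aliases cpu_cc_N above, Z is encoded by storing the same
     * value into cpu_cc_Z (the flag is read back as "cpu_cc_Z == 0"), and
     * C and V are always clear.
     */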
3533
3534 gen_store_gpr(dc, a->rd, dst);
3535 return advance_pc(dc);
3536 }
3537
3538 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3539 void (*func)(TCGv, TCGv, TCGv),
3540 void (*funci)(TCGv, TCGv, target_long),
3541 void (*func_cc)(TCGv, TCGv, TCGv))
3542 {
3543 if (a->cc) {
3544 return do_arith_int(dc, a, func_cc, NULL, false);
3545 }
3546 return do_arith_int(dc, a, func, funci, false);
3547 }
3548
3549 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3550 void (*func)(TCGv, TCGv, TCGv),
3551 void (*funci)(TCGv, TCGv, target_long))
3552 {
3553 return do_arith_int(dc, a, func, funci, a->cc);
3554 }
3555
3556 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3557 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3558 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3559 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3560
3561 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3562 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3563 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3564 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3565
3566 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3567 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3568 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3569 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3570 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3571
3572 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3573 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3574 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3575 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3576
3577 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3578 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3579
3580 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3581 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3582
3583 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3584 {
3585 /* OR with %g0 is the canonical alias for MOV. */
3586 if (!a->cc && a->rs1 == 0) {
3587 if (a->imm || a->rs2_or_imm == 0) {
3588 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3589 } else if (a->rs2_or_imm & ~0x1f) {
3590 /* For simplicity, we under-decoded the rs2 form. */
3591 return false;
3592 } else {
3593 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3594 }
3595 return advance_pc(dc);
3596 }
3597 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3598 }
3599
3600 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3601 {
3602 TCGv_i64 t1, t2;
3603 TCGv dst;
3604
3605 if (!avail_DIV(dc)) {
3606 return false;
3607 }
3608 /* For simplicity, we under-decoded the rs2 form. */
3609 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3610 return false;
3611 }
3612
3613 if (unlikely(a->rs2_or_imm == 0)) {
3614 gen_exception(dc, TT_DIV_ZERO);
3615 return true;
3616 }
3617
3618 if (a->imm) {
3619 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3620 } else {
3621 TCGLabel *lab;
3622 TCGv_i32 n2;
3623
3624 finishing_insn(dc);
3625 flush_cond(dc);
3626
3627 n2 = tcg_temp_new_i32();
3628 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3629
3630 lab = delay_exception(dc, TT_DIV_ZERO);
3631 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3632
3633 t2 = tcg_temp_new_i64();
3634 #ifdef TARGET_SPARC64
3635 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3636 #else
3637 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3638 #endif
3639 }
3640
3641 t1 = tcg_temp_new_i64();
3642 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3643
3644 tcg_gen_divu_i64(t1, t1, t2);
3645 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
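    /*
     * Editorial note: the 64-bit dividend is (Y << 32) | rs1, per the V8
     * UDIV definition, and the umin implements V8's overflow rule: a
     * quotient that does not fit in 32 bits is saturated to 0xffffffff.
     */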
3646
3647 dst = gen_dest_gpr(dc, a->rd);
3648 tcg_gen_trunc_i64_tl(dst, t1);
3649 gen_store_gpr(dc, a->rd, dst);
3650 return advance_pc(dc);
3651 }
3652
3653 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3654 {
3655 TCGv dst, src1, src2;
3656
3657 if (!avail_64(dc)) {
3658 return false;
3659 }
3660 /* For simplicity, we under-decoded the rs2 form. */
3661 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3662 return false;
3663 }
3664
3665 if (unlikely(a->rs2_or_imm == 0)) {
3666 gen_exception(dc, TT_DIV_ZERO);
3667 return true;
3668 }
3669
3670 if (a->imm) {
3671 src2 = tcg_constant_tl(a->rs2_or_imm);
3672 } else {
3673 TCGLabel *lab;
3674
3675 finishing_insn(dc);
3676 flush_cond(dc);
3677
3678 lab = delay_exception(dc, TT_DIV_ZERO);
3679 src2 = cpu_regs[a->rs2_or_imm];
3680 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3681 }
3682
3683 dst = gen_dest_gpr(dc, a->rd);
3684 src1 = gen_load_gpr(dc, a->rs1);
3685
3686 tcg_gen_divu_tl(dst, src1, src2);
3687 gen_store_gpr(dc, a->rd, dst);
3688 return advance_pc(dc);
3689 }
3690
3691 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3692 {
3693 TCGv dst, src1, src2;
3694
3695 if (!avail_64(dc)) {
3696 return false;
3697 }
3698 /* For simplicity, we under-decoded the rs2 form. */
3699 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3700 return false;
3701 }
3702
3703 if (unlikely(a->rs2_or_imm == 0)) {
3704 gen_exception(dc, TT_DIV_ZERO);
3705 return true;
3706 }
3707
3708 dst = gen_dest_gpr(dc, a->rd);
3709 src1 = gen_load_gpr(dc, a->rs1);
3710
3711 if (a->imm) {
3712 if (unlikely(a->rs2_or_imm == -1)) {
3713 tcg_gen_neg_tl(dst, src1);
3714 gen_store_gpr(dc, a->rd, dst);
3715 return advance_pc(dc);
3716 }
3717 src2 = tcg_constant_tl(a->rs2_or_imm);
3718 } else {
3719 TCGLabel *lab;
3720 TCGv t1, t2;
3721
3722 finishing_insn(dc);
3723 flush_cond(dc);
3724
3725 lab = delay_exception(dc, TT_DIV_ZERO);
3726 src2 = cpu_regs[a->rs2_or_imm];
3727 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3728
3729 /*
3730 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3731 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3732 */
3733 t1 = tcg_temp_new();
3734 t2 = tcg_temp_new();
3735 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3736 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3737 tcg_gen_and_tl(t1, t1, t2);
3738 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3739 tcg_constant_tl(1), src2);
3740 src2 = t1;
3741 }
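    /*
     * Editorial note: with the replacement above, INT64_MIN / -1 is
     * evaluated as INT64_MIN / 1 == INT64_MIN -- the wrap-around value
     * the guest architecture specifies -- while the host never executes
     * the trapping division.
     */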
3742
3743 tcg_gen_div_tl(dst, src1, src2);
3744 gen_store_gpr(dc, a->rd, dst);
3745 return advance_pc(dc);
3746 }
3747
3748 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3749 int width, bool cc, bool left)
3750 {
3751 TCGv dst, s1, s2, lo1, lo2;
3752 uint64_t amask, tabl, tabr;
3753 int shift, imask, omask;
3754
3755 dst = gen_dest_gpr(dc, a->rd);
3756 s1 = gen_load_gpr(dc, a->rs1);
3757 s2 = gen_load_gpr(dc, a->rs2);
3758
3759 if (cc) {
3760 gen_op_subcc(cpu_cc_N, s1, s2);
3761 }
3762
3763 /*
3764 * Theory of operation: there are two tables, left and right (not to
3765 * be confused with the left and right versions of the opcode). These
3766 * are indexed by the low 3 bits of the inputs. To make things "easy",
3767 * these tables are loaded into two constants, TABL and TABR below.
3768 * The operation index = (input & imask) << shift calculates the index
3769 * into the constant, while val = (table >> index) & omask calculates
3770 * the value we're looking for.
3771 */
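    /*
     * Editorial illustration for width 8, left: with (s1 & imask) == 1
     * the index is 1 << 3 == 8, so
     *     lo1 = (0x80c0e0f0f8fcfeffULL >> 8) & 0xff == 0xfe,
     * the edge mask with one byte trimmed.
     */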
3772 switch (width) {
3773 case 8:
3774 imask = 0x7;
3775 shift = 3;
3776 omask = 0xff;
3777 if (left) {
3778 tabl = 0x80c0e0f0f8fcfeffULL;
3779 tabr = 0xff7f3f1f0f070301ULL;
3780 } else {
3781 tabl = 0x0103070f1f3f7fffULL;
3782 tabr = 0xfffefcf8f0e0c080ULL;
3783 }
3784 break;
3785 case 16:
3786 imask = 0x6;
3787 shift = 1;
3788 omask = 0xf;
3789 if (left) {
3790 tabl = 0x8cef;
3791 tabr = 0xf731;
3792 } else {
3793 tabl = 0x137f;
3794 tabr = 0xfec8;
3795 }
3796 break;
3797 case 32:
3798 imask = 0x4;
3799 shift = 0;
3800 omask = 0x3;
3801 if (left) {
3802 tabl = (2 << 2) | 3;
3803 tabr = (3 << 2) | 1;
3804 } else {
3805 tabl = (1 << 2) | 3;
3806 tabr = (3 << 2) | 2;
3807 }
3808 break;
3809 default:
3810 abort();
3811 }
3812
3813 lo1 = tcg_temp_new();
3814 lo2 = tcg_temp_new();
3815 tcg_gen_andi_tl(lo1, s1, imask);
3816 tcg_gen_andi_tl(lo2, s2, imask);
3817 tcg_gen_shli_tl(lo1, lo1, shift);
3818 tcg_gen_shli_tl(lo2, lo2, shift);
3819
3820 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3821 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3822 tcg_gen_andi_tl(lo1, lo1, omask);
3823 tcg_gen_andi_tl(lo2, lo2, omask);
3824
3825 amask = address_mask_i(dc, -8);
3826 tcg_gen_andi_tl(s1, s1, amask);
3827 tcg_gen_andi_tl(s2, s2, amask);
3828
3829 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3830 tcg_gen_and_tl(lo2, lo2, lo1);
3831 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3832
3833 gen_store_gpr(dc, a->rd, dst);
3834 return advance_pc(dc);
3835 }
3836
3837 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3838 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3839 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3840 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3841 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3842 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3843
3844 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3845 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3846 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3847 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3848 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3849 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3850
3851 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3852 void (*func)(TCGv, TCGv, TCGv))
3853 {
3854 TCGv dst = gen_dest_gpr(dc, a->rd);
3855 TCGv src1 = gen_load_gpr(dc, a->rs1);
3856 TCGv src2 = gen_load_gpr(dc, a->rs2);
3857
3858 func(dst, src1, src2);
3859 gen_store_gpr(dc, a->rd, dst);
3860 return advance_pc(dc);
3861 }
3862
3863 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3864 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3865 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3866
3867 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3868 {
3869 #ifdef TARGET_SPARC64
3870 TCGv tmp = tcg_temp_new();
3871
3872 tcg_gen_add_tl(tmp, s1, s2);
3873 tcg_gen_andi_tl(dst, tmp, -8);
3874 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3875 #else
3876 g_assert_not_reached();
3877 #endif
3878 }
3879
3880 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3881 {
3882 #ifdef TARGET_SPARC64
3883 TCGv tmp = tcg_temp_new();
3884
3885 tcg_gen_add_tl(tmp, s1, s2);
3886 tcg_gen_andi_tl(dst, tmp, -8);
3887 tcg_gen_neg_tl(tmp, tmp);
3888 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3889 #else
3890 g_assert_not_reached();
3891 #endif
3892 }
3893
3894 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3895 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3896
3897 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3898 {
3899 #ifdef TARGET_SPARC64
3900 tcg_gen_add_tl(dst, s1, s2);
3901 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3902 #else
3903 g_assert_not_reached();
3904 #endif
3905 }
3906
3907 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3908
3909 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3910 {
3911 TCGv dst, src1, src2;
3912
3913 /* Reject 64-bit shifts for sparc32. */
3914 if (avail_32(dc) && a->x) {
3915 return false;
3916 }
3917
3918 src2 = tcg_temp_new();
3919 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3920 src1 = gen_load_gpr(dc, a->rs1);
3921 dst = gen_dest_gpr(dc, a->rd);
3922
3923 if (l) {
3924 tcg_gen_shl_tl(dst, src1, src2);
3925 if (!a->x) {
3926 tcg_gen_ext32u_tl(dst, dst);
3927 }
3928 } else if (u) {
3929 if (!a->x) {
3930 tcg_gen_ext32u_tl(dst, src1);
3931 src1 = dst;
3932 }
3933 tcg_gen_shr_tl(dst, src1, src2);
3934 } else {
3935 if (!a->x) {
3936 tcg_gen_ext32s_tl(dst, src1);
3937 src1 = dst;
3938 }
3939 tcg_gen_sar_tl(dst, src1, src2);
3940 }
3941 gen_store_gpr(dc, a->rd, dst);
3942 return advance_pc(dc);
3943 }
3944
3945 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3946 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3947 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3948
3949 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3950 {
3951 TCGv dst, src1;
3952
3953 /* Reject 64-bit shifts for sparc32. */
3954 if (avail_32(dc) && (a->x || a->i >= 32)) {
3955 return false;
3956 }
3957
3958 src1 = gen_load_gpr(dc, a->rs1);
3959 dst = gen_dest_gpr(dc, a->rd);
3960
3961 if (avail_32(dc) || a->x) {
3962 if (l) {
3963 tcg_gen_shli_tl(dst, src1, a->i);
3964 } else if (u) {
3965 tcg_gen_shri_tl(dst, src1, a->i);
3966 } else {
3967 tcg_gen_sari_tl(dst, src1, a->i);
3968 }
3969 } else {
3970 if (l) {
3971 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3972 } else if (u) {
3973 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3974 } else {
3975 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3976 }
3977 }
3978 gen_store_gpr(dc, a->rd, dst);
3979 return advance_pc(dc);
3980 }
3981
3982 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3983 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3984 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3985
3986 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3987 {
3988 /* For simplicity, we under-decoded the rs2 form. */
3989 if (!imm && rs2_or_imm & ~0x1f) {
3990 return NULL;
3991 }
3992 if (imm || rs2_or_imm == 0) {
3993 return tcg_constant_tl(rs2_or_imm);
3994 } else {
3995 return cpu_regs[rs2_or_imm];
3996 }
3997 }
3998
3999 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4000 {
4001 TCGv dst = gen_load_gpr(dc, rd);
4002 TCGv c2 = tcg_constant_tl(cmp->c2);
4003
4004 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4005 gen_store_gpr(dc, rd, dst);
4006 return advance_pc(dc);
4007 }
4008
4009 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4010 {
4011 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4012 DisasCompare cmp;
4013
4014 if (src2 == NULL) {
4015 return false;
4016 }
4017 gen_compare(&cmp, a->cc, a->cond, dc);
4018 return do_mov_cond(dc, &cmp, a->rd, src2);
4019 }
4020
4021 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4022 {
4023 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4024 DisasCompare cmp;
4025
4026 if (src2 == NULL) {
4027 return false;
4028 }
4029 gen_fcompare(&cmp, a->cc, a->cond);
4030 return do_mov_cond(dc, &cmp, a->rd, src2);
4031 }
4032
4033 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4034 {
4035 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4036 DisasCompare cmp;
4037
4038 if (src2 == NULL) {
4039 return false;
4040 }
4041 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4042 return false;
4043 }
4044 return do_mov_cond(dc, &cmp, a->rd, src2);
4045 }
4046
4047 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4048 bool (*func)(DisasContext *dc, int rd, TCGv src))
4049 {
4050 TCGv src1, sum;
4051
4052 /* For simplicity, we under-decoded the rs2 form. */
4053 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4054 return false;
4055 }
4056
4057 /*
4058 * Always load the sum into a new temporary.
4059 * This is required to capture the value across a window change,
4060 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4061 */
4062 sum = tcg_temp_new();
4063 src1 = gen_load_gpr(dc, a->rs1);
4064 if (a->imm || a->rs2_or_imm == 0) {
4065 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4066 } else {
4067 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4068 }
4069 return func(dc, a->rd, sum);
4070 }
4071
4072 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4073 {
4074 /*
4075 * Preserve pc across advance, so that we can delay
4076 * the writeback to rd until after src is consumed.
4077 */
4078 target_ulong cur_pc = dc->pc;
4079
4080 gen_check_align(dc, src, 3);
4081
4082 gen_mov_pc_npc(dc);
4083 tcg_gen_mov_tl(cpu_npc, src);
4084 gen_address_mask(dc, cpu_npc);
4085 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4086
4087 dc->npc = DYNAMIC_PC_LOOKUP;
4088 return true;
4089 }
4090
4091 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4092
4093 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4094 {
4095 if (!supervisor(dc)) {
4096 return raise_priv(dc);
4097 }
4098
4099 gen_check_align(dc, src, 3);
4100
4101 gen_mov_pc_npc(dc);
4102 tcg_gen_mov_tl(cpu_npc, src);
4103 gen_helper_rett(tcg_env);
4104
4105 dc->npc = DYNAMIC_PC;
4106 return true;
4107 }
4108
4109 TRANS(RETT, 32, do_add_special, a, do_rett)
4110
4111 static bool do_return(DisasContext *dc, int rd, TCGv src)
4112 {
4113 gen_check_align(dc, src, 3);
4114 gen_helper_restore(tcg_env);
4115
4116 gen_mov_pc_npc(dc);
4117 tcg_gen_mov_tl(cpu_npc, src);
4118 gen_address_mask(dc, cpu_npc);
4119
4120 dc->npc = DYNAMIC_PC_LOOKUP;
4121 return true;
4122 }
4123
4124 TRANS(RETURN, 64, do_add_special, a, do_return)
4125
4126 static bool do_save(DisasContext *dc, int rd, TCGv src)
4127 {
4128 gen_helper_save(tcg_env);
4129 gen_store_gpr(dc, rd, src);
4130 return advance_pc(dc);
4131 }
4132
4133 TRANS(SAVE, ALL, do_add_special, a, do_save)
4134
4135 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4136 {
4137 gen_helper_restore(tcg_env);
4138 gen_store_gpr(dc, rd, src);
4139 return advance_pc(dc);
4140 }
4141
4142 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4143
4144 static bool do_done_retry(DisasContext *dc, bool done)
4145 {
4146 if (!supervisor(dc)) {
4147 return raise_priv(dc);
4148 }
4149 dc->npc = DYNAMIC_PC;
4150 dc->pc = DYNAMIC_PC;
4151 translator_io_start(&dc->base);
4152 if (done) {
4153 gen_helper_done(tcg_env);
4154 } else {
4155 gen_helper_retry(tcg_env);
4156 }
4157 return true;
4158 }
4159
4160 TRANS(DONE, 64, do_done_retry, true)
4161 TRANS(RETRY, 64, do_done_retry, false)
4162
4163 /*
4164 * Major opcode 11 -- load and store instructions
4165 */
4166
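/*
 * Compute the effective address rs1 + (rs2 or simm13).  When 32-bit
 * address masking is in effect (AM_CHECK, e.g. sparc64 with PSTATE.AM
 * set), the result is zero-extended to 32 bits.
 */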
4167 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4168 {
4169 TCGv addr, tmp = NULL;
4170
4171 /* For simplicity, we under-decoded the rs2 form. */
4172 if (!imm && rs2_or_imm & ~0x1f) {
4173 return NULL;
4174 }
4175
4176 addr = gen_load_gpr(dc, rs1);
4177 if (rs2_or_imm) {
4178 tmp = tcg_temp_new();
4179 if (imm) {
4180 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4181 } else {
4182 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4183 }
4184 addr = tmp;
4185 }
4186 if (AM_CHECK(dc)) {
4187 if (!tmp) {
4188 tmp = tcg_temp_new();
4189 }
4190 tcg_gen_ext32u_tl(tmp, addr);
4191 addr = tmp;
4192 }
4193 return addr;
4194 }
4195
4196 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4197 {
4198 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4199 DisasASI da;
4200
4201 if (addr == NULL) {
4202 return false;
4203 }
4204 da = resolve_asi(dc, a->asi, mop);
4205
4206 reg = gen_dest_gpr(dc, a->rd);
4207 gen_ld_asi(dc, &da, reg, addr);
4208 gen_store_gpr(dc, a->rd, reg);
4209 return advance_pc(dc);
4210 }
4211
4212 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4213 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4214 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4215 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4216 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4217 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4218 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4219
4220 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4221 {
4222 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4223 DisasASI da;
4224
4225 if (addr == NULL) {
4226 return false;
4227 }
4228 da = resolve_asi(dc, a->asi, mop);
4229
4230 reg = gen_load_gpr(dc, a->rd);
4231 gen_st_asi(dc, &da, reg, addr);
4232 return advance_pc(dc);
4233 }
4234
4235 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4236 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4237 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4238 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4239
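/*
 * LDD/STD operate on an even/odd register pair, so an odd rd is an
 * illegal instruction; returning false lets the caller raise
 * TT_ILL_INSN.  The pair is transferred as one 64-bit memory access.
 */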
4240 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4241 {
4242 TCGv addr;
4243 DisasASI da;
4244
4245 if (a->rd & 1) {
4246 return false;
4247 }
4248 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4249 if (addr == NULL) {
4250 return false;
4251 }
4252 da = resolve_asi(dc, a->asi, MO_TEUQ);
4253 gen_ldda_asi(dc, &da, addr, a->rd);
4254 return advance_pc(dc);
4255 }
4256
4257 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4258 {
4259 TCGv addr;
4260 DisasASI da;
4261
4262 if (a->rd & 1) {
4263 return false;
4264 }
4265 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4266 if (addr == NULL) {
4267 return false;
4268 }
4269 da = resolve_asi(dc, a->asi, MO_TEUQ);
4270 gen_stda_asi(dc, &da, addr, a->rd);
4271 return advance_pc(dc);
4272 }
4273
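/*
 * LDSTUB: atomic load-store unsigned byte.  The byte at the effective
 * address is loaded into rd and the location is set to 0xff, e.g.
 * "ldstub [%o0], %o1" (illustrative), the classic spinlock primitive.
 */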
4274 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4275 {
4276 TCGv addr, reg;
4277 DisasASI da;
4278
4279 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4280 if (addr == NULL) {
4281 return false;
4282 }
4283 da = resolve_asi(dc, a->asi, MO_UB);
4284
4285 reg = gen_dest_gpr(dc, a->rd);
4286 gen_ldstub_asi(dc, &da, reg, addr);
4287 gen_store_gpr(dc, a->rd, reg);
4288 return advance_pc(dc);
4289 }
4290
4291 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4292 {
4293 TCGv addr, dst, src;
4294 DisasASI da;
4295
4296 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4297 if (addr == NULL) {
4298 return false;
4299 }
4300 da = resolve_asi(dc, a->asi, MO_TEUL);
4301
4302 dst = gen_dest_gpr(dc, a->rd);
4303 src = gen_load_gpr(dc, a->rd);
4304 gen_swap_asi(dc, &da, dst, src, addr);
4305 gen_store_gpr(dc, a->rd, dst);
4306 return advance_pc(dc);
4307 }
4308
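/*
 * CASA/CASXA compare-and-swap.  The effective address is rs1 alone
 * (imm forced true with offset 0), since the rs2 field names the
 * comparison value rather than an address offset; the old memory
 * value always ends up in rd.
 */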
4309 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4310 {
4311 TCGv addr, o, n, c;
4312 DisasASI da;
4313
4314 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4315 if (addr == NULL) {
4316 return false;
4317 }
4318 da = resolve_asi(dc, a->asi, mop);
4319
4320 o = gen_dest_gpr(dc, a->rd);
4321 n = gen_load_gpr(dc, a->rd);
4322 c = gen_load_gpr(dc, a->rs2_or_imm);
4323 gen_cas_asi(dc, &da, o, n, c, addr);
4324 gen_store_gpr(dc, a->rd, o);
4325 return advance_pc(dc);
4326 }
4327
4328 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4329 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4330
4331 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4332 {
4333 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4334 DisasASI da;
4335
4336 if (addr == NULL) {
4337 return false;
4338 }
4339 if (gen_trap_ifnofpu(dc)) {
4340 return true;
4341 }
4342 if (sz == MO_128 && gen_trap_float128(dc)) {
4343 return true;
4344 }
4345 da = resolve_asi(dc, a->asi, MO_TE | sz);
4346 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4347 gen_update_fprs_dirty(dc, a->rd);
4348 return advance_pc(dc);
4349 }
4350
4351 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4352 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4353 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4354
4355 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4356 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4357 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4358
4359 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4360 {
4361 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4362 DisasASI da;
4363
4364 if (addr == NULL) {
4365 return false;
4366 }
4367 if (gen_trap_ifnofpu(dc)) {
4368 return true;
4369 }
4370 if (sz == MO_128 && gen_trap_float128(dc)) {
4371 return true;
4372 }
4373 da = resolve_asi(dc, a->asi, MO_TE | sz);
4374 gen_stf_asi(dc, &da, sz, addr, a->rd);
4375 return advance_pc(dc);
4376 }
4377
4378 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4379 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4380 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4381
4382 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4383 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4384 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4385
4386 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4387 {
4388 if (!avail_32(dc)) {
4389 return false;
4390 }
4391 if (!supervisor(dc)) {
4392 return raise_priv(dc);
4393 }
4394 if (gen_trap_ifnofpu(dc)) {
4395 return true;
4396 }
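    /*
     * QEMU does not model the floating-point deferred-trap queue, so
     * STDFQ always raises fp_exception with ftt = sequence_error, the
     * behavior expected when the queue is empty.
     */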
4397 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4398 return true;
4399 }
4400
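/*
 * LDFSR/LDXFSR: only the bits selected by new_mask are taken from
 * memory; old_mask selects the current FSR bits to preserve.  The
 * merged value is installed via set_fsr_noftt, leaving ftt untouched.
 */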
4401 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4402 target_ulong new_mask, target_ulong old_mask)
4403 {
4404 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4405 TCGv tnew, told;
4406
4407 if (addr == NULL) {
4408 return false;
4409 }
4410 if (gen_trap_ifnofpu(dc)) {
4411 return true;
4412 }
4413 tnew = tcg_temp_new();
4414 told = tcg_temp_new();
4415 tcg_gen_qemu_ld_tl(tnew, addr, dc->mem_idx, mop | MO_ALIGN);
4416 tcg_gen_andi_tl(tnew, tnew, new_mask);
4417 tcg_gen_andi_tl(told, cpu_fsr, old_mask);
4418 tcg_gen_or_tl(tnew, tnew, told);
4419 gen_helper_set_fsr_noftt(tcg_env, tnew);
4420 return advance_pc(dc);
4421 }
4422
4423 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4424 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4425
4426 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4427 {
4428 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4429 TCGv fsr;
4430
4431 if (addr == NULL) {
4432 return false;
4433 }
4434 if (gen_trap_ifnofpu(dc)) {
4435 return true;
4436 }
4437
4438 fsr = tcg_temp_new();
4439 gen_helper_get_fsr(fsr, tcg_env);
4440 tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4441 return advance_pc(dc);
4442 }
4443
4444 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4445 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4446
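/*
 * VIS FZEROs/FONEs.  cpu_fpr[] packs two single-precision registers
 * into one i64 with the even-numbered register in the high half, so
 * an odd rd selects the low 32-bit lane.
 */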
4447 static bool do_fc(DisasContext *dc, int rd, bool c)
4448 {
4449 uint64_t mask;
4450
4451 if (gen_trap_ifnofpu(dc)) {
4452 return true;
4453 }
4454
4455 if (rd & 1) {
4456 mask = MAKE_64BIT_MASK(0, 32);
4457 } else {
4458 mask = MAKE_64BIT_MASK(32, 32);
4459 }
4460 if (c) {
4461 tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4462 } else {
4463 tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4464 }
4465 gen_update_fprs_dirty(dc, rd);
4466 return advance_pc(dc);
4467 }
4468
4469 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4470 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4471
4472 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4473 {
4474 if (gen_trap_ifnofpu(dc)) {
4475 return true;
4476 }
4477
4478 tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4479 gen_update_fprs_dirty(dc, rd);
4480 return advance_pc(dc);
4481 }
4482
4483 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4484 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4485
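/*
 * The do_* helpers below are named by operand shape: f = 32-bit,
 * d = 64-bit and q = 128-bit float registers; an env_ prefix marks
 * operations whose helpers take tcg_env, e.g. because they can raise
 * IEEE exceptions or consult the rounding mode.
 */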
4486 static bool do_ff(DisasContext *dc, arg_r_r *a,
4487 void (*func)(TCGv_i32, TCGv_i32))
4488 {
4489 TCGv_i32 tmp;
4490
4491 if (gen_trap_ifnofpu(dc)) {
4492 return true;
4493 }
4494
4495 tmp = gen_load_fpr_F(dc, a->rs);
4496 func(tmp, tmp);
4497 gen_store_fpr_F(dc, a->rd, tmp);
4498 return advance_pc(dc);
4499 }
4500
4501 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4502 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4503 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4504 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4505 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4506
4507 static bool do_fd(DisasContext *dc, arg_r_r *a,
4508 void (*func)(TCGv_i32, TCGv_i64))
4509 {
4510 TCGv_i32 dst;
4511 TCGv_i64 src;
4512
4513 if (gen_trap_ifnofpu(dc)) {
4514 return true;
4515 }
4516
4517 dst = tcg_temp_new_i32();
4518 src = gen_load_fpr_D(dc, a->rs);
4519 func(dst, src);
4520 gen_store_fpr_F(dc, a->rd, dst);
4521 return advance_pc(dc);
4522 }
4523
4524 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4525 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4526
4527 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4528 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4529 {
4530 TCGv_i32 tmp;
4531
4532 if (gen_trap_ifnofpu(dc)) {
4533 return true;
4534 }
4535
4536 tmp = gen_load_fpr_F(dc, a->rs);
4537 func(tmp, tcg_env, tmp);
4538 gen_store_fpr_F(dc, a->rd, tmp);
4539 return advance_pc(dc);
4540 }
4541
4542 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4543 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4544 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4545
4546 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4547 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4548 {
4549 TCGv_i32 dst;
4550 TCGv_i64 src;
4551
4552 if (gen_trap_ifnofpu(dc)) {
4553 return true;
4554 }
4555
4556 dst = tcg_temp_new_i32();
4557 src = gen_load_fpr_D(dc, a->rs);
4558 func(dst, tcg_env, src);
4559 gen_store_fpr_F(dc, a->rd, dst);
4560 return advance_pc(dc);
4561 }
4562
4563 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4564 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4565 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4566
4567 static bool do_dd(DisasContext *dc, arg_r_r *a,
4568 void (*func)(TCGv_i64, TCGv_i64))
4569 {
4570 TCGv_i64 dst, src;
4571
4572 if (gen_trap_ifnofpu(dc)) {
4573 return true;
4574 }
4575
4576 dst = gen_dest_fpr_D(dc, a->rd);
4577 src = gen_load_fpr_D(dc, a->rs);
4578 func(dst, src);
4579 gen_store_fpr_D(dc, a->rd, dst);
4580 return advance_pc(dc);
4581 }
4582
4583 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4584 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4585 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4586 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4587 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4588
4589 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4590 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4591 {
4592 TCGv_i64 dst, src;
4593
4594 if (gen_trap_ifnofpu(dc)) {
4595 return true;
4596 }
4597
4598 dst = gen_dest_fpr_D(dc, a->rd);
4599 src = gen_load_fpr_D(dc, a->rs);
4600 func(dst, tcg_env, src);
4601 gen_store_fpr_D(dc, a->rd, dst);
4602 return advance_pc(dc);
4603 }
4604
4605 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4606 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4607 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4608
4609 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4610 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4611 {
4612 TCGv_i64 dst;
4613 TCGv_i32 src;
4614
4615 if (gen_trap_ifnofpu(dc)) {
4616 return true;
4617 }
4618
4619 dst = gen_dest_fpr_D(dc, a->rd);
4620 src = gen_load_fpr_F(dc, a->rs);
4621 func(dst, tcg_env, src);
4622 gen_store_fpr_D(dc, a->rd, dst);
4623 return advance_pc(dc);
4624 }
4625
4626 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4627 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4628 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4629
4630 static bool do_qq(DisasContext *dc, arg_r_r *a,
4631 void (*func)(TCGv_i128, TCGv_i128))
4632 {
4633 TCGv_i128 t;
4634
4635 if (gen_trap_ifnofpu(dc)) {
4636 return true;
4637 }
4638 if (gen_trap_float128(dc)) {
4639 return true;
4640 }
4641
4642 gen_op_clear_ieee_excp_and_FTT();
4643 t = gen_load_fpr_Q(dc, a->rs);
4644 func(t, t);
4645 gen_store_fpr_Q(dc, a->rd, t);
4646 return advance_pc(dc);
4647 }
4648
4649 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4650 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4651 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4652
4653 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4654 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4655 {
4656 TCGv_i128 t;
4657
4658 if (gen_trap_ifnofpu(dc)) {
4659 return true;
4660 }
4661 if (gen_trap_float128(dc)) {
4662 return true;
4663 }
4664
4665 t = gen_load_fpr_Q(dc, a->rs);
4666 func(t, tcg_env, t);
4667 gen_store_fpr_Q(dc, a->rd, t);
4668 return advance_pc(dc);
4669 }
4670
4671 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4672
4673 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4674 void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4675 {
4676 TCGv_i128 src;
4677 TCGv_i32 dst;
4678
4679 if (gen_trap_ifnofpu(dc)) {
4680 return true;
4681 }
4682 if (gen_trap_float128(dc)) {
4683 return true;
4684 }
4685
4686 src = gen_load_fpr_Q(dc, a->rs);
4687 dst = tcg_temp_new_i32();
4688 func(dst, tcg_env, src);
4689 gen_store_fpr_F(dc, a->rd, dst);
4690 return advance_pc(dc);
4691 }
4692
4693 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4694 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4695
4696 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4697 void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4698 {
4699 TCGv_i128 src;
4700 TCGv_i64 dst;
4701
4702 if (gen_trap_ifnofpu(dc)) {
4703 return true;
4704 }
4705 if (gen_trap_float128(dc)) {
4706 return true;
4707 }
4708
4709 src = gen_load_fpr_Q(dc, a->rs);
4710 dst = gen_dest_fpr_D(dc, a->rd);
4711 func(dst, tcg_env, src);
4712 gen_store_fpr_D(dc, a->rd, dst);
4713 return advance_pc(dc);
4714 }
4715
4716 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4717 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4718
4719 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4720 void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4721 {
4722 TCGv_i32 src;
4723 TCGv_i128 dst;
4724
4725 if (gen_trap_ifnofpu(dc)) {
4726 return true;
4727 }
4728 if (gen_trap_float128(dc)) {
4729 return true;
4730 }
4731
4732 src = gen_load_fpr_F(dc, a->rs);
4733 dst = tcg_temp_new_i128();
4734 func(dst, tcg_env, src);
4735 gen_store_fpr_Q(dc, a->rd, dst);
4736 return advance_pc(dc);
4737 }
4738
4739 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4740 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4741
4742 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4743 void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4744 {
4745 TCGv_i64 src;
4746 TCGv_i128 dst;
4747
4748 if (gen_trap_ifnofpu(dc)) {
4749 return true;
4750 }
4751 if (gen_trap_float128(dc)) {
4752 return true;
4753 }
4754
4755 src = gen_load_fpr_D(dc, a->rs);
4756 dst = tcg_temp_new_i128();
4757 func(dst, tcg_env, src);
4758 gen_store_fpr_Q(dc, a->rd, dst);
4759 return advance_pc(dc);
4760 }
4761
4762 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4763 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4764
4765 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4766 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4767 {
4768 TCGv_i32 src1, src2;
4769
4770 if (gen_trap_ifnofpu(dc)) {
4771 return true;
4772 }
4773
4774 src1 = gen_load_fpr_F(dc, a->rs1);
4775 src2 = gen_load_fpr_F(dc, a->rs2);
4776 func(src1, src1, src2);
4777 gen_store_fpr_F(dc, a->rd, src1);
4778 return advance_pc(dc);
4779 }
4780
4781 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4782 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4783 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4784 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4785 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4786 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4787 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4788 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4789 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4790 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4791 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4792 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4793
4794 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4795 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4796 {
4797 TCGv_i32 src1, src2;
4798
4799 if (gen_trap_ifnofpu(dc)) {
4800 return true;
4801 }
4802
4803 src1 = gen_load_fpr_F(dc, a->rs1);
4804 src2 = gen_load_fpr_F(dc, a->rs2);
4805 func(src1, tcg_env, src1, src2);
4806 gen_store_fpr_F(dc, a->rd, src1);
4807 return advance_pc(dc);
4808 }
4809
4810 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4811 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4812 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4813 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4814
4815 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4816 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4817 {
4818 TCGv_i64 dst, src1, src2;
4819
4820 if (gen_trap_ifnofpu(dc)) {
4821 return true;
4822 }
4823
4824 dst = gen_dest_fpr_D(dc, a->rd);
4825 src1 = gen_load_fpr_D(dc, a->rs1);
4826 src2 = gen_load_fpr_D(dc, a->rs2);
4827 func(dst, src1, src2);
4828 gen_store_fpr_D(dc, a->rd, dst);
4829 return advance_pc(dc);
4830 }
4831
4832 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4833 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4834 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4835 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4836 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4837 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4838 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4839 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4840 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4841
4842 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4843 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4844 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4845 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4846 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4847 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4848 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4849 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4850 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4851 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4852 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4853 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4854
4855 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4856 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4857 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4858
4859 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4860 void (*func)(TCGv, TCGv_i64, TCGv_i64))
4861 {
4862 TCGv_i64 src1, src2;
4863 TCGv dst;
4864
4865 if (gen_trap_ifnofpu(dc)) {
4866 return true;
4867 }
4868
4869 dst = gen_dest_gpr(dc, a->rd);
4870 src1 = gen_load_fpr_D(dc, a->rs1);
4871 src2 = gen_load_fpr_D(dc, a->rs2);
4872 func(dst, src1, src2);
4873 gen_store_gpr(dc, a->rd, dst);
4874 return advance_pc(dc);
4875 }
4876
4877 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4878 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4879 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4880 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4881
4882 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4883 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4884 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4885 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4886
4887 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4888 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4889 {
4890 TCGv_i64 dst, src1, src2;
4891
4892 if (gen_trap_ifnofpu(dc)) {
4893 return true;
4894 }
4895
4896 dst = gen_dest_fpr_D(dc, a->rd);
4897 src1 = gen_load_fpr_D(dc, a->rs1);
4898 src2 = gen_load_fpr_D(dc, a->rs2);
4899 func(dst, tcg_env, src1, src2);
4900 gen_store_fpr_D(dc, a->rd, dst);
4901 return advance_pc(dc);
4902 }
4903
4904 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4905 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4906 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4907 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4908
4909 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4910 {
4911 TCGv_i64 dst;
4912 TCGv_i32 src1, src2;
4913
4914 if (gen_trap_ifnofpu(dc)) {
4915 return true;
4916 }
4917 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4918 return raise_unimpfpop(dc);
4919 }
4920
4921 dst = gen_dest_fpr_D(dc, a->rd);
4922 src1 = gen_load_fpr_F(dc, a->rs1);
4923 src2 = gen_load_fpr_F(dc, a->rs2);
4924 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4925 gen_store_fpr_D(dc, a->rd, dst);
4926 return advance_pc(dc);
4927 }
4928
4929 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4930 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4931 {
4932 TCGv_i64 dst, src0, src1, src2;
4933
4934 if (gen_trap_ifnofpu(dc)) {
4935 return true;
4936 }
4937
4938 dst = gen_dest_fpr_D(dc, a->rd);
4939 src0 = gen_load_fpr_D(dc, a->rd);
4940 src1 = gen_load_fpr_D(dc, a->rs1);
4941 src2 = gen_load_fpr_D(dc, a->rs2);
4942 func(dst, src0, src1, src2);
4943 gen_store_fpr_D(dc, a->rd, dst);
4944 return advance_pc(dc);
4945 }
4946
4947 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4948
4949 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4950 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4951 {
4952 TCGv_i128 src1, src2;
4953
4954 if (gen_trap_ifnofpu(dc)) {
4955 return true;
4956 }
4957 if (gen_trap_float128(dc)) {
4958 return true;
4959 }
4960
4961 src1 = gen_load_fpr_Q(dc, a->rs1);
4962 src2 = gen_load_fpr_Q(dc, a->rs2);
4963 func(src1, tcg_env, src1, src2);
4964 gen_store_fpr_Q(dc, a->rd, src1);
4965 return advance_pc(dc);
4966 }
4967
4968 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4969 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
4970 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
4971 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4972
4973 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
4974 {
4975 TCGv_i64 src1, src2;
4976 TCGv_i128 dst;
4977
4978 if (gen_trap_ifnofpu(dc)) {
4979 return true;
4980 }
4981 if (gen_trap_float128(dc)) {
4982 return true;
4983 }
4984
4985 src1 = gen_load_fpr_D(dc, a->rs1);
4986 src2 = gen_load_fpr_D(dc, a->rs2);
4987 dst = tcg_temp_new_i128();
4988 gen_helper_fdmulq(dst, tcg_env, src1, src2);
4989 gen_store_fpr_Q(dc, a->rd, dst);
4990 return advance_pc(dc);
4991 }
4992
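/*
 * FMOVR: conditional FP move based on comparing an integer register
 * against zero; gen_compare_reg returns false for the reserved
 * condition encodings, which makes the insn illegal.
 */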
4993 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
4994 void (*func)(DisasContext *, DisasCompare *, int, int))
4995 {
4996 DisasCompare cmp;
4997
4998 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4999 return false;
5000 }
5001 if (gen_trap_ifnofpu(dc)) {
5002 return true;
5003 }
5004 if (is_128 && gen_trap_float128(dc)) {
5005 return true;
5006 }
5007
5008 gen_op_clear_ieee_excp_and_FTT();
5009 func(dc, &cmp, a->rd, a->rs2);
5010 return advance_pc(dc);
5011 }
5012
5013 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5014 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5015 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5016
5017 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5018 void (*func)(DisasContext *, DisasCompare *, int, int))
5019 {
5020 DisasCompare cmp;
5021
5022 if (gen_trap_ifnofpu(dc)) {
5023 return true;
5024 }
5025 if (is_128 && gen_trap_float128(dc)) {
5026 return true;
5027 }
5028
5029 gen_op_clear_ieee_excp_and_FTT();
5030 gen_compare(&cmp, a->cc, a->cond, dc);
5031 func(dc, &cmp, a->rd, a->rs2);
5032 return advance_pc(dc);
5033 }
5034
5035 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5036 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5037 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5038
5039 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5040 void (*func)(DisasContext *, DisasCompare *, int, int))
5041 {
5042 DisasCompare cmp;
5043
5044 if (gen_trap_ifnofpu(dc)) {
5045 return true;
5046 }
5047 if (is_128 && gen_trap_float128(dc)) {
5048 return true;
5049 }
5050
5051 gen_op_clear_ieee_excp_and_FTT();
5052 gen_fcompare(&cmp, a->cc, a->cond);
5053 func(dc, &cmp, a->rd, a->rs2);
5054 return advance_pc(dc);
5055 }
5056
5057 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5058 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5059 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5060
5061 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5062 {
5063 TCGv_i32 src1, src2;
5064
5065 if (avail_32(dc) && a->cc != 0) {
5066 return false;
5067 }
5068 if (gen_trap_ifnofpu(dc)) {
5069 return true;
5070 }
5071
5072 src1 = gen_load_fpr_F(dc, a->rs1);
5073 src2 = gen_load_fpr_F(dc, a->rs2);
5074 if (e) {
5075 gen_op_fcmpes(a->cc, src1, src2);
5076 } else {
5077 gen_op_fcmps(a->cc, src1, src2);
5078 }
5079 return advance_pc(dc);
5080 }
5081
5082 TRANS(FCMPs, ALL, do_fcmps, a, false)
5083 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5084
5085 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5086 {
5087 TCGv_i64 src1, src2;
5088
5089 if (avail_32(dc) && a->cc != 0) {
5090 return false;
5091 }
5092 if (gen_trap_ifnofpu(dc)) {
5093 return true;
5094 }
5095
5096 src1 = gen_load_fpr_D(dc, a->rs1);
5097 src2 = gen_load_fpr_D(dc, a->rs2);
5098 if (e) {
5099 gen_op_fcmped(a->cc, src1, src2);
5100 } else {
5101 gen_op_fcmpd(a->cc, src1, src2);
5102 }
5103 return advance_pc(dc);
5104 }
5105
5106 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5107 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5108
5109 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5110 {
5111 TCGv_i128 src1, src2;
5112
5113 if (avail_32(dc) && a->cc != 0) {
5114 return false;
5115 }
5116 if (gen_trap_ifnofpu(dc)) {
5117 return true;
5118 }
5119 if (gen_trap_float128(dc)) {
5120 return true;
5121 }
5122
5123 src1 = gen_load_fpr_Q(dc, a->rs1);
5124 src2 = gen_load_fpr_Q(dc, a->rs2);
5125 if (e) {
5126 gen_op_fcmpeq(a->cc, src1, src2);
5127 } else {
5128 gen_op_fcmpq(a->cc, src1, src2);
5129 }
5130 return advance_pc(dc);
5131 }
5132
5133 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5134 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5135
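/*
 * Translator hooks: implementations of the TranslatorOps callbacks
 * invoked by the generic translator_loop() for each TB and insn.
 */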
5136 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5137 {
5138 DisasContext *dc = container_of(dcbase, DisasContext, base);
5139 CPUSPARCState *env = cpu_env(cs);
5140 int bound;
5141
5142 dc->pc = dc->base.pc_first;
5143 dc->npc = (target_ulong)dc->base.tb->cs_base;
5144 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5145 dc->def = &env->def;
5146 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5147 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5148 #ifndef CONFIG_USER_ONLY
5149 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5150 #endif
5151 #ifdef TARGET_SPARC64
5152 dc->fprs_dirty = 0;
5153 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5154 #ifndef CONFIG_USER_ONLY
5155 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5156 #endif
5157 #endif
5158     /*
5159      * If we reach a page boundary, stop generation so that the PC of
5160      * a TT_TFAULT exception is always within the faulting page.
5161      */
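    /*
     * Worked example (illustrative, assuming 8KiB pages): with pc_first
     * ending in 0x1ff8, -(pc_first | TARGET_PAGE_MASK) == 8 bytes remain
     * in the page, giving a bound of 2 instructions.
     */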
5162 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5163 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5164 }
5165
5166 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5167 {
5168 }
5169
5170 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5171 {
5172 DisasContext *dc = container_of(dcbase, DisasContext, base);
5173 target_ulong npc = dc->npc;
5174
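    /*
     * dc->npc doubles as a sentinel: instruction addresses are 4-byte
     * aligned, so nonzero low bits mark the special values JUMP_PC,
     * DYNAMIC_PC and DYNAMIC_PC_LOOKUP.
     */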
5175 if (npc & 3) {
5176 switch (npc) {
5177 case JUMP_PC:
5178 assert(dc->jump_pc[1] == dc->pc + 4);
5179 npc = dc->jump_pc[0] | JUMP_PC;
5180 break;
5181 case DYNAMIC_PC:
5182 case DYNAMIC_PC_LOOKUP:
5183 npc = DYNAMIC_PC;
5184 break;
5185 default:
5186 g_assert_not_reached();
5187 }
5188 }
5189 tcg_gen_insn_start(dc->pc, npc);
5190 }
5191
5192 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5193 {
5194 DisasContext *dc = container_of(dcbase, DisasContext, base);
5195 CPUSPARCState *env = cpu_env(cs);
5196 unsigned int insn;
5197
5198 insn = translator_ldl(env, &dc->base, dc->pc);
5199 dc->base.pc_next += 4;
5200
5201 if (!decode(dc, insn)) {
5202 gen_exception(dc, TT_ILL_INSN);
5203 }
5204
5205 if (dc->base.is_jmp == DISAS_NORETURN) {
5206 return;
5207 }
5208 if (dc->pc != dc->base.pc_next) {
5209 dc->base.is_jmp = DISAS_TOO_MANY;
5210 }
5211 }
5212
5213 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5214 {
5215 DisasContext *dc = container_of(dcbase, DisasContext, base);
5216 DisasDelayException *e, *e_next;
5217 bool may_lookup;
5218
5219 finishing_insn(dc);
5220
5221 switch (dc->base.is_jmp) {
5222 case DISAS_NEXT:
5223 case DISAS_TOO_MANY:
5224 if (((dc->pc | dc->npc) & 3) == 0) {
5225 /* static PC and NPC: we can use direct chaining */
5226 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5227 break;
5228 }
5229
5230 may_lookup = true;
5231 if (dc->pc & 3) {
5232 switch (dc->pc) {
5233 case DYNAMIC_PC_LOOKUP:
5234 break;
5235 case DYNAMIC_PC:
5236 may_lookup = false;
5237 break;
5238 default:
5239 g_assert_not_reached();
5240 }
5241 } else {
5242 tcg_gen_movi_tl(cpu_pc, dc->pc);
5243 }
5244
5245 if (dc->npc & 3) {
5246 switch (dc->npc) {
5247 case JUMP_PC:
5248 gen_generic_branch(dc);
5249 break;
5250 case DYNAMIC_PC:
5251 may_lookup = false;
5252 break;
5253 case DYNAMIC_PC_LOOKUP:
5254 break;
5255 default:
5256 g_assert_not_reached();
5257 }
5258 } else {
5259 tcg_gen_movi_tl(cpu_npc, dc->npc);
5260 }
5261 if (may_lookup) {
5262 tcg_gen_lookup_and_goto_ptr();
5263 } else {
5264 tcg_gen_exit_tb(NULL, 0);
5265 }
5266 break;
5267
5268 case DISAS_NORETURN:
5269 break;
5270
5271 case DISAS_EXIT:
5272 /* Exit TB */
5273 save_state(dc);
5274 tcg_gen_exit_tb(NULL, 0);
5275 break;
5276
5277 default:
5278 g_assert_not_reached();
5279 }
5280
5281 for (e = dc->delay_excp_list; e ; e = e_next) {
5282 gen_set_label(e->lab);
5283
5284 tcg_gen_movi_tl(cpu_pc, e->pc);
5285 if (e->npc % 4 == 0) {
5286 tcg_gen_movi_tl(cpu_npc, e->npc);
5287 }
5288 gen_helper_raise_exception(tcg_env, e->excp);
5289
5290 e_next = e->next;
5291 g_free(e);
5292 }
5293 }
5294
5295 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5296 CPUState *cpu, FILE *logfile)
5297 {
5298 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5299 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5300 }
5301
5302 static const TranslatorOps sparc_tr_ops = {
5303 .init_disas_context = sparc_tr_init_disas_context,
5304 .tb_start = sparc_tr_tb_start,
5305 .insn_start = sparc_tr_insn_start,
5306 .translate_insn = sparc_tr_translate_insn,
5307 .tb_stop = sparc_tr_tb_stop,
5308 .disas_log = sparc_tr_disas_log,
5309 };
5310
5311 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5312 vaddr pc, void *host_pc)
5313 {
5314 DisasContext dc = {};
5315
5316 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5317 }
5318
5319 void sparc_tcg_init(void)
5320 {
5321 static const char gregnames[32][4] = {
5322 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5323 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5324 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5325 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5326 };
5327 static const char fregnames[32][4] = {
5328 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5329 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5330 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5331 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5332 };
5333
5334 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5335 #ifdef TARGET_SPARC64
5336 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5337 { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5338 { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5339 #endif
5340 { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5341 { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5342 { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5343 { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5344 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5345 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5346 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5347 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5348 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5349 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5350 };
5351
5352 unsigned int i;
5353
5354 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5355 offsetof(CPUSPARCState, regwptr),
5356 "regwptr");
5357
5358 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5359 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5360 }
5361
5362 cpu_regs[0] = NULL;
5363 for (i = 1; i < 8; ++i) {
5364 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5365 offsetof(CPUSPARCState, gregs[i]),
5366 gregnames[i]);
5367 }
5368
5369 for (i = 8; i < 32; ++i) {
5370 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5371 (i - 8) * sizeof(target_ulong),
5372 gregnames[i]);
5373 }
5374
5375 for (i = 0; i < TARGET_DPREGS; i++) {
5376 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5377 offsetof(CPUSPARCState, fpr[i]),
5378 fregnames[i]);
5379 }
5380
5381 #ifdef TARGET_SPARC64
5382 cpu_fprs = tcg_global_mem_new_i32(tcg_env,
5383 offsetof(CPUSPARCState, fprs), "fprs");
5384 #endif
5385 }
5386
5387 void sparc_restore_state_to_opc(CPUState *cs,
5388 const TranslationBlock *tb,
5389 const uint64_t *data)
5390 {
5391 SPARCCPU *cpu = SPARC_CPU(cs);
5392 CPUSPARCState *env = &cpu->env;
5393 target_ulong pc = data[0];
5394 target_ulong npc = data[1];
5395
5396 env->pc = pc;
5397 if (npc == DYNAMIC_PC) {
5398 /* dynamic NPC: already stored */
5399 } else if (npc & JUMP_PC) {
5400 /* jump PC: use 'cond' and the jump targets of the translation */
5401 if (env->cond) {
5402 env->npc = npc & ~3;
5403 } else {
5404 env->npc = pc + 4;
5405 }
5406 } else {
5407 env->npc = npc;
5408 }
5409 }