1 | /* | |
2 | SPARC translation | |
3 | ||
4 | Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at> | |
5 | Copyright (C) 2003-2005 Fabrice Bellard | |
6 | ||
7 | This library is free software; you can redistribute it and/or | |
8 | modify it under the terms of the GNU Lesser General Public | |
9 | License as published by the Free Software Foundation; either | |
10 | version 2.1 of the License, or (at your option) any later version. | |
11 | ||
12 | This library is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | Lesser General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU Lesser General Public | |
18 | License along with this library; if not, see <http://www.gnu.org/licenses/>. | |
19 | */ | |
20 | ||
21 | #include "qemu/osdep.h" | |
22 | ||
23 | #include "cpu.h" | |
24 | #include "disas/disas.h" | |
25 | #include "exec/helper-proto.h" | |
26 | #include "exec/exec-all.h" | |
27 | #include "tcg/tcg-op.h" | |
28 | #include "tcg/tcg-op-gvec.h" | |
29 | #include "exec/helper-gen.h" | |
30 | #include "exec/translator.h" | |
31 | #include "exec/log.h" | |
32 | #include "asi.h" | |
33 | ||
34 | #define HELPER_H "helper.h" | |
35 | #include "exec/helper-info.c.inc" | |
36 | #undef HELPER_H | |
37 | ||
/*
 * Helpers that exist only for one of the two builds (sparc32 vs sparc64)
 * are stubbed out on the other so that any call site which should be
 * unreachable becomes a compile-time failure via qemu_build_not_reached().
 */
#ifdef TARGET_SPARC64
/* sparc32-only helpers: must never be referenced in a sparc64 build. */
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
/* sparc64-only helpers: must never be referenced in a sparc32 build. */
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
/*
 * These are used as function-pointer values, so the stub must still be
 * an expression; the statement expression yields NULL after tripping
 * the build assertion.
 */
# define gen_helper_fexpand                     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16                  ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16                  ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al                  ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au                  ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16                    ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16                 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16                 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge                     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                       ({ qemu_build_not_reached(); NULL; })
/* sparc64-only constants; dummy values keep sparc32 code compiling. */
# define FSR_LDXFSR_MASK                        0
# define FSR_LDXFSR_OLDMASK                     0
# define MAXTL_MASK                             0
#endif
80 | ||
/*
 * Special out-of-band values stored in dc->pc / dc->npc instead of a
 * real guest address.
 */
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
/* Latched operands/result for lazy condition-code evaluation. */
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
#else
/* Any sparc32 use of these sparc64 registers is a build error. */
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

/*
 * Offsets into CPUSPARCState; the 32/64-specific variants trip a build
 * assertion if used in the wrong configuration.
 */
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif
118 | ||
/*
 * One pending exception raised while translating a delay slot: records
 * the exception number, the label its code is emitted under, and the
 * pc/npc of the parent insn to restore.  Chained from
 * DisasContext.delay_excp_list.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;   /* next pending entry, or NULL */
    TCGLabel *lab;                      /* label of the out-of-line code */
    TCGv_i32 excp;                      /* exception number to raise */
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
127 | ||
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;        /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;       /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;            /* MMU index for memory accesses */
    bool fpu_enabled;       /* FPU access permitted at translation time */
    bool address_mask_32bit; /* 32-bit address masking in effect (see AM_CHECK) */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;  /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    /* Exceptions deferred from delay slots; emitted at end of TB. */
    DisasDelayException *delay_excp_list;
} DisasContext;
151 | ||
/*
 * A comparison to be applied as "cond(c1, c2)".  is_bool marks c1 as
 * already holding a 0/1 value — presumably so callers can skip the
 * setcond; confirm against users of this struct.
 */
typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;
157 | ||
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the two extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

/*
 * Map an instruction-encoded FP register number to a flat register
 * index.  On sparc64, encoding bit 0 selects the upper bank (moves to
 * bit 5); on sparc32 the low bit(s) are simply masked off.
 */
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

/* Bit 13 of the instruction word selects the immediate operand form. */
#define IS_IMM (insn & (1 << 13))
181 | ||
/*
 * Mark the half of the FP register file containing register RD as dirty
 * in FPRS (bit 0 for regs < 32, bit 1 for the rest — FPRS.DL/DU per
 * SPARC V9; confirm against cpu.h).  No-op on sparc32.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
194 | ||
195 | /* floating point registers moves */ | |
196 | static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) | |
197 | { | |
198 | TCGv_i32 ret = tcg_temp_new_i32(); | |
199 | if (src & 1) { | |
200 | tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]); | |
201 | } else { | |
202 | tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]); | |
203 | } | |
204 | return ret; | |
205 | } | |
206 | ||
/*
 * Write 32-bit value V into single-precision FP register DST, merging
 * it into the proper half of the backing i64 (even regs in the high 32
 * bits, odd regs in the low 32), and mark FPRS dirty.
 */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

/* Allocate a scratch temp to receive a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

/*
 * Read double-precision FP register SRC.  Returns the backing global
 * directly, so the caller must not clobber it.
 */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

/* Write V into double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/*
 * Destination for a double-precision result: the backing global itself,
 * written in place by the caller.
 */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
239 | ||
/*
 * Quad-precision values are passed to helpers through the env scratch
 * slots qt0/qt1 (a CPU_QuadU: two i64 halves).  These copy a quad FP
 * register pair to/from those slots.
 */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* Copy the helper result in qt0 back into quad FP register DST. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
263 | ||
#ifdef TARGET_SPARC64
/* Copy quad FP register RS to RD (two i64 moves) and mark FPRS dirty. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
275 | ||
/* moves */
/*
 * Translation-time privilege predicates.  User-only builds have no
 * privileged state, so both collapse to 0.
 */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/*
 * AM_CHECK: does 32-bit address masking apply?
 * sparc32 has no masking; a 32-bit ABI on sparc64 always masks;
 * sparc64 user-mode never masks; otherwise follow PSTATE.AM as latched
 * in dc->address_mask_32bit.
 */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc) false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc) true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc) false
#else
# define AM_CHECK(dc) ((dc)->address_mask_32bit)
#endif
299 | ||
/* Truncate ADDR to 32 bits in place when address masking applies. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
306 | ||
307 | static target_ulong address_mask_i(DisasContext *dc, target_ulong addr) | |
308 | { | |
309 | return AM_CHECK(dc) ? (uint32_t)addr : addr; | |
310 | } | |
311 | ||
312 | static TCGv gen_load_gpr(DisasContext *dc, int reg) | |
313 | { | |
314 | if (reg > 0) { | |
315 | assert(reg < 32); | |
316 | return cpu_regs[reg]; | |
317 | } else { | |
318 | TCGv t = tcg_temp_new(); | |
319 | tcg_gen_movi_tl(t, 0); | |
320 | return t; | |
321 | } | |
322 | } | |
323 | ||
324 | static void gen_store_gpr(DisasContext *dc, int reg, TCGv v) | |
325 | { | |
326 | if (reg > 0) { | |
327 | assert(reg < 32); | |
328 | tcg_gen_mov_tl(cpu_regs[reg], v); | |
329 | } | |
330 | } | |
331 | ||
332 | static TCGv gen_dest_gpr(DisasContext *dc, int reg) | |
333 | { | |
334 | if (reg > 0) { | |
335 | assert(reg < 32); | |
336 | return cpu_regs[reg]; | |
337 | } else { | |
338 | return tcg_temp_new(); | |
339 | } | |
340 | } | |
341 | ||
342 | static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc) | |
343 | { | |
344 | return translator_use_goto_tb(&s->base, pc) && | |
345 | translator_use_goto_tb(&s->base, npc); | |
346 | } | |
347 | ||
/*
 * End the TB, transferring control to (PC, NPC): chain directly when
 * permitted, otherwise fall back to a TB lookup by address.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
364 | ||
// XXX suboptimal
/*
 * Extract one PSR/CCR flag bit from the 32-bit image SRC into REG as a
 * 0/1 value of target-long width.
 */
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
389 | ||
/*
 * DST = SRC1 + SRC2, latching the operands and result in
 * cpu_cc_src/cpu_cc_src2/cpu_cc_dst for lazy condition-code
 * evaluation (the caller is responsible for setting cc_op).
 */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
397 | ||
/*
 * Recover the 32-bit carry-out of the previously latched ADD as a 0/1
 * i32 value.  On 64-bit targets the latched values are first truncated
 * to 32 bits, since only the icc carry is wanted.
 */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

/*
 * Recover the 32-bit borrow-out of the previously latched SUB as a 0/1
 * i32 value, by re-comparing the latched operands.
 */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
439 | ||
/*
 * DST = SRC1 + SRC2 + CARRY_32, where CARRY_32 is a 0/1 i32 carry bit.
 * If UPDATE_CC, also latch the operands for lazy cc evaluation; DST
 * must then be cpu_cc_dst.
 */
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    /* Widen the 32-bit carry before adding it in. */
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    /* TCGv is TCGv_i32 here, so the carry can be added directly. */
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

/* ADDC/ADDCcc when the live cc state came from an ADD. */
static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

/* ADDC/ADDCcc when the live cc state came from a SUB (borrow as carry). */
static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

/* ADDC/ADDCcc with unknown cc state: ask the helper for the icc carry. */
static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}
522 | ||
/*
 * DST = SRC1 - SRC2, latching the operands and result for lazy cc
 * evaluation (caller sets cc_op).
 */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/*
 * DST = SRC1 - SRC2 - CARRY_32, where CARRY_32 is a 0/1 i32 borrow bit.
 * If UPDATE_CC, also latch the operands; DST must then be cpu_cc_dst.
 */
static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

/* SUBC/SUBCcc when the live cc state came from an ADD. */
static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

/* SUBC/SUBCcc when the live cc state came from a SUB. */
static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

/* SUBC/SUBCcc with unknown cc state: ask the helper for the icc carry. */
static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}
614 | ||
/*
 * MULScc: one step of the sparc32 multiply-step algorithm.  Consumes
 * the low bit of Y to gate the addend, shifts Y and the first operand
 * right by one (feeding in N^V from the current PSR image), and latches
 * operands/result for lazy cc evaluation.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
653 | ||
/*
 * 32x32 -> 64-bit multiply for UMUL/SMUL.  The high 32 bits of the
 * product go to Y; DST receives the full 64-bit product on 64-bit
 * targets, or its low 32 bits on 32-bit targets.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Truncate the 64-bit operands to 32 bits with the requested
       extension before the full multiply. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
690 | ||
/*
 * Thin wrappers delegating to C helpers, matching the common
 * (dst, src1, src2) signature used by the decoder tables.  The helpers
 * presumably raise the architectural exceptions (e.g. divide-by-zero)
 * — see the helper implementations, not in this file.
 */
static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC counts bits of src2 only; src1 is unused (rs1 must be 0 —
   presumably enforced at decode; confirm). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
735 | ||
#ifndef TARGET_SPARC64
/* VIS array8 does not exist on sparc32; this stub must be unreachable. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* array16/array32 are array8 with the result scaled by the element size. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
754 | ||
/* VIS fpack32: delegates to the helper, which consumes GSR (sparc64 only). */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/*
 * VIS faligndata: concatenate S1:S2 and extract 8 bytes starting at the
 * byte offset in GSR.align (low 3 bits of GSR), done inline with shifts.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8 (bit shift for the high part). */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

/* VIS bshuffle: delegates to the helper, which consumes GSR (sparc64 only). */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
799 | ||
/*
 * Integer condition evaluation: each helper computes one branch
 * condition from the 32-bit PSR/CCR image in SRC, leaving 0 or 1 in
 * DST.  The comment above each gives the condition in terms of the
 * N/Z/V/C flags; the negated forms invert the base form with XOR 1.
 */
// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
913 | ||
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
/*
 * Extract one bit of the selected FCC field (fcc_offset selects which
 * of the fcc sets) from the FSR image SRC into REG as 0/1.
 */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
933 | ||
934 | // !0: FCC0 | FCC1 | |
935 | static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset) | |
936 | { | |
937 | TCGv t0 = tcg_temp_new(); | |
938 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
939 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
940 | tcg_gen_or_tl(dst, dst, t0); | |
941 | } | |
942 | ||
943 | // 1 or 2: FCC0 ^ FCC1 | |
944 | static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset) | |
945 | { | |
946 | TCGv t0 = tcg_temp_new(); | |
947 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
948 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
949 | tcg_gen_xor_tl(dst, dst, t0); | |
950 | } | |
951 | ||
952 | // 1 or 3: FCC0 | |
// 1 or 3: FCC0 — less or unordered
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
957 | ||
958 | // 1: FCC0 & !FCC1 | |
// 1: FCC0 & !FCC1 — strictly less
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);    /* dst = FCC0 & ~FCC1 */
}
966 | ||
967 | // 2 or 3: FCC1 | |
// 2 or 3: FCC1 — greater or unordered
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
972 | ||
973 | // 2: !FCC0 & FCC1 | |
// 2: !FCC0 & FCC1 — strictly greater
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);    /* dst = FCC1 & ~FCC0 */
}
981 | ||
982 | // 3: FCC0 & FCC1 | |
983 | static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset) | |
984 | { | |
985 | TCGv t0 = tcg_temp_new(); | |
986 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
987 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
988 | tcg_gen_and_tl(dst, dst, t0); | |
989 | } | |
990 | ||
991 | // 0: !(FCC0 | FCC1) | |
992 | static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset) | |
993 | { | |
994 | TCGv t0 = tcg_temp_new(); | |
995 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
996 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
997 | tcg_gen_or_tl(dst, dst, t0); | |
998 | tcg_gen_xori_tl(dst, dst, 0x1); | |
999 | } | |
1000 | ||
1001 | // 0 or 3: !(FCC0 ^ FCC1) | |
1002 | static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset) | |
1003 | { | |
1004 | TCGv t0 = tcg_temp_new(); | |
1005 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
1006 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
1007 | tcg_gen_xor_tl(dst, dst, t0); | |
1008 | tcg_gen_xori_tl(dst, dst, 0x1); | |
1009 | } | |
1010 | ||
1011 | // 0 or 2: !FCC0 | |
// 0 or 2: !FCC0 — greater or equal
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);   /* invert the 0/1 boolean */
}
1017 | ||
1018 | // !1: !(FCC0 & !FCC1) | |
// !1: !(FCC0 & !FCC1) — unordered, greater, or equal
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);    /* dst = FCC0 & ~FCC1 */
    tcg_gen_xori_tl(dst, dst, 0x1);   /* then invert */
}
1027 | ||
1028 | // 0 or 1: !FCC1 | |
// 0 or 1: !FCC1 — less or equal
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);   /* invert the 0/1 boolean */
}
1034 | ||
1035 | // !2: !(!FCC0 & FCC1) | |
// !2: !(!FCC0 & FCC1) — unordered, less, or equal
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);    /* dst = FCC1 & ~FCC0 */
    tcg_gen_xori_tl(dst, dst, 0x1);   /* then invert */
}
1044 | ||
1045 | // !3: !(FCC0 & FCC1) | |
1046 | static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset) | |
1047 | { | |
1048 | TCGv t0 = tcg_temp_new(); | |
1049 | gen_mov_reg_FCC0(dst, src, fcc_offset); | |
1050 | gen_mov_reg_FCC1(t0, src, fcc_offset); | |
1051 | tcg_gen_and_tl(dst, dst, t0); | |
1052 | tcg_gen_xori_tl(dst, dst, 0x1); | |
1053 | } | |
1054 | ||
/* End the TB with a two-way conditional exit: continue at pc1 when
   r_cond is non-zero, otherwise at pc2.  Both exits use goto_tb chaining
   with npc = pc + 4. */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    /* Condition false: skip to the pc2 exit. */
    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
1067 | ||
/* Materialize a pending conditional branch target: set cpu_npc to
   jump_pc[0] when cpu_cond is non-zero, else to jump_pc[1]. */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
1076 | ||
1077 | /* call this function before using the condition register as it may | |
1078 | have been set for a jump */ | |
/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        /* Resolve the pending branch into cpu_npc so cpu_cond is free. */
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1086 | ||
/* Ensure cpu_npc holds the correct next-pc value.  Instruction addresses
   are 4-aligned, so a set low bit in dc->npc marks one of the symbolic
   sentinels (JUMP_PC / DYNAMIC_PC / DYNAMIC_PC_LOOKUP) rather than a
   literal address. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Pending branch: compute cpu_npc from cpu_cond now. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc is already up to date. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Known static address: store it directly. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1105 | ||
/* Force the lazily-evaluated condition codes into the architectural
   PSR flags (cc_op becomes CC_OP_FLAGS). */
static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}
1113 | ||
/* Write the translation-time pc/npc back to the CPU state, e.g. before
   raising an exception or calling a helper that may need them. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1119 | ||
/* Raise trap 'which' at the current pc/npc and terminate the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1126 | ||
1127 | static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp) | |
1128 | { | |
1129 | DisasDelayException *e = g_new0(DisasDelayException, 1); | |
1130 | ||
1131 | e->next = dc->delay_excp_list; | |
1132 | dc->delay_excp_list = e; | |
1133 | ||
1134 | e->lab = gen_new_label(); | |
1135 | e->excp = excp; | |
1136 | e->pc = dc->pc; | |
1137 | /* Caller must have used flush_cond before branch. */ | |
1138 | assert(e->npc != JUMP_PC); | |
1139 | e->npc = dc->npc; | |
1140 | ||
1141 | return e->lab; | |
1142 | } | |
1143 | ||
/* Convenience wrapper: queue a delayed exception for a constant trap number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1148 | ||
/* Emit an alignment check: branch to a delayed TT_UNALIGNED raise when
   any of the 'mask' bits of addr are set. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* delay_exception requires a resolved npc. */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
1160 | ||
/* Advance pc to npc (as for a delay-slot step), both at translation
   time and, when npc is symbolic, in the generated code. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        /* Symbolic npc sentinel (see save_npc). */
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch first, then copy it to pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Static address: no code needed, just track it. */
        dc->pc = dc->npc;
    }
}
1182 | ||
/* Sequential advance: pc = npc, npc += 4. */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1188 | ||
/* Fill *cmp with a TCG comparison equivalent to the 4-bit SPARC integer
   condition 'cond', evaluated against icc (xcc=false) or xcc (xcc=true).
   Where possible the comparison is built directly from the lazy cc
   inputs (CC_OP_SUB / CC_OP_LOGIC) without materializing the flags;
   otherwise the flags are computed and a boolean result is produced. */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* cond -> TCG condition on (cc_src, cc_src2) after a subcc.
       -1 entries (neg/pos/overflow) cannot be expressed as a direct
       compare of the operands and fall back to computed flags. */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* cond -> TCG condition on (cc_dst, 0) after a logic op, where
       C = V = 0 simplifies several conditions. */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc looks only at the low 32 bits of the result. */
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* Sign of the subtraction result: compare cc_dst against 0. */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended. */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Fall back: materialize the flags, then evaluate below. */
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result. */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1356 | ||
/* Fill *cmp with a boolean TCG comparison for the 4-bit SPARC FP
   condition 'cond' evaluated against fcc field 'cc' (0..3) of the FSR. */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Bit offset of the selected fcc field relative to fcc0
       (FSR_FCC0_SHIFT is added by the gen_mov_reg_FCC* helpers;
       fcc1..fcc3 live at FSR bits 32/34/36). */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1435 | ||
1436 | // Inverted logic | |
// Inverted logic: maps the 3-bit register-condition field (BPr/MOVr)
// to the TCG condition for the *opposite* test; gen_compare_reg inverts it.
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1447 | ||
/* Fill *cmp for a register-vs-zero condition (cond is the 3-bit rcond
   field); r_src is the register to test. */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    /* The table stores the inverse condition; undo that here. */
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
1455 | ||
/* Clear the FSR trap-type and current-exception fields before an FP op. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1460 | ||
/* FMOVs: single-precision move; clears FTT/CEXC like other FP ops. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1466 | ||
/* FNEGs: single-precision negate. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}
1472 | ||
/* FABSs: single-precision absolute value. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}
1478 | ||
/* FMOVd: double-precision move. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1484 | ||
/* FNEGd: double-precision negate. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}
1490 | ||
/* FABSd: double-precision absolute value. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1496 | ||
1497 | #ifdef TARGET_SPARC64 | |
/* FCMPs: single-precision compare into fcc field 'fccno' (v9 has four). */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1515 | ||
/* FCMPd: double-precision compare into fcc field 'fccno'. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1533 | ||
/* FCMPq: quad-precision compare (operands in QT0/QT1) into fcc 'fccno'. */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1551 | ||
/* FCMPEs: single-precision compare (signaling on unordered) into fcc 'fccno'. */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1569 | ||
/* FCMPEd: double-precision compare (signaling on unordered) into fcc 'fccno'. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1587 | ||
/* FCMPEq: quad-precision compare (signaling on unordered) into fcc 'fccno'. */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1605 | ||
1606 | #else | |
1607 | ||
/* Pre-v9: only one fcc field exists, so fccno is ignored. */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1612 | ||
/* Pre-v9 FCMPd; fccno is ignored (single fcc field). */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1617 | ||
/* Pre-v9 FCMPq (operands in QT0/QT1); fccno is ignored. */
static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}
1622 | ||
/* Pre-v9 FCMPEs; fccno is ignored. */
static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1627 | ||
/* Pre-v9 FCMPEd; fccno is ignored. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1632 | ||
/* Pre-v9 FCMPEq (operands in QT0/QT1); fccno is ignored. */
static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
1637 | #endif | |
1638 | ||
/* Raise an FP trap: set the FSR trap-type field to fsr_flags and
   generate TT_FP_EXCP (terminates the TB). */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1645 | ||
/* If the FPU is disabled, raise TT_NFPU_INSN and return 1 (caller must
   then abandon the insn).  User-mode emulation always has FPU enabled. */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1656 | ||
1657 | static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, | |
1658 | void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) | |
1659 | { | |
1660 | TCGv_i32 dst, src1, src2; | |
1661 | ||
1662 | src1 = gen_load_fpr_F(dc, rs1); | |
1663 | src2 = gen_load_fpr_F(dc, rs2); | |
1664 | dst = gen_dest_fpr_F(dc); | |
1665 | ||
1666 | gen(dst, tcg_env, src1, src2); | |
1667 | gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); | |
1668 | ||
1669 | gen_store_fpr_F(dc, rd, dst); | |
1670 | } | |
1671 | ||
/* Emit a one-operand double-precision FP op: rd = gen(rs), then check
   for pending IEEE exceptions. */
static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1685 | ||
1686 | static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, | |
1687 | void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) | |
1688 | { | |
1689 | TCGv_i64 dst, src1, src2; | |
1690 | ||
1691 | src1 = gen_load_fpr_D(dc, rs1); | |
1692 | src2 = gen_load_fpr_D(dc, rs2); | |
1693 | dst = gen_dest_fpr_D(dc, rd); | |
1694 | ||
1695 | gen(dst, tcg_env, src1, src2); | |
1696 | gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); | |
1697 | ||
1698 | gen_store_fpr_D(dc, rd, dst); | |
1699 | } | |
1700 | ||
/* Emit a one-operand quad-precision FP op via the QT staging registers:
   QT1 = rs, gen() computes QT0, result stored to rd; checks IEEE excps. */
static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1712 | ||
1713 | #ifdef TARGET_SPARC64 | |
/* Like gen_fop_QQ but for ops that raise no IEEE exceptions
   (no check_ieee_exceptions call). */
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1724 | #endif | |
1725 | ||
/* Emit a two-operand quad-precision FP op: QT0 = rs1, QT1 = rs2,
   gen() computes QT0, result stored to rd; checks IEEE exceptions. */
static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1738 | ||
/* Emit an FP op with two single-precision sources and a double-precision
   destination (e.g. fsmuld); checks IEEE exceptions. */
static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1754 | ||
/* Emit an FP op with two double-precision sources and a quad-precision
   destination in QT0 (e.g. fdmulq); checks IEEE exceptions. */
static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1769 | ||
1770 | #ifdef TARGET_SPARC64 | |
/* Emit an FP op with a single-precision source and a double-precision
   destination; checks IEEE exceptions. */
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1785 | #endif | |
1786 | ||
/* Like gen_fop_DF but for ops that raise no IEEE exceptions. */
static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
1800 | ||
/* Emit an FP op with a double-precision source and a single-precision
   destination; checks IEEE exceptions. */
static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1815 | ||
/* Emit an FP op with a quad-precision source (in QT1) and a
   single-precision destination; checks IEEE exceptions. */
static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}
1829 | ||
/* Emit an FP op with a quad-precision source (in QT1) and a
   double-precision destination; checks IEEE exceptions. */
static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
1843 | ||
/* Emit an FP op with a single-precision source and a quad-precision
   destination in QT0; no IEEE exception check. */
static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1856 | ||
/* Emit an FP op with a double-precision source and a quad-precision
   destination in QT0; no IEEE exception check. */
static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1869 | ||
1870 | /* asi moves */ | |
/* asi moves */
/* Code-generation strategy for an ASI-qualified memory access. */
typedef enum {
    GET_ASI_HELPER,                   /* no special case: use generic helpers */
    GET_ASI_EXCP,                     /* an exception was already generated */
    GET_ASI_DIRECT,                   /* plain access with a known mem_idx */
    GET_ASI_DTWINX,                   /* twin-doubleword access (name-derived; see users) */
    GET_ASI_BLOCK,                    /* block transfer (name-derived; see users) */
    GET_ASI_SHORT,                    /* short FP access (name-derived; see users) */
    GET_ASI_BCOPY,                    /* ASI_M_BCOPY: block copy via sta */
    GET_ASI_BFILL,                    /* ASI_M_BFILL: block fill via stda */
} ASIType;
1881 | ||
/* Resolved description of an ASI access, produced by resolve_asi(). */
typedef struct {
    ASIType type;                     /* chosen code-gen strategy */
    int asi;                          /* the (possibly defaulted) ASI number */
    int mem_idx;                      /* MMU index to use for the access */
    MemOp memop;                      /* size/sign/endianness of the access */
} DisasASI;
1888 | ||
1889 | /* | |
1890 | * Build DisasASI. | |
1891 | * For asi == -1, treat as non-asi. | |
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1893 | */ | |
1894 | static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop) | |
1895 | { | |
1896 | ASIType type = GET_ASI_HELPER; | |
1897 | int mem_idx = dc->mem_idx; | |
1898 | ||
1899 | if (asi == -1) { | |
1900 | /* Artificial "non-asi" case. */ | |
1901 | type = GET_ASI_DIRECT; | |
1902 | goto done; | |
1903 | } | |
1904 | ||
1905 | #ifndef TARGET_SPARC64 | |
1906 | /* Before v9, all asis are immediate and privileged. */ | |
1907 | if (asi < 0) { | |
1908 | gen_exception(dc, TT_ILL_INSN); | |
1909 | type = GET_ASI_EXCP; | |
1910 | } else if (supervisor(dc) | |
1911 | /* Note that LEON accepts ASI_USERDATA in user mode, for | |
1912 | use with CASA. Also note that previous versions of | |
1913 | QEMU allowed (and old versions of gcc emitted) ASI_P | |
1914 | for LEON, which is incorrect. */ | |
1915 | || (asi == ASI_USERDATA | |
1916 | && (dc->def->features & CPU_FEATURE_CASA))) { | |
1917 | switch (asi) { | |
1918 | case ASI_USERDATA: /* User data access */ | |
1919 | mem_idx = MMU_USER_IDX; | |
1920 | type = GET_ASI_DIRECT; | |
1921 | break; | |
1922 | case ASI_KERNELDATA: /* Supervisor data access */ | |
1923 | mem_idx = MMU_KERNEL_IDX; | |
1924 | type = GET_ASI_DIRECT; | |
1925 | break; | |
1926 | case ASI_M_BYPASS: /* MMU passthrough */ | |
1927 | case ASI_LEON_BYPASS: /* LEON MMU passthrough */ | |
1928 | mem_idx = MMU_PHYS_IDX; | |
1929 | type = GET_ASI_DIRECT; | |
1930 | break; | |
1931 | case ASI_M_BCOPY: /* Block copy, sta access */ | |
1932 | mem_idx = MMU_KERNEL_IDX; | |
1933 | type = GET_ASI_BCOPY; | |
1934 | break; | |
1935 | case ASI_M_BFILL: /* Block fill, stda access */ | |
1936 | mem_idx = MMU_KERNEL_IDX; | |
1937 | type = GET_ASI_BFILL; | |
1938 | break; | |
1939 | } | |
1940 | ||
1941 | /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the | |
1942 | * permissions check in get_physical_address(..). | |
1943 | */ | |
1944 | mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx; | |
1945 | } else { | |
1946 | gen_exception(dc, TT_PRIV_INSN); | |
1947 | type = GET_ASI_EXCP; | |
1948 | } | |
1949 | #else | |
1950 | if (asi < 0) { | |
1951 | asi = dc->asi; | |
1952 | } | |
1953 | /* With v9, all asis below 0x80 are privileged. */ | |
1954 | /* ??? We ought to check cpu_has_hypervisor, but we didn't copy | |
1955 | down that bit into DisasContext. For the moment that's ok, | |
1956 | since the direct implementations below doesn't have any ASIs | |
1957 | in the restricted [0x30, 0x7f] range, and the check will be | |
1958 | done properly in the helper. */ | |
1959 | if (!supervisor(dc) && asi < 0x80) { | |
1960 | gen_exception(dc, TT_PRIV_ACT); | |
1961 | type = GET_ASI_EXCP; | |
1962 | } else { | |
1963 | switch (asi) { | |
1964 | case ASI_REAL: /* Bypass */ | |
1965 | case ASI_REAL_IO: /* Bypass, non-cacheable */ | |
1966 | case ASI_REAL_L: /* Bypass LE */ | |
1967 | case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ | |
1968 | case ASI_TWINX_REAL: /* Real address, twinx */ | |
1969 | case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ | |
1970 | case ASI_QUAD_LDD_PHYS: | |
1971 | case ASI_QUAD_LDD_PHYS_L: | |
1972 | mem_idx = MMU_PHYS_IDX; | |
1973 | break; | |
1974 | case ASI_N: /* Nucleus */ | |
1975 | case ASI_NL: /* Nucleus LE */ | |
1976 | case ASI_TWINX_N: | |
1977 | case ASI_TWINX_NL: | |
1978 | case ASI_NUCLEUS_QUAD_LDD: | |
1979 | case ASI_NUCLEUS_QUAD_LDD_L: | |
1980 | if (hypervisor(dc)) { | |
1981 | mem_idx = MMU_PHYS_IDX; | |
1982 | } else { | |
1983 | mem_idx = MMU_NUCLEUS_IDX; | |
1984 | } | |
1985 | break; | |
1986 | case ASI_AIUP: /* As if user primary */ | |
1987 | case ASI_AIUPL: /* As if user primary LE */ | |
1988 | case ASI_TWINX_AIUP: | |
1989 | case ASI_TWINX_AIUP_L: | |
1990 | case ASI_BLK_AIUP_4V: | |
1991 | case ASI_BLK_AIUP_L_4V: | |
1992 | case ASI_BLK_AIUP: | |
1993 | case ASI_BLK_AIUPL: | |
1994 | mem_idx = MMU_USER_IDX; | |
1995 | break; | |
1996 | case ASI_AIUS: /* As if user secondary */ | |
1997 | case ASI_AIUSL: /* As if user secondary LE */ | |
1998 | case ASI_TWINX_AIUS: | |
1999 | case ASI_TWINX_AIUS_L: | |
2000 | case ASI_BLK_AIUS_4V: | |
2001 | case ASI_BLK_AIUS_L_4V: | |
2002 | case ASI_BLK_AIUS: | |
2003 | case ASI_BLK_AIUSL: | |
2004 | mem_idx = MMU_USER_SECONDARY_IDX; | |
2005 | break; | |
2006 | case ASI_S: /* Secondary */ | |
2007 | case ASI_SL: /* Secondary LE */ | |
2008 | case ASI_TWINX_S: | |
2009 | case ASI_TWINX_SL: | |
2010 | case ASI_BLK_COMMIT_S: | |
2011 | case ASI_BLK_S: | |
2012 | case ASI_BLK_SL: | |
2013 | case ASI_FL8_S: | |
2014 | case ASI_FL8_SL: | |
2015 | case ASI_FL16_S: | |
2016 | case ASI_FL16_SL: | |
2017 | if (mem_idx == MMU_USER_IDX) { | |
2018 | mem_idx = MMU_USER_SECONDARY_IDX; | |
2019 | } else if (mem_idx == MMU_KERNEL_IDX) { | |
2020 | mem_idx = MMU_KERNEL_SECONDARY_IDX; | |
2021 | } | |
2022 | break; | |
2023 | case ASI_P: /* Primary */ | |
2024 | case ASI_PL: /* Primary LE */ | |
2025 | case ASI_TWINX_P: | |
2026 | case ASI_TWINX_PL: | |
2027 | case ASI_BLK_COMMIT_P: | |
2028 | case ASI_BLK_P: | |
2029 | case ASI_BLK_PL: | |
2030 | case ASI_FL8_P: | |
2031 | case ASI_FL8_PL: | |
2032 | case ASI_FL16_P: | |
2033 | case ASI_FL16_PL: | |
2034 | break; | |
2035 | } | |
2036 | switch (asi) { | |
2037 | case ASI_REAL: | |
2038 | case ASI_REAL_IO: | |
2039 | case ASI_REAL_L: | |
2040 | case ASI_REAL_IO_L: | |
2041 | case ASI_N: | |
2042 | case ASI_NL: | |
2043 | case ASI_AIUP: | |
2044 | case ASI_AIUPL: | |
2045 | case ASI_AIUS: | |
2046 | case ASI_AIUSL: | |
2047 | case ASI_S: | |
2048 | case ASI_SL: | |
2049 | case ASI_P: | |
2050 | case ASI_PL: | |
2051 | type = GET_ASI_DIRECT; | |
2052 | break; | |
2053 | case ASI_TWINX_REAL: | |
2054 | case ASI_TWINX_REAL_L: | |
2055 | case ASI_TWINX_N: | |
2056 | case ASI_TWINX_NL: | |
2057 | case ASI_TWINX_AIUP: | |
2058 | case ASI_TWINX_AIUP_L: | |
2059 | case ASI_TWINX_AIUS: | |
2060 | case ASI_TWINX_AIUS_L: | |
2061 | case ASI_TWINX_P: | |
2062 | case ASI_TWINX_PL: | |
2063 | case ASI_TWINX_S: | |
2064 | case ASI_TWINX_SL: | |
2065 | case ASI_QUAD_LDD_PHYS: | |
2066 | case ASI_QUAD_LDD_PHYS_L: | |
2067 | case ASI_NUCLEUS_QUAD_LDD: | |
2068 | case ASI_NUCLEUS_QUAD_LDD_L: | |
2069 | type = GET_ASI_DTWINX; | |
2070 | break; | |
2071 | case ASI_BLK_COMMIT_P: | |
2072 | case ASI_BLK_COMMIT_S: | |
2073 | case ASI_BLK_AIUP_4V: | |
2074 | case ASI_BLK_AIUP_L_4V: | |
2075 | case ASI_BLK_AIUP: | |
2076 | case ASI_BLK_AIUPL: | |
2077 | case ASI_BLK_AIUS_4V: | |
2078 | case ASI_BLK_AIUS_L_4V: | |
2079 | case ASI_BLK_AIUS: | |
2080 | case ASI_BLK_AIUSL: | |
2081 | case ASI_BLK_S: | |
2082 | case ASI_BLK_SL: | |
2083 | case ASI_BLK_P: | |
2084 | case ASI_BLK_PL: | |
2085 | type = GET_ASI_BLOCK; | |
2086 | break; | |
2087 | case ASI_FL8_S: | |
2088 | case ASI_FL8_SL: | |
2089 | case ASI_FL8_P: | |
2090 | case ASI_FL8_PL: | |
2091 | memop = MO_UB; | |
2092 | type = GET_ASI_SHORT; | |
2093 | break; | |
2094 | case ASI_FL16_S: | |
2095 | case ASI_FL16_SL: | |
2096 | case ASI_FL16_P: | |
2097 | case ASI_FL16_PL: | |
2098 | memop = MO_TEUW; | |
2099 | type = GET_ASI_SHORT; | |
2100 | break; | |
2101 | } | |
2102 | /* The little-endian asis all have bit 3 set. */ | |
2103 | if (asi & 8) { | |
2104 | memop ^= MO_BSWAP; | |
2105 | } | |
2106 | } | |
2107 | #endif | |
2108 | ||
2109 | done: | |
2110 | return (DisasASI){ type, asi, mem_idx, memop }; | |
2111 | } | |
2112 | ||
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs for the sparc32 user-only build: in this configuration every
 * reachable ASI is handled inline by the GET_ASI_* cases, so these
 * helper entry points must never actually be called.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
2126 | ||
/*
 * Emit an integer ASI load into DST from ADDR, as classified by DA.
 * GET_ASI_DIRECT becomes an inline TCG load; any other non-exception
 * classification falls back to the ld_asi helper.
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised when the ASI was decoded. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise a fault; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper result is 64-bit; narrow it to the target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
2157 | ||
/*
 * Emit an integer ASI store of SRC to ADDR, as classified by DA.
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR. */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation. */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness. */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise a fault; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; widen from target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2229 | ||
2230 | static void gen_swap_asi(DisasContext *dc, DisasASI *da, | |
2231 | TCGv dst, TCGv src, TCGv addr) | |
2232 | { | |
2233 | switch (da->type) { | |
2234 | case GET_ASI_EXCP: | |
2235 | break; | |
2236 | case GET_ASI_DIRECT: | |
2237 | tcg_gen_atomic_xchg_tl(dst, addr, src, | |
2238 | da->mem_idx, da->memop | MO_ALIGN); | |
2239 | break; | |
2240 | default: | |
2241 | /* ??? Should be DAE_invalid_asi. */ | |
2242 | gen_exception(dc, TT_DATA_ACCESS); | |
2243 | break; | |
2244 | } | |
2245 | } | |
2246 | ||
2247 | static void gen_cas_asi(DisasContext *dc, DisasASI *da, | |
2248 | TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr) | |
2249 | { | |
2250 | switch (da->type) { | |
2251 | case GET_ASI_EXCP: | |
2252 | return; | |
2253 | case GET_ASI_DIRECT: | |
2254 | tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv, | |
2255 | da->mem_idx, da->memop | MO_ALIGN); | |
2256 | break; | |
2257 | default: | |
2258 | /* ??? Should be DAE_invalid_asi. */ | |
2259 | gen_exception(dc, TT_DATA_ACCESS); | |
2260 | break; | |
2261 | } | |
2262 | } | |
2263 | ||
/*
 * Emit LDSTUB/LDSTUBA: atomically load the byte at ADDR into DST and
 * store 0xff to the same location.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* The non-atomic load+store fallback below is unsafe when
               other cpus run in parallel; exit to serial execution. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* Helpers may fault; sync pc/npc first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2298 | ||
/*
 * Emit a floating-point ASI load (ldfa/lddfa/ldqfa) of size ORIG_SIZE
 * from ADDR into fp register RD.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Quad accesses are emitted as two 64-bit operations. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load the first half into a temp so that a fault on the
               second half leaves the register pair unmodified. */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them. */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2410 | ||
/*
 * Emit a floating-point ASI store (stfa/stdfa/stqfa) of size ORIG_SIZE
 * from fp register RD to ADDR.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Quad accesses are emitted as two 64-bit operations. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write. */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2493 | ||
/*
 * Emit LDDA: load a doubleword (or a 128-bit twin on sparc64) into the
 * even/odd integer register pair RD / RD+1.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks. */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above. */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2569 | ||
/*
 * Emit STDA: store the even/odd integer register pair RD / RD+1 as a
 * doubleword (or a 128-bit twin on sparc64) at ADDR.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction. */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR. */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation. */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_concat_tl_i64(t64, lo, hi);
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above. */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2661 | ||
2662 | #ifdef TARGET_SPARC64 | |
2663 | static TCGv get_src1(DisasContext *dc, unsigned int insn) | |
2664 | { | |
2665 | unsigned int rs1 = GET_FIELD(insn, 13, 17); | |
2666 | return gen_load_gpr(dc, rs1); | |
2667 | } | |
2668 | ||
2669 | static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) | |
2670 | { | |
2671 | TCGv_i32 c32, zero, dst, s1, s2; | |
2672 | ||
2673 | /* We have two choices here: extend the 32 bit data and use movcond_i64, | |
2674 | or fold the comparison down to 32 bits and use movcond_i32. Choose | |
2675 | the later. */ | |
2676 | c32 = tcg_temp_new_i32(); | |
2677 | if (cmp->is_bool) { | |
2678 | tcg_gen_extrl_i64_i32(c32, cmp->c1); | |
2679 | } else { | |
2680 | TCGv_i64 c64 = tcg_temp_new_i64(); | |
2681 | tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2); | |
2682 | tcg_gen_extrl_i64_i32(c32, c64); | |
2683 | } | |
2684 | ||
2685 | s1 = gen_load_fpr_F(dc, rs); | |
2686 | s2 = gen_load_fpr_F(dc, rd); | |
2687 | dst = gen_dest_fpr_F(dc); | |
2688 | zero = tcg_constant_i32(0); | |
2689 | ||
2690 | tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); | |
2691 | ||
2692 | gen_store_fpr_F(dc, rd, dst); | |
2693 | } | |
2694 | ||
2695 | static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs) | |
2696 | { | |
2697 | TCGv_i64 dst = gen_dest_fpr_D(dc, rd); | |
2698 | tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2, | |
2699 | gen_load_fpr_D(dc, rs), | |
2700 | gen_load_fpr_D(dc, rd)); | |
2701 | gen_store_fpr_D(dc, rd, dst); | |
2702 | } | |
2703 | ||
2704 | static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) | |
2705 | { | |
2706 | int qd = QFPREG(rd); | |
2707 | int qs = QFPREG(rs); | |
2708 | ||
2709 | tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2, | |
2710 | cpu_fpr[qs / 2], cpu_fpr[qd / 2]); | |
2711 | tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2, | |
2712 | cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]); | |
2713 | ||
2714 | gen_update_fprs_dirty(dc, qd); | |
2715 | } | |
2716 | ||
/*
 * Compute a pointer to the trap state for the current trap level:
 * r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
2738 | #endif | |
2739 | ||
2740 | static int extract_dfpreg(DisasContext *dc, int x) | |
2741 | { | |
2742 | return DFPREG(x); | |
2743 | } | |
2744 | ||
2745 | static int extract_qfpreg(DisasContext *dc, int x) | |
2746 | { | |
2747 | return QFPREG(x); | |
2748 | } | |
2749 | ||
2750 | /* Include the auto-generated decoder. */ | |
2751 | #include "decode-insns.c.inc" | |
2752 | ||
/*
 * Decodetree glue: trans_NAME() evaluates the availability predicate
 * for this cpu model and, if it holds, calls the real translator.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Availability predicates; folded to compile-time constants where the
   target (sparc32 vs sparc64) decides the answer. */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2783 | ||
2784 | /* Default case for non jump instructions. */ | |
/*
 * Default pc/npc advance for non-jump instructions.  Handles the case
 * where npc holds a symbolic marker (dynamic pc, or a pending
 * conditional branch) instead of a concrete address.
 */
static bool advance_pc(DisasContext *dc)
{
    if (dc->npc & 3) {
        /* Low bits set: npc is one of the symbolic values. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2808 | ||
2809 | /* | |
2810 | * Major opcodes 00 and 01 -- branches, call, and sethi | |
2811 | */ | |
2812 | ||
2813 | static bool advance_jump_uncond_never(DisasContext *dc, bool annul) | |
2814 | { | |
2815 | if (annul) { | |
2816 | dc->pc = dc->npc + 4; | |
2817 | dc->npc = dc->pc + 4; | |
2818 | } else { | |
2819 | dc->pc = dc->npc; | |
2820 | dc->npc = dc->pc + 4; | |
2821 | } | |
2822 | return true; | |
2823 | } | |
2824 | ||
2825 | static bool advance_jump_uncond_always(DisasContext *dc, bool annul, | |
2826 | target_ulong dest) | |
2827 | { | |
2828 | if (annul) { | |
2829 | dc->pc = dest; | |
2830 | dc->npc = dest + 4; | |
2831 | } else { | |
2832 | dc->pc = dc->npc; | |
2833 | dc->npc = dest; | |
2834 | tcg_gen_mov_tl(cpu_pc, cpu_npc); | |
2835 | } | |
2836 | return true; | |
2837 | } | |
2838 | ||
/*
 * Conditional branch to DEST, with comparison CMP.  For the annulled
 * form the TB ends here with a two-way goto_tb; otherwise the branch
 * resolution is deferred (JUMP_PC) or folded into a movcond when npc
 * is already symbolic.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /* Branch not taken: skip both the target and the delay slot. */
        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            /* npc is symbolic; resolve the condition into cpu_npc now. */
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the branch decision: record both successors and
               materialize the condition into cpu_cond. */
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;
            if (cmp->is_bool) {
                tcg_gen_mov_tl(cpu_cond, cmp->c1);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
2882 | ||
/* Raise a privileged-instruction trap; the insn is consumed. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2888 | ||
/* Raise an fp exception with FTT set to unimplemented-FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2894 | ||
2895 | static bool gen_trap_float128(DisasContext *dc) | |
2896 | { | |
2897 | if (dc->def->features & CPU_FEATURE_FLOAT128) { | |
2898 | return false; | |
2899 | } | |
2900 | return raise_unimpfpop(dc); | |
2901 | } | |
2902 | ||
/*
 * Branch on integer condition codes: Bicc (all cpus) and BPcc (v9).
 * Condition 0x0 is "branch never" and 0x8 is "branch always";
 * everything else evaluates the selected condition-code register.
 */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    /* Displacement is in instruction words, relative to this insn. */
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)
2923 | ||
/*
 * Branch on floating-point condition codes: FBfcc (all) and FBPfcc (v9).
 * Same structure as do_bpcc, but first checks that the FPU is enabled
 * and compares against the fcc field instead of icc/xcc.
 */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    /* FPU disabled raises fp_disabled instead of branching. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)
2947 | ||
/*
 * BPr: branch on the contents of an integer register (sparc64 only).
 * A cond value that maps to TCG_COND_NEVER is a reserved encoding;
 * returning false lets the decoder raise illegal_instruction.
 */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
        return false;
    }

    flush_cond(dc);
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return advance_jump_cond(dc, &cmp, a->a, target);
}
2964 | ||
/*
 * CALL: pc-relative call.  Stores the address of the CALL itself into
 * %o7 (r15), then branches with the usual one-insn delay slot:
 * pc <- npc, npc <- target.
 */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2974 | ||
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false makes the decoder raise illegal_instruction. */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2988 | ||
2989 | static bool trans_SETHI(DisasContext *dc, arg_SETHI *a) | |
2990 | { | |
2991 | /* Special-case %g0 because that's the canonical nop. */ | |
2992 | if (a->rd) { | |
2993 | gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10)); | |
2994 | } | |
2995 | return advance_pc(dc); | |
2996 | } | |
2997 | ||
2998 | /* | |
2999 | * Major Opcode 10 -- integer, floating-point, vis, and system insns. | |
3000 | */ | |
3001 | ||
/*
 * Tcc: conditional software trap.
 * The trap number is (rs1 + rs2_or_imm) masked to the implemented range,
 * offset by TT_TRAP.  With hypervisor present and in supervisor mode the
 * wider UA2005 mask applies, otherwise the classic v8 7-bit mask.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never. */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always. */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap: branch to a delayed-exception stub on match. */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
3052 | ||
3053 | static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a) | |
3054 | { | |
3055 | if (avail_32(dc) && a->cc) { | |
3056 | return false; | |
3057 | } | |
3058 | return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2); | |
3059 | } | |
3060 | ||
/* Tcc, immediate form, v7/v8 encoding (no cc field). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}
3068 | ||
/* Tcc, immediate form, v9 encoding (with cc field). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
3076 | ||
/* STBAR: store barrier -- orders stores against later stores. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
3082 | ||
/* MEMBAR (sparc64 only): memory barrier with mmask/cmask controls. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
3098 | ||
/*
 * Common helper for reading special registers (ASRs, PRs, HPRs).
 * If !priv, raise the privileged-instruction trap.  Otherwise func
 * fills in (or substitutes for) the destination temp; its return value
 * may be the passed-in dst or a shared global (e.g. cpu_y).
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
3108 | ||
/* %y is kept live in a global; return it directly instead of dst. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
3113 | ||
static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
3126 | ||
/* Leon3 %asr17: processor configuration register (read-only subset). */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8] V8 */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* %ccr: flags must be folded into psr state before reading. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* %asi: the current value is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
3162 | ||
/* %tick: read the tick timer; icount mode must end the TB. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* %pc: the address of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

/* %fprs: kept in a 32-bit global; sign-extend into the gpr. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* %gsr: requires the FPU to be enabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* %softint */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* %tick_cmpr */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3218 | ||
/* %stick: system tick timer; same icount handling as %tick. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* %stick_cmpr */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always report "strand running". */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3255 | ||
/* %psr (sparc32): flags must be up to date before reading. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

/* %hpstate */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* %htstate: indexed by the current trap level (env->tl). */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (tl & MAXTL_MASK) * sizeof(uint64_t) */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* %hintp */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* %htba */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* %hver */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* %hstick_cmpr */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

/* %wim (sparc32): window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3330 | ||
/* %tpc: trap pc at the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* %tnpc: trap npc at the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* %tstate at the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* %tt: trap type at the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* %tbr / %tba is kept live in a global; return it directly. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3399 | ||
/* %pstate */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* %tl: current trap level. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* %pil: processor interrupt level. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* %cwp: current window pointer, via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* %cansave */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* %canrestore */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* %cleanwin */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* %otherwin */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* %wstate */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* %gl: global register level (UA2005). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* %ver */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3497 | ||
3498 | static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a) | |
3499 | { | |
3500 | if (avail_64(dc)) { | |
3501 | gen_helper_flushw(tcg_env); | |
3502 | return advance_pc(dc); | |
3503 | } | |
3504 | return false; | |
3505 | } | |
3506 | ||
/*
 * Common helper for writing special registers.
 * Per the ISA, WRASR/WRPSR-style insns write (rs1 XOR rs2_or_imm),
 * hence the xor (not add) when combining the two operands.
 * If !priv, raise the privileged-instruction trap instead.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* rs1 == %g0: the xor degenerates to the immediate/register alone. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3538 | ||
/* %y: only the low 32 bits are architected. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* %ccr */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* %asi: only 8 bits are kept; the TB is keyed on the ASI value. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* %fprs: invalidates cached dirty bits; TB is keyed on fprs. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* %gsr: requires the FPU to be enabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* %softint_set: set bits in softint. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* %softint_clr: clear bits in softint. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* %softint: replace the whole register. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3606 | ||
/* %tick_cmpr: store the new limit and reprogram the timer. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* %stick: set the system tick counter. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* %stick_cmpr: store the new limit and reprogram the timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

/* Power-down: the helper does not return to generated code. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3659 | ||
/* %psr (sparc32): helper validates CWP; flags become live afterwards. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    /* End TB: ET/PIL changes may unmask pending interrupts. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* %wim (sparc32): mask to the implemented window count. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3680 | ||
/* %tpc at the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* %tnpc at the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* %tstate at the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* %tt at the current trap level. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* %tick: set the tick counter. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* %tba */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3756 | ||
/* %pstate: state is saved first; npc becomes dynamic afterwards. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* %tl: changing the trap level invalidates the static npc. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* %pil */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* %cwp: helper validates the new window pointer. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* %cansave */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* %canrestore */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* %cleanwin */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* %otherwin */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* %wstate */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* %gl (UA2005): helper remaps the global register set. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 %tbr shares the do_wrtba implementation. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3846 | ||
3847 | static void do_wrhpstate(DisasContext *dc, TCGv src) | |
3848 | { | |
3849 | tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate)); | |
3850 | dc->base.is_jmp = DISAS_EXIT; | |
3851 | } | |
3852 | ||
3853 | TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate) | |
3854 | ||
/* Write %htstate[%tl]: the entry for the current trap level. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Compute tp = env + (env->tl & MAXTL_MASK) * 8 at run time; each
       htstate entry is 8 bytes, hence the shift by 3. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    /* The fixed offset of htstate[0] plus the scaled index above. */
    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3870 | ||
/* Write %hintp (hypervisor interrupt pending). */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3877 | ||
/* Write %htba (hypervisor trap base address). */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3884 | ||
/* Write %hstick_cmpr and reprogram the hstick timer with the new limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    /* Timer access touches I/O state; must be flagged before the helper. */
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3899 | ||
3900 | static bool do_saved_restored(DisasContext *dc, bool saved) | |
3901 | { | |
3902 | if (!supervisor(dc)) { | |
3903 | return raise_priv(dc); | |
3904 | } | |
3905 | if (saved) { | |
3906 | gen_helper_saved(tcg_env); | |
3907 | } else { | |
3908 | gen_helper_restored(tcg_env); | |
3909 | } | |
3910 | return advance_pc(dc); | |
3911 | } | |
3912 | ||
3913 | TRANS(SAVED, 64, do_saved_restored, true) | |
3914 | TRANS(RESTORED, 64, do_saved_restored, false) | |
3915 | ||
/* NOP: nothing to emit, just step past the instruction. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3927 | ||
/*
 * Common translation for r-r-ri arithmetic: rd = rs1 op (rs2 | simm).
 * FUNC is the register-register form; FUNCI, if non-NULL, the immediate
 * form.  When a->cc is set, the result goes through cpu_cc_dst and the
 * lazy condition-code state (cpu_cc_op / dc->cc_op) is set to CC_OP.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (a->cc) {
        /* Compute directly into cpu_cc_dst so flags can be derived later. */
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    /* rs2 == %g0 reads as zero, so it can share the immediate path. */
    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}
3963 | ||
3964 | static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op, | |
3965 | void (*func)(TCGv, TCGv, TCGv), | |
3966 | void (*funci)(TCGv, TCGv, target_long), | |
3967 | void (*func_cc)(TCGv, TCGv, TCGv)) | |
3968 | { | |
3969 | if (a->cc) { | |
3970 | assert(cc_op >= 0); | |
3971 | return do_arith_int(dc, a, cc_op, func_cc, NULL); | |
3972 | } | |
3973 | return do_arith_int(dc, a, cc_op, func, funci); | |
3974 | } | |
3975 | ||
/* Logical ops always report flags via CC_OP_LOGIC (N/Z from the result). */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
}

TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)

TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
4010 | ||
4011 | static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a) | |
4012 | { | |
4013 | /* OR with %g0 is the canonical alias for MOV. */ | |
4014 | if (!a->cc && a->rs1 == 0) { | |
4015 | if (a->imm || a->rs2_or_imm == 0) { | |
4016 | gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm)); | |
4017 | } else if (a->rs2_or_imm & ~0x1f) { | |
4018 | /* For simplicity, we under-decoded the rs2 form. */ | |
4019 | return false; | |
4020 | } else { | |
4021 | gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]); | |
4022 | } | |
4023 | return advance_pc(dc); | |
4024 | } | |
4025 | return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl); | |
4026 | } | |
4027 | ||
/*
 * Translate ADDC/ADDCcc (add with carry).  The way the carry bit is
 * recovered depends on how the flags were last computed, so dispatch on
 * the current lazy cc state (dc->cc_op).
 */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry can be recomputed from the previous addition's operands. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Likewise from a previous subtraction. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* Unknown flag provenance: extract carry from PSR/CCR at runtime. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}
4051 | ||
/*
 * Translate SUBC/SUBCcc (subtract with carry/borrow).  Mirror image of
 * trans_ADDC: the borrow extraction depends on the current cc state.
 */
static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Borrow derivable from the previous addition's operands. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Likewise from a previous subtraction. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        /* Unknown flag provenance: extract borrow from PSR/CCR at runtime. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}
4075 | ||
/*
 * Translate MULScc (multiply step).  It consumes live PSR bits (N xor V,
 * and the Y register), so the lazy flags must be materialized first.
 */
static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    update_psr(dc);
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
4081 | ||
4082 | static bool gen_edge(DisasContext *dc, arg_r_r_r *a, | |
4083 | int width, bool cc, bool left) | |
4084 | { | |
4085 | TCGv dst, s1, s2, lo1, lo2; | |
4086 | uint64_t amask, tabl, tabr; | |
4087 | int shift, imask, omask; | |
4088 | ||
4089 | dst = gen_dest_gpr(dc, a->rd); | |
4090 | s1 = gen_load_gpr(dc, a->rs1); | |
4091 | s2 = gen_load_gpr(dc, a->rs2); | |
4092 | ||
4093 | if (cc) { | |
4094 | tcg_gen_mov_tl(cpu_cc_src, s1); | |
4095 | tcg_gen_mov_tl(cpu_cc_src2, s2); | |
4096 | tcg_gen_sub_tl(cpu_cc_dst, s1, s2); | |
4097 | tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); | |
4098 | dc->cc_op = CC_OP_SUB; | |
4099 | } | |
4100 | ||
4101 | /* | |
4102 | * Theory of operation: there are two tables, left and right (not to | |
4103 | * be confused with the left and right versions of the opcode). These | |
4104 | * are indexed by the low 3 bits of the inputs. To make things "easy", | |
4105 | * these tables are loaded into two constants, TABL and TABR below. | |
4106 | * The operation index = (input & imask) << shift calculates the index | |
4107 | * into the constant, while val = (table >> index) & omask calculates | |
4108 | * the value we're looking for. | |
4109 | */ | |
4110 | switch (width) { | |
4111 | case 8: | |
4112 | imask = 0x7; | |
4113 | shift = 3; | |
4114 | omask = 0xff; | |
4115 | if (left) { | |
4116 | tabl = 0x80c0e0f0f8fcfeffULL; | |
4117 | tabr = 0xff7f3f1f0f070301ULL; | |
4118 | } else { | |
4119 | tabl = 0x0103070f1f3f7fffULL; | |
4120 | tabr = 0xfffefcf8f0e0c080ULL; | |
4121 | } | |
4122 | break; | |
4123 | case 16: | |
4124 | imask = 0x6; | |
4125 | shift = 1; | |
4126 | omask = 0xf; | |
4127 | if (left) { | |
4128 | tabl = 0x8cef; | |
4129 | tabr = 0xf731; | |
4130 | } else { | |
4131 | tabl = 0x137f; | |
4132 | tabr = 0xfec8; | |
4133 | } | |
4134 | break; | |
4135 | case 32: | |
4136 | imask = 0x4; | |
4137 | shift = 0; | |
4138 | omask = 0x3; | |
4139 | if (left) { | |
4140 | tabl = (2 << 2) | 3; | |
4141 | tabr = (3 << 2) | 1; | |
4142 | } else { | |
4143 | tabl = (1 << 2) | 3; | |
4144 | tabr = (3 << 2) | 2; | |
4145 | } | |
4146 | break; | |
4147 | default: | |
4148 | abort(); | |
4149 | } | |
4150 | ||
4151 | lo1 = tcg_temp_new(); | |
4152 | lo2 = tcg_temp_new(); | |
4153 | tcg_gen_andi_tl(lo1, s1, imask); | |
4154 | tcg_gen_andi_tl(lo2, s2, imask); | |
4155 | tcg_gen_shli_tl(lo1, lo1, shift); | |
4156 | tcg_gen_shli_tl(lo2, lo2, shift); | |
4157 | ||
4158 | tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1); | |
4159 | tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2); | |
4160 | tcg_gen_andi_tl(lo1, lo1, omask); | |
4161 | tcg_gen_andi_tl(lo2, lo2, omask); | |
4162 | ||
4163 | amask = address_mask_i(dc, -8); | |
4164 | tcg_gen_andi_tl(s1, s1, amask); | |
4165 | tcg_gen_andi_tl(s2, s2, amask); | |
4166 | ||
4167 | /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */ | |
4168 | tcg_gen_and_tl(lo2, lo2, lo1); | |
4169 | tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2); | |
4170 | ||
4171 | gen_store_gpr(dc, a->rd, dst); | |
4172 | return advance_pc(dc); | |
4173 | } | |
4174 | ||
4175 | TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0) | |
4176 | TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1) | |
4177 | TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0) | |
4178 | TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1) | |
4179 | TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0) | |
4180 | TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1) | |
4181 | ||
4182 | TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0) | |
4183 | TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1) | |
4184 | TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0) | |
4185 | TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1) | |
4186 | TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0) | |
4187 | TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1) | |
4188 | ||
4189 | static bool do_rrr(DisasContext *dc, arg_r_r_r *a, | |
4190 | void (*func)(TCGv, TCGv, TCGv)) | |
4191 | { | |
4192 | TCGv dst = gen_dest_gpr(dc, a->rd); | |
4193 | TCGv src1 = gen_load_gpr(dc, a->rs1); | |
4194 | TCGv src2 = gen_load_gpr(dc, a->rs2); | |
4195 | ||
4196 | func(dst, src1, src2); | |
4197 | gen_store_gpr(dc, a->rd, dst); | |
4198 | return advance_pc(dc); | |
4199 | } | |
4200 | ||
4201 | TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8) | |
4202 | TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16) | |
4203 | TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32) | |
4204 | ||
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the discarded low 3 bits are
 * latched in GSR.align for use by a later FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4217 | ||
/*
 * ALIGNADDRESS_LITTLE: like gen_op_alignaddr, but GSR.align receives the
 * two's complement of the low bits (little-endian alignment convention).
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4234 | ||
/* BMASK: dst = s1 + s2, with the sum also stored into GSR.mask (bits 32..63). */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4246 | ||
/*
 * Register-count shifts.  L selects shift-left; otherwise U selects
 * logical (unsigned) vs arithmetic right shift.  a->x is the sparc64
 * 64-bit variant; without it results are 32-bit zero/sign extended.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* The architecture only uses the low 5 (or 6 for %x) count bits. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result: discard bits shifted above bit 31. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Right shifts need the extension applied to the input. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4286 | ||
/*
 * Immediate-count shifts; flags as in do_shift_r.  On sparc64, 32-bit
 * shifts by a constant fold shift + extension into a single
 * deposit/extract/sextract op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift: plain shli/shri/sari. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* sparc64 32-bit shift: shift + 32-bit extension in one op. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4323 | ||
4324 | static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm) | |
4325 | { | |
4326 | /* For simplicity, we under-decoded the rs2 form. */ | |
4327 | if (!imm && rs2_or_imm & ~0x1f) { | |
4328 | return NULL; | |
4329 | } | |
4330 | if (imm || rs2_or_imm == 0) { | |
4331 | return tcg_constant_tl(rs2_or_imm); | |
4332 | } else { | |
4333 | return cpu_regs[rs2_or_imm]; | |
4334 | } | |
4335 | } | |
4336 | ||
/*
 * Conditional move: rd = (cmp holds ? src2 : rd).  Loading rd first and
 * using it as the movcond else-value makes the no-move case a no-op.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4345 | ||
4346 | static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a) | |
4347 | { | |
4348 | TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); | |
4349 | DisasCompare cmp; | |
4350 | ||
4351 | if (src2 == NULL) { | |
4352 | return false; | |
4353 | } | |
4354 | gen_compare(&cmp, a->cc, a->cond, dc); | |
4355 | return do_mov_cond(dc, &cmp, a->rd, src2); | |
4356 | } | |
4357 | ||
4358 | static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a) | |
4359 | { | |
4360 | TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); | |
4361 | DisasCompare cmp; | |
4362 | ||
4363 | if (src2 == NULL) { | |
4364 | return false; | |
4365 | } | |
4366 | gen_fcompare(&cmp, a->cc, a->cond); | |
4367 | return do_mov_cond(dc, &cmp, a->rd, src2); | |
4368 | } | |
4369 | ||
4370 | static bool trans_MOVR(DisasContext *dc, arg_MOVR *a) | |
4371 | { | |
4372 | TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); | |
4373 | DisasCompare cmp; | |
4374 | ||
4375 | if (src2 == NULL) { | |
4376 | return false; | |
4377 | } | |
4378 | gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); | |
4379 | return do_mov_cond(dc, &cmp, a->rd, src2); | |
4380 | } | |
4381 | ||
/*
 * Common translation for insns whose operand is rs1 + (rs2 | simm):
 * JMPL, RETT, RETURN, SAVE, RESTORE.  FUNC consumes the computed sum.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4406 | ||
/*
 * JMPL: jump to SRC, writing the address of the JMPL itself to rd.
 * The new npc is only known at runtime, so finish with a TB lookup.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4427 | ||
/*
 * RETT (sparc32 return from trap): privileged; the helper restores
 * PSR state while npc is set to the target.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4445 | ||
/*
 * RETURN (sparc64): jump to SRC and pop a register window (as RESTORE).
 * The window restore happens after npc is set up from src.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4460 | ||
/*
 * SAVE: push a register window, then write the pre-computed sum
 * (captured in a temp by do_add_special) into the NEW window's rd.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4469 | ||
/*
 * RESTORE: pop a register window, then write the pre-computed sum
 * (captured before the window change) into the restored window's rd.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4478 | ||
/*
 * DONE/RETRY (sparc64 trap returns): privileged.  The helpers set
 * pc/npc from the trap state, so the translator pc/npc become dynamic
 * and timer/I-O state must be flagged before the call.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4497 | ||
4498 | /* | |
4499 | * Major opcode 11 -- load and store instructions | |
4500 | */ | |
4501 | ||
4502 | static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm) | |
4503 | { | |
4504 | TCGv addr, tmp = NULL; | |
4505 | ||
4506 | /* For simplicity, we under-decoded the rs2 form. */ | |
4507 | if (!imm && rs2_or_imm & ~0x1f) { | |
4508 | return NULL; | |
4509 | } | |
4510 | ||
4511 | addr = gen_load_gpr(dc, rs1); | |
4512 | if (rs2_or_imm) { | |
4513 | tmp = tcg_temp_new(); | |
4514 | if (imm) { | |
4515 | tcg_gen_addi_tl(tmp, addr, rs2_or_imm); | |
4516 | } else { | |
4517 | tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]); | |
4518 | } | |
4519 | addr = tmp; | |
4520 | } | |
4521 | if (AM_CHECK(dc)) { | |
4522 | if (!tmp) { | |
4523 | tmp = tcg_temp_new(); | |
4524 | } | |
4525 | tcg_gen_ext32u_tl(tmp, addr); | |
4526 | addr = tmp; | |
4527 | } | |
4528 | return addr; | |
4529 | } | |
4530 | ||
/* Integer load of size MOP into rd, honoring the insn's ASI. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4554 | ||
/* Integer store of size MOP from rd, honoring the insn's ASI. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4574 | ||
/* LDD: load doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4591 | ||
/* STD: store doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4608 | ||
/* LDSTUB: atomically load a byte into rd and store 0xff to memory. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4625 | ||
/* SWAP: atomically exchange a 32-bit word between rd and memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* rd is both source of the store and destination of the load. */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4643 | ||
/*
 * CASA/CASXA compare-and-swap: compare mem[rs1] with rs2; if equal,
 * store rd; rd always receives the old memory value.  The effective
 * address is rs1 alone (hence the constant-zero second operand).
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);       /* old value out */
    n = gen_load_gpr(dc, a->rd);       /* new value in */
    c = gen_load_gpr(dc, a->rs2_or_imm); /* comparand */
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4665 | ||
/* Floating-point load of size SZ (MO_32/MO_64/MO_128) via ASI. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Traps (fpu disabled, no quad support) take priority over the access. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4693 | ||
/* Floating-point store of size SZ (MO_32/MO_64/MO_128) via ASI. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Traps (fpu disabled, no quad support) take priority over the access. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4720 | ||
/*
 * STDFQ (sparc32 only, privileged): the FQ is not modeled, so raise an
 * fp sequence-error exception instead of performing the store.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4735 | ||
/*
 * LDFSR/LDXFSR: load the FSR from memory.  NEW_MASK selects which bits
 * come from memory, OLD_MASK which are preserved; the helper then
 * propagates rounding-mode etc. into softfloat state.
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4757 | ||
/* STFSR/STXFSR: store the FSR to memory (32 or 64 bits). */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4773 | ||
/* Single-precision unary op, rd = func(rs), with no fp exceptions. */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4794 | ||
4795 | static bool do_env_ff(DisasContext *dc, arg_r_r *a, | |
4796 | void (*func)(TCGv_i32, TCGv_env, TCGv_i32)) | |
4797 | { | |
4798 | TCGv_i32 tmp; | |
4799 | ||
4800 | if (gen_trap_ifnofpu(dc)) { | |
4801 | return true; | |
4802 | } | |
4803 | ||
4804 | gen_op_clear_ieee_excp_and_FTT(); | |
4805 | tmp = gen_load_fpr_F(dc, a->rs); | |
4806 | func(tmp, tcg_env, tmp); | |
4807 | gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); | |
4808 | gen_store_fpr_F(dc, a->rd, tmp); | |
4809 | return advance_pc(dc); | |
4810 | } | |
4811 | ||
/* Single-precision ops routed through env helpers (may set IEEE excp). */
TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4815 | ||
4816 | static bool do_dd(DisasContext *dc, arg_r_r *a, | |
4817 | void (*func)(TCGv_i64, TCGv_i64)) | |
4818 | { | |
4819 | TCGv_i64 dst, src; | |
4820 | ||
4821 | if (gen_trap_ifnofpu(dc)) { | |
4822 | return true; | |
4823 | } | |
4824 | ||
4825 | dst = gen_dest_fpr_D(dc, a->rd); | |
4826 | src = gen_load_fpr_D(dc, a->rs); | |
4827 | func(dst, src); | |
4828 | gen_store_fpr_D(dc, a->rd, dst); | |
4829 | return advance_pc(dc); | |
4830 | } | |
4831 | ||
/* Double-precision moves/negate/abs, plus the VIS1 copy/invert forms. */
TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4837 | ||
4838 | static bool do_fff(DisasContext *dc, arg_r_r_r *a, | |
4839 | void (*func)(TCGv_i32, TCGv_i32, TCGv_i32)) | |
4840 | { | |
4841 | TCGv_i32 src1, src2; | |
4842 | ||
4843 | if (gen_trap_ifnofpu(dc)) { | |
4844 | return true; | |
4845 | } | |
4846 | ||
4847 | src1 = gen_load_fpr_F(dc, a->rs1); | |
4848 | src2 = gen_load_fpr_F(dc, a->rs2); | |
4849 | func(src1, src1, src2); | |
4850 | gen_store_fpr_F(dc, a->rd, src1); | |
4851 | return advance_pc(dc); | |
4852 | } | |
4853 | ||
/* VIS1 32-bit partitioned arithmetic and bitwise logical ops. */
TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4866 | ||
4867 | static bool do_ddd(DisasContext *dc, arg_r_r_r *a, | |
4868 | void (*func)(TCGv_i64, TCGv_i64, TCGv_i64)) | |
4869 | { | |
4870 | TCGv_i64 dst, src1, src2; | |
4871 | ||
4872 | if (gen_trap_ifnofpu(dc)) { | |
4873 | return true; | |
4874 | } | |
4875 | ||
4876 | dst = gen_dest_fpr_D(dc, a->rd); | |
4877 | src1 = gen_load_fpr_D(dc, a->rs1); | |
4878 | src2 = gen_load_fpr_D(dc, a->rs2); | |
4879 | func(dst, src1, src2); | |
4880 | gen_store_fpr_D(dc, a->rd, dst); | |
4881 | return advance_pc(dc); | |
4882 | } | |
4883 | ||
/* VIS1 64-bit partitioned multiplies and pixel-manipulation helpers. */
TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

/* VIS1 64-bit partitioned add/sub and bitwise logical ops. */
TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4910 | ||
4911 | static bool do_dddd(DisasContext *dc, arg_r_r_r *a, | |
4912 | void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) | |
4913 | { | |
4914 | TCGv_i64 dst, src0, src1, src2; | |
4915 | ||
4916 | if (gen_trap_ifnofpu(dc)) { | |
4917 | return true; | |
4918 | } | |
4919 | ||
4920 | dst = gen_dest_fpr_D(dc, a->rd); | |
4921 | src0 = gen_load_fpr_D(dc, a->rd); | |
4922 | src1 = gen_load_fpr_D(dc, a->rs1); | |
4923 | src2 = gen_load_fpr_D(dc, a->rs2); | |
4924 | func(dst, src0, src1, src2); | |
4925 | gen_store_fpr_D(dc, a->rd, dst); | |
4926 | return advance_pc(dc); | |
4927 | } | |
4928 | ||
4929 | TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist) | |
4930 | ||
/*
 * Feature gates used by the legacy decoder below: bail out to the
 * appropriate trap label when the CPU model lacks the feature.
 * Only usable inside disas_sparc_legacy (they jump to its labels).
 */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
4937 | ||
/*
 * Legacy decoder for the instructions not yet handled by decodetree;
 * it is only reached when decode() fails (see sparc_tr_translate_insn).
 * Branches marked "in decodetree" are unreachable or must trap here.
 * Before an instruction, dc->pc must be static.
 */
static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1 __attribute__((unused));
    TCGv_i32 cpu_src1_32, cpu_src2_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64;
    TCGv_i32 cpu_dst_32 __attribute__((unused));
    TCGv_i64 cpu_dst_64 __attribute__((unused));

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:
        goto illegal_insn; /* in decodetree */
    case 1:
        g_assert_not_reached(); /* in decodetree */
    case 2: /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();

            if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

                switch (xop) {
                case 0x1: /* fmovs */
                case 0x5: /* fnegs */
                case 0x9: /* fabss */
                case 0x2: /* V9 fmovd */
                case 0x6: /* V9 fnegd */
                case 0xa: /* V9 fabsd */
                case 0x29: /* fsqrts */
                case 0xc4: /* fitos */
                case 0xd1: /* fstoi */
                    g_assert_not_reached(); /* in decodetree */
                case 0x2a: /* fsqrtd */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
                    break;
                case 0x41: /* fadds */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
                    break;
                case 0x42: /* faddd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
                    break;
                case 0x43: /* faddq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
                    break;
                case 0x45: /* fsubs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
                    break;
                case 0x46: /* fsubd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
                    break;
                case 0x47: /* fsubq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
                    break;
                case 0x49: /* fmuls */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
                    break;
                case 0x4a: /* fmuld */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
                    break;
                case 0x4b: /* fmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
                    break;
                case 0x4d: /* fdivs */
                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
                    break;
                case 0x4e: /* fdivd */
                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
                    break;
                case 0x4f: /* fdivq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
                    break;
                case 0x69: /* fsmuld */
                    CHECK_FPU_FEATURE(dc, FSMULD);
                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
                    break;
                case 0x6e: /* fdmulq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
                    break;
                case 0xc6: /* fdtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
                    break;
                case 0xc7: /* fqtos */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
                    break;
                case 0xc8: /* fitod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
                    break;
                case 0xc9: /* fstod */
                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
                    break;
                case 0xcb: /* fqtod */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
                    break;
                case 0xcc: /* fitoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
                    break;
                case 0xcd: /* fstoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
                    break;
                case 0xce: /* fdtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
                    break;
                case 0xd2: /* fdtoi */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
                    break;
                case 0xd3: /* fqtoi */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
                    break;
#ifdef TARGET_SPARC64
                case 0x3: /* V9 fmovq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_move_Q(dc, rd, rs2);
                    break;
                case 0x7: /* V9 fnegq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
                    break;
                case 0xb: /* V9 fabsq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
                    break;
                case 0x81: /* V9 fstox */
                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
                    break;
                case 0x82: /* V9 fdtox */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
                    break;
                case 0x83: /* V9 fqtox */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
                    break;
                case 0x84: /* V9 fxtos */
                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
                    break;
                case 0x88: /* V9 fxtod */
                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
                    break;
                case 0x8c: /* V9 fxtoq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);

#ifdef TARGET_SPARC64
                /* Conditional move of an FP register on an integer register. */
#define FMOVR(sz)                                          \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 10, 12);     \
                    cpu_src1 = get_src1(dc, insn);         \
                    gen_compare_reg(&cmp, cond, cpu_src1); \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                } while (0)

                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
                    FMOVR(s);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    FMOVR(d);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVR(q);
                    break;
                }
#undef FMOVR
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
                /* Conditional move of an FP register on an FP condition code. */
#define FMOVCC(fcc, sz)                                    \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 14, 17);     \
                    gen_fcompare(&cmp, fcc, cond);         \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                } while (0)

                case 0x001: /* V9 fmovscc %fcc0 */
                    FMOVCC(0, s);
                    break;
                case 0x002: /* V9 fmovdcc %fcc0 */
                    FMOVCC(0, d);
                    break;
                case 0x003: /* V9 fmovqcc %fcc0 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x041: /* V9 fmovscc %fcc1 */
                    FMOVCC(1, s);
                    break;
                case 0x042: /* V9 fmovdcc %fcc1 */
                    FMOVCC(1, d);
                    break;
                case 0x043: /* V9 fmovqcc %fcc1 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
                case 0x081: /* V9 fmovscc %fcc2 */
                    FMOVCC(2, s);
                    break;
                case 0x082: /* V9 fmovdcc %fcc2 */
                    FMOVCC(2, d);
                    break;
                case 0x083: /* V9 fmovqcc %fcc2 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(2, q);
                    break;
                case 0x0c1: /* V9 fmovscc %fcc3 */
                    FMOVCC(3, s);
                    break;
                case 0x0c2: /* V9 fmovdcc %fcc3 */
                    FMOVCC(3, d);
                    break;
                case 0x0c3: /* V9 fmovqcc %fcc3 */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(3, q);
                    break;
#undef FMOVCC
                /* Conditional move of an FP register on %icc/%xcc. */
#define FMOVCC(xcc, sz)                                    \
                do {                                       \
                    DisasCompare cmp;                      \
                    cond = GET_FIELD_SP(insn, 14, 17);     \
                    gen_compare(&cmp, xcc, cond, dc);      \
                    gen_fmov##sz(dc, &cmp, rd, rs2);       \
                } while (0)

                case 0x101: /* V9 fmovscc %icc */
                    FMOVCC(0, s);
                    break;
                case 0x102: /* V9 fmovdcc %icc */
                    FMOVCC(0, d);
                    break;
                case 0x103: /* V9 fmovqcc %icc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(0, q);
                    break;
                case 0x181: /* V9 fmovscc %xcc */
                    FMOVCC(1, s);
                    break;
                case 0x182: /* V9 fmovdcc %xcc */
                    FMOVCC(1, d);
                    break;
                case 0x183: /* V9 fmovqcc %xcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    FMOVCC(1, q);
                    break;
#undef FMOVCC
#endif
                case 0x51: /* fcmps, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x52: /* fcmpd, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x53: /* fcmpq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpq(rd & 3);
                    break;
                case 0x55: /* fcmpes, V9 %fcc */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    cpu_src2_32 = gen_load_fpr_F(dc, rs2);
                    gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
                    break;
                case 0x56: /* fcmped, V9 %fcc */
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x57: /* fcmpeq, V9 %fcc */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_op_load_fpr_QT0(QFPREG(rs1));
                    gen_op_load_fpr_QT1(QFPREG(rs2));
                    gen_op_fcmpeq(rd & 3);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop == 0x36) {
#ifdef TARGET_SPARC64
                /* VIS */
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
                case 0x000: /* VIS I edge8cc */
                case 0x001: /* VIS II edge8n */
                case 0x002: /* VIS I edge8lcc */
                case 0x003: /* VIS II edge8ln */
                case 0x004: /* VIS I edge16cc */
                case 0x005: /* VIS II edge16n */
                case 0x006: /* VIS I edge16lcc */
                case 0x007: /* VIS II edge16ln */
                case 0x008: /* VIS I edge32cc */
                case 0x009: /* VIS II edge32n */
                case 0x00a: /* VIS I edge32lcc */
                case 0x00b: /* VIS II edge32ln */
                case 0x010: /* VIS I array8 */
                case 0x012: /* VIS I array16 */
                case 0x014: /* VIS I array32 */
                case 0x018: /* VIS I alignaddr */
                case 0x01a: /* VIS I alignaddrl */
                case 0x019: /* VIS II bmask */
                case 0x067: /* VIS I fnot2s */
                case 0x06b: /* VIS I fnot1s */
                case 0x075: /* VIS I fsrc1s */
                case 0x079: /* VIS I fsrc2s */
                case 0x066: /* VIS I fnot2 */
                case 0x06a: /* VIS I fnot1 */
                case 0x074: /* VIS I fsrc1 */
                case 0x078: /* VIS I fsrc2 */
                case 0x051: /* VIS I fpadd16s */
                case 0x053: /* VIS I fpadd32s */
                case 0x055: /* VIS I fpsub16s */
                case 0x057: /* VIS I fpsub32s */
                case 0x063: /* VIS I fnors */
                case 0x065: /* VIS I fandnot2s */
                case 0x069: /* VIS I fandnot1s */
                case 0x06d: /* VIS I fxors */
                case 0x06f: /* VIS I fnands */
                case 0x071: /* VIS I fands */
                case 0x073: /* VIS I fxnors */
                case 0x077: /* VIS I fornot2s */
                case 0x07b: /* VIS I fornot1s */
                case 0x07d: /* VIS I fors */
                case 0x050: /* VIS I fpadd16 */
                case 0x052: /* VIS I fpadd32 */
                case 0x054: /* VIS I fpsub16 */
                case 0x056: /* VIS I fpsub32 */
                case 0x062: /* VIS I fnor */
                case 0x064: /* VIS I fandnot2 */
                case 0x068: /* VIS I fandnot1 */
                case 0x06c: /* VIS I fxor */
                case 0x06e: /* VIS I fnand */
                case 0x070: /* VIS I fand */
                case 0x072: /* VIS I fxnor */
                case 0x076: /* VIS I fornot2 */
                case 0x07a: /* VIS I fornot1 */
                case 0x07c: /* VIS I for */
                case 0x031: /* VIS I fmul8x16 */
                case 0x033: /* VIS I fmul8x16au */
                case 0x035: /* VIS I fmul8x16al */
                case 0x036: /* VIS I fmul8sux16 */
                case 0x037: /* VIS I fmul8ulx16 */
                case 0x038: /* VIS I fmuld8sux16 */
                case 0x039: /* VIS I fmuld8ulx16 */
                case 0x04b: /* VIS I fpmerge */
                case 0x04d: /* VIS I fexpand */
                case 0x03e: /* VIS I pdist */
                case 0x03a: /* VIS I fpack32 */
                case 0x048: /* VIS I faligndata */
                case 0x04c: /* VIS II bshuffle */
                    g_assert_not_reached(); /* in decodetree */
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#endif
            } else {
                goto illegal_insn; /* in decodetree */
            }
        }
        break;
    case 3: /* load/store instructions */
        goto illegal_insn; /* in decodetree */
    }
    advance_pc(dc);
 jmp_insn:
    return;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    return;
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return;
}
5459 | ||
5460 | static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | |
5461 | { | |
5462 | DisasContext *dc = container_of(dcbase, DisasContext, base); | |
5463 | CPUSPARCState *env = cpu_env(cs); | |
5464 | int bound; | |
5465 | ||
5466 | dc->pc = dc->base.pc_first; | |
5467 | dc->npc = (target_ulong)dc->base.tb->cs_base; | |
5468 | dc->cc_op = CC_OP_DYNAMIC; | |
5469 | dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK; | |
5470 | dc->def = &env->def; | |
5471 | dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags); | |
5472 | dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags); | |
5473 | #ifndef CONFIG_USER_ONLY | |
5474 | dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0; | |
5475 | #endif | |
5476 | #ifdef TARGET_SPARC64 | |
5477 | dc->fprs_dirty = 0; | |
5478 | dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff; | |
5479 | #ifndef CONFIG_USER_ONLY | |
5480 | dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0; | |
5481 | #endif | |
5482 | #endif | |
5483 | /* | |
5484 | * if we reach a page boundary, we stop generation so that the | |
5485 | * PC of a TT_TFAULT exception is always in the right page | |
5486 | */ | |
5487 | bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; | |
5488 | dc->base.max_insns = MIN(dc->base.max_insns, bound); | |
5489 | } | |
5490 | ||
/* Translator hook: no per-TB prologue is needed for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5494 | ||
5495 | static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) | |
5496 | { | |
5497 | DisasContext *dc = container_of(dcbase, DisasContext, base); | |
5498 | target_ulong npc = dc->npc; | |
5499 | ||
5500 | if (npc & 3) { | |
5501 | switch (npc) { | |
5502 | case JUMP_PC: | |
5503 | assert(dc->jump_pc[1] == dc->pc + 4); | |
5504 | npc = dc->jump_pc[0] | JUMP_PC; | |
5505 | break; | |
5506 | case DYNAMIC_PC: | |
5507 | case DYNAMIC_PC_LOOKUP: | |
5508 | npc = DYNAMIC_PC; | |
5509 | break; | |
5510 | default: | |
5511 | g_assert_not_reached(); | |
5512 | } | |
5513 | } | |
5514 | tcg_gen_insn_start(dc->pc, npc); | |
5515 | } | |
5516 | ||
5517 | static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) | |
5518 | { | |
5519 | DisasContext *dc = container_of(dcbase, DisasContext, base); | |
5520 | CPUSPARCState *env = cpu_env(cs); | |
5521 | unsigned int insn; | |
5522 | ||
5523 | insn = translator_ldl(env, &dc->base, dc->pc); | |
5524 | dc->base.pc_next += 4; | |
5525 | ||
5526 | if (!decode(dc, insn)) { | |
5527 | disas_sparc_legacy(dc, insn); | |
5528 | } | |
5529 | ||
5530 | if (dc->base.is_jmp == DISAS_NORETURN) { | |
5531 | return; | |
5532 | } | |
5533 | if (dc->pc != dc->base.pc_next) { | |
5534 | dc->base.is_jmp = DISAS_TOO_MANY; | |
5535 | } | |
5536 | } | |
5537 | ||
/*
 * Translator hook: finish the TB.  Emit the TB exit appropriate to the
 * (possibly dynamic) pc/npc state, then materialize any delayed
 * exceptions queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is dynamic (low bits tag a marker).
         * DYNAMIC_PC_LOOKUP still permits a TB lookup; plain DYNAMIC_PC
         * forces a full exit.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* pc is static: store it; cpu_pc already holds it otherwise. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch now. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /*
     * Emit the out-of-line code for each delayed exception: set pc
     * (and npc when it is static) and raise.  The helper does not
     * return, so no exit is needed after it.  Free the list as we go.
     */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5617 | ||
/* Translator hook: disassemble the translated guest code into the log. */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
5624 | ||
/* Hook table wiring the SPARC frontend into the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5633 | ||
5634 | void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, | |
5635 | target_ulong pc, void *host_pc) | |
5636 | { | |
5637 | DisasContext dc = {}; | |
5638 | ||
5639 | translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base); | |
5640 | } | |
5641 | ||
5642 | void sparc_tcg_init(void) | |
5643 | { | |
5644 | static const char gregnames[32][4] = { | |
5645 | "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", | |
5646 | "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", | |
5647 | "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7", | |
5648 | "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", | |
5649 | }; | |
5650 | static const char fregnames[32][4] = { | |
5651 | "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14", | |
5652 | "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30", | |
5653 | "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", | |
5654 | "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", | |
5655 | }; | |
5656 | ||
5657 | static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = { | |
5658 | #ifdef TARGET_SPARC64 | |
5659 | { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" }, | |
5660 | { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" }, | |
5661 | #endif | |
5662 | { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" }, | |
5663 | { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" }, | |
5664 | }; | |
5665 | ||
5666 | static const struct { TCGv *ptr; int off; const char *name; } rtl[] = { | |
5667 | #ifdef TARGET_SPARC64 | |
5668 | { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" }, | |
5669 | #endif | |
5670 | { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" }, | |
5671 | { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" }, | |
5672 | { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" }, | |
5673 | { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" }, | |
5674 | { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" }, | |
5675 | { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" }, | |
5676 | { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" }, | |
5677 | { &cpu_y, offsetof(CPUSPARCState, y), "y" }, | |
5678 | { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" }, | |
5679 | }; | |
5680 | ||
5681 | unsigned int i; | |
5682 | ||
5683 | cpu_regwptr = tcg_global_mem_new_ptr(tcg_env, | |
5684 | offsetof(CPUSPARCState, regwptr), | |
5685 | "regwptr"); | |
5686 | ||
5687 | for (i = 0; i < ARRAY_SIZE(r32); ++i) { | |
5688 | *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name); | |
5689 | } | |
5690 | ||
5691 | for (i = 0; i < ARRAY_SIZE(rtl); ++i) { | |
5692 | *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name); | |
5693 | } | |
5694 | ||
5695 | cpu_regs[0] = NULL; | |
5696 | for (i = 1; i < 8; ++i) { | |
5697 | cpu_regs[i] = tcg_global_mem_new(tcg_env, | |
5698 | offsetof(CPUSPARCState, gregs[i]), | |
5699 | gregnames[i]); | |
5700 | } | |
5701 | ||
5702 | for (i = 8; i < 32; ++i) { | |
5703 | cpu_regs[i] = tcg_global_mem_new(cpu_regwptr, | |
5704 | (i - 8) * sizeof(target_ulong), | |
5705 | gregnames[i]); | |
5706 | } | |
5707 | ||
5708 | for (i = 0; i < TARGET_DPREGS; i++) { | |
5709 | cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env, | |
5710 | offsetof(CPUSPARCState, fpr[i]), | |
5711 | fregnames[i]); | |
5712 | } | |
5713 | } | |
5714 | ||
5715 | void sparc_restore_state_to_opc(CPUState *cs, | |
5716 | const TranslationBlock *tb, | |
5717 | const uint64_t *data) | |
5718 | { | |
5719 | SPARCCPU *cpu = SPARC_CPU(cs); | |
5720 | CPUSPARCState *env = &cpu->env; | |
5721 | target_ulong pc = data[0]; | |
5722 | target_ulong npc = data[1]; | |
5723 | ||
5724 | env->pc = pc; | |
5725 | if (npc == DYNAMIC_PC) { | |
5726 | /* dynamic NPC: already stored */ | |
5727 | } else if (npc & JUMP_PC) { | |
5728 | /* jump PC: use 'cond' and the jump targets of the translation */ | |
5729 | if (env->cond) { | |
5730 | env->npc = npc & ~3; | |
5731 | } else { | |
5732 | env->npc = pc + 4; | |
5733 | } | |
5734 | } else { | |
5735 | env->npc = npc; | |
5736 | } | |
5737 | } |