]> git.ipfire.org Git - thirdparty/qemu.git/blame - target-alpha/translate.c
target-alpha: Use a fixed frequency for the RPCC in system mode.
[thirdparty/qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
25#include "exec-all.h"
26#include "disas.h"
ae8ecd42 27#include "host-utils.h"
57fec1fe 28#include "tcg-op.h"
ca10f867 29#include "qemu-common.h"
4c9649a9 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
/* Define ALPHA_DEBUG_DISAS to get per-instruction disassembly logging. */
#undef ALPHA_DEBUG_DISAS
/* Poke the softfloat status fields directly instead of calling helpers. */
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
43
/* Per-translation-block state threaded through the instruction decoders. */
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;            /* Virtual PC of the insn being translated. */
    int mem_idx;            /* MMU index used for memory accesses. */

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
56
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
79
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];         /* Integer registers; $31 reads as zero. */
static TCGv cpu_fir[31];        /* FP registers; $f31 reads as zero. */
static TCGv cpu_pc;
/* Load-locked/store-conditional tracking state. */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
/* PALcode scratch registers. */
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
/* Sized for "ir0".."ir30" plus "fir0".."fir30", NUL-terminated. */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
98
/* One-time initialization of the TCG global variables that mirror the
   CPU state, and registration of the generated helpers.  Idempotent.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Carve the register-name strings out of the static buffer; each
       name is written in place and P advances past its terminator.  */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
    /* Re-including helper.h with GEN_HELPER == 2 expands to the helper
       registration calls rather than the declarations.  */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
151
bf1b03fe 152static void gen_excp_1(int exception, int error_code)
4c9649a9 153{
a7812ae4 154 TCGv_i32 tmp1, tmp2;
6ad02592 155
6ad02592
AJ
156 tmp1 = tcg_const_i32(exception);
157 tmp2 = tcg_const_i32(error_code);
a7812ae4
PB
158 gen_helper_excp(tmp1, tmp2);
159 tcg_temp_free_i32(tmp2);
160 tcg_temp_free_i32(tmp1);
bf1b03fe 161}
8aa3fa20 162
/* Raise EXCEPTION with ERROR_CODE at the current instruction: commit
   the PC to the CPU state first so the handler sees the faulting insn.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
169
/* Raise an illegal-opcode (OPCDEC) exception for the current insn. */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
174
/* LDF: load a 32-bit VAX F-float and expand it to register format. */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
185
/* LDG: load a 64-bit VAX G-float and convert to register format. */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
193
/* LDS: load a 32-bit IEEE single and expand it to register format. */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
204
/* LDL_L: load-locked longword; record the address and loaded value
   for a later store-conditional to compare against.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
211
/* LDQ_L: load-locked quadword; record the address and loaded value
   for a later store-conditional to compare against.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
218
/* Common code for register loads.  TCG_GEN_QEMU_LOAD emits the actual
   access; RA is the destination (FP register if FP), RB+DISP16 the
   address.  CLEAR masks the low 3 bits of the address (LDQ_U).  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register is $31: the address is just the displacement. */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
252
/* STF: compress a register-format VAX F-float and store 32 bits. */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
263
/* STG: convert a register-format VAX G-float and store 64 bits. */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
271
/* STS: compress a register-format IEEE single and store 32 bits. */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
282
/* Common code for register stores.  Mirrors gen_load_mem; RA is the
   source register ($31/$f31 stores zero), RB+DISP16 the address, and
   CLEAR masks the low 3 bits of the address (STQ_U).  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* The zero register: materialize a zero to store.  */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
316
/* STL_C/STQ_C: store-conditional.  RA holds the value to store and
   receives the success flag (1/0); RB+DISP16 is the address; QUAD
   selects quadword vs longword.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    /* Must survive across the brcond below, hence a local temp.  */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address does not match the load-locked address.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        /* Fail if the memory value changed since the load-locked.  */
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Invalidate the lock in either case.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
383
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
dbb30fe6 392
4af70374
RH
393static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
394{
395 uint64_t dest = ctx->pc + (disp << 2);
396
397 if (ra != 31) {
398 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
399 }
400
401 /* Notice branch-to-next; used to initialize RA with the PC. */
402 if (disp == 0) {
403 return 0;
404 } else if (use_goto_tb(ctx, dest)) {
405 tcg_gen_goto_tb(0);
406 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 407 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
408 return EXIT_GOTO_TB;
409 } else {
410 tcg_gen_movi_i64(cpu_pc, dest);
411 return EXIT_PC_UPDATED;
412 }
dbb30fe6
RH
413}
414
/* Emit a conditional branch to PC+DISP taken when CMP compares true
   against zero under COND.  Uses goto_tb chaining when permitted,
   otherwise an explicit diamond updating cpu_pc.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        /* Not-taken path: chain to the fallthrough TB.  */
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        /* Taken path: chain to the target TB.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
459
/* Integer conditional branch.  MASK selects BLBC/BLBS-style testing
   of only the low bit of RA; otherwise the whole register is tested.  */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* $31 reads as zero: compare against a constant zero.  */
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
478
/* Fold -0.0 for comparison with COND.  FP branches/cmoves compare the
   raw register bits against zero, so -0.0 (sign bit only) must be
   normalized for each condition so it behaves as equal to +0.0.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
510
/* Floating-point conditional branch on register RA.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
526
bbe1dab4 527static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 528 int islit, uint8_t lit, int mask)
4c9649a9 529{
bbe1dab4 530 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
531 int l1;
532
533 if (unlikely(rc == 31))
534 return;
535
536 l1 = gen_new_label();
537
538 if (ra != 31) {
539 if (mask) {
a7812ae4 540 TCGv tmp = tcg_temp_new();
9c29504e
AJ
541 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
542 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
543 tcg_temp_free(tmp);
544 } else
545 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
546 } else {
547 /* Very uncommon case - Do not bother to optimize. */
548 TCGv tmp = tcg_const_i64(0);
549 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
550 tcg_temp_free(tmp);
551 }
552
4c9649a9 553 if (islit)
9c29504e 554 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 555 else
dfaa8583 556 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 557 gen_set_label(l1);
4c9649a9
JM
558}
559
bbe1dab4 560static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 561{
4af70374 562 TCGv cmp_tmp;
dbb30fe6
RH
563 int l1;
564
4af70374 565 if (unlikely(rc == 31)) {
dbb30fe6 566 return;
4af70374
RH
567 }
568
569 cmp_tmp = tcg_temp_new();
dbb30fe6 570 if (unlikely(ra == 31)) {
4af70374
RH
571 tcg_gen_movi_i64(cmp_tmp, 0);
572 } else {
573 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
574 }
575
576 l1 = gen_new_label();
4af70374
RH
577 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
578 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
579
580 if (rb != 31)
581 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
582 else
583 tcg_gen_movi_i64(cpu_fir[rc], 0);
584 gen_set_label(l1);
585}
586
/* Instruction qualifier bits extracted from the FP function field.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
597
/* Emit code to install the rounding mode requested by FN11's /RM
   qualifier, caching the last value set in this TB to avoid redundant
   stores.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at runtime.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
636
/* Emit code to install the flush-to-zero setting implied by FN11's /U
   qualifier, caching the last value set in this TB.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
665
/* Fetch FP register REG as an IEEE input operand, squashing denormals
   and raising traps as required by FN11's qualifiers.  Returns a new
   temporary the caller must free.  */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
680
/* Emit code to clear the accumulated softfloat exception flags.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
692
/* Emit code to raise any pending FP exceptions for the insn writing
   register RC, with the flags in IGNORE masked off first.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
730
/* Raise pending FP exceptions, ignoring inexact unless /I requested.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 735
/* CVTLQ: expand a longword stored in FP register format to a quad.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
757
/* CVTQL: compress a quadword into longword-in-FP-register format
   (bits scattered into the F-float layout).  No overflow checking.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
777
/* CVTQL/V: as gen_fcvtql, but raise an integer-overflow arithmetic
   trap when the source does not fit in 32 bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Trap unless the value equals its own 32-bit sign-extension.  */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
792
/* Generate a two-operand (rb -> rc) VAX FP operation via its helper,
   substituting a zero constant when rb is $f31.  */
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
/* Generate a two-operand IEEE FP operation, honoring the rounding,
   flush-to-zero and exception qualifiers in FN11.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
837
/* Instantiate gen_f<name> wrappers over gen_ieee_arith2.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
848
/* CVTTQ: convert T-float to quadword integer, dispatching on the
   qualifier combinations to specialized helpers where possible.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
888
/* Generate an integer-to-FP conversion, honoring the rounding mode
   qualifier in FN11.  */
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
923
/* Instantiate gen_f<name> wrappers over gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
932
/* Common code for CPYS/CPYSN/CPYSE: combine the MASK bits of RA
   (inverted when INV_A) with the non-MASK bits of RB into RC,
   special-casing $f31 (reads as zero) on either input.  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;         /* Set when the A/B contribution is zero. */

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* OR the two contributions, eliding known-zero operands.  */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
992
/* CPYS: copy the sign bit of RA onto RB.  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: copy the complemented sign bit of RA onto RB.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy the sign and exponent field of RA onto RB.  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1007
/* Generate a three-operand (ra, rb -> rc) VAX FP operation via its
   helper, substituting zero constants for $f31 inputs.  */
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
/* Generate a three-operand IEEE FP operation, honoring the rounding,
   flush-to-zero and exception qualifiers in FN11.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1074
/* Instantiate gen_f<name> wrappers over gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1089
/* Generate an IEEE FP comparison; like gen_ieee_arith3 but the inputs
   use the comparison flavor of input squashing and no rounding mode
   or flush-to-zero setup is needed.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1112
/* Instantiate gen_f<name> wrappers over gen_ieee_compare.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1123
248c42f3
RH
/* Expand an 8-bit ZAPNOT literal into the equivalent 64-bit mask:
   bit i of LIT decides whether byte i of the result is 0xff or 0x00.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    /* Walk from the most-significant byte down, shifting previously
       accumulated bytes up as we go.  */
    for (i = 7; i >= 0; --i) {
        mask <<= 8;
        if (lit & (1 << i)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1135
87d98f95
RH
1136/* Implement zapnot with an immediate operand, which expands to some
1137 form of immediate AND. This is a basic building block in the
1138 definition of many of the other byte manipulation instructions. */
248c42f3 1139static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1140{
87d98f95
RH
1141 switch (lit) {
1142 case 0x00:
248c42f3 1143 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1144 break;
1145 case 0x01:
248c42f3 1146 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1147 break;
1148 case 0x03:
248c42f3 1149 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1150 break;
1151 case 0x0f:
248c42f3 1152 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1153 break;
1154 case 0xff:
248c42f3 1155 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1156 break;
1157 default:
248c42f3 1158 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1159 break;
1160 }
1161}
1162
1163static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164{
1165 if (unlikely(rc == 31))
1166 return;
1167 else if (unlikely(ra == 31))
1168 tcg_gen_movi_i64(cpu_ir[rc], 0);
1169 else if (islit)
248c42f3 1170 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1171 else
1172 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1173}
1174
1175static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176{
1177 if (unlikely(rc == 31))
1178 return;
1179 else if (unlikely(ra == 31))
1180 tcg_gen_movi_i64(cpu_ir[rc], 0);
1181 else if (islit)
248c42f3 1182 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1183 else
1184 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1185}
1186
1187
248c42f3 1188/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1189static void gen_ext_h(int ra, int rb, int rc, int islit,
1190 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1191{
1192 if (unlikely(rc == 31))
1193 return;
377a43b6
RH
1194 else if (unlikely(ra == 31))
1195 tcg_gen_movi_i64(cpu_ir[rc], 0);
1196 else {
dfaa8583 1197 if (islit) {
377a43b6
RH
1198 lit = (64 - (lit & 7) * 8) & 0x3f;
1199 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1200 } else {
377a43b6 1201 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1202 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1203 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1204 tcg_gen_neg_i64(tmp1, tmp1);
1205 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1206 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1207 tcg_temp_free(tmp1);
dfaa8583 1208 }
248c42f3 1209 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1210 }
b3249f63
AJ
1211}
1212
248c42f3 1213/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1214static void gen_ext_l(int ra, int rb, int rc, int islit,
1215 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1216{
1217 if (unlikely(rc == 31))
1218 return;
377a43b6
RH
1219 else if (unlikely(ra == 31))
1220 tcg_gen_movi_i64(cpu_ir[rc], 0);
1221 else {
dfaa8583 1222 if (islit) {
377a43b6 1223 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1224 } else {
a7812ae4 1225 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1226 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1227 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1228 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1229 tcg_temp_free(tmp);
fe2b269a 1230 }
248c42f3
RH
1231 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1232 }
1233}
1234
50eb6e5c
RH
1235/* INSWH, INSLH, INSQH */
1236static void gen_ins_h(int ra, int rb, int rc, int islit,
1237 uint8_t lit, uint8_t byte_mask)
1238{
1239 if (unlikely(rc == 31))
1240 return;
1241 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1242 tcg_gen_movi_i64(cpu_ir[rc], 0);
1243 else {
1244 TCGv tmp = tcg_temp_new();
1245
1246 /* The instruction description has us left-shift the byte mask
1247 and extract bits <15:8> and apply that zap at the end. This
1248 is equivalent to simply performing the zap first and shifting
1249 afterward. */
1250 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1251
1252 if (islit) {
1253 /* Note that we have handled the lit==0 case above. */
1254 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1255 } else {
1256 TCGv shift = tcg_temp_new();
1257
1258 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1259 Do this portably by splitting the shift into two parts:
1260 shift_count-1 and 1. Arrange for the -1 by using
1261 ones-complement instead of twos-complement in the negation:
1262 ~((B & 7) * 8) & 63. */
1263
1264 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1265 tcg_gen_shli_i64(shift, shift, 3);
1266 tcg_gen_not_i64(shift, shift);
1267 tcg_gen_andi_i64(shift, shift, 0x3f);
1268
1269 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1270 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1271 tcg_temp_free(shift);
1272 }
1273 tcg_temp_free(tmp);
1274 }
1275}
1276
248c42f3 1277/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1278static void gen_ins_l(int ra, int rb, int rc, int islit,
1279 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1280{
1281 if (unlikely(rc == 31))
1282 return;
1283 else if (unlikely(ra == 31))
1284 tcg_gen_movi_i64(cpu_ir[rc], 0);
1285 else {
1286 TCGv tmp = tcg_temp_new();
1287
1288 /* The instruction description has us left-shift the byte mask
1289 the same number of byte slots as the data and apply the zap
1290 at the end. This is equivalent to simply performing the zap
1291 first and shifting afterward. */
1292 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1293
1294 if (islit) {
1295 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1296 } else {
1297 TCGv shift = tcg_temp_new();
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1301 tcg_temp_free(shift);
1302 }
1303 tcg_temp_free(tmp);
377a43b6 1304 }
b3249f63
AJ
1305}
1306
ffec44f1
RH
1307/* MSKWH, MSKLH, MSKQH */
1308static void gen_msk_h(int ra, int rb, int rc, int islit,
1309 uint8_t lit, uint8_t byte_mask)
1310{
1311 if (unlikely(rc == 31))
1312 return;
1313 else if (unlikely(ra == 31))
1314 tcg_gen_movi_i64(cpu_ir[rc], 0);
1315 else if (islit) {
1316 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1317 } else {
1318 TCGv shift = tcg_temp_new();
1319 TCGv mask = tcg_temp_new();
1320
1321 /* The instruction description is as above, where the byte_mask
1322 is shifted left, and then we extract bits <15:8>. This can be
1323 emulated with a right-shift on the expanded byte mask. This
1324 requires extra care because for an input <2:0> == 0 we need a
1325 shift of 64 bits in order to generate a zero. This is done by
1326 splitting the shift into two parts, the variable shift - 1
1327 followed by a constant 1 shift. The code we expand below is
1328 equivalent to ~((B & 7) * 8) & 63. */
1329
1330 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1331 tcg_gen_shli_i64(shift, shift, 3);
1332 tcg_gen_not_i64(shift, shift);
1333 tcg_gen_andi_i64(shift, shift, 0x3f);
1334 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1335 tcg_gen_shr_i64(mask, mask, shift);
1336 tcg_gen_shri_i64(mask, mask, 1);
1337
1338 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1339
1340 tcg_temp_free(mask);
1341 tcg_temp_free(shift);
1342 }
1343}
1344
14ab1634 1345/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1346static void gen_msk_l(int ra, int rb, int rc, int islit,
1347 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1348{
1349 if (unlikely(rc == 31))
1350 return;
1351 else if (unlikely(ra == 31))
1352 tcg_gen_movi_i64(cpu_ir[rc], 0);
1353 else if (islit) {
1354 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1355 } else {
1356 TCGv shift = tcg_temp_new();
1357 TCGv mask = tcg_temp_new();
1358
1359 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1360 tcg_gen_shli_i64(shift, shift, 3);
1361 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1362 tcg_gen_shl_i64(mask, mask, shift);
1363
1364 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1365
1366 tcg_temp_free(mask);
1367 tcg_temp_free(shift);
1368 }
1369}
1370
04acd307 1371/* Code to call arith3 helpers */
a7812ae4 1372#define ARITH3(name) \
636aa200
BS
1373static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1374 uint8_t lit) \
a7812ae4
PB
1375{ \
1376 if (unlikely(rc == 31)) \
1377 return; \
1378 \
1379 if (ra != 31) { \
1380 if (islit) { \
1381 TCGv tmp = tcg_const_i64(lit); \
1382 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1383 tcg_temp_free(tmp); \
1384 } else \
1385 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1386 } else { \
1387 TCGv tmp1 = tcg_const_i64(0); \
1388 if (islit) { \
1389 TCGv tmp2 = tcg_const_i64(lit); \
1390 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1391 tcg_temp_free(tmp2); \
1392 } else \
1393 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1394 tcg_temp_free(tmp1); \
1395 } \
b3249f63 1396}
a7812ae4
PB
1397ARITH3(cmpbge)
1398ARITH3(addlv)
1399ARITH3(sublv)
1400ARITH3(addqv)
1401ARITH3(subqv)
a7812ae4
PB
1402ARITH3(umulh)
1403ARITH3(mullv)
1404ARITH3(mulqv)
13e4df99
RH
1405ARITH3(minub8)
1406ARITH3(minsb8)
1407ARITH3(minuw4)
1408ARITH3(minsw4)
1409ARITH3(maxub8)
1410ARITH3(maxsb8)
1411ARITH3(maxuw4)
1412ARITH3(maxsw4)
1413ARITH3(perr)
1414
1415#define MVIOP2(name) \
1416static inline void glue(gen_, name)(int rb, int rc) \
1417{ \
1418 if (unlikely(rc == 31)) \
1419 return; \
1420 if (unlikely(rb == 31)) \
1421 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1422 else \
1423 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1424}
1425MVIOP2(pklb)
1426MVIOP2(pkwb)
1427MVIOP2(unpkbl)
1428MVIOP2(unpkbw)
b3249f63 1429
9e05960f
RH
1430static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1431 int islit, uint8_t lit)
01ff9cc8 1432{
9e05960f 1433 TCGv va, vb;
01ff9cc8 1434
9e05960f 1435 if (unlikely(rc == 31)) {
13e4df99 1436 return;
9e05960f 1437 }
01ff9cc8 1438
9e05960f
RH
1439 if (ra == 31) {
1440 va = tcg_const_i64(0);
1441 } else {
1442 va = cpu_ir[ra];
1443 }
1444 if (islit) {
1445 vb = tcg_const_i64(lit);
1446 } else {
1447 vb = cpu_ir[rb];
1448 }
01ff9cc8 1449
9e05960f 1450 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1451
9e05960f
RH
1452 if (ra == 31) {
1453 tcg_temp_free(va);
1454 }
1455 if (islit) {
1456 tcg_temp_free(vb);
1457 }
01ff9cc8
AJ
1458}
1459
ac316ca4
RH
1460static void gen_rx(int ra, int set)
1461{
1462 TCGv_i32 tmp;
1463
1464 if (ra != 31) {
1465 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1466 }
1467
1468 tmp = tcg_const_i32(set);
1469 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1470 tcg_temp_free_i32(tmp);
1471}
1472
2ace7e55
RH
1473static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1474{
1475 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1476 to internal cpu registers. */
1477
1478 /* Unprivileged PAL call */
1479 if (palcode >= 0x80 && palcode < 0xC0) {
1480 switch (palcode) {
1481 case 0x86:
1482 /* IMB */
1483 /* No-op inside QEMU. */
1484 break;
1485 case 0x9E:
1486 /* RDUNIQUE */
1487 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1488 break;
1489 case 0x9F:
1490 /* WRUNIQUE */
1491 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1492 break;
1493 default:
1494 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
1495 }
1496 return NO_EXIT;
1497 }
1498
1499#ifndef CONFIG_USER_ONLY
1500 /* Privileged PAL code */
1501 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1502 switch (palcode) {
1503 case 0x01:
1504 /* CFLUSH */
1505 /* No-op inside QEMU. */
1506 break;
1507 case 0x02:
1508 /* DRAINA */
1509 /* No-op inside QEMU. */
1510 break;
1511 case 0x2D:
1512 /* WRVPTPTR */
1513 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
1514 break;
1515 case 0x31:
1516 /* WRVAL */
1517 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1518 break;
1519 case 0x32:
1520 /* RDVAL */
1521 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1522 break;
1523
1524 case 0x35: {
1525 /* SWPIPL */
1526 TCGv tmp;
1527
1528 /* Note that we already know we're in kernel mode, so we know
1529 that PS only contains the 3 IPL bits. */
1530 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1531
1532 /* But make sure and store only the 3 IPL bits from the user. */
1533 tmp = tcg_temp_new();
1534 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1535 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
1536 tcg_temp_free(tmp);
1537 break;
1538 }
1539
1540 case 0x36:
1541 /* RDPS */
1542 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1543 break;
1544 case 0x38:
1545 /* WRUSP */
1546 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1547 break;
1548 case 0x3A:
1549 /* RDUSP */
1550 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1551 break;
1552 case 0x3C:
1553 /* WHAMI */
1554 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1555 offsetof(CPUState, cpu_index));
1556 break;
1557
1558 default:
1559 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
1560 }
1561 return NO_EXIT;
1562 }
1563#endif
1564
1565 return gen_invalid(ctx);
1566}
1567
26b46094
RH
1568#ifndef CONFIG_USER_ONLY
1569
1570#define PR_BYTE 0x100000
1571#define PR_LONG 0x200000
1572
1573static int cpu_pr_data(int pr)
1574{
1575 switch (pr) {
1576 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1577 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1578 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1579 case 3: return offsetof(CPUAlphaState, trap_arg0);
1580 case 4: return offsetof(CPUAlphaState, trap_arg1);
1581 case 5: return offsetof(CPUAlphaState, trap_arg2);
1582 case 6: return offsetof(CPUAlphaState, exc_addr);
1583 case 7: return offsetof(CPUAlphaState, palbr);
1584 case 8: return offsetof(CPUAlphaState, ptbr);
1585 case 9: return offsetof(CPUAlphaState, vptptr);
1586 case 10: return offsetof(CPUAlphaState, unique);
1587 case 11: return offsetof(CPUAlphaState, sysval);
1588 case 12: return offsetof(CPUAlphaState, usp);
1589
1590 case 32 ... 39:
1591 return offsetof(CPUAlphaState, shadow[pr - 32]);
1592 case 40 ... 63:
1593 return offsetof(CPUAlphaState, scratch[pr - 40]);
1594 }
1595 return 0;
1596}
1597
1598static void gen_mfpr(int ra, int regno)
1599{
1600 int data = cpu_pr_data(regno);
1601
1602 /* In our emulated PALcode, these processor registers have no
1603 side effects from reading. */
1604 if (ra == 31) {
1605 return;
1606 }
1607
1608 /* The basic registers are data only, and unknown registers
1609 are read-zero, write-ignore. */
1610 if (data == 0) {
1611 tcg_gen_movi_i64(cpu_ir[ra], 0);
1612 } else if (data & PR_BYTE) {
1613 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1614 } else if (data & PR_LONG) {
1615 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1616 } else {
1617 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1618 }
1619}
1620
1621static void gen_mtpr(int rb, int regno)
1622{
1623 TCGv tmp;
1624 int data;
1625
1626 if (rb == 31) {
1627 tmp = tcg_const_i64(0);
1628 } else {
1629 tmp = cpu_ir[rb];
1630 }
1631
1632 /* The basic registers are data only, and unknown registers
1633 are read-zero, write-ignore. */
1634 data = cpu_pr_data(regno);
1635 if (data != 0) {
1636 if (data & PR_BYTE) {
1637 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1638 } else if (data & PR_LONG) {
1639 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1640 } else {
1641 tcg_gen_st_i64(tmp, cpu_env, data);
1642 }
1643 }
1644
1645 if (rb == 31) {
1646 tcg_temp_free(tmp);
1647 }
1648}
1649#endif /* !USER_ONLY*/
1650
4af70374 1651static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1652{
1653 uint32_t palcode;
1654 int32_t disp21, disp16, disp12;
f88fe4e3
BS
1655 uint16_t fn11;
1656 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1657 uint8_t lit;
4af70374 1658 ExitStatus ret;
4c9649a9
JM
1659
1660 /* Decode all instruction fields */
1661 opc = insn >> 26;
1662 ra = (insn >> 21) & 0x1F;
1663 rb = (insn >> 16) & 0x1F;
1664 rc = insn & 0x1F;
13e4df99 1665 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1666 if (rb == 31 && !islit) {
1667 islit = 1;
1668 lit = 0;
1669 } else
1670 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1671 palcode = insn & 0x03FFFFFF;
1672 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1673 disp16 = (int16_t)(insn & 0x0000FFFF);
1674 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
4c9649a9
JM
1675 fn11 = (insn >> 5) & 0x000007FF;
1676 fpfn = fn11 & 0x3F;
1677 fn7 = (insn >> 5) & 0x0000007F;
1678 fn2 = (insn >> 5) & 0x00000003;
806991da 1679 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1680 opc, ra, rb, rc, disp16);
806991da 1681
4af70374 1682 ret = NO_EXIT;
4c9649a9
JM
1683 switch (opc) {
1684 case 0x00:
1685 /* CALL_PAL */
2ace7e55
RH
1686 ret = gen_call_pal(ctx, palcode);
1687 break;
4c9649a9
JM
1688 case 0x01:
1689 /* OPC01 */
1690 goto invalid_opc;
1691 case 0x02:
1692 /* OPC02 */
1693 goto invalid_opc;
1694 case 0x03:
1695 /* OPC03 */
1696 goto invalid_opc;
1697 case 0x04:
1698 /* OPC04 */
1699 goto invalid_opc;
1700 case 0x05:
1701 /* OPC05 */
1702 goto invalid_opc;
1703 case 0x06:
1704 /* OPC06 */
1705 goto invalid_opc;
1706 case 0x07:
1707 /* OPC07 */
1708 goto invalid_opc;
1709 case 0x08:
1710 /* LDA */
1ef4ef4e 1711 if (likely(ra != 31)) {
496cb5b9 1712 if (rb != 31)
3761035f
AJ
1713 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1714 else
1715 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1716 }
4c9649a9
JM
1717 break;
1718 case 0x09:
1719 /* LDAH */
1ef4ef4e 1720 if (likely(ra != 31)) {
496cb5b9 1721 if (rb != 31)
3761035f
AJ
1722 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1723 else
1724 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1725 }
4c9649a9
JM
1726 break;
1727 case 0x0A:
1728 /* LDBU */
a18ad893
RH
1729 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1730 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1731 break;
1732 }
1733 goto invalid_opc;
4c9649a9
JM
1734 case 0x0B:
1735 /* LDQ_U */
f18cd223 1736 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1737 break;
1738 case 0x0C:
1739 /* LDWU */
a18ad893
RH
1740 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1741 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1742 break;
1743 }
1744 goto invalid_opc;
4c9649a9
JM
1745 case 0x0D:
1746 /* STW */
6910b8f6 1747 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1748 break;
1749 case 0x0E:
1750 /* STB */
6910b8f6 1751 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1752 break;
1753 case 0x0F:
1754 /* STQ_U */
6910b8f6 1755 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1756 break;
1757 case 0x10:
1758 switch (fn7) {
1759 case 0x00:
1760 /* ADDL */
30c7183b
AJ
1761 if (likely(rc != 31)) {
1762 if (ra != 31) {
1763 if (islit) {
1764 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1765 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1766 } else {
30c7183b
AJ
1767 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1768 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1769 }
30c7183b
AJ
1770 } else {
1771 if (islit)
dfaa8583 1772 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1773 else
dfaa8583 1774 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1775 }
1776 }
4c9649a9
JM
1777 break;
1778 case 0x02:
1779 /* S4ADDL */
30c7183b
AJ
1780 if (likely(rc != 31)) {
1781 if (ra != 31) {
a7812ae4 1782 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1783 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1784 if (islit)
1785 tcg_gen_addi_i64(tmp, tmp, lit);
1786 else
1787 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1788 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1789 tcg_temp_free(tmp);
30c7183b
AJ
1790 } else {
1791 if (islit)
1792 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1793 else
dfaa8583 1794 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1795 }
1796 }
4c9649a9
JM
1797 break;
1798 case 0x09:
1799 /* SUBL */
30c7183b
AJ
1800 if (likely(rc != 31)) {
1801 if (ra != 31) {
dfaa8583 1802 if (islit)
30c7183b 1803 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1804 else
30c7183b 1805 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1806 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1807 } else {
1808 if (islit)
1809 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1810 else {
30c7183b
AJ
1811 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1812 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1813 }
1814 }
4c9649a9
JM
1815 break;
1816 case 0x0B:
1817 /* S4SUBL */
30c7183b
AJ
1818 if (likely(rc != 31)) {
1819 if (ra != 31) {
a7812ae4 1820 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1821 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1822 if (islit)
1823 tcg_gen_subi_i64(tmp, tmp, lit);
1824 else
1825 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1826 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1827 tcg_temp_free(tmp);
30c7183b
AJ
1828 } else {
1829 if (islit)
1830 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1831 else {
30c7183b
AJ
1832 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1833 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1834 }
30c7183b
AJ
1835 }
1836 }
4c9649a9
JM
1837 break;
1838 case 0x0F:
1839 /* CMPBGE */
a7812ae4 1840 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1841 break;
1842 case 0x12:
1843 /* S8ADDL */
30c7183b
AJ
1844 if (likely(rc != 31)) {
1845 if (ra != 31) {
a7812ae4 1846 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1847 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1848 if (islit)
1849 tcg_gen_addi_i64(tmp, tmp, lit);
1850 else
1851 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1852 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1853 tcg_temp_free(tmp);
30c7183b
AJ
1854 } else {
1855 if (islit)
1856 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1857 else
dfaa8583 1858 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1859 }
1860 }
4c9649a9
JM
1861 break;
1862 case 0x1B:
1863 /* S8SUBL */
30c7183b
AJ
1864 if (likely(rc != 31)) {
1865 if (ra != 31) {
a7812ae4 1866 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1867 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1868 if (islit)
1869 tcg_gen_subi_i64(tmp, tmp, lit);
1870 else
1871 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1872 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1873 tcg_temp_free(tmp);
30c7183b
AJ
1874 } else {
1875 if (islit)
1876 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1877 else
30c7183b
AJ
1878 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1879 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1880 }
30c7183b
AJ
1881 }
1882 }
4c9649a9
JM
1883 break;
1884 case 0x1D:
1885 /* CMPULT */
01ff9cc8 1886 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1887 break;
1888 case 0x20:
1889 /* ADDQ */
30c7183b
AJ
1890 if (likely(rc != 31)) {
1891 if (ra != 31) {
1892 if (islit)
1893 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1894 else
dfaa8583 1895 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1896 } else {
1897 if (islit)
1898 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1899 else
dfaa8583 1900 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1901 }
1902 }
4c9649a9
JM
1903 break;
1904 case 0x22:
1905 /* S4ADDQ */
30c7183b
AJ
1906 if (likely(rc != 31)) {
1907 if (ra != 31) {
a7812ae4 1908 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1909 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1910 if (islit)
1911 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1912 else
1913 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1914 tcg_temp_free(tmp);
30c7183b
AJ
1915 } else {
1916 if (islit)
1917 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1918 else
dfaa8583 1919 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1920 }
1921 }
4c9649a9
JM
1922 break;
1923 case 0x29:
1924 /* SUBQ */
30c7183b
AJ
1925 if (likely(rc != 31)) {
1926 if (ra != 31) {
1927 if (islit)
1928 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1929 else
dfaa8583 1930 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1931 } else {
1932 if (islit)
1933 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1934 else
dfaa8583 1935 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1936 }
1937 }
4c9649a9
JM
1938 break;
1939 case 0x2B:
1940 /* S4SUBQ */
30c7183b
AJ
1941 if (likely(rc != 31)) {
1942 if (ra != 31) {
a7812ae4 1943 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1944 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1945 if (islit)
1946 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1947 else
1948 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1949 tcg_temp_free(tmp);
30c7183b
AJ
1950 } else {
1951 if (islit)
1952 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1953 else
dfaa8583 1954 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1955 }
1956 }
4c9649a9
JM
1957 break;
1958 case 0x2D:
1959 /* CMPEQ */
01ff9cc8 1960 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1961 break;
1962 case 0x32:
1963 /* S8ADDQ */
30c7183b
AJ
1964 if (likely(rc != 31)) {
1965 if (ra != 31) {
a7812ae4 1966 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1967 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1968 if (islit)
1969 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1970 else
1971 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1972 tcg_temp_free(tmp);
30c7183b
AJ
1973 } else {
1974 if (islit)
1975 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1976 else
dfaa8583 1977 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1978 }
1979 }
4c9649a9
JM
1980 break;
1981 case 0x3B:
1982 /* S8SUBQ */
30c7183b
AJ
1983 if (likely(rc != 31)) {
1984 if (ra != 31) {
a7812ae4 1985 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1986 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1987 if (islit)
1988 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1989 else
1990 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1991 tcg_temp_free(tmp);
30c7183b
AJ
1992 } else {
1993 if (islit)
1994 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1995 else
dfaa8583 1996 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1997 }
1998 }
4c9649a9
JM
1999 break;
2000 case 0x3D:
2001 /* CMPULE */
01ff9cc8 2002 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2003 break;
2004 case 0x40:
2005 /* ADDL/V */
a7812ae4 2006 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2007 break;
2008 case 0x49:
2009 /* SUBL/V */
a7812ae4 2010 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2011 break;
2012 case 0x4D:
2013 /* CMPLT */
01ff9cc8 2014 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2015 break;
2016 case 0x60:
2017 /* ADDQ/V */
a7812ae4 2018 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2019 break;
2020 case 0x69:
2021 /* SUBQ/V */
a7812ae4 2022 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2023 break;
2024 case 0x6D:
2025 /* CMPLE */
01ff9cc8 2026 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2027 break;
2028 default:
2029 goto invalid_opc;
2030 }
2031 break;
2032 case 0x11:
2033 switch (fn7) {
2034 case 0x00:
2035 /* AND */
30c7183b 2036 if (likely(rc != 31)) {
dfaa8583 2037 if (ra == 31)
30c7183b
AJ
2038 tcg_gen_movi_i64(cpu_ir[rc], 0);
2039 else if (islit)
2040 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2041 else
2042 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2043 }
4c9649a9
JM
2044 break;
2045 case 0x08:
2046 /* BIC */
30c7183b
AJ
2047 if (likely(rc != 31)) {
2048 if (ra != 31) {
2049 if (islit)
2050 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2051 else
2052 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2053 } else
2054 tcg_gen_movi_i64(cpu_ir[rc], 0);
2055 }
4c9649a9
JM
2056 break;
2057 case 0x14:
2058 /* CMOVLBS */
bbe1dab4 2059 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2060 break;
2061 case 0x16:
2062 /* CMOVLBC */
bbe1dab4 2063 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2064 break;
2065 case 0x20:
2066 /* BIS */
30c7183b
AJ
2067 if (likely(rc != 31)) {
2068 if (ra != 31) {
2069 if (islit)
2070 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2071 else
30c7183b 2072 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2073 } else {
30c7183b
AJ
2074 if (islit)
2075 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2076 else
dfaa8583 2077 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2078 }
4c9649a9
JM
2079 }
2080 break;
2081 case 0x24:
2082 /* CMOVEQ */
bbe1dab4 2083 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2084 break;
2085 case 0x26:
2086 /* CMOVNE */
bbe1dab4 2087 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2088 break;
2089 case 0x28:
2090 /* ORNOT */
30c7183b 2091 if (likely(rc != 31)) {
dfaa8583 2092 if (ra != 31) {
30c7183b
AJ
2093 if (islit)
2094 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2095 else
2096 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2097 } else {
2098 if (islit)
2099 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2100 else
2101 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2102 }
2103 }
4c9649a9
JM
2104 break;
2105 case 0x40:
2106 /* XOR */
30c7183b
AJ
2107 if (likely(rc != 31)) {
2108 if (ra != 31) {
2109 if (islit)
2110 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2111 else
dfaa8583 2112 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2113 } else {
2114 if (islit)
2115 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2116 else
dfaa8583 2117 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2118 }
2119 }
4c9649a9
JM
2120 break;
2121 case 0x44:
2122 /* CMOVLT */
bbe1dab4 2123 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2124 break;
2125 case 0x46:
2126 /* CMOVGE */
bbe1dab4 2127 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2128 break;
2129 case 0x48:
2130 /* EQV */
30c7183b
AJ
2131 if (likely(rc != 31)) {
2132 if (ra != 31) {
2133 if (islit)
2134 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2135 else
2136 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2137 } else {
2138 if (islit)
2139 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2140 else
dfaa8583 2141 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2142 }
2143 }
4c9649a9
JM
2144 break;
2145 case 0x61:
2146 /* AMASK */
ae8ecd42 2147 if (likely(rc != 31)) {
a18ad893
RH
2148 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2149
2150 if (islit) {
2151 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2152 } else {
2153 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2154 }
ae8ecd42 2155 }
4c9649a9
JM
2156 break;
2157 case 0x64:
2158 /* CMOVLE */
bbe1dab4 2159 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2160 break;
2161 case 0x66:
2162 /* CMOVGT */
bbe1dab4 2163 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2164 break;
2165 case 0x6C:
2166 /* IMPLVER */
3761035f 2167 if (rc != 31)
8579095b 2168 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2169 break;
2170 default:
2171 goto invalid_opc;
2172 }
2173 break;
2174 case 0x12:
2175 switch (fn7) {
2176 case 0x02:
2177 /* MSKBL */
14ab1634 2178 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2179 break;
2180 case 0x06:
2181 /* EXTBL */
377a43b6 2182 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2183 break;
2184 case 0x0B:
2185 /* INSBL */
248c42f3 2186 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2187 break;
2188 case 0x12:
2189 /* MSKWL */
14ab1634 2190 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2191 break;
2192 case 0x16:
2193 /* EXTWL */
377a43b6 2194 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2195 break;
2196 case 0x1B:
2197 /* INSWL */
248c42f3 2198 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2199 break;
2200 case 0x22:
2201 /* MSKLL */
14ab1634 2202 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2203 break;
2204 case 0x26:
2205 /* EXTLL */
377a43b6 2206 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2207 break;
2208 case 0x2B:
2209 /* INSLL */
248c42f3 2210 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2211 break;
2212 case 0x30:
2213 /* ZAP */
a7812ae4 2214 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2215 break;
2216 case 0x31:
2217 /* ZAPNOT */
a7812ae4 2218 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2219 break;
2220 case 0x32:
2221 /* MSKQL */
14ab1634 2222 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2223 break;
2224 case 0x34:
2225 /* SRL */
30c7183b
AJ
2226 if (likely(rc != 31)) {
2227 if (ra != 31) {
2228 if (islit)
2229 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2230 else {
a7812ae4 2231 TCGv shift = tcg_temp_new();
30c7183b
AJ
2232 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2233 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2234 tcg_temp_free(shift);
dfaa8583 2235 }
30c7183b
AJ
2236 } else
2237 tcg_gen_movi_i64(cpu_ir[rc], 0);
2238 }
4c9649a9
JM
2239 break;
2240 case 0x36:
2241 /* EXTQL */
377a43b6 2242 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2243 break;
2244 case 0x39:
2245 /* SLL */
30c7183b
AJ
2246 if (likely(rc != 31)) {
2247 if (ra != 31) {
2248 if (islit)
2249 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2250 else {
a7812ae4 2251 TCGv shift = tcg_temp_new();
30c7183b
AJ
2252 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2253 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2254 tcg_temp_free(shift);
dfaa8583 2255 }
30c7183b
AJ
2256 } else
2257 tcg_gen_movi_i64(cpu_ir[rc], 0);
2258 }
4c9649a9
JM
2259 break;
2260 case 0x3B:
2261 /* INSQL */
248c42f3 2262 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2263 break;
2264 case 0x3C:
2265 /* SRA */
30c7183b
AJ
2266 if (likely(rc != 31)) {
2267 if (ra != 31) {
2268 if (islit)
2269 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2270 else {
a7812ae4 2271 TCGv shift = tcg_temp_new();
30c7183b
AJ
2272 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2273 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2274 tcg_temp_free(shift);
dfaa8583 2275 }
30c7183b
AJ
2276 } else
2277 tcg_gen_movi_i64(cpu_ir[rc], 0);
2278 }
4c9649a9
JM
2279 break;
2280 case 0x52:
2281 /* MSKWH */
ffec44f1 2282 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2283 break;
2284 case 0x57:
2285 /* INSWH */
50eb6e5c 2286 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2287 break;
2288 case 0x5A:
2289 /* EXTWH */
377a43b6 2290 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2291 break;
2292 case 0x62:
2293 /* MSKLH */
ffec44f1 2294 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2295 break;
2296 case 0x67:
2297 /* INSLH */
50eb6e5c 2298 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2299 break;
2300 case 0x6A:
2301 /* EXTLH */
377a43b6 2302 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2303 break;
2304 case 0x72:
2305 /* MSKQH */
ffec44f1 2306 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2307 break;
2308 case 0x77:
2309 /* INSQH */
50eb6e5c 2310 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2311 break;
2312 case 0x7A:
2313 /* EXTQH */
377a43b6 2314 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2315 break;
2316 default:
2317 goto invalid_opc;
2318 }
2319 break;
2320 case 0x13:
2321 switch (fn7) {
2322 case 0x00:
2323 /* MULL */
30c7183b 2324 if (likely(rc != 31)) {
dfaa8583 2325 if (ra == 31)
30c7183b
AJ
2326 tcg_gen_movi_i64(cpu_ir[rc], 0);
2327 else {
2328 if (islit)
2329 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2330 else
2331 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2332 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2333 }
2334 }
4c9649a9
JM
2335 break;
2336 case 0x20:
2337 /* MULQ */
30c7183b 2338 if (likely(rc != 31)) {
dfaa8583 2339 if (ra == 31)
30c7183b
AJ
2340 tcg_gen_movi_i64(cpu_ir[rc], 0);
2341 else if (islit)
2342 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2343 else
2344 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2345 }
4c9649a9
JM
2346 break;
2347 case 0x30:
2348 /* UMULH */
a7812ae4 2349 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2350 break;
2351 case 0x40:
2352 /* MULL/V */
a7812ae4 2353 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2354 break;
2355 case 0x60:
2356 /* MULQ/V */
a7812ae4 2357 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2358 break;
2359 default:
2360 goto invalid_opc;
2361 }
2362 break;
2363 case 0x14:
f24518b5 2364 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2365 case 0x04:
2366 /* ITOFS */
a18ad893 2367 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2368 goto invalid_opc;
a18ad893 2369 }
f18cd223
AJ
2370 if (likely(rc != 31)) {
2371 if (ra != 31) {
a7812ae4 2372 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2373 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2374 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2375 tcg_temp_free_i32(tmp);
f18cd223
AJ
2376 } else
2377 tcg_gen_movi_i64(cpu_fir[rc], 0);
2378 }
4c9649a9
JM
2379 break;
2380 case 0x0A:
2381 /* SQRTF */
a18ad893
RH
2382 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2383 gen_fsqrtf(rb, rc);
2384 break;
2385 }
2386 goto invalid_opc;
4c9649a9
JM
2387 case 0x0B:
2388 /* SQRTS */
a18ad893
RH
2389 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2390 gen_fsqrts(ctx, rb, rc, fn11);
2391 break;
2392 }
2393 goto invalid_opc;
4c9649a9
JM
2394 case 0x14:
2395 /* ITOFF */
a18ad893 2396 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2397 goto invalid_opc;
a18ad893 2398 }
f18cd223
AJ
2399 if (likely(rc != 31)) {
2400 if (ra != 31) {
a7812ae4 2401 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2402 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2403 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2404 tcg_temp_free_i32(tmp);
f18cd223
AJ
2405 } else
2406 tcg_gen_movi_i64(cpu_fir[rc], 0);
2407 }
4c9649a9
JM
2408 break;
2409 case 0x24:
2410 /* ITOFT */
a18ad893 2411 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2412 goto invalid_opc;
a18ad893 2413 }
f18cd223
AJ
2414 if (likely(rc != 31)) {
2415 if (ra != 31)
2416 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2417 else
2418 tcg_gen_movi_i64(cpu_fir[rc], 0);
2419 }
4c9649a9
JM
2420 break;
2421 case 0x2A:
2422 /* SQRTG */
a18ad893
RH
2423 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2424 gen_fsqrtg(rb, rc);
2425 break;
2426 }
2427 goto invalid_opc;
4c9649a9
JM
2428 case 0x02B:
2429 /* SQRTT */
a18ad893
RH
2430 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2431 gen_fsqrtt(ctx, rb, rc, fn11);
2432 break;
2433 }
2434 goto invalid_opc;
4c9649a9
JM
2435 default:
2436 goto invalid_opc;
2437 }
2438 break;
2439 case 0x15:
2440 /* VAX floating point */
2441 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2442 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2443 case 0x00:
2444 /* ADDF */
a7812ae4 2445 gen_faddf(ra, rb, rc);
4c9649a9
JM
2446 break;
2447 case 0x01:
2448 /* SUBF */
a7812ae4 2449 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2450 break;
2451 case 0x02:
2452 /* MULF */
a7812ae4 2453 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2454 break;
2455 case 0x03:
2456 /* DIVF */
a7812ae4 2457 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2458 break;
2459 case 0x1E:
2460 /* CVTDG */
2461#if 0 // TODO
a7812ae4 2462 gen_fcvtdg(rb, rc);
4c9649a9
JM
2463#else
2464 goto invalid_opc;
2465#endif
2466 break;
2467 case 0x20:
2468 /* ADDG */
a7812ae4 2469 gen_faddg(ra, rb, rc);
4c9649a9
JM
2470 break;
2471 case 0x21:
2472 /* SUBG */
a7812ae4 2473 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2474 break;
2475 case 0x22:
2476 /* MULG */
a7812ae4 2477 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2478 break;
2479 case 0x23:
2480 /* DIVG */
a7812ae4 2481 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2482 break;
2483 case 0x25:
2484 /* CMPGEQ */
a7812ae4 2485 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2486 break;
2487 case 0x26:
2488 /* CMPGLT */
a7812ae4 2489 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2490 break;
2491 case 0x27:
2492 /* CMPGLE */
a7812ae4 2493 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2494 break;
2495 case 0x2C:
2496 /* CVTGF */
a7812ae4 2497 gen_fcvtgf(rb, rc);
4c9649a9
JM
2498 break;
2499 case 0x2D:
2500 /* CVTGD */
2501#if 0 // TODO
a7812ae4 2502 gen_fcvtgd(rb, rc);
4c9649a9
JM
2503#else
2504 goto invalid_opc;
2505#endif
2506 break;
2507 case 0x2F:
2508 /* CVTGQ */
a7812ae4 2509 gen_fcvtgq(rb, rc);
4c9649a9
JM
2510 break;
2511 case 0x3C:
2512 /* CVTQF */
a7812ae4 2513 gen_fcvtqf(rb, rc);
4c9649a9
JM
2514 break;
2515 case 0x3E:
2516 /* CVTQG */
a7812ae4 2517 gen_fcvtqg(rb, rc);
4c9649a9
JM
2518 break;
2519 default:
2520 goto invalid_opc;
2521 }
2522 break;
2523 case 0x16:
2524 /* IEEE floating-point */
f24518b5 2525 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2526 case 0x00:
2527 /* ADDS */
f24518b5 2528 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2529 break;
2530 case 0x01:
2531 /* SUBS */
f24518b5 2532 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2533 break;
2534 case 0x02:
2535 /* MULS */
f24518b5 2536 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2537 break;
2538 case 0x03:
2539 /* DIVS */
f24518b5 2540 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2541 break;
2542 case 0x20:
2543 /* ADDT */
f24518b5 2544 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2545 break;
2546 case 0x21:
2547 /* SUBT */
f24518b5 2548 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2549 break;
2550 case 0x22:
2551 /* MULT */
f24518b5 2552 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2553 break;
2554 case 0x23:
2555 /* DIVT */
f24518b5 2556 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2557 break;
2558 case 0x24:
2559 /* CMPTUN */
f24518b5 2560 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2561 break;
2562 case 0x25:
2563 /* CMPTEQ */
f24518b5 2564 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2565 break;
2566 case 0x26:
2567 /* CMPTLT */
f24518b5 2568 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2569 break;
2570 case 0x27:
2571 /* CMPTLE */
f24518b5 2572 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2573 break;
2574 case 0x2C:
a74b4d2c 2575 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2576 /* CVTST */
f24518b5 2577 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2578 } else {
2579 /* CVTTS */
f24518b5 2580 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2581 }
2582 break;
2583 case 0x2F:
2584 /* CVTTQ */
f24518b5 2585 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2586 break;
2587 case 0x3C:
2588 /* CVTQS */
f24518b5 2589 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2590 break;
2591 case 0x3E:
2592 /* CVTQT */
f24518b5 2593 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2594 break;
2595 default:
2596 goto invalid_opc;
2597 }
2598 break;
2599 case 0x17:
2600 switch (fn11) {
2601 case 0x010:
2602 /* CVTLQ */
a7812ae4 2603 gen_fcvtlq(rb, rc);
4c9649a9
JM
2604 break;
2605 case 0x020:
f18cd223 2606 if (likely(rc != 31)) {
a06d48d9 2607 if (ra == rb) {
4c9649a9 2608 /* FMOV */
a06d48d9
RH
2609 if (ra == 31)
2610 tcg_gen_movi_i64(cpu_fir[rc], 0);
2611 else
2612 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2613 } else {
f18cd223 2614 /* CPYS */
a7812ae4 2615 gen_fcpys(ra, rb, rc);
a06d48d9 2616 }
4c9649a9
JM
2617 }
2618 break;
2619 case 0x021:
2620 /* CPYSN */
a7812ae4 2621 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2622 break;
2623 case 0x022:
2624 /* CPYSE */
a7812ae4 2625 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2626 break;
2627 case 0x024:
2628 /* MT_FPCR */
f18cd223 2629 if (likely(ra != 31))
a7812ae4 2630 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2631 else {
2632 TCGv tmp = tcg_const_i64(0);
a7812ae4 2633 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2634 tcg_temp_free(tmp);
2635 }
4c9649a9
JM
2636 break;
2637 case 0x025:
2638 /* MF_FPCR */
f18cd223 2639 if (likely(ra != 31))
a7812ae4 2640 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2641 break;
2642 case 0x02A:
2643 /* FCMOVEQ */
bbe1dab4 2644 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2645 break;
2646 case 0x02B:
2647 /* FCMOVNE */
bbe1dab4 2648 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2649 break;
2650 case 0x02C:
2651 /* FCMOVLT */
bbe1dab4 2652 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2653 break;
2654 case 0x02D:
2655 /* FCMOVGE */
bbe1dab4 2656 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2657 break;
2658 case 0x02E:
2659 /* FCMOVLE */
bbe1dab4 2660 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2661 break;
2662 case 0x02F:
2663 /* FCMOVGT */
bbe1dab4 2664 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2665 break;
2666 case 0x030:
2667 /* CVTQL */
a7812ae4 2668 gen_fcvtql(rb, rc);
4c9649a9
JM
2669 break;
2670 case 0x130:
2671 /* CVTQL/V */
4c9649a9
JM
2672 case 0x530:
2673 /* CVTQL/SV */
735cf45f
RH
2674 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2675 /v doesn't do. The only thing I can think is that /sv is a
2676 valid instruction merely for completeness in the ISA. */
2677 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2678 break;
2679 default:
2680 goto invalid_opc;
2681 }
2682 break;
2683 case 0x18:
2684 switch ((uint16_t)disp16) {
2685 case 0x0000:
2686 /* TRAPB */
4af70374 2687 /* No-op. */
4c9649a9
JM
2688 break;
2689 case 0x0400:
2690 /* EXCB */
4af70374 2691 /* No-op. */
4c9649a9
JM
2692 break;
2693 case 0x4000:
2694 /* MB */
2695 /* No-op */
2696 break;
2697 case 0x4400:
2698 /* WMB */
2699 /* No-op */
2700 break;
2701 case 0x8000:
2702 /* FETCH */
2703 /* No-op */
2704 break;
2705 case 0xA000:
2706 /* FETCH_M */
2707 /* No-op */
2708 break;
2709 case 0xC000:
2710 /* RPCC */
3761035f 2711 if (ra != 31)
a7812ae4 2712 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2713 break;
2714 case 0xE000:
2715 /* RC */
ac316ca4 2716 gen_rx(ra, 0);
4c9649a9
JM
2717 break;
2718 case 0xE800:
2719 /* ECB */
4c9649a9
JM
2720 break;
2721 case 0xF000:
2722 /* RS */
ac316ca4 2723 gen_rx(ra, 1);
4c9649a9
JM
2724 break;
2725 case 0xF800:
2726 /* WH64 */
2727 /* No-op */
2728 break;
2729 default:
2730 goto invalid_opc;
2731 }
2732 break;
2733 case 0x19:
2734 /* HW_MFPR (PALcode) */
26b46094 2735#ifndef CONFIG_USER_ONLY
a18ad893 2736 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
26b46094
RH
2737 gen_mfpr(ra, insn & 0xffff);
2738 break;
2739 }
2740#endif
4c9649a9 2741 goto invalid_opc;
4c9649a9 2742 case 0x1A:
49563a72
RH
2743 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2744 prediction stack action, which of course we don't implement. */
2745 if (rb != 31) {
3761035f 2746 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2747 } else {
3761035f 2748 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2749 }
2750 if (ra != 31) {
1304ca87 2751 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2752 }
4af70374 2753 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2754 break;
2755 case 0x1B:
2756 /* HW_LD (PALcode) */
a18ad893
RH
2757#ifndef CONFIG_USER_ONLY
2758 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2759 TCGv addr;
2760
2761 if (ra == 31) {
2762 break;
2763 }
2764
2765 addr = tcg_temp_new();
8bb6e981
AJ
2766 if (rb != 31)
2767 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2768 else
2769 tcg_gen_movi_i64(addr, disp12);
2770 switch ((insn >> 12) & 0xF) {
2771 case 0x0:
b5d51029 2772 /* Longword physical access (hw_ldl/p) */
2374e73e 2773 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2774 break;
2775 case 0x1:
b5d51029 2776 /* Quadword physical access (hw_ldq/p) */
2374e73e 2777 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2778 break;
2779 case 0x2:
b5d51029 2780 /* Longword physical access with lock (hw_ldl_l/p) */
2374e73e 2781 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2782 break;
2783 case 0x3:
b5d51029 2784 /* Quadword physical access with lock (hw_ldq_l/p) */
2374e73e 2785 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2786 break;
2787 case 0x4:
b5d51029 2788 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2789 goto invalid_opc;
8bb6e981 2790 case 0x5:
b5d51029 2791 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2792 goto invalid_opc;
8bb6e981
AJ
2793 break;
2794 case 0x6:
2795 /* Incpu_ir[ra]id */
b5d51029 2796 goto invalid_opc;
8bb6e981
AJ
2797 case 0x7:
2798 /* Incpu_ir[ra]id */
b5d51029 2799 goto invalid_opc;
8bb6e981 2800 case 0x8:
b5d51029 2801 /* Longword virtual access (hw_ldl) */
2374e73e 2802 goto invalid_opc;
8bb6e981 2803 case 0x9:
b5d51029 2804 /* Quadword virtual access (hw_ldq) */
2374e73e 2805 goto invalid_opc;
8bb6e981 2806 case 0xA:
b5d51029 2807 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2808 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2809 break;
2810 case 0xB:
b5d51029 2811 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2812 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2813 break;
2814 case 0xC:
b5d51029 2815 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2816 goto invalid_opc;
8bb6e981 2817 case 0xD:
b5d51029 2818 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2819 goto invalid_opc;
8bb6e981
AJ
2820 case 0xE:
2821 /* Longword virtual access with alternate access mode and
2374e73e
RH
2822 protection checks (hw_ldl/wa) */
2823 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2824 break;
2825 case 0xF:
2826 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2827 protection checks (hw_ldq/wa) */
2828 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2829 break;
2830 }
2831 tcg_temp_free(addr);
a18ad893 2832 break;
4c9649a9 2833 }
4c9649a9 2834#endif
a18ad893 2835 goto invalid_opc;
4c9649a9
JM
2836 case 0x1C:
2837 switch (fn7) {
2838 case 0x00:
2839 /* SEXTB */
a18ad893 2840 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2841 goto invalid_opc;
a18ad893 2842 }
ae8ecd42
AJ
2843 if (likely(rc != 31)) {
2844 if (islit)
2845 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2846 else
dfaa8583 2847 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2848 }
4c9649a9
JM
2849 break;
2850 case 0x01:
2851 /* SEXTW */
a18ad893
RH
2852 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2853 if (likely(rc != 31)) {
2854 if (islit) {
2855 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2856 } else {
2857 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2858 }
2859 }
2860 break;
ae8ecd42 2861 }
a18ad893 2862 goto invalid_opc;
4c9649a9
JM
2863 case 0x30:
2864 /* CTPOP */
a18ad893
RH
2865 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2866 if (likely(rc != 31)) {
2867 if (islit) {
2868 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2869 } else {
2870 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2871 }
2872 }
2873 break;
ae8ecd42 2874 }
a18ad893 2875 goto invalid_opc;
4c9649a9
JM
2876 case 0x31:
2877 /* PERR */
a18ad893
RH
2878 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2879 gen_perr(ra, rb, rc, islit, lit);
2880 break;
2881 }
2882 goto invalid_opc;
4c9649a9
JM
2883 case 0x32:
2884 /* CTLZ */
a18ad893
RH
2885 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2886 if (likely(rc != 31)) {
2887 if (islit) {
2888 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2889 } else {
2890 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2891 }
2892 }
2893 break;
ae8ecd42 2894 }
a18ad893 2895 goto invalid_opc;
4c9649a9
JM
2896 case 0x33:
2897 /* CTTZ */
a18ad893
RH
2898 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2899 if (likely(rc != 31)) {
2900 if (islit) {
2901 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2902 } else {
2903 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2904 }
2905 }
2906 break;
ae8ecd42 2907 }
a18ad893 2908 goto invalid_opc;
4c9649a9
JM
2909 case 0x34:
2910 /* UNPKBW */
a18ad893
RH
2911 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2912 if (real_islit || ra != 31) {
2913 goto invalid_opc;
2914 }
2915 gen_unpkbw(rb, rc);
2916 break;
2917 }
2918 goto invalid_opc;
4c9649a9 2919 case 0x35:
13e4df99 2920 /* UNPKBL */
a18ad893
RH
2921 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2922 if (real_islit || ra != 31) {
2923 goto invalid_opc;
2924 }
2925 gen_unpkbl(rb, rc);
2926 break;
2927 }
2928 goto invalid_opc;
4c9649a9
JM
2929 case 0x36:
2930 /* PKWB */
a18ad893
RH
2931 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2932 if (real_islit || ra != 31) {
2933 goto invalid_opc;
2934 }
2935 gen_pkwb(rb, rc);
2936 break;
2937 }
2938 goto invalid_opc;
4c9649a9
JM
2939 case 0x37:
2940 /* PKLB */
a18ad893
RH
2941 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2942 if (real_islit || ra != 31) {
2943 goto invalid_opc;
2944 }
2945 gen_pklb(rb, rc);
2946 break;
2947 }
2948 goto invalid_opc;
4c9649a9
JM
2949 case 0x38:
2950 /* MINSB8 */
a18ad893
RH
2951 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2952 gen_minsb8(ra, rb, rc, islit, lit);
2953 break;
2954 }
2955 goto invalid_opc;
4c9649a9
JM
2956 case 0x39:
2957 /* MINSW4 */
a18ad893
RH
2958 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2959 gen_minsw4(ra, rb, rc, islit, lit);
2960 break;
2961 }
2962 goto invalid_opc;
4c9649a9
JM
2963 case 0x3A:
2964 /* MINUB8 */
a18ad893
RH
2965 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2966 gen_minub8(ra, rb, rc, islit, lit);
2967 break;
2968 }
2969 goto invalid_opc;
4c9649a9
JM
2970 case 0x3B:
2971 /* MINUW4 */
a18ad893
RH
2972 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2973 gen_minuw4(ra, rb, rc, islit, lit);
2974 break;
2975 }
2976 goto invalid_opc;
4c9649a9
JM
2977 case 0x3C:
2978 /* MAXUB8 */
a18ad893
RH
2979 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2980 gen_maxub8(ra, rb, rc, islit, lit);
2981 break;
2982 }
2983 goto invalid_opc;
4c9649a9
JM
2984 case 0x3D:
2985 /* MAXUW4 */
a18ad893
RH
2986 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2987 gen_maxuw4(ra, rb, rc, islit, lit);
2988 break;
2989 }
2990 goto invalid_opc;
4c9649a9
JM
2991 case 0x3E:
2992 /* MAXSB8 */
a18ad893
RH
2993 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2994 gen_maxsb8(ra, rb, rc, islit, lit);
2995 break;
2996 }
2997 goto invalid_opc;
4c9649a9
JM
2998 case 0x3F:
2999 /* MAXSW4 */
a18ad893
RH
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 gen_maxsw4(ra, rb, rc, islit, lit);
3002 break;
3003 }
3004 goto invalid_opc;
4c9649a9
JM
3005 case 0x70:
3006 /* FTOIT */
a18ad893 3007 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3008 goto invalid_opc;
a18ad893 3009 }
f18cd223
AJ
3010 if (likely(rc != 31)) {
3011 if (ra != 31)
3012 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3013 else
3014 tcg_gen_movi_i64(cpu_ir[rc], 0);
3015 }
4c9649a9
JM
3016 break;
3017 case 0x78:
3018 /* FTOIS */
a18ad893 3019 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3020 goto invalid_opc;
a18ad893 3021 }
f18cd223 3022 if (rc != 31) {
a7812ae4 3023 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3024 if (ra != 31)
a7812ae4 3025 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3026 else {
3027 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3028 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3029 tcg_temp_free(tmp2);
3030 }
3031 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3032 tcg_temp_free_i32(tmp1);
f18cd223 3033 }
4c9649a9
JM
3034 break;
3035 default:
3036 goto invalid_opc;
3037 }
3038 break;
3039 case 0x1D:
3040 /* HW_MTPR (PALcode) */
26b46094 3041#ifndef CONFIG_USER_ONLY
a18ad893
RH
3042 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3043 gen_mtpr(rb, insn & 0xffff);
26b46094
RH
3044 break;
3045 }
3046#endif
4c9649a9 3047 goto invalid_opc;
4c9649a9 3048 case 0x1E:
508b43ea 3049 /* HW_RET (PALcode) */
a18ad893
RH
3050#ifndef CONFIG_USER_ONLY
3051 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3052 if (rb == 31) {
3053 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3054 address from EXC_ADDR. This turns out to be useful for our
3055 emulation PALcode, so continue to accept it. */
3056 TCGv tmp = tcg_temp_new();
3057 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3058 gen_helper_hw_ret(tmp);
3059 tcg_temp_free(tmp);
3060 } else {
3061 gen_helper_hw_ret(cpu_ir[rb]);
3062 }
3063 ret = EXIT_PC_UPDATED;
3064 break;
4c9649a9 3065 }
4c9649a9 3066#endif
a18ad893 3067 goto invalid_opc;
4c9649a9
JM
3068 case 0x1F:
3069 /* HW_ST (PALcode) */
a18ad893
RH
3070#ifndef CONFIG_USER_ONLY
3071 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3072 TCGv addr, val;
a7812ae4 3073 addr = tcg_temp_new();
8bb6e981
AJ
3074 if (rb != 31)
3075 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3076 else
3077 tcg_gen_movi_i64(addr, disp12);
3078 if (ra != 31)
3079 val = cpu_ir[ra];
3080 else {
a7812ae4 3081 val = tcg_temp_new();
8bb6e981
AJ
3082 tcg_gen_movi_i64(val, 0);
3083 }
3084 switch ((insn >> 12) & 0xF) {
3085 case 0x0:
3086 /* Longword physical access */
2374e73e 3087 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3088 break;
3089 case 0x1:
3090 /* Quadword physical access */
2374e73e 3091 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3092 break;
3093 case 0x2:
3094 /* Longword physical access with lock */
2374e73e 3095 gen_helper_stl_c_phys(val, addr, val);
8bb6e981
AJ
3096 break;
3097 case 0x3:
3098 /* Quadword physical access with lock */
2374e73e 3099 gen_helper_stq_c_phys(val, addr, val);
8bb6e981
AJ
3100 break;
3101 case 0x4:
3102 /* Longword virtual access */
2374e73e 3103 goto invalid_opc;
8bb6e981
AJ
3104 case 0x5:
3105 /* Quadword virtual access */
2374e73e 3106 goto invalid_opc;
8bb6e981
AJ
3107 case 0x6:
3108 /* Invalid */
3109 goto invalid_opc;
3110 case 0x7:
3111 /* Invalid */
3112 goto invalid_opc;
3113 case 0x8:
3114 /* Invalid */
3115 goto invalid_opc;
3116 case 0x9:
3117 /* Invalid */
3118 goto invalid_opc;
3119 case 0xA:
3120 /* Invalid */
3121 goto invalid_opc;
3122 case 0xB:
3123 /* Invalid */
3124 goto invalid_opc;
3125 case 0xC:
3126 /* Longword virtual access with alternate access mode */
2374e73e 3127 goto invalid_opc;
8bb6e981
AJ
3128 case 0xD:
3129 /* Quadword virtual access with alternate access mode */
2374e73e 3130 goto invalid_opc;
8bb6e981
AJ
3131 case 0xE:
3132 /* Invalid */
3133 goto invalid_opc;
3134 case 0xF:
3135 /* Invalid */
3136 goto invalid_opc;
3137 }
45d46ce8 3138 if (ra == 31)
8bb6e981
AJ
3139 tcg_temp_free(val);
3140 tcg_temp_free(addr);
a18ad893 3141 break;
4c9649a9 3142 }
4c9649a9 3143#endif
a18ad893 3144 goto invalid_opc;
4c9649a9
JM
3145 case 0x20:
3146 /* LDF */
f18cd223 3147 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3148 break;
3149 case 0x21:
3150 /* LDG */
f18cd223 3151 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3152 break;
3153 case 0x22:
3154 /* LDS */
f18cd223 3155 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3156 break;
3157 case 0x23:
3158 /* LDT */
f18cd223 3159 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3160 break;
3161 case 0x24:
3162 /* STF */
6910b8f6 3163 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3164 break;
3165 case 0x25:
3166 /* STG */
6910b8f6 3167 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3168 break;
3169 case 0x26:
3170 /* STS */
6910b8f6 3171 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3172 break;
3173 case 0x27:
3174 /* STT */
6910b8f6 3175 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3176 break;
3177 case 0x28:
3178 /* LDL */
f18cd223 3179 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3180 break;
3181 case 0x29:
3182 /* LDQ */
f18cd223 3183 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3184 break;
3185 case 0x2A:
3186 /* LDL_L */
f4ed8679 3187 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3188 break;
3189 case 0x2B:
3190 /* LDQ_L */
f4ed8679 3191 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3192 break;
3193 case 0x2C:
3194 /* STL */
6910b8f6 3195 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3196 break;
3197 case 0x2D:
3198 /* STQ */
6910b8f6 3199 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3200 break;
3201 case 0x2E:
3202 /* STL_C */
6910b8f6 3203 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3204 break;
3205 case 0x2F:
3206 /* STQ_C */
6910b8f6 3207 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3208 break;
3209 case 0x30:
3210 /* BR */
4af70374 3211 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3212 break;
a7812ae4 3213 case 0x31: /* FBEQ */
4af70374 3214 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3215 break;
a7812ae4 3216 case 0x32: /* FBLT */
4af70374 3217 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3218 break;
a7812ae4 3219 case 0x33: /* FBLE */
4af70374 3220 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3221 break;
3222 case 0x34:
3223 /* BSR */
4af70374 3224 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3225 break;
a7812ae4 3226 case 0x35: /* FBNE */
4af70374 3227 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3228 break;
a7812ae4 3229 case 0x36: /* FBGE */
4af70374 3230 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3231 break;
a7812ae4 3232 case 0x37: /* FBGT */
4af70374 3233 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3234 break;
3235 case 0x38:
3236 /* BLBC */
4af70374 3237 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3238 break;
3239 case 0x39:
3240 /* BEQ */
4af70374 3241 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3242 break;
3243 case 0x3A:
3244 /* BLT */
4af70374 3245 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3246 break;
3247 case 0x3B:
3248 /* BLE */
4af70374 3249 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3250 break;
3251 case 0x3C:
3252 /* BLBS */
4af70374 3253 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3254 break;
3255 case 0x3D:
3256 /* BNE */
4af70374 3257 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3258 break;
3259 case 0x3E:
3260 /* BGE */
4af70374 3261 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3262 break;
3263 case 0x3F:
3264 /* BGT */
4af70374 3265 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3266 break;
3267 invalid_opc:
8aa3fa20 3268 ret = gen_invalid(ctx);
4c9649a9
JM
3269 break;
3270 }
3271
3272 return ret;
3273}
3274
636aa200
BS
3275static inline void gen_intermediate_code_internal(CPUState *env,
3276 TranslationBlock *tb,
3277 int search_pc)
4c9649a9 3278{
4c9649a9
JM
3279 DisasContext ctx, *ctxp = &ctx;
3280 target_ulong pc_start;
3281 uint32_t insn;
3282 uint16_t *gen_opc_end;
a1d1bb31 3283 CPUBreakpoint *bp;
4c9649a9 3284 int j, lj = -1;
4af70374 3285 ExitStatus ret;
2e70f6ef
PB
3286 int num_insns;
3287 int max_insns;
4c9649a9
JM
3288
3289 pc_start = tb->pc;
4c9649a9 3290 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3291
3292 ctx.tb = tb;
3293 ctx.env = env;
4c9649a9 3294 ctx.pc = pc_start;
bba9bdce 3295 ctx.mem_idx = cpu_mmu_index(env);
f24518b5
RH
3296
3297 /* ??? Every TB begins with unset rounding mode, to be initialized on
3298 the first fp insn of the TB. Alternately we could define a proper
3299 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3300 to reset the FP_STATUS to that default at the end of any TB that
3301 changes the default. We could even (gasp) dynamiclly figure out
3302 what default would be most efficient given the running program. */
3303 ctx.tb_rm = -1;
3304 /* Similarly for flush-to-zero. */
3305 ctx.tb_ftz = -1;
3306
2e70f6ef
PB
3307 num_insns = 0;
3308 max_insns = tb->cflags & CF_COUNT_MASK;
3309 if (max_insns == 0)
3310 max_insns = CF_COUNT_MASK;
3311
3312 gen_icount_start();
4af70374 3313 do {
72cf2d4f
BS
3314 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3315 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3316 if (bp->pc == ctx.pc) {
4c9649a9
JM
3317 gen_excp(&ctx, EXCP_DEBUG, 0);
3318 break;
3319 }
3320 }
3321 }
3322 if (search_pc) {
3323 j = gen_opc_ptr - gen_opc_buf;
3324 if (lj < j) {
3325 lj++;
3326 while (lj < j)
3327 gen_opc_instr_start[lj++] = 0;
4c9649a9 3328 }
ed1dda53
AJ
3329 gen_opc_pc[lj] = ctx.pc;
3330 gen_opc_instr_start[lj] = 1;
3331 gen_opc_icount[lj] = num_insns;
4c9649a9 3332 }
2e70f6ef
PB
3333 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3334 gen_io_start();
4c9649a9 3335 insn = ldl_code(ctx.pc);
2e70f6ef 3336 num_insns++;
c4b3be39
RH
3337
3338 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3339 tcg_gen_debug_insn_start(ctx.pc);
3340 }
3341
4c9649a9
JM
3342 ctx.pc += 4;
3343 ret = translate_one(ctxp, insn);
19bf517b 3344
bf1b03fe
RH
3345 /* If we reach a page boundary, are single stepping,
3346 or exhaust instruction count, stop generation. */
3347 if (ret == NO_EXIT
3348 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3349 || gen_opc_ptr >= gen_opc_end
3350 || num_insns >= max_insns
3351 || singlestep
3352 || env->singlestep_enabled)) {
3353 ret = EXIT_PC_STALE;
1b530a6d 3354 }
4af70374
RH
3355 } while (ret == NO_EXIT);
3356
3357 if (tb->cflags & CF_LAST_IO) {
3358 gen_io_end();
4c9649a9 3359 }
4af70374
RH
3360
3361 switch (ret) {
3362 case EXIT_GOTO_TB:
8aa3fa20 3363 case EXIT_NORETURN:
4af70374
RH
3364 break;
3365 case EXIT_PC_STALE:
496cb5b9 3366 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3367 /* FALLTHRU */
3368 case EXIT_PC_UPDATED:
bf1b03fe
RH
3369 if (env->singlestep_enabled) {
3370 gen_excp_1(EXCP_DEBUG, 0);
3371 } else {
3372 tcg_gen_exit_tb(0);
3373 }
4af70374
RH
3374 break;
3375 default:
3376 abort();
4c9649a9 3377 }
4af70374 3378
2e70f6ef 3379 gen_icount_end(tb, num_insns);
4c9649a9
JM
3380 *gen_opc_ptr = INDEX_op_end;
3381 if (search_pc) {
3382 j = gen_opc_ptr - gen_opc_buf;
3383 lj++;
3384 while (lj <= j)
3385 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3386 } else {
3387 tb->size = ctx.pc - pc_start;
2e70f6ef 3388 tb->icount = num_insns;
4c9649a9 3389 }
4af70374 3390
806991da 3391#ifdef DEBUG_DISAS
8fec2b8c 3392 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3393 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3394 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3395 qemu_log("\n");
4c9649a9 3396 }
4c9649a9 3397#endif
4c9649a9
JM
3398}
3399
/* Translate the TB at ENV's current PC into TCG ops (normal translation
   path: no per-op PC bookkeeping is recorded).  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
3404
/* Translate the TB while recording guest-PC metadata for each op
   (search_pc variant, used when restoring CPU state from a host fault).  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
3409
/* Description of a supported Alpha CPU model: its marketing name, the
   IMPLVER (implementation version) reported to the guest, and the AMASK
   bits advertising which optional instruction-set extensions exist.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

/* Table of selectable CPU models.  Both the "evN" development names and
   the "2106x/21164/21264" part numbers are accepted as aliases for the
   same IMPLVER/AMASK combinations.  */
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3433
aaed909a 3434CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3435{
3436 CPUAlphaState *env;
a964acc6 3437 int implver, amask, i, max;
4c9649a9
JM
3438
3439 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3440 cpu_exec_init(env);
2e70f6ef 3441 alpha_translate_init();
4c9649a9 3442 tlb_flush(env, 1);
a964acc6
RH
3443
3444 /* Default to ev67; no reason not to emulate insns by default. */
3445 implver = IMPLVER_21264;
3446 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3447 | AMASK_TRAP | AMASK_PREFETCH);
3448
3449 max = ARRAY_SIZE(cpu_defs);
3450 for (i = 0; i < max; i++) {
3451 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3452 implver = cpu_defs[i].implver;
3453 amask = cpu_defs[i].amask;
3454 break;
3455 }
3456 }
3457 env->implver = implver;
3458 env->amask = amask;
3459
4c9649a9 3460#if defined (CONFIG_USER_ONLY)
ea879fc7 3461 env->ps = PS_USER_MODE;
2edd07ef
RH
3462 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3463 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3464#endif
6910b8f6 3465 env->lock_addr = -1;
26b46094 3466 env->fen = 1;
dad081ee 3467
0bf46a40 3468 qemu_init_vcpu(env);
4c9649a9
JM
3469 return env;
3470}
aaed909a 3471
/* Restore the guest PC from the per-op bookkeeping recorded during a
   search_pc translation; PC_POS indexes the op at which execution stopped.  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}