]> git.ipfire.org Git - thirdparty/qemu.git/blame - target/microblaze/translate.c
target-microblaze: dec_load: Use bool instead of unsigned int
[thirdparty/qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
4acb54ba 25#include "tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
4acb54ba 31
a7e30d84 32#include "trace-tcg.h"
508127e2 33#include "exec/log.h"
a7e30d84
LV
34
35
4acb54ba
EI
36#define SIM_COMPAT 0
37#define DISAS_GNU 1
38#define DISAS_MB 1
39#if DISAS_MB && !SIM_COMPAT
40# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41#else
42# define LOG_DIS(...) do { } while (0)
43#endif
44
45#define D(x)
46
47#define EXTRACT_FIELD(src, start, end) \
48 (((src) >> start) & ((1 << (end - start + 1)) - 1))
49
77fc6f5e
LV
50/* is_jmp field values */
51#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
52#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
53#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
54
4acb54ba 55static TCGv env_debug;
4acb54ba
EI
56static TCGv cpu_R[32];
57static TCGv cpu_SR[18];
58static TCGv env_imm;
59static TCGv env_btaken;
60static TCGv env_btarget;
61static TCGv env_iflags;
4a536270 62static TCGv env_res_addr;
11a76217 63static TCGv env_res_val;
4acb54ba 64
022c62cb 65#include "exec/gen-icount.h"
4acb54ba
EI
66
67/* This is the state at translation time. */
68typedef struct DisasContext {
0063ebd6 69 MicroBlazeCPU *cpu;
a5efa644 70 target_ulong pc;
4acb54ba
EI
71
72 /* Decoder. */
73 int type_b;
74 uint32_t ir;
75 uint8_t opcode;
76 uint8_t rd, ra, rb;
77 uint16_t imm;
78
79 unsigned int cpustate_changed;
80 unsigned int delayed_branch;
81 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
82 unsigned int clear_imm;
83 int is_jmp;
84
844bab60
EI
85#define JMP_NOJMP 0
86#define JMP_DIRECT 1
87#define JMP_DIRECT_CC 2
88#define JMP_INDIRECT 3
4acb54ba
EI
89 unsigned int jmp;
90 uint32_t jmp_pc;
91
92 int abort_at_next_insn;
93 int nr_nops;
94 struct TranslationBlock *tb;
95 int singlestep_enabled;
96} DisasContext;
97
38972938 98static const char *regnames[] =
4acb54ba
EI
99{
100 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
101 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
102 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
103 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
104};
105
38972938 106static const char *special_regnames[] =
4acb54ba
EI
107{
108 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
109 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
110 "sr16", "sr17", "sr18"
111};
112
4acb54ba
EI
113static inline void t_sync_flags(DisasContext *dc)
114{
4abf79a4 115 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba
EI
116 if (dc->tb_flags != dc->synced_flags) {
117 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
118 dc->synced_flags = dc->tb_flags;
119 }
120}
121
122static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
123{
124 TCGv_i32 tmp = tcg_const_i32(index);
125
126 t_sync_flags(dc);
127 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 128 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba
EI
129 tcg_temp_free_i32(tmp);
130 dc->is_jmp = DISAS_UPDATE;
131}
132
90aa39a1
SF
133static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
134{
135#ifndef CONFIG_USER_ONLY
136 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
137#else
138 return true;
139#endif
140}
141
4acb54ba
EI
142static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
143{
90aa39a1 144 if (use_goto_tb(dc, dest)) {
4acb54ba
EI
145 tcg_gen_goto_tb(n);
146 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
90aa39a1 147 tcg_gen_exit_tb((uintptr_t)dc->tb + n);
4acb54ba
EI
148 } else {
149 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
150 tcg_gen_exit_tb(0);
151 }
152}
153
ee8b246f
EI
154static void read_carry(DisasContext *dc, TCGv d)
155{
156 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
157}
158
04ec7df7
EI
159/*
160 * write_carry sets the carry bits in MSR based on bit 0 of v.
161 * v[31:1] are ignored.
162 */
ee8b246f
EI
163static void write_carry(DisasContext *dc, TCGv v)
164{
165 TCGv t0 = tcg_temp_new();
166 tcg_gen_shli_tl(t0, v, 31);
167 tcg_gen_sari_tl(t0, t0, 31);
168 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
169 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
170 ~(MSR_C | MSR_CC));
171 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
172 tcg_temp_free(t0);
173}
174
65ab5eb4 175static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f
PC
176{
177 TCGv t0 = tcg_temp_new();
65ab5eb4 178 tcg_gen_movi_tl(t0, carry);
8cc9b43f
PC
179 write_carry(dc, t0);
180 tcg_temp_free(t0);
181}
182
61204ce8
EI
183/* True if ALU operand b is a small immediate that may deserve
184 faster treatment. */
185static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
186{
187 /* Immediate insn without the imm prefix ? */
188 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
189}
190
4acb54ba
EI
191static inline TCGv *dec_alu_op_b(DisasContext *dc)
192{
193 if (dc->type_b) {
194 if (dc->tb_flags & IMM_FLAG)
195 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
196 else
197 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
198 return &env_imm;
199 } else
200 return &cpu_R[dc->rb];
201}
202
203static void dec_add(DisasContext *dc)
204{
205 unsigned int k, c;
40cbf5b7 206 TCGv cf;
4acb54ba
EI
207
208 k = dc->opcode & 4;
209 c = dc->opcode & 2;
210
211 LOG_DIS("add%s%s%s r%d r%d r%d\n",
212 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
213 dc->rd, dc->ra, dc->rb);
214
40cbf5b7
EI
215 /* Take care of the easy cases first. */
216 if (k) {
217 /* k - keep carry, no need to update MSR. */
218 /* If rd == r0, it's a nop. */
219 if (dc->rd) {
220 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
221
222 if (c) {
223 /* c - Add carry into the result. */
224 cf = tcg_temp_new();
225
226 read_carry(dc, cf);
227 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
228 tcg_temp_free(cf);
229 }
230 }
231 return;
232 }
233
234 /* From now on, we can assume k is zero. So we need to update MSR. */
235 /* Extract carry. */
236 cf = tcg_temp_new();
237 if (c) {
238 read_carry(dc, cf);
239 } else {
240 tcg_gen_movi_tl(cf, 0);
241 }
242
243 if (dc->rd) {
244 TCGv ncf = tcg_temp_new();
5d0bb823 245 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
4acb54ba 246 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
40cbf5b7
EI
247 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
248 write_carry(dc, ncf);
249 tcg_temp_free(ncf);
250 } else {
5d0bb823 251 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
40cbf5b7 252 write_carry(dc, cf);
4acb54ba 253 }
40cbf5b7 254 tcg_temp_free(cf);
4acb54ba
EI
255}
256
257static void dec_sub(DisasContext *dc)
258{
259 unsigned int u, cmp, k, c;
e0a42ebc 260 TCGv cf, na;
4acb54ba
EI
261
262 u = dc->imm & 2;
263 k = dc->opcode & 4;
264 c = dc->opcode & 2;
265 cmp = (dc->imm & 1) && (!dc->type_b) && k;
266
267 if (cmp) {
268 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
269 if (dc->rd) {
270 if (u)
271 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
272 else
273 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
274 }
e0a42ebc
EI
275 return;
276 }
277
278 LOG_DIS("sub%s%s r%d, r%d r%d\n",
279 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
280
281 /* Take care of the easy cases first. */
282 if (k) {
283 /* k - keep carry, no need to update MSR. */
284 /* If rd == r0, it's a nop. */
285 if (dc->rd) {
4acb54ba 286 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
e0a42ebc
EI
287
288 if (c) {
289 /* c - Add carry into the result. */
290 cf = tcg_temp_new();
291
292 read_carry(dc, cf);
293 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
294 tcg_temp_free(cf);
295 }
296 }
297 return;
298 }
299
300 /* From now on, we can assume k is zero. So we need to update MSR. */
301 /* Extract carry. And complement a into na. */
302 cf = tcg_temp_new();
303 na = tcg_temp_new();
304 if (c) {
305 read_carry(dc, cf);
306 } else {
307 tcg_gen_movi_tl(cf, 1);
308 }
309
310 /* d = b + ~a + c. carry defaults to 1. */
311 tcg_gen_not_tl(na, cpu_R[dc->ra]);
312
313 if (dc->rd) {
314 TCGv ncf = tcg_temp_new();
5d0bb823 315 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc
EI
316 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
317 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
318 write_carry(dc, ncf);
319 tcg_temp_free(ncf);
320 } else {
5d0bb823 321 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc 322 write_carry(dc, cf);
4acb54ba 323 }
e0a42ebc
EI
324 tcg_temp_free(cf);
325 tcg_temp_free(na);
4acb54ba
EI
326}
327
328static void dec_pattern(DisasContext *dc)
329{
330 unsigned int mode;
4acb54ba 331
1567a005 332 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 333 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
8fc5239e 334 && !dc->cpu->cfg.use_pcmp_instr) {
1567a005
EI
335 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
336 t_gen_raise_exception(dc, EXCP_HW_EXCP);
337 }
338
4acb54ba
EI
339 mode = dc->opcode & 3;
340 switch (mode) {
341 case 0:
342 /* pcmpbf. */
343 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
344 if (dc->rd)
345 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
346 break;
347 case 2:
348 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
349 if (dc->rd) {
86112805
RH
350 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
351 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
352 }
353 break;
354 case 3:
355 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
4acb54ba 356 if (dc->rd) {
86112805
RH
357 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
358 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
359 }
360 break;
361 default:
0063ebd6 362 cpu_abort(CPU(dc->cpu),
4acb54ba
EI
363 "unsupported pattern insn opcode=%x\n", dc->opcode);
364 break;
365 }
366}
367
368static void dec_and(DisasContext *dc)
369{
370 unsigned int not;
371
372 if (!dc->type_b && (dc->imm & (1 << 10))) {
373 dec_pattern(dc);
374 return;
375 }
376
377 not = dc->opcode & (1 << 1);
378 LOG_DIS("and%s\n", not ? "n" : "");
379
380 if (!dc->rd)
381 return;
382
383 if (not) {
a235900e 384 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
385 } else
386 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
387}
388
389static void dec_or(DisasContext *dc)
390{
391 if (!dc->type_b && (dc->imm & (1 << 10))) {
392 dec_pattern(dc);
393 return;
394 }
395
396 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
397 if (dc->rd)
398 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
399}
400
401static void dec_xor(DisasContext *dc)
402{
403 if (!dc->type_b && (dc->imm & (1 << 10))) {
404 dec_pattern(dc);
405 return;
406 }
407
408 LOG_DIS("xor r%d\n", dc->rd);
409 if (dc->rd)
410 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
411}
412
4acb54ba
EI
413static inline void msr_read(DisasContext *dc, TCGv d)
414{
415 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
416}
417
418static inline void msr_write(DisasContext *dc, TCGv v)
419{
97b833c5
EI
420 TCGv t;
421
422 t = tcg_temp_new();
4acb54ba 423 dc->cpustate_changed = 1;
97b833c5 424 /* PVR bit is not writable. */
8a84fc6b
EI
425 tcg_gen_andi_tl(t, v, ~MSR_PVR);
426 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
59b1a90b 427 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
97b833c5 428 tcg_temp_free(t);
4acb54ba
EI
429}
430
431static void dec_msr(DisasContext *dc)
432{
0063ebd6 433 CPUState *cs = CPU(dc->cpu);
4acb54ba
EI
434 TCGv t0, t1;
435 unsigned int sr, to, rn;
97ed5ccd 436 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
437
438 sr = dc->imm & ((1 << 14) - 1);
439 to = dc->imm & (1 << 14);
440 dc->type_b = 1;
441 if (to)
442 dc->cpustate_changed = 1;
443
444 /* msrclr and msrset. */
445 if (!(dc->imm & (1 << 15))) {
446 unsigned int clr = dc->ir & (1 << 16);
447
448 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
449 dc->rd, dc->imm);
1567a005 450
56837509 451 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
452 /* nop??? */
453 return;
454 }
455
456 if ((dc->tb_flags & MSR_EE_FLAG)
457 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
458 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
459 t_gen_raise_exception(dc, EXCP_HW_EXCP);
460 return;
461 }
462
4acb54ba
EI
463 if (dc->rd)
464 msr_read(dc, cpu_R[dc->rd]);
465
466 t0 = tcg_temp_new();
467 t1 = tcg_temp_new();
468 msr_read(dc, t0);
469 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
470
471 if (clr) {
472 tcg_gen_not_tl(t1, t1);
473 tcg_gen_and_tl(t0, t0, t1);
474 } else
475 tcg_gen_or_tl(t0, t0, t1);
476 msr_write(dc, t0);
477 tcg_temp_free(t0);
478 tcg_temp_free(t1);
479 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
480 dc->is_jmp = DISAS_UPDATE;
481 return;
482 }
483
1567a005
EI
484 if (to) {
485 if ((dc->tb_flags & MSR_EE_FLAG)
486 && mem_index == MMU_USER_IDX) {
487 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
488 t_gen_raise_exception(dc, EXCP_HW_EXCP);
489 return;
490 }
491 }
492
4acb54ba
EI
493#if !defined(CONFIG_USER_ONLY)
494 /* Catch read/writes to the mmu block. */
495 if ((sr & ~0xff) == 0x1000) {
496 sr &= 7;
497 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
498 if (to)
64254eba 499 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
4acb54ba 500 else
64254eba 501 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
4acb54ba
EI
502 return;
503 }
504#endif
505
506 if (to) {
507 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 switch (sr) {
509 case 0:
510 break;
511 case 1:
512 msr_write(dc, cpu_R[dc->ra]);
513 break;
514 case 0x3:
515 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
516 break;
517 case 0x5:
518 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
519 break;
520 case 0x7:
97694c57 521 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 522 break;
5818dee5 523 case 0x800:
68cee38a 524 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
525 break;
526 case 0x802:
68cee38a 527 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
5818dee5 528 break;
4acb54ba 529 default:
0063ebd6 530 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
531 break;
532 }
533 } else {
534 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
535
536 switch (sr) {
537 case 0:
538 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
539 break;
540 case 1:
541 msr_read(dc, cpu_R[dc->rd]);
542 break;
543 case 0x3:
544 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
545 break;
546 case 0x5:
547 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
548 break;
549 case 0x7:
97694c57 550 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
4acb54ba
EI
551 break;
552 case 0xb:
553 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
554 break;
5818dee5 555 case 0x800:
68cee38a 556 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
557 break;
558 case 0x802:
68cee38a 559 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
5818dee5 560 break;
4acb54ba
EI
561 case 0x2000:
562 case 0x2001:
563 case 0x2002:
564 case 0x2003:
565 case 0x2004:
566 case 0x2005:
567 case 0x2006:
568 case 0x2007:
569 case 0x2008:
570 case 0x2009:
571 case 0x200a:
572 case 0x200b:
573 case 0x200c:
574 rn = sr & 0xf;
575 tcg_gen_ld_tl(cpu_R[dc->rd],
68cee38a 576 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
577 break;
578 default:
a47dddd7 579 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
580 break;
581 }
582 }
ee7dbcf8
EI
583
584 if (dc->rd == 0) {
585 tcg_gen_movi_tl(cpu_R[0], 0);
586 }
4acb54ba
EI
587}
588
4acb54ba
EI
589/* Multiplier unit. */
590static void dec_mul(DisasContext *dc)
591{
16ece88d 592 TCGv tmp;
4acb54ba
EI
593 unsigned int subcode;
594
1567a005 595 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 596 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
9b964318 597 && !dc->cpu->cfg.use_hw_mul) {
1567a005
EI
598 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
599 t_gen_raise_exception(dc, EXCP_HW_EXCP);
600 return;
601 }
602
4acb54ba 603 subcode = dc->imm & 3;
4acb54ba
EI
604
605 if (dc->type_b) {
606 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
16ece88d
RH
607 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
608 return;
4acb54ba
EI
609 }
610
1567a005 611 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
9b964318 612 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
1567a005
EI
613 /* nop??? */
614 }
615
16ece88d 616 tmp = tcg_temp_new();
4acb54ba
EI
617 switch (subcode) {
618 case 0:
619 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 620 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
621 break;
622 case 1:
623 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 624 tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
625 break;
626 case 2:
627 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 628 tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
629 break;
630 case 3:
631 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 632 tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
633 break;
634 default:
0063ebd6 635 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
4acb54ba
EI
636 break;
637 }
16ece88d 638 tcg_temp_free(tmp);
4acb54ba
EI
639}
640
641/* Div unit. */
642static void dec_div(DisasContext *dc)
643{
644 unsigned int u;
645
646 u = dc->imm & 2;
647 LOG_DIS("div\n");
648
0063ebd6 649 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
47709e4c 650 && !dc->cpu->cfg.use_div) {
1567a005
EI
651 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
652 t_gen_raise_exception(dc, EXCP_HW_EXCP);
653 }
654
4acb54ba 655 if (u)
64254eba
BS
656 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
657 cpu_R[dc->ra]);
4acb54ba 658 else
64254eba
BS
659 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
660 cpu_R[dc->ra]);
4acb54ba
EI
661 if (!dc->rd)
662 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
663}
664
665static void dec_barrel(DisasContext *dc)
666{
667 TCGv t0;
faa48d74 668 unsigned int imm_w, imm_s;
d09b2585 669 bool s, t, e = false, i = false;
4acb54ba 670
1567a005 671 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 672 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
7faa66aa 673 && !dc->cpu->cfg.use_barrel) {
1567a005
EI
674 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
675 t_gen_raise_exception(dc, EXCP_HW_EXCP);
676 return;
677 }
678
faa48d74
EI
679 if (dc->type_b) {
680 /* Insert and extract are only available in immediate mode. */
d09b2585 681 i = extract32(dc->imm, 15, 1);
faa48d74
EI
682 e = extract32(dc->imm, 14, 1);
683 }
e3e84983
EI
684 s = extract32(dc->imm, 10, 1);
685 t = extract32(dc->imm, 9, 1);
faa48d74
EI
686 imm_w = extract32(dc->imm, 6, 5);
687 imm_s = extract32(dc->imm, 0, 5);
4acb54ba 688
faa48d74
EI
689 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
690 e ? "e" : "",
4acb54ba
EI
691 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
692
faa48d74
EI
693 if (e) {
694 if (imm_w + imm_s > 32 || imm_w == 0) {
695 /* These inputs have an undefined behavior. */
696 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
697 imm_w, imm_s);
698 } else {
699 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
700 }
d09b2585
EI
701 } else if (i) {
702 int width = imm_w - imm_s + 1;
703
704 if (imm_w < imm_s) {
705 /* These inputs have an undefined behavior. */
706 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
707 imm_w, imm_s);
708 } else {
709 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
710 imm_s, width);
711 }
faa48d74
EI
712 } else {
713 t0 = tcg_temp_new();
4acb54ba 714
faa48d74
EI
715 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
716 tcg_gen_andi_tl(t0, t0, 31);
4acb54ba 717
faa48d74
EI
718 if (s) {
719 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
2acf6d53 720 } else {
faa48d74
EI
721 if (t) {
722 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
723 } else {
724 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
725 }
2acf6d53 726 }
faa48d74 727 tcg_temp_free(t0);
4acb54ba
EI
728 }
729}
730
731static void dec_bit(DisasContext *dc)
732{
0063ebd6 733 CPUState *cs = CPU(dc->cpu);
09b9f113 734 TCGv t0;
4acb54ba 735 unsigned int op;
97ed5ccd 736 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba 737
ace2e4da 738 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
739 switch (op) {
740 case 0x21:
741 /* src. */
742 t0 = tcg_temp_new();
743
744 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
09b9f113
EI
745 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
746 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 747 if (dc->rd) {
4acb54ba 748 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
09b9f113 749 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 750 }
4acb54ba
EI
751 tcg_temp_free(t0);
752 break;
753
754 case 0x1:
755 case 0x41:
756 /* srl. */
4acb54ba
EI
757 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
758
bb3cb951
EI
759 /* Update carry. Note that write carry only looks at the LSB. */
760 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
761 if (dc->rd) {
762 if (op == 0x41)
763 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
764 else
765 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
766 }
767 break;
768 case 0x60:
769 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
770 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
771 break;
772 case 0x61:
773 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
774 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
775 break;
776 case 0x64:
f062a3c7
EI
777 case 0x66:
778 case 0x74:
779 case 0x76:
4acb54ba
EI
780 /* wdc. */
781 LOG_DIS("wdc r%d\n", dc->ra);
1567a005
EI
782 if ((dc->tb_flags & MSR_EE_FLAG)
783 && mem_index == MMU_USER_IDX) {
784 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
785 t_gen_raise_exception(dc, EXCP_HW_EXCP);
786 return;
787 }
4acb54ba
EI
788 break;
789 case 0x68:
790 /* wic. */
791 LOG_DIS("wic r%d\n", dc->ra);
1567a005
EI
792 if ((dc->tb_flags & MSR_EE_FLAG)
793 && mem_index == MMU_USER_IDX) {
794 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
795 t_gen_raise_exception(dc, EXCP_HW_EXCP);
796 return;
797 }
4acb54ba 798 break;
48b5e96f
EI
799 case 0xe0:
800 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 801 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
8fc5239e 802 && !dc->cpu->cfg.use_pcmp_instr) {
48b5e96f
EI
803 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
804 t_gen_raise_exception(dc, EXCP_HW_EXCP);
805 }
8fc5239e 806 if (dc->cpu->cfg.use_pcmp_instr) {
5318420c 807 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
48b5e96f
EI
808 }
809 break;
ace2e4da
PC
810 case 0x1e0:
811 /* swapb */
812 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
813 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
814 break;
b8c6a5d9 815 case 0x1e2:
ace2e4da
PC
816 /*swaph */
817 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
818 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
819 break;
4acb54ba 820 default:
a47dddd7
AF
821 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
822 dc->pc, op, dc->rd, dc->ra, dc->rb);
4acb54ba
EI
823 break;
824 }
825}
826
827static inline void sync_jmpstate(DisasContext *dc)
828{
844bab60
EI
829 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
830 if (dc->jmp == JMP_DIRECT) {
831 tcg_gen_movi_tl(env_btaken, 1);
832 }
23979dc5
EI
833 dc->jmp = JMP_INDIRECT;
834 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
4acb54ba
EI
835 }
836}
837
838static void dec_imm(DisasContext *dc)
839{
840 LOG_DIS("imm %x\n", dc->imm << 16);
841 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
842 dc->tb_flags |= IMM_FLAG;
843 dc->clear_imm = 0;
844}
845
4acb54ba
EI
846static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
847{
848 unsigned int extimm = dc->tb_flags & IMM_FLAG;
5818dee5
EI
849 /* Should be set to one if r1 is used by loadstores. */
850 int stackprot = 0;
851
852 /* All load/stores use ra. */
9aaaa181 853 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
5818dee5
EI
854 stackprot = 1;
855 }
4acb54ba 856
9ef55357 857 /* Treat the common cases first. */
4acb54ba 858 if (!dc->type_b) {
4b5ef0b5
EI
859 /* If any of the regs is r0, return a ptr to the other. */
860 if (dc->ra == 0) {
861 return &cpu_R[dc->rb];
862 } else if (dc->rb == 0) {
863 return &cpu_R[dc->ra];
864 }
865
9aaaa181 866 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
5818dee5
EI
867 stackprot = 1;
868 }
869
4acb54ba
EI
870 *t = tcg_temp_new();
871 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
5818dee5
EI
872
873 if (stackprot) {
64254eba 874 gen_helper_stackprot(cpu_env, *t);
5818dee5 875 }
4acb54ba
EI
876 return t;
877 }
878 /* Immediate. */
879 if (!extimm) {
880 if (dc->imm == 0) {
881 return &cpu_R[dc->ra];
882 }
883 *t = tcg_temp_new();
884 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
885 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
886 } else {
887 *t = tcg_temp_new();
888 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
889 }
890
5818dee5 891 if (stackprot) {
64254eba 892 gen_helper_stackprot(cpu_env, *t);
5818dee5 893 }
4acb54ba
EI
894 return t;
895}
896
897static void dec_load(DisasContext *dc)
898{
47acdd63 899 TCGv t, v, *addr;
8534063a
EI
900 unsigned int size;
901 bool rev = false, ex = false;
47acdd63 902 TCGMemOp mop;
4acb54ba 903
47acdd63
RH
904 mop = dc->opcode & 3;
905 size = 1 << mop;
9f8beb66 906 if (!dc->type_b) {
8534063a
EI
907 rev = extract32(dc->ir, 9, 1);
908 ex = extract32(dc->ir, 10, 1);
9f8beb66 909 }
47acdd63
RH
910 mop |= MO_TE;
911 if (rev) {
912 mop ^= MO_BSWAP;
913 }
9f8beb66 914
0187688f 915 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 916 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
917 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
918 t_gen_raise_exception(dc, EXCP_HW_EXCP);
919 return;
920 }
4acb54ba 921
8cc9b43f
PC
922 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
923 ex ? "x" : "");
9f8beb66 924
4acb54ba
EI
925 t_sync_flags(dc);
926 addr = compute_ldst_addr(dc, &t);
927
9f8beb66
EI
928 /*
929 * When doing reverse accesses we need to do two things.
930 *
4ff9786c 931 * 1. Reverse the address wrt endianness.
9f8beb66
EI
932 * 2. Byteswap the data lanes on the way back into the CPU core.
933 */
934 if (rev && size != 4) {
935 /* Endian reverse the address. t is addr. */
936 switch (size) {
937 case 1:
938 {
939 /* 00 -> 11
940 01 -> 10
941 10 -> 10
942 11 -> 00 */
943 TCGv low = tcg_temp_new();
944
945 /* Force addr into the temp. */
946 if (addr != &t) {
947 t = tcg_temp_new();
948 tcg_gen_mov_tl(t, *addr);
949 addr = &t;
950 }
951
952 tcg_gen_andi_tl(low, t, 3);
953 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
954 tcg_gen_andi_tl(t, t, ~3);
955 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
956 tcg_temp_free(low);
957 break;
958 }
959
960 case 2:
961 /* 00 -> 10
962 10 -> 00. */
963 /* Force addr into the temp. */
964 if (addr != &t) {
965 t = tcg_temp_new();
966 tcg_gen_xori_tl(t, *addr, 2);
967 addr = &t;
968 } else {
969 tcg_gen_xori_tl(t, t, 2);
970 }
971 break;
972 default:
0063ebd6 973 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
974 break;
975 }
976 }
977
8cc9b43f
PC
978 /* lwx does not throw unaligned access errors, so force alignment */
979 if (ex) {
980 /* Force addr into the temp. */
981 if (addr != &t) {
982 t = tcg_temp_new();
983 tcg_gen_mov_tl(t, *addr);
984 addr = &t;
985 }
986 tcg_gen_andi_tl(t, t, ~3);
987 }
988
4acb54ba
EI
989 /* If we get a fault on a dslot, the jmpstate better be in sync. */
990 sync_jmpstate(dc);
968a40f6
EI
991
992 /* Verify alignment if needed. */
47acdd63
RH
993 /*
994 * Microblaze gives MMU faults priority over faults due to
995 * unaligned addresses. That's why we speculatively do the load
996 * into v. If the load succeeds, we verify alignment of the
997 * address and if that succeeds we write into the destination reg.
998 */
999 v = tcg_temp_new();
97ed5ccd 1000 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 1001
0063ebd6 1002 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507 1003 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 1004 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1005 tcg_const_tl(0), tcg_const_tl(size - 1));
4acb54ba
EI
1006 }
1007
47acdd63
RH
1008 if (ex) {
1009 tcg_gen_mov_tl(env_res_addr, *addr);
1010 tcg_gen_mov_tl(env_res_val, v);
1011 }
1012 if (dc->rd) {
1013 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1014 }
1015 tcg_temp_free(v);
1016
8cc9b43f 1017 if (ex) { /* lwx */
b6af0975 1018 /* no support for AXI exclusive so always clear C */
8cc9b43f 1019 write_carryi(dc, 0);
8cc9b43f
PC
1020 }
1021
4acb54ba
EI
1022 if (addr == &t)
1023 tcg_temp_free(t);
1024}
1025
4acb54ba
EI
1026static void dec_store(DisasContext *dc)
1027{
4a536270 1028 TCGv t, *addr, swx_addr;
42a268c2 1029 TCGLabel *swx_skip = NULL;
8cc9b43f 1030 unsigned int size, rev = 0, ex = 0;
47acdd63 1031 TCGMemOp mop;
4acb54ba 1032
47acdd63
RH
1033 mop = dc->opcode & 3;
1034 size = 1 << mop;
9f8beb66
EI
1035 if (!dc->type_b) {
1036 rev = (dc->ir >> 9) & 1;
8cc9b43f 1037 ex = (dc->ir >> 10) & 1;
9f8beb66 1038 }
47acdd63
RH
1039 mop |= MO_TE;
1040 if (rev) {
1041 mop ^= MO_BSWAP;
1042 }
4acb54ba 1043
0187688f 1044 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1045 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
1046 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1047 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1048 return;
1049 }
1050
8cc9b43f
PC
1051 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1052 ex ? "x" : "");
4acb54ba
EI
1053 t_sync_flags(dc);
1054 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1055 sync_jmpstate(dc);
1056 addr = compute_ldst_addr(dc, &t);
968a40f6 1057
083dbf48 1058 swx_addr = tcg_temp_local_new();
8cc9b43f 1059 if (ex) { /* swx */
11a76217 1060 TCGv tval;
8cc9b43f
PC
1061
1062 /* Force addr into the swx_addr. */
1063 tcg_gen_mov_tl(swx_addr, *addr);
1064 addr = &swx_addr;
1065 /* swx does not throw unaligned access errors, so force alignment */
1066 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1067
8cc9b43f
PC
1068 write_carryi(dc, 1);
1069 swx_skip = gen_new_label();
4a536270 1070 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
11a76217
EI
1071
1072 /* Compare the value loaded at lwx with current contents of
1073 the reserved location.
1074 FIXME: This only works for system emulation where we can expect
1075 this compare and the following write to be atomic. For user
1076 emulation we need to add atomicity between threads. */
1077 tval = tcg_temp_new();
97ed5ccd 1078 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
0063ebd6 1079 MO_TEUL);
11a76217 1080 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1081 write_carryi(dc, 0);
11a76217 1082 tcg_temp_free(tval);
8cc9b43f
PC
1083 }
1084
9f8beb66
EI
1085 if (rev && size != 4) {
1086 /* Endian reverse the address. t is addr. */
1087 switch (size) {
1088 case 1:
1089 {
1090 /* 00 -> 11
1091 01 -> 10
1092 10 -> 10
1093 11 -> 00 */
1094 TCGv low = tcg_temp_new();
1095
1096 /* Force addr into the temp. */
1097 if (addr != &t) {
1098 t = tcg_temp_new();
1099 tcg_gen_mov_tl(t, *addr);
1100 addr = &t;
1101 }
1102
1103 tcg_gen_andi_tl(low, t, 3);
1104 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1105 tcg_gen_andi_tl(t, t, ~3);
1106 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
1107 tcg_temp_free(low);
1108 break;
1109 }
1110
1111 case 2:
1112 /* 00 -> 10
1113 10 -> 00. */
1114 /* Force addr into the temp. */
1115 if (addr != &t) {
1116 t = tcg_temp_new();
1117 tcg_gen_xori_tl(t, *addr, 2);
1118 addr = &t;
1119 } else {
1120 tcg_gen_xori_tl(t, t, 2);
1121 }
1122 break;
1123 default:
0063ebd6 1124 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1125 break;
1126 }
9f8beb66 1127 }
97ed5ccd 1128 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 1129
968a40f6 1130 /* Verify alignment if needed. */
0063ebd6 1131 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507
EI
1132 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1133 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1134 * in memory. One possible way to achieve this is to probe
9f8beb66
EI
1135 * the MMU prior to the memaccess, thay way we could put
1136 * the alignment checks in between the probe and the mem
1137 * access.
a12f6507 1138 */
64254eba 1139 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1140 tcg_const_tl(1), tcg_const_tl(size - 1));
968a40f6 1141 }
083dbf48 1142
8cc9b43f
PC
1143 if (ex) {
1144 gen_set_label(swx_skip);
8cc9b43f 1145 }
083dbf48 1146 tcg_temp_free(swx_addr);
968a40f6 1147
4acb54ba
EI
1148 if (addr == &t)
1149 tcg_temp_free(t);
1150}
1151
1152static inline void eval_cc(DisasContext *dc, unsigned int cc,
1153 TCGv d, TCGv a, TCGv b)
1154{
4acb54ba
EI
1155 switch (cc) {
1156 case CC_EQ:
b2565c69 1157 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1158 break;
1159 case CC_NE:
b2565c69 1160 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
4acb54ba
EI
1161 break;
1162 case CC_LT:
b2565c69 1163 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
4acb54ba
EI
1164 break;
1165 case CC_LE:
b2565c69 1166 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
4acb54ba
EI
1167 break;
1168 case CC_GE:
b2565c69 1169 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
4acb54ba
EI
1170 break;
1171 case CC_GT:
b2565c69 1172 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
4acb54ba
EI
1173 break;
1174 default:
0063ebd6 1175 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
4acb54ba
EI
1176 break;
1177 }
1178}
1179
1180static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1181{
42a268c2 1182 TCGLabel *l1 = gen_new_label();
4acb54ba
EI
1183 /* Conditional jmp. */
1184 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1185 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1186 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1187 gen_set_label(l1);
1188}
1189
/* Decode the conditional branches (beq/bne/blt/ble/bge/bgt and the
   delay-slot "d" forms).  Compares rA against zero and records the
   branch target for the end-of-insn branch handling.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: the branch resolves after the next insn.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the delay slot insn has a preceding imm.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translate time: allow direct TB chaining.  */
        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Register (or imm-extended) offset: target only known at run time. */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* btaken = (rA <cc> 0).  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1220
/* Decode the unconditional branches (br/bra/brl and delay-slot forms).
   Also handles the mbar/sleep encodings and the brk/brki debug traps
   that share this opcode space.  */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Set cs->halted; the env offset is negated to reach the
               containing CPUState from cpu_env.  */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: branch completes after the next insn.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki to vectors 0x8/0x18 is treated as a breakpoint;
               presumably the softmmu debug convention -- see EXCP_BREAK.  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki to 0 is privileged when exceptions are enabled.  */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* Target known at translate time: direct TB chaining.  */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1299
1300static inline void do_rti(DisasContext *dc)
1301{
1302 TCGv t0, t1;
1303 t0 = tcg_temp_new();
1304 t1 = tcg_temp_new();
1305 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1306 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1307 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1308
1309 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1310 tcg_gen_or_tl(t1, t1, t0);
1311 msr_write(dc, t1);
1312 tcg_temp_free(t1);
1313 tcg_temp_free(t0);
1314 dc->tb_flags &= ~DRTI_FLAG;
1315}
1316
1317static inline void do_rtb(DisasContext *dc)
1318{
1319 TCGv t0, t1;
1320 t0 = tcg_temp_new();
1321 t1 = tcg_temp_new();
1322 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1323 tcg_gen_shri_tl(t0, t1, 1);
1324 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1325
1326 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1327 tcg_gen_or_tl(t1, t1, t0);
1328 msr_write(dc, t1);
1329 tcg_temp_free(t1);
1330 tcg_temp_free(t0);
1331 dc->tb_flags &= ~DRTB_FLAG;
1332}
1333
1334static inline void do_rte(DisasContext *dc)
1335{
1336 TCGv t0, t1;
1337 t0 = tcg_temp_new();
1338 t1 = tcg_temp_new();
1339
1340 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1341 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1342 tcg_gen_shri_tl(t0, t1, 1);
1343 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1344
1345 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1346 tcg_gen_or_tl(t1, t1, t0);
1347 msr_write(dc, t1);
1348 tcg_temp_free(t1);
1349 tcg_temp_free(t0);
1350 dc->tb_flags &= ~DRTE_FLAG;
1351}
1352
/* Decode the return insns rts/rtid/rtbd/rted.  All have an implicit delay
   slot; the i/b/e variants additionally restore MSR state (handled at the
   end of the delay slot via the DRT?_FLAG tb_flags).  */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* Returns always execute their delay slot.  */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        /* rtid is privileged: raise PRIVINSN from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        /* rtbd is privileged: raise PRIVINSN from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        /* rted is privileged: raise PRIVINSN from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Branch target = rA + op_b; always taken.  */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1398
97694c57
EI
1399static int dec_check_fpuv2(DisasContext *dc)
1400{
be67e9ab 1401 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
97694c57
EI
1402 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1403 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1404 }
be67e9ab 1405 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1406}
1407
1567a005
EI
/* Decode the floating point insns.  Bits 7..9 select the operation;
   fcmp sub-ops are selected by bits 4..6.  flt/fint/fsqrt additionally
   require the FPU v2 extension (see dec_check_fpuv2).  */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured: raise illegal-opcode if the core supports it.  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_fpu) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp: the comparison variant lives in bits 4..6.  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1512
4acb54ba
EI
1513static void dec_null(DisasContext *dc)
1514{
02b33596 1515 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1516 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
02b33596
EI
1517 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1518 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1519 return;
1520 }
1d512a65 1521 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1522 dc->abort_at_next_insn = 1;
1523}
1524
6d76d23e
EI
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    /* rd == 0 selects put (write to stream), rd != 0 selects get.  */
    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    /* Stream insns are privileged: raise PRIVINSN from user mode.  */
    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        /* Immediate form: port id in the low 4 bits of imm.  */
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: port id taken from rB at run time.  */
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
1560
4acb54ba
EI
/* Instruction decode table: decode() dispatches to the first entry whose
   bits/mask pair matches the opcode.  The all-zero entry (dec_null) acts
   as the catch-all and must stay last.  */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1588
/* Decode one instruction word: extract the common fields into dc and
   dispatch through the decinfo table.  Also detects runaway all-zero
   (nop) sequences, which usually indicate a bad fetch.  */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Opcode 0x0 may itself be configured as illegal.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        /* More than 4 consecutive zero words: assume we ran off the rails. */
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1630
/* generate intermediate code for basic block 'tb'.
   Main translation loop: decodes insns one at a time until the TB must
   end (branch, page boundary, cpu-state change, insn/op-buffer limits),
   then emits the TB epilogue (goto_tb chaining or exit_tb).  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    /* Keep the original tb flags so we can detect per-tb state changes.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried in tb->flags).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        /* IMM_FLAG survives exactly one insn after the imm that set it.  */
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot just executed: apply any pending MSR restore.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended inside a delay slot: keep state in env, no chaining. */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1817
878096ee
AF
1818void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1819 int flags)
4acb54ba 1820{
878096ee
AF
1821 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1822 CPUMBState *env = &cpu->env;
4acb54ba
EI
1823 int i;
1824
1825 if (!env || !f)
1826 return;
1827
1828 cpu_fprintf(f, "IN: PC=%x %s\n",
1829 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1830 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1831 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1832 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1833 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
4acb54ba
EI
1834 env->btaken, env->btarget,
1835 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43
EI
1836 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1837 (env->sregs[SR_MSR] & MSR_EIP),
1838 (env->sregs[SR_MSR] & MSR_IE));
1839
4acb54ba
EI
1840 for (i = 0; i < 32; i++) {
1841 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1842 if ((i + 1) % 4 == 0)
1843 cpu_fprintf(f, "\n");
1844 }
1845 cpu_fprintf(f, "\n\n");
1846}
1847
cd0c24f9
AF
/* Allocate the TCG globals backing the CPUMBState fields: the helper
   state (debug/iflags/imm), the branch state (btarget/btaken), the
   lwx/swx reservation (res_addr/res_val), and the register files.  */
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    /* General purpose registers r0..r31.  */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    /* Special registers (PC, MSR, ESR, ...).  */
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
1884
bad729e2
RH
/* Restore CPU state after an exception mid-TB: data[0] is the guest PC
   recorded by tcg_gen_insn_start() for the faulting insn.  */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}