/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

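/* Extract the inclusive bit-field [start, end] from src (start is the
   least-significant bit index).  */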
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

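/* Raise an exception: sync iflags and PC into the CPU state, then call the
   raise_exception helper with the given exception index.  */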
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

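/* Chain directly to the next TB when the destination stays on the same page,
   otherwise just update PC and return to the main loop.  */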
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

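/* Return a pointer to the TCGv holding ALU operand B: for type B insns the
   sign-extended 16-bit immediate (or'ed into a pending imm prefix if one is
   active), otherwise register rb.  */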
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable; or in the masked value, not the raw one.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

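/* MSR and special register moves: msrclr/msrset plus mts/mfs, including the
   MMU register window (sr 0x1000..0x10ff) and the read-only PVR registers.  */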
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

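/* Flush any pending direct branch state into env_btaken/env_btarget so that
   an exception taken in a delay slot sees a consistent branch state.  */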
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

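/* imm prefix: latch the upper 16 bits for the following insn and keep
   IMM_FLAG set so the prefix is not cleared at the end of this insn.  */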
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

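/* Compute the effective address for a load/store.  Returns either a pointer
   directly to a register (when the other operand is r0 or a zero immediate)
   or a pointer to *t, a freshly allocated temp that the caller must free.  */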
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

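/* do_rti/do_rtb/do_rte restore MSR state on return from interrupt, break and
   exception respectively: the saved mode bits one position up in MSR are
   shifted back into UM/VM, and IE is set / BIP cleared / EE set with EIP
   cleared accordingly.  */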
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

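/* Top-level decode table: the first entry whose mask matches the major opcode
   selects the handler; the all-zero terminator matches everything and falls
   through to dec_null.  */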
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}

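/* Restore guest state from the insn_start data recorded by
   tcg_gen_insn_start(); only the PC is recorded per instruction here.  */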
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}