1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
4acb54ba 25#include "tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
4acb54ba 30
a7e30d84 31#include "trace-tcg.h"
508127e2 32#include "exec/log.h"
33
34
35#define SIM_COMPAT 0
36#define DISAS_GNU 1
37#define DISAS_MB 1
38#if DISAS_MB && !SIM_COMPAT
39# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40#else
41# define LOG_DIS(...) do { } while (0)
42#endif
43
44#define D(x)
45
46#define EXTRACT_FIELD(src, start, end) \
47 (((src) >> start) & ((1 << (end - start + 1)) - 1))
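/* For instance, EXTRACT_FIELD(dc->ir, 21, 25) yields bits [25:21] of the
   instruction word, i.e. the 5-bit rd register field used by decode(). */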
48
49static TCGv env_debug;
1bcea73e 50static TCGv_env cpu_env;
51static TCGv cpu_R[32];
52static TCGv cpu_SR[18];
53static TCGv env_imm;
54static TCGv env_btaken;
55static TCGv env_btarget;
56static TCGv env_iflags;
4a536270 57static TCGv env_res_addr;
11a76217 58static TCGv env_res_val;
4acb54ba 59
022c62cb 60#include "exec/gen-icount.h"
61
62/* This is the state at translation time. */
63typedef struct DisasContext {
0063ebd6 64 MicroBlazeCPU *cpu;
a5efa644 65 target_ulong pc;
66
67 /* Decoder. */
68 int type_b;
69 uint32_t ir;
70 uint8_t opcode;
71 uint8_t rd, ra, rb;
72 uint16_t imm;
73
74 unsigned int cpustate_changed;
75 unsigned int delayed_branch;
76 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
77 unsigned int clear_imm;
78 int is_jmp;
79
80#define JMP_NOJMP 0
81#define JMP_DIRECT 1
82#define JMP_DIRECT_CC 2
83#define JMP_INDIRECT 3
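    /* jmp records how a pending branch was decoded: JMP_DIRECT and
       JMP_DIRECT_CC have a translation-time known target in jmp_pc
       (unconditional resp. conditional), while JMP_INDIRECT means the
       target is only known at run time through env_btarget. */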
84 unsigned int jmp;
85 uint32_t jmp_pc;
86
87 int abort_at_next_insn;
88 int nr_nops;
89 struct TranslationBlock *tb;
90 int singlestep_enabled;
91} DisasContext;
92
38972938 93static const char *regnames[] =
94{
95 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
96 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
97 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
98 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
99};
100
38972938 101static const char *special_regnames[] =
102{
103 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
104 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
105 "sr16", "sr17", "sr18"
106};
107
108static inline void t_sync_flags(DisasContext *dc)
109{
4abf79a4 110 /* Synch the tb dependent flags between translator and runtime. */
111 if (dc->tb_flags != dc->synced_flags) {
112 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
113 dc->synced_flags = dc->tb_flags;
114 }
115}
116
117static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
118{
119 TCGv_i32 tmp = tcg_const_i32(index);
120
121 t_sync_flags(dc);
122 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 123 gen_helper_raise_exception(cpu_env, tmp);
124 tcg_temp_free_i32(tmp);
125 dc->is_jmp = DISAS_UPDATE;
126}
127
128static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129{
130#ifndef CONFIG_USER_ONLY
131 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
132#else
133 return true;
134#endif
135}
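/* Direct block chaining (goto_tb) is only used when the destination lies
   on the same guest page as the current TB; cross-page targets fall back
   to exit_tb so that a chained link cannot outlive an invalidation of the
   target page.  User-only emulation does not need this restriction. */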
136
137static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
138{
90aa39a1 139 if (use_goto_tb(dc, dest)) {
140 tcg_gen_goto_tb(n);
141 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
90aa39a1 142 tcg_gen_exit_tb((uintptr_t)dc->tb + n);
143 } else {
144 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
145 tcg_gen_exit_tb(0);
146 }
147}
148
149static void read_carry(DisasContext *dc, TCGv d)
150{
151 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
152}
153
154/*
155 * write_carry sets the carry bits in MSR based on bit 0 of v.
156 * v[31:1] are ignored.
157 */
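/* The shift pair below replicates v[0] across the whole word (0 or ~0),
   so the following AND against (MSR_C | MSR_CC) sets or clears both carry
   bits together while leaving the rest of MSR untouched. */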
158static void write_carry(DisasContext *dc, TCGv v)
159{
160 TCGv t0 = tcg_temp_new();
161 tcg_gen_shli_tl(t0, v, 31);
162 tcg_gen_sari_tl(t0, t0, 31);
163 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
164 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
165 ~(MSR_C | MSR_CC));
166 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
167 tcg_temp_free(t0);
168}
169
65ab5eb4 170static void write_carryi(DisasContext *dc, bool carry)
171{
172 TCGv t0 = tcg_temp_new();
65ab5eb4 173 tcg_gen_movi_tl(t0, carry);
174 write_carry(dc, t0);
175 tcg_temp_free(t0);
176}
177
178/* True if ALU operand b is a small immediate that may deserve
179 faster treatment. */
180static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181{
182 /* Immediate insn without the imm prefix ? */
183 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
184}
185
186static inline TCGv *dec_alu_op_b(DisasContext *dc)
187{
188 if (dc->type_b) {
189 if (dc->tb_flags & IMM_FLAG)
190 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
191 else
192 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
193 return &env_imm;
194 } else
195 return &cpu_R[dc->rb];
196}
197
198static void dec_add(DisasContext *dc)
199{
200 unsigned int k, c;
40cbf5b7 201 TCGv cf;
202
203 k = dc->opcode & 4;
204 c = dc->opcode & 2;
205
206 LOG_DIS("add%s%s%s r%d r%d r%d\n",
207 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
208 dc->rd, dc->ra, dc->rb);
209
210 /* Take care of the easy cases first. */
211 if (k) {
212 /* k - keep carry, no need to update MSR. */
213 /* If rd == r0, it's a nop. */
214 if (dc->rd) {
215 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
216
217 if (c) {
218 /* c - Add carry into the result. */
219 cf = tcg_temp_new();
220
221 read_carry(dc, cf);
222 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
223 tcg_temp_free(cf);
224 }
225 }
226 return;
227 }
228
229 /* From now on, we can assume k is zero. So we need to update MSR. */
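    /* The new carry is computed into a temporary from the original operands
       before cpu_R[rd] is overwritten, since rd may alias ra or rb. */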
230 /* Extract carry. */
231 cf = tcg_temp_new();
232 if (c) {
233 read_carry(dc, cf);
234 } else {
235 tcg_gen_movi_tl(cf, 0);
236 }
237
238 if (dc->rd) {
239 TCGv ncf = tcg_temp_new();
5d0bb823 240 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
4acb54ba 241 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
243 write_carry(dc, ncf);
244 tcg_temp_free(ncf);
245 } else {
5d0bb823 246 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
40cbf5b7 247 write_carry(dc, cf);
4acb54ba 248 }
40cbf5b7 249 tcg_temp_free(cf);
250}
251
252static void dec_sub(DisasContext *dc)
253{
254 unsigned int u, cmp, k, c;
e0a42ebc 255 TCGv cf, na;
256
257 u = dc->imm & 2;
258 k = dc->opcode & 4;
259 c = dc->opcode & 2;
260 cmp = (dc->imm & 1) && (!dc->type_b) && k;
261
262 if (cmp) {
263 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
264 if (dc->rd) {
265 if (u)
266 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
267 else
268 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
269 }
270 return;
271 }
272
273 LOG_DIS("sub%s%s r%d, r%d r%d\n",
274 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
275
276 /* Take care of the easy cases first. */
277 if (k) {
278 /* k - keep carry, no need to update MSR. */
279 /* If rd == r0, it's a nop. */
280 if (dc->rd) {
4acb54ba 281 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
282
283 if (c) {
284 /* c - Add carry into the result. */
285 cf = tcg_temp_new();
286
287 read_carry(dc, cf);
288 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
289 tcg_temp_free(cf);
290 }
291 }
292 return;
293 }
294
295 /* From now on, we can assume k is zero. So we need to update MSR. */
296 /* Extract carry. And complement a into na. */
297 cf = tcg_temp_new();
298 na = tcg_temp_new();
299 if (c) {
300 read_carry(dc, cf);
301 } else {
302 tcg_gen_movi_tl(cf, 1);
303 }
304
305 /* d = b + ~a + c. carry defaults to 1. */
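    /* Standard two's complement identity: b - a == b + ~a + 1, so a plain
       subtract feeds in 1 while subtract-with-carry feeds in the current
       carry bit instead. */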
306 tcg_gen_not_tl(na, cpu_R[dc->ra]);
307
308 if (dc->rd) {
309 TCGv ncf = tcg_temp_new();
5d0bb823 310 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
311 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
312 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
313 write_carry(dc, ncf);
314 tcg_temp_free(ncf);
315 } else {
5d0bb823 316 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc 317 write_carry(dc, cf);
4acb54ba 318 }
319 tcg_temp_free(cf);
320 tcg_temp_free(na);
321}
322
323static void dec_pattern(DisasContext *dc)
324{
325 unsigned int mode;
4acb54ba 326
1567a005 327 if ((dc->tb_flags & MSR_EE_FLAG)
328 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
329 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
330 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
331 t_gen_raise_exception(dc, EXCP_HW_EXCP);
332 }
333
334 mode = dc->opcode & 3;
335 switch (mode) {
336 case 0:
337 /* pcmpbf. */
338 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
339 if (dc->rd)
340 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
341 break;
342 case 2:
343 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
344 if (dc->rd) {
345 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
346 cpu_R[dc->ra], cpu_R[dc->rb]);
347 }
348 break;
349 case 3:
350 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
4acb54ba 351 if (dc->rd) {
352 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
353 cpu_R[dc->ra], cpu_R[dc->rb]);
354 }
355 break;
356 default:
0063ebd6 357 cpu_abort(CPU(dc->cpu),
358 "unsupported pattern insn opcode=%x\n", dc->opcode);
359 break;
360 }
361}
362
363static void dec_and(DisasContext *dc)
364{
365 unsigned int not;
366
367 if (!dc->type_b && (dc->imm & (1 << 10))) {
368 dec_pattern(dc);
369 return;
370 }
371
372 not = dc->opcode & (1 << 1);
373 LOG_DIS("and%s\n", not ? "n" : "");
374
375 if (!dc->rd)
376 return;
377
378 if (not) {
a235900e 379 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
380 } else
381 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
382}
383
384static void dec_or(DisasContext *dc)
385{
386 if (!dc->type_b && (dc->imm & (1 << 10))) {
387 dec_pattern(dc);
388 return;
389 }
390
391 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
392 if (dc->rd)
393 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
394}
395
396static void dec_xor(DisasContext *dc)
397{
398 if (!dc->type_b && (dc->imm & (1 << 10))) {
399 dec_pattern(dc);
400 return;
401 }
402
403 LOG_DIS("xor r%d\n", dc->rd);
404 if (dc->rd)
405 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
406}
407
408static inline void msr_read(DisasContext *dc, TCGv d)
409{
410 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
411}
412
413static inline void msr_write(DisasContext *dc, TCGv v)
414{
415 TCGv t;
416
417 t = tcg_temp_new();
4acb54ba 418 dc->cpustate_changed = 1;
97b833c5 419 /* PVR bit is not writable. */
420 tcg_gen_andi_tl(t, v, ~MSR_PVR);
421 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
422 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
423 tcg_temp_free(t);
424}
425
426static void dec_msr(DisasContext *dc)
427{
0063ebd6 428 CPUState *cs = CPU(dc->cpu);
429 TCGv t0, t1;
430 unsigned int sr, to, rn;
97ed5ccd 431 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
432
433 sr = dc->imm & ((1 << 14) - 1);
434 to = dc->imm & (1 << 14);
435 dc->type_b = 1;
436 if (to)
437 dc->cpustate_changed = 1;
438
439 /* msrclr and msrset. */
440 if (!(dc->imm & (1 << 15))) {
441 unsigned int clr = dc->ir & (1 << 16);
442
443 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
444 dc->rd, dc->imm);
1567a005 445
0063ebd6 446 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
447 /* nop??? */
448 return;
449 }
450
451 if ((dc->tb_flags & MSR_EE_FLAG)
452 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
453 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
454 t_gen_raise_exception(dc, EXCP_HW_EXCP);
455 return;
456 }
457
458 if (dc->rd)
459 msr_read(dc, cpu_R[dc->rd]);
460
461 t0 = tcg_temp_new();
462 t1 = tcg_temp_new();
463 msr_read(dc, t0);
464 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
465
466 if (clr) {
467 tcg_gen_not_tl(t1, t1);
468 tcg_gen_and_tl(t0, t0, t1);
469 } else
470 tcg_gen_or_tl(t0, t0, t1);
471 msr_write(dc, t0);
472 tcg_temp_free(t0);
473 tcg_temp_free(t1);
474 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
475 dc->is_jmp = DISAS_UPDATE;
476 return;
477 }
478
479 if (to) {
480 if ((dc->tb_flags & MSR_EE_FLAG)
481 && mem_index == MMU_USER_IDX) {
482 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
483 t_gen_raise_exception(dc, EXCP_HW_EXCP);
484 return;
485 }
486 }
487
488#if !defined(CONFIG_USER_ONLY)
489 /* Catch read/writes to the mmu block. */
490 if ((sr & ~0xff) == 0x1000) {
491 sr &= 7;
492 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
493 if (to)
64254eba 494 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
4acb54ba 495 else
64254eba 496 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
497 return;
498 }
499#endif
500
501 if (to) {
502 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
503 switch (sr) {
504 case 0:
505 break;
506 case 1:
507 msr_write(dc, cpu_R[dc->ra]);
508 break;
509 case 0x3:
510 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
511 break;
512 case 0x5:
513 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
514 break;
515 case 0x7:
97694c57 516 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 517 break;
5818dee5 518 case 0x800:
68cee38a 519 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
520 break;
521 case 0x802:
68cee38a 522 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
5818dee5 523 break;
4acb54ba 524 default:
0063ebd6 525 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
526 break;
527 }
528 } else {
529 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
530
531 switch (sr) {
532 case 0:
533 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
534 break;
535 case 1:
536 msr_read(dc, cpu_R[dc->rd]);
537 break;
538 case 0x3:
539 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
540 break;
541 case 0x5:
542 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
543 break;
544 case 0x7:
97694c57 545 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
546 break;
547 case 0xb:
548 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
549 break;
5818dee5 550 case 0x800:
68cee38a 551 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
552 break;
553 case 0x802:
68cee38a 554 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
5818dee5 555 break;
556 case 0x2000:
557 case 0x2001:
558 case 0x2002:
559 case 0x2003:
560 case 0x2004:
561 case 0x2005:
562 case 0x2006:
563 case 0x2007:
564 case 0x2008:
565 case 0x2009:
566 case 0x200a:
567 case 0x200b:
568 case 0x200c:
569 rn = sr & 0xf;
570 tcg_gen_ld_tl(cpu_R[dc->rd],
68cee38a 571 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
572 break;
573 default:
a47dddd7 574 cpu_abort(cs, "unknown mfs reg %x\n", sr);
575 break;
576 }
577 }
578
579 if (dc->rd == 0) {
580 tcg_gen_movi_tl(cpu_R[0], 0);
581 }
582}
583
584/* Multiplier unit. */
585static void dec_mul(DisasContext *dc)
586{
16ece88d 587 TCGv tmp;
588 unsigned int subcode;
589
1567a005 590 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 591 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
9b964318 592 && !dc->cpu->cfg.use_hw_mul) {
593 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
594 t_gen_raise_exception(dc, EXCP_HW_EXCP);
595 return;
596 }
597
4acb54ba 598 subcode = dc->imm & 3;
599
600 if (dc->type_b) {
601 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
602 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
603 return;
604 }
605
1567a005 606 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
9b964318 607 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
608 /* nop??? */
609 }
610
16ece88d 611 tmp = tcg_temp_new();
612 switch (subcode) {
613 case 0:
614 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 615 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
616 break;
617 case 1:
618 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 619 tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
620 break;
621 case 2:
622 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 623 tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
624 break;
625 case 3:
626 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
16ece88d 627 tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
628 break;
629 default:
0063ebd6 630 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
631 break;
632 }
16ece88d 633 tcg_temp_free(tmp);
634}
635
636/* Div unit. */
637static void dec_div(DisasContext *dc)
638{
639 unsigned int u;
640
641 u = dc->imm & 2;
642 LOG_DIS("div\n");
643
0063ebd6 644 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
47709e4c 645 && !dc->cpu->cfg.use_div) {
646 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
647 t_gen_raise_exception(dc, EXCP_HW_EXCP);
648 }
649
4acb54ba 650 if (u)
651 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
652 cpu_R[dc->ra]);
4acb54ba 653 else
654 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
655 cpu_R[dc->ra]);
656 if (!dc->rd)
657 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
658}
659
660static void dec_barrel(DisasContext *dc)
661{
662 TCGv t0;
663 unsigned int s, t;
664
1567a005 665 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 666 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
7faa66aa 667 && !dc->cpu->cfg.use_barrel) {
668 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
669 t_gen_raise_exception(dc, EXCP_HW_EXCP);
670 return;
671 }
672
673 s = dc->imm & (1 << 10);
674 t = dc->imm & (1 << 9);
675
676 LOG_DIS("bs%s%s r%d r%d r%d\n",
677 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
678
679 t0 = tcg_temp_new();
680
681 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
682 tcg_gen_andi_tl(t0, t0, 31);
683
684 if (s)
685 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
686 else {
687 if (t)
688 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
689 else
690 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
691 }
692}
693
694static void dec_bit(DisasContext *dc)
695{
0063ebd6 696 CPUState *cs = CPU(dc->cpu);
09b9f113 697 TCGv t0;
4acb54ba 698 unsigned int op;
97ed5ccd 699 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba 700
ace2e4da 701 op = dc->ir & ((1 << 9) - 1);
702 switch (op) {
703 case 0x21:
704 /* src. */
705 t0 = tcg_temp_new();
706
707 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
708 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
709 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 710 if (dc->rd) {
4acb54ba 711 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
09b9f113 712 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 713 }
714 tcg_temp_free(t0);
715 break;
716
717 case 0x1:
718 case 0x41:
719 /* srl. */
720 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
721
722 /* Update carry. Note that write carry only looks at the LSB. */
723 write_carry(dc, cpu_R[dc->ra]);
724 if (dc->rd) {
725 if (op == 0x41)
726 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
727 else
728 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
729 }
730 break;
731 case 0x60:
732 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
733 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
734 break;
735 case 0x61:
736 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
737 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
738 break;
739 case 0x64:
740 case 0x66:
741 case 0x74:
742 case 0x76:
743 /* wdc. */
744 LOG_DIS("wdc r%d\n", dc->ra);
745 if ((dc->tb_flags & MSR_EE_FLAG)
746 && mem_index == MMU_USER_IDX) {
747 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
748 t_gen_raise_exception(dc, EXCP_HW_EXCP);
749 return;
750 }
751 break;
752 case 0x68:
753 /* wic. */
754 LOG_DIS("wic r%d\n", dc->ra);
755 if ((dc->tb_flags & MSR_EE_FLAG)
756 && mem_index == MMU_USER_IDX) {
757 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
758 t_gen_raise_exception(dc, EXCP_HW_EXCP);
759 return;
760 }
4acb54ba 761 break;
762 case 0xe0:
763 if ((dc->tb_flags & MSR_EE_FLAG)
764 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
765 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
766 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
767 t_gen_raise_exception(dc, EXCP_HW_EXCP);
768 }
0063ebd6 769 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
5318420c 770 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
771 }
772 break;
773 case 0x1e0:
774 /* swapb */
775 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
776 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
777 break;
b8c6a5d9 778 case 0x1e2:
779 /*swaph */
780 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
781 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
782 break;
4acb54ba 783 default:
784 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
785 dc->pc, op, dc->rd, dc->ra, dc->rb);
786 break;
787 }
788}
789
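/* If a branch with a translation-time known target is still pending when
   an insn that may fault (e.g. a load or store) is translated, materialise
   the branch state into env_btaken/env_btarget so that the exception path
   sees a consistent picture. */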
790static inline void sync_jmpstate(DisasContext *dc)
791{
792 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
793 if (dc->jmp == JMP_DIRECT) {
794 tcg_gen_movi_tl(env_btaken, 1);
795 }
796 dc->jmp = JMP_INDIRECT;
797 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
798 }
799}
800
801static void dec_imm(DisasContext *dc)
802{
803 LOG_DIS("imm %x\n", dc->imm << 16);
804 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
805 dc->tb_flags |= IMM_FLAG;
806 dc->clear_imm = 0;
807}
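/* Example: loading a full 32-bit constant such as 0x12345678 into r3 is
   typically emitted as
       imm   0x1234
       addik r3, r0, 0x5678
   The imm prefix parks the upper half in env_imm and sets IMM_FLAG; the
   following type-B insn then ORs its own 16-bit immediate into it in
   dec_alu_op_b(). */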
808
809static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
810{
811 unsigned int extimm = dc->tb_flags & IMM_FLAG;
812 /* Should be set to one if r1 is used by load/stores. */
813 int stackprot = 0;
814
815 /* All load/stores use ra. */
9aaaa181 816 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
817 stackprot = 1;
818 }
4acb54ba 819
9ef55357 820 /* Treat the common cases first. */
4acb54ba 821 if (!dc->type_b) {
822 /* If any of the regs is r0, return a ptr to the other. */
823 if (dc->ra == 0) {
824 return &cpu_R[dc->rb];
825 } else if (dc->rb == 0) {
826 return &cpu_R[dc->ra];
827 }
828
9aaaa181 829 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
830 stackprot = 1;
831 }
832
833 *t = tcg_temp_new();
834 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
835
836 if (stackprot) {
64254eba 837 gen_helper_stackprot(cpu_env, *t);
5818dee5 838 }
839 return t;
840 }
841 /* Immediate. */
842 if (!extimm) {
843 if (dc->imm == 0) {
844 return &cpu_R[dc->ra];
845 }
846 *t = tcg_temp_new();
847 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
848 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
849 } else {
850 *t = tcg_temp_new();
851 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
852 }
853
5818dee5 854 if (stackprot) {
64254eba 855 gen_helper_stackprot(cpu_env, *t);
5818dee5 856 }
857 return t;
858}
859
860static void dec_load(DisasContext *dc)
861{
47acdd63 862 TCGv t, v, *addr;
8cc9b43f 863 unsigned int size, rev = 0, ex = 0;
47acdd63 864 TCGMemOp mop;
4acb54ba 865
866 mop = dc->opcode & 3;
867 size = 1 << mop;
868 if (!dc->type_b) {
869 rev = (dc->ir >> 9) & 1;
8cc9b43f 870 ex = (dc->ir >> 10) & 1;
9f8beb66 871 }
872 mop |= MO_TE;
873 if (rev) {
874 mop ^= MO_BSWAP;
875 }
9f8beb66 876
0187688f 877 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 878 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
879 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
880 t_gen_raise_exception(dc, EXCP_HW_EXCP);
881 return;
882 }
4acb54ba 883
884 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
885 ex ? "x" : "");
9f8beb66 886
887 t_sync_flags(dc);
888 addr = compute_ldst_addr(dc, &t);
889
890 /*
891 * When doing reverse accesses we need to do two things.
892 *
4ff9786c 893 * 1. Reverse the address wrt endianness.
894 * 2. Byteswap the data lanes on the way back into the CPU core.
895 */
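    /* A reversed 32-bit access only needs its data lanes swapped, which the
       MO_BSWAP adjustment above already covers; smaller accesses must also
       have their address swizzled within the 32-bit word, as done below. */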
896 if (rev && size != 4) {
897 /* Endian reverse the address. t is addr. */
898 switch (size) {
899 case 1:
900 {
901 /* 00 -> 11
902 01 -> 10
903 10 -> 01
904 11 -> 00 */
905 TCGv low = tcg_temp_new();
906
907 /* Force addr into the temp. */
908 if (addr != &t) {
909 t = tcg_temp_new();
910 tcg_gen_mov_tl(t, *addr);
911 addr = &t;
912 }
913
914 tcg_gen_andi_tl(low, t, 3);
915 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
916 tcg_gen_andi_tl(t, t, ~3);
917 tcg_gen_or_tl(t, t, low);
918 tcg_gen_mov_tl(env_imm, t);
919 tcg_temp_free(low);
920 break;
921 }
922
923 case 2:
924 /* 00 -> 10
925 10 -> 00. */
926 /* Force addr into the temp. */
927 if (addr != &t) {
928 t = tcg_temp_new();
929 tcg_gen_xori_tl(t, *addr, 2);
930 addr = &t;
931 } else {
932 tcg_gen_xori_tl(t, t, 2);
933 }
934 break;
935 default:
0063ebd6 936 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
937 break;
938 }
939 }
940
941 /* lwx does not throw unaligned access errors, so force alignment */
942 if (ex) {
943 /* Force addr into the temp. */
944 if (addr != &t) {
945 t = tcg_temp_new();
946 tcg_gen_mov_tl(t, *addr);
947 addr = &t;
948 }
949 tcg_gen_andi_tl(t, t, ~3);
950 }
951
952 /* If we get a fault on a dslot, the jmpstate better be in sync. */
953 sync_jmpstate(dc);
954
955 /* Verify alignment if needed. */
956 /*
957 * Microblaze gives MMU faults priority over faults due to
958 * unaligned addresses. That's why we speculatively do the load
959 * into v. If the load succeeds, we verify alignment of the
960 * address and if that succeeds we write into the destination reg.
961 */
962 v = tcg_temp_new();
97ed5ccd 963 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 964
0063ebd6 965 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507 966 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 967 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 968 tcg_const_tl(0), tcg_const_tl(size - 1));
969 }
970
971 if (ex) {
972 tcg_gen_mov_tl(env_res_addr, *addr);
973 tcg_gen_mov_tl(env_res_val, v);
974 }
975 if (dc->rd) {
976 tcg_gen_mov_tl(cpu_R[dc->rd], v);
977 }
978 tcg_temp_free(v);
979
8cc9b43f 980 if (ex) { /* lwx */
b6af0975 981 /* no support for AXI exclusive so always clear C */
8cc9b43f 982 write_carryi(dc, 0);
983 }
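    /* Reservation protocol: the lwx above has recorded the address and the
       loaded value in env_res_addr/env_res_val; the matching swx in
       dec_store() only performs its store if both still match, reporting
       success by clearing the carry flag and failure by leaving it set. */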
984
985 if (addr == &t)
986 tcg_temp_free(t);
987}
988
989static void dec_store(DisasContext *dc)
990{
4a536270 991 TCGv t, *addr, swx_addr;
42a268c2 992 TCGLabel *swx_skip = NULL;
8cc9b43f 993 unsigned int size, rev = 0, ex = 0;
47acdd63 994 TCGMemOp mop;
4acb54ba 995
996 mop = dc->opcode & 3;
997 size = 1 << mop;
998 if (!dc->type_b) {
999 rev = (dc->ir >> 9) & 1;
8cc9b43f 1000 ex = (dc->ir >> 10) & 1;
9f8beb66 1001 }
1002 mop |= MO_TE;
1003 if (rev) {
1004 mop ^= MO_BSWAP;
1005 }
4acb54ba 1006
0187688f 1007 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1008 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1009 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1010 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1011 return;
1012 }
1013
1014 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1015 ex ? "x" : "");
1016 t_sync_flags(dc);
1017 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1018 sync_jmpstate(dc);
1019 addr = compute_ldst_addr(dc, &t);
968a40f6 1020
083dbf48 1021 swx_addr = tcg_temp_local_new();
8cc9b43f 1022 if (ex) { /* swx */
11a76217 1023 TCGv tval;
1024
1025 /* Force addr into the swx_addr. */
1026 tcg_gen_mov_tl(swx_addr, *addr);
1027 addr = &swx_addr;
1028 /* swx does not throw unaligned access errors, so force alignment */
1029 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1030
1031 write_carryi(dc, 1);
1032 swx_skip = gen_new_label();
4a536270 1033 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1034
1035 /* Compare the value loaded at lwx with current contents of
1036 the reserved location.
1037 FIXME: This only works for system emulation where we can expect
1038 this compare and the following write to be atomic. For user
1039 emulation we need to add atomicity between threads. */
1040 tval = tcg_temp_new();
97ed5ccd 1041 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
0063ebd6 1042 MO_TEUL);
11a76217 1043 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1044 write_carryi(dc, 0);
11a76217 1045 tcg_temp_free(tval);
1046 }
1047
1048 if (rev && size != 4) {
1049 /* Endian reverse the address. t is addr. */
1050 switch (size) {
1051 case 1:
1052 {
1053 /* 00 -> 11
1054 01 -> 10
1055 10 -> 01
1056 11 -> 00 */
1057 TCGv low = tcg_temp_new();
1058
1059 /* Force addr into the temp. */
1060 if (addr != &t) {
1061 t = tcg_temp_new();
1062 tcg_gen_mov_tl(t, *addr);
1063 addr = &t;
1064 }
1065
1066 tcg_gen_andi_tl(low, t, 3);
1067 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1068 tcg_gen_andi_tl(t, t, ~3);
1069 tcg_gen_or_tl(t, t, low);
1070 tcg_gen_mov_tl(env_imm, t);
1071 tcg_temp_free(low);
1072 break;
1073 }
1074
1075 case 2:
1076 /* 00 -> 10
1077 10 -> 00. */
1078 /* Force addr into the temp. */
1079 if (addr != &t) {
1080 t = tcg_temp_new();
1081 tcg_gen_xori_tl(t, *addr, 2);
1082 addr = &t;
1083 } else {
1084 tcg_gen_xori_tl(t, t, 2);
1085 }
1086 break;
1087 default:
0063ebd6 1088 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1089 break;
1090 }
9f8beb66 1091 }
97ed5ccd 1092 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 1093
968a40f6 1094 /* Verify alignment if needed. */
0063ebd6 1095 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1096 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1097 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1098 * in memory. One possible way to achieve this is to probe
1099 * the MMU prior to the memaccess, that way we could put
1100 * the alignment checks in between the probe and the mem
1101 * access.
a12f6507 1102 */
64254eba 1103 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1104 tcg_const_tl(1), tcg_const_tl(size - 1));
968a40f6 1105 }
083dbf48 1106
1107 if (ex) {
1108 gen_set_label(swx_skip);
8cc9b43f 1109 }
083dbf48 1110 tcg_temp_free(swx_addr);
968a40f6 1111
1112 if (addr == &t)
1113 tcg_temp_free(t);
1114}
1115
1116static inline void eval_cc(DisasContext *dc, unsigned int cc,
1117 TCGv d, TCGv a, TCGv b)
1118{
1119 switch (cc) {
1120 case CC_EQ:
b2565c69 1121 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1122 break;
1123 case CC_NE:
b2565c69 1124 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1125 break;
1126 case CC_LT:
b2565c69 1127 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1128 break;
1129 case CC_LE:
b2565c69 1130 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1131 break;
1132 case CC_GE:
b2565c69 1133 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1134 break;
1135 case CC_GT:
b2565c69 1136 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1137 break;
1138 default:
0063ebd6 1139 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1140 break;
1141 }
1142}
1143
1144static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1145{
42a268c2 1146 TCGLabel *l1 = gen_new_label();
1147 /* Conditional jmp. */
1148 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1149 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1150 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1151 gen_set_label(l1);
1152}
1153
1154static void dec_bcc(DisasContext *dc)
1155{
1156 unsigned int cc;
1157 unsigned int dslot;
1158
1159 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1160 dslot = dc->ir & (1 << 25);
1161 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1162
1163 dc->delayed_branch = 1;
1164 if (dslot) {
1165 dc->delayed_branch = 2;
1166 dc->tb_flags |= D_FLAG;
1167 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1168 cpu_env, offsetof(CPUMBState, bimm));
1169 }
1170
1171 if (dec_alu_op_b_is_small_imm(dc)) {
1172 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1173
1174 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
844bab60 1175 dc->jmp = JMP_DIRECT_CC;
23979dc5 1176 dc->jmp_pc = dc->pc + offset;
61204ce8 1177 } else {
23979dc5 1178 dc->jmp = JMP_INDIRECT;
1179 tcg_gen_movi_tl(env_btarget, dc->pc);
1180 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1181 }
61204ce8 1182 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1183}
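/* dec_bcc only records the branch: env_btaken holds the evaluated condition
   and env_btarget (or jmp_pc) the target.  The PC is actually updated after
   the optional delay slot has been translated, either by direct chaining or
   by eval_cond_jmp() in the main translation loop. */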
1184
1185static void dec_br(DisasContext *dc)
1186{
9f6113c7 1187 unsigned int dslot, link, abs, mbar;
97ed5ccd 1188 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1189
1190 dslot = dc->ir & (1 << 20);
1191 abs = dc->ir & (1 << 19);
1192 link = dc->ir & (1 << 18);
1193
1194 /* Memory barrier. */
1195 mbar = (dc->ir >> 16) & 31;
1196 if (mbar == 2 && dc->imm == 4) {
1197 /* mbar IMM & 16 decodes to sleep. */
1198 if (dc->rd & 16) {
1199 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1200 TCGv_i32 tmp_1 = tcg_const_i32(1);
1201
1202 LOG_DIS("sleep\n");
1203
1204 t_sync_flags(dc);
1205 tcg_gen_st_i32(tmp_1, cpu_env,
1206 -offsetof(MicroBlazeCPU, env)
1207 +offsetof(CPUState, halted));
1208 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1209 gen_helper_raise_exception(cpu_env, tmp_hlt);
1210 tcg_temp_free_i32(tmp_hlt);
1211 tcg_temp_free_i32(tmp_1);
1212 return;
1213 }
1214 LOG_DIS("mbar %d\n", dc->rd);
1215 /* Break the TB. */
1216 dc->cpustate_changed = 1;
1217 return;
1218 }
1219
1220 LOG_DIS("br%s%s%s%s imm=%x\n",
1221 abs ? "a" : "", link ? "l" : "",
1222 dc->type_b ? "i" : "", dslot ? "d" : "",
1223 dc->imm);
1224
1225 dc->delayed_branch = 1;
1226 if (dslot) {
1227 dc->delayed_branch = 2;
1228 dc->tb_flags |= D_FLAG;
1229 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1230 cpu_env, offsetof(CPUMBState, bimm));
1231 }
1232 if (link && dc->rd)
1233 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1234
1235 dc->jmp = JMP_INDIRECT;
1236 if (abs) {
1237 tcg_gen_movi_tl(env_btaken, 1);
1238 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1239 if (link && !dslot) {
1240 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1241 t_gen_raise_exception(dc, EXCP_BREAK);
1242 if (dc->imm == 0) {
1243 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1244 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1245 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1246 return;
1247 }
1248
1249 t_gen_raise_exception(dc, EXCP_DEBUG);
1250 }
1251 }
4acb54ba 1252 } else {
1253 if (dec_alu_op_b_is_small_imm(dc)) {
1254 dc->jmp = JMP_DIRECT;
1255 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1256 } else {
1257 tcg_gen_movi_tl(env_btaken, 1);
1258 tcg_gen_movi_tl(env_btarget, dc->pc);
1259 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1260 }
1261 }
1262}
1263
1264static inline void do_rti(DisasContext *dc)
1265{
1266 TCGv t0, t1;
1267 t0 = tcg_temp_new();
1268 t1 = tcg_temp_new();
1269 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1270 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1271 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1272
1273 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1274 tcg_gen_or_tl(t1, t1, t0);
1275 msr_write(dc, t1);
1276 tcg_temp_free(t1);
1277 tcg_temp_free(t0);
1278 dc->tb_flags &= ~DRTI_FLAG;
1279}
1280
1281static inline void do_rtb(DisasContext *dc)
1282{
1283 TCGv t0, t1;
1284 t0 = tcg_temp_new();
1285 t1 = tcg_temp_new();
1286 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1287 tcg_gen_shri_tl(t0, t1, 1);
1288 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1289
1290 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1291 tcg_gen_or_tl(t1, t1, t0);
1292 msr_write(dc, t1);
1293 tcg_temp_free(t1);
1294 tcg_temp_free(t0);
1295 dc->tb_flags &= ~DRTB_FLAG;
1296}
1297
1298static inline void do_rte(DisasContext *dc)
1299{
1300 TCGv t0, t1;
1301 t0 = tcg_temp_new();
1302 t1 = tcg_temp_new();
1303
1304 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1305 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1306 tcg_gen_shri_tl(t0, t1, 1);
1307 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1308
1309 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1310 tcg_gen_or_tl(t1, t1, t0);
1311 msr_write(dc, t1);
1312 tcg_temp_free(t1);
1313 tcg_temp_free(t0);
1314 dc->tb_flags &= ~DRTE_FLAG;
1315}
1316
1317static void dec_rts(DisasContext *dc)
1318{
1319 unsigned int b_bit, i_bit, e_bit;
97ed5ccd 1320 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1321
1322 i_bit = dc->ir & (1 << 21);
1323 b_bit = dc->ir & (1 << 22);
1324 e_bit = dc->ir & (1 << 23);
1325
1326 dc->delayed_branch = 2;
1327 dc->tb_flags |= D_FLAG;
1328 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1329 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1330
1331 if (i_bit) {
1332 LOG_DIS("rtid ir=%x\n", dc->ir);
1333 if ((dc->tb_flags & MSR_EE_FLAG)
1334 && mem_index == MMU_USER_IDX) {
1335 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1336 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1337 }
1338 dc->tb_flags |= DRTI_FLAG;
1339 } else if (b_bit) {
1340 LOG_DIS("rtbd ir=%x\n", dc->ir);
1341 if ((dc->tb_flags & MSR_EE_FLAG)
1342 && mem_index == MMU_USER_IDX) {
1343 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1344 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1345 }
1346 dc->tb_flags |= DRTB_FLAG;
1347 } else if (e_bit) {
1348 LOG_DIS("rted ir=%x\n", dc->ir);
1349 if ((dc->tb_flags & MSR_EE_FLAG)
1350 && mem_index == MMU_USER_IDX) {
1351 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1352 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1353 }
1354 dc->tb_flags |= DRTE_FLAG;
1355 } else
1356 LOG_DIS("rts ir=%x\n", dc->ir);
1357
23979dc5 1358 dc->jmp = JMP_INDIRECT;
1359 tcg_gen_movi_tl(env_btaken, 1);
1360 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1361}
1362
1363static int dec_check_fpuv2(DisasContext *dc)
1364{
be67e9ab 1365 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1366 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1367 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1368 }
be67e9ab 1369 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
1370}
1371
1372static void dec_fpu(DisasContext *dc)
1373{
1374 unsigned int fpu_insn;
1375
1567a005 1376 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1377 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
be67e9ab 1378 && (dc->cpu->cfg.use_fpu != 1)) {
97694c57 1379 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1380 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1381 return;
1382 }
1383
1384 fpu_insn = (dc->ir >> 7) & 7;
1385
1386 switch (fpu_insn) {
1387 case 0:
1388 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1389 cpu_R[dc->rb]);
1390 break;
1391
1392 case 1:
1393 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1394 cpu_R[dc->rb]);
1395 break;
1396
1397 case 2:
1398 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1399 cpu_R[dc->rb]);
1400 break;
1401
1402 case 3:
1403 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1404 cpu_R[dc->rb]);
1405 break;
1406
1407 case 4:
1408 switch ((dc->ir >> 4) & 7) {
1409 case 0:
64254eba 1410 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1411 cpu_R[dc->ra], cpu_R[dc->rb]);
1412 break;
1413 case 1:
64254eba 1414 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1415 cpu_R[dc->ra], cpu_R[dc->rb]);
1416 break;
1417 case 2:
64254eba 1418 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1419 cpu_R[dc->ra], cpu_R[dc->rb]);
1420 break;
1421 case 3:
64254eba 1422 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1423 cpu_R[dc->ra], cpu_R[dc->rb]);
1424 break;
1425 case 4:
64254eba 1426 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1427 cpu_R[dc->ra], cpu_R[dc->rb]);
1428 break;
1429 case 5:
64254eba 1430 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1431 cpu_R[dc->ra], cpu_R[dc->rb]);
1432 break;
1433 case 6:
64254eba 1434 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1435 cpu_R[dc->ra], cpu_R[dc->rb]);
1436 break;
1437 default:
1438 qemu_log_mask(LOG_UNIMP,
1439 "unimplemented fcmp fpu_insn=%x pc=%x"
1440 " opc=%x\n",
1441 fpu_insn, dc->pc, dc->opcode);
1442 dc->abort_at_next_insn = 1;
1443 break;
1444 }
1445 break;
1446
1447 case 5:
1448 if (!dec_check_fpuv2(dc)) {
1449 return;
1450 }
64254eba 1451 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1452 break;
1453
1454 case 6:
1455 if (!dec_check_fpuv2(dc)) {
1456 return;
1457 }
64254eba 1458 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1459 break;
1460
1461 case 7:
1462 if (!dec_check_fpuv2(dc)) {
1463 return;
1464 }
64254eba 1465 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1466 break;
1467
1468 default:
1469 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1470 " opc=%x\n",
1471 fpu_insn, dc->pc, dc->opcode);
1472 dc->abort_at_next_insn = 1;
1473 break;
1474 }
1475}
1476
1477static void dec_null(DisasContext *dc)
1478{
02b33596 1479 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1480 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1481 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1482 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1483 return;
1484 }
1d512a65 1485 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1486 dc->abort_at_next_insn = 1;
1487}
1488
1489/* Insns connected to FSL or AXI stream attached devices. */
1490static void dec_stream(DisasContext *dc)
1491{
97ed5ccd 1492 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1493 TCGv_i32 t_id, t_ctrl;
1494 int ctrl;
1495
1496 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1497 dc->type_b ? "" : "d", dc->imm);
1498
1499 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1500 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1501 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1502 return;
1503 }
1504
1505 t_id = tcg_temp_new();
1506 if (dc->type_b) {
1507 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1508 ctrl = dc->imm >> 10;
1509 } else {
1510 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1511 ctrl = dc->imm >> 5;
1512 }
1513
1514 t_ctrl = tcg_const_tl(ctrl);
1515
1516 if (dc->rd == 0) {
1517 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1518 } else {
1519 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1520 }
1521 tcg_temp_free(t_id);
1522 tcg_temp_free(t_ctrl);
1523}
1524
1525static struct decoder_info {
1526 struct {
1527 uint32_t bits;
1528 uint32_t mask;
1529 };
1530 void (*dec)(DisasContext *dc);
1531} decinfo[] = {
1532 {DEC_ADD, dec_add},
1533 {DEC_SUB, dec_sub},
1534 {DEC_AND, dec_and},
1535 {DEC_XOR, dec_xor},
1536 {DEC_OR, dec_or},
1537 {DEC_BIT, dec_bit},
1538 {DEC_BARREL, dec_barrel},
1539 {DEC_LD, dec_load},
1540 {DEC_ST, dec_store},
1541 {DEC_IMM, dec_imm},
1542 {DEC_BR, dec_br},
1543 {DEC_BCC, dec_bcc},
1544 {DEC_RTS, dec_rts},
1567a005 1545 {DEC_FPU, dec_fpu},
1546 {DEC_MUL, dec_mul},
1547 {DEC_DIV, dec_div},
1548 {DEC_MSR, dec_msr},
6d76d23e 1549 {DEC_STREAM, dec_stream},
1550 {{0, 0}, dec_null}
1551};
1552
64254eba 1553static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1554{
1555 int i;
1556
64254eba 1557 dc->ir = ir;
1558 LOG_DIS("%8.8x\t", dc->ir);
1559
1560 if (dc->ir)
1561 dc->nr_nops = 0;
1562 else {
1567a005 1563 if ((dc->tb_flags & MSR_EE_FLAG)
1564 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1565 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1566 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1567 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1568 return;
1569 }
1570
1571 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1572 dc->nr_nops++;
a47dddd7 1573 if (dc->nr_nops > 4) {
0063ebd6 1574 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
a47dddd7 1575 }
1576 }
1577 /* bit 2 seems to indicate insn type. */
1578 dc->type_b = ir & (1 << 29);
1579
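    /* Field layout of the instruction word (bit 0 = LSB): opcode in
       ir[31:26], rd in ir[25:21], ra in ir[20:16], rb in ir[15:11]; type-B
       insns carry a 16-bit immediate in ir[15:0] in place of rb. */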
1580 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1581 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1582 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1583 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1584 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1585
1586 /* Large switch for all insns. */
1587 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1588 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1589 decinfo[i].dec(dc);
1590 break;
1591 }
1592 }
1593}
1594
4acb54ba 1595/* generate intermediate code for basic block 'tb'. */
4e5e1215 1596void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1597{
4e5e1215 1598 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
ed2803da 1599 CPUState *cs = CPU(cpu);
4acb54ba 1600 uint32_t pc_start;
1601 struct DisasContext ctx;
1602 struct DisasContext *dc = &ctx;
1603 uint32_t next_page_start, org_flags;
1604 target_ulong npc;
1605 int num_insns;
1606 int max_insns;
1607
4acb54ba 1608 pc_start = tb->pc;
0063ebd6 1609 dc->cpu = cpu;
1610 dc->tb = tb;
1611 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1612
1613 dc->is_jmp = DISAS_NEXT;
1614 dc->jmp = 0;
1615 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1616 if (dc->delayed_branch) {
1617 dc->jmp = JMP_INDIRECT;
1618 }
4acb54ba 1619 dc->pc = pc_start;
ed2803da 1620 dc->singlestep_enabled = cs->singlestep_enabled;
1621 dc->cpustate_changed = 0;
1622 dc->abort_at_next_insn = 0;
1623 dc->nr_nops = 0;
1624
1625 if (pc_start & 3) {
1626 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1627 }
4acb54ba 1628
4acb54ba 1629 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1630 num_insns = 0;
1631 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 1632 if (max_insns == 0) {
4acb54ba 1633 max_insns = CF_COUNT_MASK;
1634 }
1635 if (max_insns > TCG_MAX_INSNS) {
1636 max_insns = TCG_MAX_INSNS;
1637 }
4acb54ba 1638
cd42d5b2 1639 gen_tb_start(tb);
1640 do
1641 {
667b8e29 1642 tcg_gen_insn_start(dc->pc);
959082fc 1643 num_insns++;
4acb54ba 1644
1645#if SIM_COMPAT
1646 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1647 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1648 gen_helper_debug();
1649 }
1650#endif
1651
1652 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1653 t_gen_raise_exception(dc, EXCP_DEBUG);
1654 dc->is_jmp = DISAS_UPDATE;
1655 /* The address covered by the breakpoint must be included in
1656 [tb->pc, tb->pc + tb->size) in order for it to be
1657 properly cleared -- thus we increment the PC here so that
1658 the logic setting tb->size below does the right thing. */
1659 dc->pc += 4;
1660 break;
1661 }
1662
1663 /* Pretty disas. */
1664 LOG_DIS("%8.8x:\t", dc->pc);
1665
959082fc 1666 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4acb54ba 1667 gen_io_start();
959082fc 1668 }
1669
1670 dc->clear_imm = 1;
64254eba 1671 decode(dc, cpu_ldl_code(env, dc->pc));
1672 if (dc->clear_imm)
1673 dc->tb_flags &= ~IMM_FLAG;
4acb54ba 1674 dc->pc += 4;
1675
1676 if (dc->delayed_branch) {
1677 dc->delayed_branch--;
1678 if (!dc->delayed_branch) {
1679 if (dc->tb_flags & DRTI_FLAG)
1680 do_rti(dc);
1681 if (dc->tb_flags & DRTB_FLAG)
1682 do_rtb(dc);
1683 if (dc->tb_flags & DRTE_FLAG)
1684 do_rte(dc);
1685 /* Clear the delay slot flag. */
1686 dc->tb_flags &= ~D_FLAG;
1687 /* If it is a direct jump, try direct chaining. */
23979dc5 1688 if (dc->jmp == JMP_INDIRECT) {
1689 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1690 dc->is_jmp = DISAS_JUMP;
23979dc5 1691 } else if (dc->jmp == JMP_DIRECT) {
1692 t_sync_flags(dc);
1693 gen_goto_tb(dc, 0, dc->jmp_pc);
1694 dc->is_jmp = DISAS_TB_JUMP;
1695 } else if (dc->jmp == JMP_DIRECT_CC) {
42a268c2 1696 TCGLabel *l1 = gen_new_label();
23979dc5 1697 t_sync_flags(dc);
1698 /* Conditional jmp. */
1699 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1700 gen_goto_tb(dc, 1, dc->pc);
1701 gen_set_label(l1);
1702 gen_goto_tb(dc, 0, dc->jmp_pc);
1703
1704 dc->is_jmp = DISAS_TB_JUMP;
1705 }
1706 break;
1707 }
1708 }
ed2803da 1709 if (cs->singlestep_enabled) {
4acb54ba 1710 break;
ed2803da 1711 }
4acb54ba 1712 } while (!dc->is_jmp && !dc->cpustate_changed
1713 && !tcg_op_buf_full()
1714 && !singlestep
1715 && (dc->pc < next_page_start)
1716 && num_insns < max_insns);
1717
1718 npc = dc->pc;
844bab60 1719 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1720 if (dc->tb_flags & D_FLAG) {
1721 dc->is_jmp = DISAS_UPDATE;
1722 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1723 sync_jmpstate(dc);
1724 } else
1725 npc = dc->jmp_pc;
1726 }
1727
1728 if (tb->cflags & CF_LAST_IO)
1729 gen_io_end();
1730 /* Force an update if the per-tb cpu state has changed. */
1731 if (dc->is_jmp == DISAS_NEXT
1732 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1733 dc->is_jmp = DISAS_UPDATE;
1734 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1735 }
1736 t_sync_flags(dc);
1737
ed2803da 1738 if (unlikely(cs->singlestep_enabled)) {
1739 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1740
1741 if (dc->is_jmp != DISAS_JUMP) {
4acb54ba 1742 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
6c5f738d 1743 }
64254eba 1744 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1745 tcg_temp_free_i32(tmp);
1746 } else {
1747 switch(dc->is_jmp) {
1748 case DISAS_NEXT:
1749 gen_goto_tb(dc, 1, npc);
1750 break;
1751 default:
1752 case DISAS_JUMP:
1753 case DISAS_UPDATE:
1754 /* indicate that the hash table must be used
1755 to find the next TB */
1756 tcg_gen_exit_tb(0);
1757 break;
1758 case DISAS_TB_JUMP:
1759 /* nothing more to generate */
1760 break;
1761 }
1762 }
806f352d 1763 gen_tb_end(tb, num_insns);
0a7df5da 1764
1765 tb->size = dc->pc - pc_start;
1766 tb->icount = num_insns;
1767
1768#ifdef DEBUG_DISAS
1769#if !SIM_COMPAT
1770 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1771 && qemu_log_in_addr_range(pc_start)) {
1ee73216 1772 qemu_log_lock();
f01a5e7e 1773 qemu_log("--------------\n");
4acb54ba 1774#if DISAS_GNU
d49190c4 1775 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
4acb54ba 1776#endif
1777 qemu_log("\nisize=%d osize=%d\n",
1778 dc->pc - pc_start, tcg_op_buf_count());
1ee73216 1779 qemu_log_unlock();
1780 }
1781#endif
1782#endif
1783 assert(!dc->abort_at_next_insn);
1784}
1785
1786void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1787 int flags)
4acb54ba 1788{
1789 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1790 CPUMBState *env = &cpu->env;
1791 int i;
1792
1793 if (!env || !f)
1794 return;
1795
1796 cpu_fprintf(f, "IN: PC=%x %s\n",
1797 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1798 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1799 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1800 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1801 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1802 env->btaken, env->btarget,
1803 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1804 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1805 (env->sregs[SR_MSR] & MSR_EIP),
1806 (env->sregs[SR_MSR] & MSR_IE));
1807
1808 for (i = 0; i < 32; i++) {
1809 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1810 if ((i + 1) % 4 == 0)
1811 cpu_fprintf(f, "\n");
1812 }
1813 cpu_fprintf(f, "\n\n");
1814}
1815
b33ab1f7 1816MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
4acb54ba 1817{
b77f98ca 1818 MicroBlazeCPU *cpu;
4acb54ba 1819
b77f98ca 1820 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
4acb54ba 1821
746b03b2 1822 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
4acb54ba 1823
1824 return cpu;
1825}
4acb54ba 1826
1827void mb_tcg_init(void)
1828{
1829 int i;
1830
1831 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7c255043 1832 tcg_ctx.tcg_env = cpu_env;
4acb54ba 1833
e1ccc054 1834 env_debug = tcg_global_mem_new(cpu_env,
68cee38a 1835 offsetof(CPUMBState, debug),
4acb54ba 1836 "debug0");
e1ccc054 1837 env_iflags = tcg_global_mem_new(cpu_env,
68cee38a 1838 offsetof(CPUMBState, iflags),
4acb54ba 1839 "iflags");
e1ccc054 1840 env_imm = tcg_global_mem_new(cpu_env,
68cee38a 1841 offsetof(CPUMBState, imm),
4acb54ba 1842 "imm");
e1ccc054 1843 env_btarget = tcg_global_mem_new(cpu_env,
68cee38a 1844 offsetof(CPUMBState, btarget),
4acb54ba 1845 "btarget");
e1ccc054 1846 env_btaken = tcg_global_mem_new(cpu_env,
68cee38a 1847 offsetof(CPUMBState, btaken),
4acb54ba 1848 "btaken");
e1ccc054 1849 env_res_addr = tcg_global_mem_new(cpu_env,
1850 offsetof(CPUMBState, res_addr),
1851 "res_addr");
e1ccc054 1852 env_res_val = tcg_global_mem_new(cpu_env,
1853 offsetof(CPUMBState, res_val),
1854 "res_val");
4acb54ba 1855 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
e1ccc054 1856 cpu_R[i] = tcg_global_mem_new(cpu_env,
68cee38a 1857 offsetof(CPUMBState, regs[i]),
1858 regnames[i]);
1859 }
1860 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
e1ccc054 1861 cpu_SR[i] = tcg_global_mem_new(cpu_env,
68cee38a 1862 offsetof(CPUMBState, sregs[i]),
1863 special_regnames[i]);
1864 }
1865}
1866
1867void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1868 target_ulong *data)
4acb54ba 1869{
bad729e2 1870 env->sregs[SR_PC] = data[0];
4acb54ba 1871}