/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
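
/* Timer callbacks: flag the corresponding interrupt source as pending
   and kick the CPU with a hard interrupt so it re-evaluates its state. */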
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
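
/* CPU creation/initialization helpers: cpu_s390x_create() only
   instantiates the QOM object; s390x_new_cpu() additionally sets the
   CPU id and realizes the object, propagating errors to the caller. */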
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
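
/* TLB fill handler: translate the faulting address with mmu_translate()
   and install the mapping with tlb_set_page(). Returns 0 on success and
   1 when a program exception was triggered instead. */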
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
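
/* Install a new PSW: resync the TCG condition code, recompute the PER
   watchpoints when the PER bit changes, and halt the CPU when the new
   mask has the wait bit set (requesting shutdown when no CPU is left
   running). */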
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
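
/* Return the PSW mask with the condition-code field resynchronized
   from the TCG cc_op state when TCG is in use. */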
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
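
/* Map the lowcore (the page at the prefix address) for interrupt
   delivery; failure to map the full LowCore aborts the CPU. */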
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
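
/* Deliver a restart interrupt: save the old PSW into the lowcore and
   load the restart-new PSW. */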
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
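
/* Deliver a program interrupt: determine the instruction length code,
   merge in a pending PER event, store the old PSW and interrupt data
   into the lowcore, then load the program-new PSW. */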
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
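
/* Deliver a supervisor-call interrupt through the lowcore. */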
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
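
/* Pop the most recently queued external interrupt and deliver it,
   storing its code and parameters into the lowcore. */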
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
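
/* Scan the per-ISC I/O interrupt queues and deliver the first pending
   interrupt whose subclass is enabled in CR6; INTERRUPT_IO is cleared
   only once no deliverable interrupt remains queued. */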
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }

}
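
/* Deliver a CRW-pending machine check: fill the lowcore save areas
   with the current register state before swapping PSWs. */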
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
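
/* Main interrupt dispatcher: select a pending interrupt class that the
   current PSW mask permits and route it to the matching delivery
   routine above. */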
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
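
/* cpu-exec hook: accept a hard interrupt only when external interrupts
   are enabled in the current PSW. */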
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
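
/* Rebuild the BP_CPU watchpoints that emulate PER storage-alteration
   events from the CR10/CR11 address range. */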
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all. */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if the storage-alteration event is not enabled. */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
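
/* Debug-exception hook: turn a hit storage watchpoint into a pending
   PER storage-alteration event and re-execute the code. */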
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just assume it is the
           current default ASC. This holds except when the MVCP and MVCS
           instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */