/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

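/*
 * "Destructive overlap" means the destination starts within the source
 * operand, so that a byte-by-byte ascending copy would overwrite source
 * bytes before they have been read (e.g. dest == src + 1 with len > 1
 * replicates the first source byte).
 */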
static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}

/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned. */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}

/* Load a value from memory according to its size. */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size. */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    char *haddr1;
    char *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O accesses
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers. Maybe, one day we can get rid of ld/st access - once we can
     * handle TLB_NOTDIRTY differently. We don't expect these special accesses
     * to trigger exceptions - only with TLB_NOTDIRTY on LAP pages might we
     * trigger a new MMU translation, and it is very unlikely that the mapping
     * changes in between and we would trigger a fault.
     */
    int mmu_idx;
} S390Access;

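/*
 * Split an access of up to 4096 bytes at the page boundary: for example, a
 * 256-byte access that starts 0x80 bytes before the end of a 4k page gets
 * size1 == 0x80 on the first page and size2 == 0x80 on the following page;
 * an access contained in a single page leaves size2 == 0.
 */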
static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                 MMUAccessType access_type, int mmu_idx,
                                 uintptr_t ra)
{
    S390Access access = {
        .vaddr1 = vaddr,
        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
        .mmu_idx = mmu_idx,
    };

    g_assert(size > 0 && size <= 4096);
    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
                                 mmu_idx, ra);

    if (unlikely(access.size1 != size)) {
        /* The access crosses page boundaries. */
        access.vaddr2 = wrap_address(env, vaddr + access.size1);
        access.size2 = size - access.size1;
        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
                                     access_type, mmu_idx, ra);
    }
    return access;
}

154/* Helper to handle memset on a single page. */
155static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
156 uint8_t byte, uint16_t size, int mmu_idx,
157 uintptr_t ra)
158{
159#ifdef CONFIG_USER_ONLY
160 g_assert(haddr);
161 memset(haddr, byte, size);
162#else
163 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
164 int i;
165
166 if (likely(haddr)) {
167 memset(haddr, byte, size);
168 } else {
169 /*
170 * Do a single access and test if we can then get access to the
171 * page. This is especially relevant to speed up TLB_NOTDIRTY.
172 */
173 g_assert(size > 0);
174 helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
175 haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
176 if (likely(haddr)) {
177 memset(haddr + 1, byte, size - 1);
fc89efe6 178 } else {
179 for (i = 1; i < size; i++) {
180 helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
181 }
fc89efe6 182 }
8ef7f78e 183 }
184#endif
185}
186
187static void access_memset(CPUS390XState *env, S390Access *desta,
188 uint8_t byte, uintptr_t ra)
189{
190
191 do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
192 desta->mmu_idx, ra);
193 if (likely(!desta->size2)) {
194 return;
195 }
196 do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
197 desta->mmu_idx, ra);
198}
199
b6c636f2
DH
200static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
201 int offset, int mmu_idx, uintptr_t ra)
3e7e5e0b 202{
b6c636f2
DH
203#ifdef CONFIG_USER_ONLY
204 return ldub_p(*haddr + offset);
205#else
206 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
207 uint8_t byte;
208
209 if (likely(*haddr)) {
210 return ldub_p(*haddr + offset);
211 }
212 /*
213 * Do a single access and test if we can then get access to the
214 * page. This is especially relevant to speed up TLB_NOTDIRTY.
215 */
216 byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
217 *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
218 return byte;
219#endif
220}
221
222static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
223 int offset, uintptr_t ra)
224{
225 if (offset < access->size1) {
226 return do_access_get_byte(env, access->vaddr1, &access->haddr1,
227 offset, access->mmu_idx, ra);
228 }
229 return do_access_get_byte(env, access->vaddr2, &access->haddr2,
230 offset - access->size1, access->mmu_idx, ra);
231}
232
233static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
234 int offset, uint8_t byte, int mmu_idx,
235 uintptr_t ra)
236{
237#ifdef CONFIG_USER_ONLY
238 stb_p(*haddr + offset, byte);
239#else
240 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
241
242 if (likely(*haddr)) {
243 stb_p(*haddr + offset, byte);
244 return;
245 }
246 /*
247 * Do a single access and test if we can then get access to the
248 * page. This is especially relevant to speed up TLB_NOTDIRTY.
249 */
250 helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
251 *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
252#endif
253}
254
255static void access_set_byte(CPUS390XState *env, S390Access *access,
256 int offset, uint8_t byte, uintptr_t ra)
257{
258 if (offset < access->size1) {
259 do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
260 access->mmu_idx, ra);
261 } else {
262 do_access_set_byte(env, access->vaddr2, &access->haddr2,
263 offset - access->size1, byte, access->mmu_idx, ra);
264 }
265}
266
/*
 * Move data with the same semantics as memmove(), as long as the ranges don't
 * overlap destructively (i.e. they don't overlap at all, or src > dest).
 * Behavior is undefined for destructive overlaps.
 */
271static void access_memmove(CPUS390XState *env, S390Access *desta,
272 S390Access *srca, uintptr_t ra)
273{
274 int diff;
275
276 g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);
277
278 /* Fallback to slow access in case we don't have access to all host pages */
279 if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
280 !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
281 int i;
282
283 for (i = 0; i < desta->size1 + desta->size2; i++) {
284 uint8_t byte = access_get_byte(env, srca, i, ra);
285
286 access_set_byte(env, desta, i, byte, ra);
287 }
288 return;
289 }
290
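    /*
     * The source and destination fragments may be split at different offsets
     * within their pages, so mapping the two source pieces onto the two
     * destination pieces can require up to three memmove() calls.
     */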
291 if (srca->size1 == desta->size1) {
292 memmove(desta->haddr1, srca->haddr1, srca->size1);
293 if (unlikely(srca->size2)) {
294 memmove(desta->haddr2, srca->haddr2, srca->size2);
295 }
296 } else if (srca->size1 < desta->size1) {
297 diff = desta->size1 - srca->size1;
298 memmove(desta->haddr1, srca->haddr1, srca->size1);
299 memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
300 if (likely(desta->size2)) {
301 memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
302 }
303 } else {
304 diff = srca->size1 - desta->size1;
305 memmove(desta->haddr1, srca->haddr1, desta->size1);
306 memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
307 if (likely(srca->size2)) {
308 memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
3e7e5e0b 309 }
310 }
311}
312
313static int mmu_idx_from_as(uint8_t as)
314{
315 switch (as) {
316 case AS_PRIMARY:
317 return MMU_PRIMARY_IDX;
318 case AS_SECONDARY:
319 return MMU_SECONDARY_IDX;
320 case AS_HOME:
321 return MMU_HOME_IDX;
322 default:
323 /* FIXME AS_ACCREG */
324 g_assert_not_reached();
325 }
326}
327
8ef7f78e 328/* and on array */
349d078a
RH
329static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
330 uint64_t src, uintptr_t ra)
8ef7f78e 331{
349d078a
RH
332 uint32_t i;
333 uint8_t c = 0;
8ef7f78e
BS
334
335 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
336 __func__, l, dest, src);
349d078a 337
8ef7f78e 338 for (i = 0; i <= l; i++) {
349d078a
RH
339 uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
340 x &= cpu_ldub_data_ra(env, dest + i, ra);
341 c |= x;
342 cpu_stb_data_ra(env, dest + i, x, ra);
8ef7f78e 343 }
349d078a
RH
344 return c != 0;
345}
346
347uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
348 uint64_t src)
349{
350 return do_helper_nc(env, l, dest, src, GETPC());
8ef7f78e
BS
351}
352
353/* xor on array */
354static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
355 uint64_t src, uintptr_t ra)
8ef7f78e 356{
70ebd9ce 357 const int mmu_idx = cpu_mmu_index(env, false);
a8821dd5 358 S390Access srca1, srca2, desta;
9c009e88
RH
359 uint32_t i;
360 uint8_t c = 0;
8ef7f78e
BS
361
362 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
363 __func__, l, dest, src);
364
365 /* XC always processes one more byte than specified - maximum is 256 */
366 l++;
367
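    /*
     * Probe both sources and the destination up front, so that any access
     * exception is raised before the first byte of the destination has been
     * modified (fault-safe handling).
     */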
368 srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
369 srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
370 desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
70ebd9ce 371
8ef7f78e 372 /* xor with itself is the same as memset(0) */
8ef7f78e 373 if (src == dest) {
70ebd9ce 374 access_memset(env, &desta, 0, ra);
8ef7f78e
BS
375 return 0;
376 }
8ef7f78e 377
a8821dd5
DH
378 for (i = 0; i < l; i++) {
379 const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
380 access_get_byte(env, &srca2, i, ra);
381
9c009e88 382 c |= x;
a8821dd5 383 access_set_byte(env, &desta, i, x, ra);
8ef7f78e 384 }
9c009e88
RH
385 return c != 0;
386}
387
388uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
389 uint64_t src)
390{
391 return do_helper_xc(env, l, dest, src, GETPC());
8ef7f78e
BS
392}
393
394/* or on array */
6fc2606e
RH
395static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
396 uint64_t src, uintptr_t ra)
8ef7f78e 397{
8c4a7320
DH
398 const int mmu_idx = cpu_mmu_index(env, false);
399 S390Access srca1, srca2, desta;
6fc2606e
RH
400 uint32_t i;
401 uint8_t c = 0;
8ef7f78e
BS
402
403 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
404 __func__, l, dest, src);
6fc2606e 405
8c4a7320
DH
406 /* OC always processes one more byte than specified - maximum is 256 */
407 l++;
408
409 srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
410 srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
411 desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
412 for (i = 0; i < l; i++) {
413 const uint8_t x = access_get_byte(env, &srca1, i, ra) |
414 access_get_byte(env, &srca2, i, ra);
415
6fc2606e 416 c |= x;
8c4a7320 417 access_set_byte(env, &desta, i, x, ra);
8ef7f78e 418 }
6fc2606e
RH
419 return c != 0;
420}
421
422uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
423 uint64_t src)
424{
425 return do_helper_oc(env, l, dest, src, GETPC());
8ef7f78e
BS
426}
427
428/* memmove */
d376f123
RH
429static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
430 uint64_t src, uintptr_t ra)
8ef7f78e 431{
70ebd9ce 432 const int mmu_idx = cpu_mmu_index(env, false);
b6c636f2 433 S390Access srca, desta;
d3696812 434 uint32_t i;
8ef7f78e
BS
435
436 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
437 __func__, l, dest, src);
438
d573ffde
DH
439 /* MVC always copies one more byte than specified - maximum is 256 */
440 l++;
441
b6c636f2 442 srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
70ebd9ce
DH
443 desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
444
445 /*
446 * "When the operands overlap, the result is obtained as if the operands
447 * were processed one byte at a time". Only non-destructive overlaps
448 * behave like memmove().
449 */
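    /*
     * dest == src + 1 is the classic MVC idiom for propagating the byte at
     * src through the destination; with the byte-at-a-time semantics above
     * it is equivalent to a memset() with the first source byte.
     */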
d3696812 450 if (dest == src + 1) {
b6c636f2 451 access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
b7dd1f7f 452 } else if (!is_destructive_overlap(env, dest, src, l)) {
b6c636f2 453 access_memmove(env, &desta, &srca, ra);
d376f123 454 } else {
d573ffde 455 for (i = 0; i < l; i++) {
b7809f36
DH
456 uint8_t byte = access_get_byte(env, &srca, i, ra);
457
458 access_set_byte(env, &desta, i, byte, ra);
d376f123 459 }
8ef7f78e 460 }
8ef7f78e 461
d376f123 462 return env->cc_op;
8ef7f78e
BS
463}
464
d3696812
RH
465void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
466{
467 do_helper_mvc(env, l, dest, src, GETPC());
468}
469
6c9deca8
AJ
470/* move inverse */
471void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
472{
473 uintptr_t ra = GETPC();
474 int i;
475
476 for (i = 0; i <= l; i++) {
477 uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
478 cpu_stb_data_ra(env, dest + i, v, ra);
479 }
480}
481
256dab6f
AJ
482/* move numerics */
483void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
484{
485 uintptr_t ra = GETPC();
486 int i;
487
488 for (i = 0; i <= l; i++) {
489 uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
490 v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
491 cpu_stb_data_ra(env, dest + i, v, ra);
492 }
493}
494
495/* move with offset */
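/*
 * The source digits are placed to the left of the rightmost four bits of the
 * destination: the low nibble of the last destination byte (typically the
 * packed-decimal sign) is preserved, and everything else is filled with the
 * source nibbles shifted left by one digit position.
 */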
496void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
497{
498 uintptr_t ra = GETPC();
499 int len_dest = l >> 4;
500 int len_src = l & 0xf;
501 uint8_t byte_dest, byte_src;
502 int i;
503
504 src += len_src;
505 dest += len_dest;
506
507 /* Handle rightmost byte */
508 byte_src = cpu_ldub_data_ra(env, src, ra);
509 byte_dest = cpu_ldub_data_ra(env, dest, ra);
510 byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
511 cpu_stb_data_ra(env, dest, byte_dest, ra);
512
513 /* Process remaining bytes from right to left */
514 for (i = 1; i <= len_dest; i++) {
515 byte_dest = byte_src >> 4;
516 if (len_src - i >= 0) {
517 byte_src = cpu_ldub_data_ra(env, src - i, ra);
518 } else {
519 byte_src = 0;
520 }
521 byte_dest |= byte_src << 4;
522 cpu_stb_data_ra(env, dest - i, byte_dest, ra);
523 }
524}
525
01f8db88
AJ
526/* move zones */
527void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
528{
529 uintptr_t ra = GETPC();
530 int i;
531
532 for (i = 0; i <= l; i++) {
533 uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
534 b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
535 cpu_stb_data_ra(env, dest + i, b, ra);
536 }
537}
538
8ef7f78e 539/* compare unsigned byte arrays */
540static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
541 uint64_t s2, uintptr_t ra)
8ef7f78e 542{
e79f56f4
RH
543 uint32_t i;
544 uint32_t cc = 0;
8ef7f78e
BS
545
546 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
547 __func__, l, s1, s2);
e79f56f4 548
8ef7f78e 549 for (i = 0; i <= l; i++) {
e79f56f4
RH
550 uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
551 uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
8ef7f78e
BS
552 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
553 if (x < y) {
554 cc = 1;
e79f56f4 555 break;
8ef7f78e
BS
556 } else if (x > y) {
557 cc = 2;
e79f56f4 558 break;
8ef7f78e
BS
559 }
560 }
e79f56f4 561
8ef7f78e
BS
562 HELPER_LOG("\n");
563 return cc;
564}
565
e79f56f4
RH
566uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
567{
568 return do_helper_clc(env, l, s1, s2, GETPC());
569}
570
8ef7f78e 571/* compare logical under mask */
572uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
573 uint64_t addr)
8ef7f78e 574{
868b5cbd
RH
575 uintptr_t ra = GETPC();
576 uint32_t cc = 0;
8ef7f78e
BS
577
578 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
579 mask, addr);
868b5cbd 580
8ef7f78e
BS
581 while (mask) {
582 if (mask & 8) {
868b5cbd
RH
583 uint8_t d = cpu_ldub_data_ra(env, addr, ra);
584 uint8_t r = extract32(r1, 24, 8);
8ef7f78e
BS
585 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
586 addr);
587 if (r < d) {
588 cc = 1;
589 break;
590 } else if (r > d) {
591 cc = 2;
592 break;
593 }
594 addr++;
595 }
596 mask = (mask << 1) & 0xf;
597 r1 <<= 8;
598 }
868b5cbd 599
8ef7f78e
BS
600 HELPER_LOG("\n");
601 return cc;
602}
603
a5c3cedd 604static inline uint64_t get_address(CPUS390XState *env, int reg)
8ef7f78e 605{
a5c3cedd 606 return wrap_address(env, env->regs[reg]);
8ef7f78e
BS
607}
608
609/*
610 * Store the address to the given register, zeroing out unused leftmost
611 * bits in bit positions 32-63 (24-bit and 31-bit mode only).
612 */
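/* e.g. in 31-bit mode, an address of 0x80001234 is stored as 0x00001234 in
   bits 32-63 of the register, while bits 0-31 are left untouched. */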
613static inline void set_address_zero(CPUS390XState *env, int reg,
614 uint64_t address)
615{
616 if (env->psw.mask & PSW_MASK_64) {
617 env->regs[reg] = address;
618 } else {
619 if (!(env->psw.mask & PSW_MASK_32)) {
620 address &= 0x00ffffff;
621 } else {
622 address &= 0x7fffffff;
623 }
624 env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
625 }
626}
627
a65047af
AJ
628static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
629{
630 if (env->psw.mask & PSW_MASK_64) {
631 /* 64-Bit mode */
632 env->regs[reg] = address;
633 } else {
634 if (!(env->psw.mask & PSW_MASK_32)) {
635 /* 24-Bit mode. According to the PoO it is implementation
636 dependent if bits 32-39 remain unchanged or are set to
637 zeros. Choose the former so that the function can also be
638 used for TRT. */
639 env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
640 } else {
641 /* 31-Bit mode. According to the PoO it is implementation
642 dependent if bit 32 remains unchanged or is set to zero.
643 Choose the latter so that the function can also be used for
644 TRT. */
645 address &= 0x7fffffff;
646 env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
647 }
648 }
649}
650
a7627565
DH
651static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
652{
653 if (!(env->psw.mask & PSW_MASK_64)) {
654 return (uint32_t)length;
655 }
656 return length;
657}
658
659static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
29a58fd8
AJ
660{
661 if (!(env->psw.mask & PSW_MASK_64)) {
662 /* 24-Bit and 31-Bit mode */
663 length &= 0x7fffffff;
664 }
665 return length;
666}
667
668static inline uint64_t get_length(CPUS390XState *env, int reg)
669{
a7627565 670 return wrap_length31(env, env->regs[reg]);
29a58fd8
AJ
671}
672
673static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
674{
675 if (env->psw.mask & PSW_MASK_64) {
676 /* 64-Bit mode */
677 env->regs[reg] = length;
678 } else {
679 /* 24-Bit and 31-Bit mode */
680 env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
681 }
682}
683
8ef7f78e 684/* search string (c is byte to search, r2 is string, r1 end of string) */
7591db78 685void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
8ef7f78e 686{
4663e822 687 uintptr_t ra = GETPC();
7591db78 688 uint64_t end, str;
4600c994 689 uint32_t len;
7591db78 690 uint8_t v, c = env->regs[0];
8ef7f78e 691
7591db78
RH
692 /* Bits 32-55 must contain all 0. */
693 if (env->regs[0] & 0xffffff00u) {
8d2f850a 694 s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
7591db78 695 }
8ef7f78e 696
7591db78
RH
697 str = get_address(env, r2);
698 end = get_address(env, r1);
4600c994
RH
699
700 /* Lest we fail to service interrupts in a timely manner, limit the
e03ba136 701 amount of work we're willing to do. For now, let's cap at 8k. */
4600c994
RH
702 for (len = 0; len < 0x2000; ++len) {
703 if (str + len == end) {
704 /* Character not found. R1 & R2 are unmodified. */
705 env->cc_op = 2;
7591db78 706 return;
4600c994 707 }
4663e822 708 v = cpu_ldub_data_ra(env, str + len, ra);
4600c994
RH
709 if (v == c) {
710 /* Character found. Set R1 to the location; R2 is unmodified. */
711 env->cc_op = 1;
7591db78
RH
712 set_address(env, r1, str + len);
713 return;
8ef7f78e
BS
714 }
715 }
716
be7acb58
RH
717 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
718 env->cc_op = 3;
719 set_address(env, r2, str + len);
720}
721
722void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
723{
724 uintptr_t ra = GETPC();
725 uint32_t len;
726 uint16_t v, c = env->regs[0];
727 uint64_t end, str, adj_end;
728
729 /* Bits 32-47 of R0 must be zero. */
730 if (env->regs[0] & 0xffff0000u) {
8d2f850a 731 s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
be7acb58
RH
732 }
733
734 str = get_address(env, r2);
735 end = get_address(env, r1);
736
737 /* If the LSB of the two addresses differ, use one extra byte. */
738 adj_end = end + ((str ^ end) & 1);
739
740 /* Lest we fail to service interrupts in a timely manner, limit the
741 amount of work we're willing to do. For now, let's cap at 8k. */
742 for (len = 0; len < 0x2000; len += 2) {
743 if (str + len == adj_end) {
744 /* End of input found. */
745 env->cc_op = 2;
746 return;
747 }
748 v = cpu_lduw_data_ra(env, str + len, ra);
749 if (v == c) {
750 /* Character found. Set R1 to the location; R2 is unmodified. */
751 env->cc_op = 1;
752 set_address(env, r1, str + len);
753 return;
754 }
755 }
756
4600c994 757 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
4600c994 758 env->cc_op = 3;
7591db78 759 set_address(env, r2, str + len);
8ef7f78e
BS
760}
761
762/* unsigned string compare (c is string terminator) */
aa31bf60 763uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
8ef7f78e 764{
3cc8ca3d 765 uintptr_t ra = GETPC();
aa31bf60 766 uint32_t len;
8ef7f78e
BS
767
768 c = c & 0xff;
a5c3cedd
AJ
769 s1 = wrap_address(env, s1);
770 s2 = wrap_address(env, s2);
aa31bf60
RH
771
772 /* Lest we fail to service interrupts in a timely manner, limit the
e03ba136 773 amount of work we're willing to do. For now, let's cap at 8k. */
aa31bf60 774 for (len = 0; len < 0x2000; ++len) {
3cc8ca3d
RH
775 uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
776 uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
aa31bf60
RH
777 if (v1 == v2) {
778 if (v1 == c) {
779 /* Equal. CC=0, and don't advance the registers. */
780 env->cc_op = 0;
781 env->retxl = s2;
782 return s1;
783 }
784 } else {
785 /* Unequal. CC={1,2}, and advance the registers. Note that
786 the terminator need not be zero, but the string that contains
787 the terminator is by definition "low". */
788 env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
789 env->retxl = s2 + len;
790 return s1 + len;
8ef7f78e 791 }
8ef7f78e
BS
792 }
793
aa31bf60
RH
794 /* CPU-determined bytes equal; advance the registers. */
795 env->cc_op = 3;
796 env->retxl = s2 + len;
797 return s1 + len;
8ef7f78e
BS
798}
799
800/* move page */
7cf96fca 801uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
8ef7f78e 802{
b6c636f2 803 const int mmu_idx = cpu_mmu_index(env, false);
bf349f1a
DH
804 const bool f = extract64(r0, 11, 1);
805 const bool s = extract64(r0, 10, 1);
b6c636f2
DH
806 uintptr_t ra = GETPC();
807 S390Access srca, desta;
bf349f1a
DH
808
809 if ((f && s) || extract64(r0, 12, 4)) {
810 s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
811 }
812
a3910396
DH
813 r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
814 r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);
815
    /*
     * TODO:
     * - Access key handling
     * - CC-option with suppression of page-translation exceptions
     * - Store r1/r2 register identifiers at real location 162
     */
b6c636f2
DH
822 srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
823 ra);
824 desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
825 ra);
826 access_memmove(env, &desta, &srca, ra);
7cf96fca 827 return 0; /* data moved */
8ef7f78e
BS
828}
829
2bb525e2
DH
830/* string copy */
831uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
8ef7f78e 832{
2bb525e2
DH
833 const uint64_t d = get_address(env, r1);
834 const uint64_t s = get_address(env, r2);
835 const uint8_t c = env->regs[0];
08a4cb79 836 uintptr_t ra = GETPC();
aa31bf60 837 uint32_t len;
8ef7f78e 838
2bb525e2 839 if (env->regs[0] & 0xffffff00ull) {
087b8193
DH
840 s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
841 }
aa31bf60
RH
842
843 /* Lest we fail to service interrupts in a timely manner, limit the
e03ba136 844 amount of work we're willing to do. For now, let's cap at 8k. */
aa31bf60 845 for (len = 0; len < 0x2000; ++len) {
08a4cb79
RH
846 uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
847 cpu_stb_data_ra(env, d + len, v, ra);
8ef7f78e 848 if (v == c) {
2bb525e2
DH
849 set_address_zero(env, r1, d + len);
850 return 1;
8ef7f78e 851 }
8ef7f78e 852 }
2bb525e2
DH
853 set_address_zero(env, r1, d + len);
854 set_address_zero(env, r2, s + len);
855 return 3;
8ef7f78e
BS
856}
857
8ef7f78e 858/* load access registers r1 to r3 from memory at a2 */
19b0516f 859void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 860{
9393c020 861 uintptr_t ra = GETPC();
8ef7f78e
BS
862 int i;
863
21fc97c5
DH
864 if (a2 & 0x3) {
865 /* we either came here by lam or lamy, which have different lengths */
866 s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
867 }
868
8ef7f78e 869 for (i = r1;; i = (i + 1) % 16) {
9393c020 870 env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
8ef7f78e
BS
871 a2 += 4;
872
873 if (i == r3) {
874 break;
875 }
876 }
877}
878
879/* store access registers r1 to r3 in memory at a2 */
19b0516f 880void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 881{
44cf6c2e 882 uintptr_t ra = GETPC();
8ef7f78e
BS
883 int i;
884
21fc97c5
DH
885 if (a2 & 0x3) {
886 s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
887 }
888
8ef7f78e 889 for (i = r1;; i = (i + 1) % 16) {
44cf6c2e 890 cpu_stl_data_ra(env, a2, env->aregs[i], ra);
8ef7f78e
BS
891 a2 += 4;
892
893 if (i == r3) {
894 break;
895 }
896 }
897}
898
d3327121
AJ
899/* move long helper */
900static inline uint32_t do_mvcl(CPUS390XState *env,
901 uint64_t *dest, uint64_t *destlen,
902 uint64_t *src, uint64_t *srclen,
16f2e4b8 903 uint16_t pad, int wordsize, uintptr_t ra)
8ef7f78e 904{
70ebd9ce 905 const int mmu_idx = cpu_mmu_index(env, false);
86678418 906 int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
b6c636f2 907 S390Access srca, desta;
86678418 908 int i, cc;
8ef7f78e 909
d3327121 910 if (*destlen == *srclen) {
8ef7f78e 911 cc = 0;
d3327121 912 } else if (*destlen < *srclen) {
8ef7f78e
BS
913 cc = 1;
914 } else {
915 cc = 2;
916 }
917
86678418
DH
918 if (!*destlen) {
919 return cc;
920 }
8ef7f78e 921
86678418
DH
    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
     */
926 if (*srclen) {
927 /* Copy the src array */
928 len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
929 *destlen -= len;
930 *srclen -= len;
b6c636f2
DH
931 srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
932 desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
933 access_memmove(env, &desta, &srca, ra);
86678418
DH
934 *src = wrap_address(env, *src + len);
935 *dest = wrap_address(env, *dest + len);
936 } else if (wordsize == 1) {
937 /* Pad the remaining area */
938 *destlen -= len;
70ebd9ce
DH
939 desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
940 access_memset(env, &desta, pad, ra);
86678418 941 *dest = wrap_address(env, *dest + len);
16f2e4b8 942 } else {
b8e7b2fe
DH
943 desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
944
86678418
DH
945 /* The remaining length selects the padding byte. */
946 for (i = 0; i < len; (*destlen)--, i++) {
947 if (*destlen & 1) {
b8e7b2fe 948 access_set_byte(env, &desta, i, pad, ra);
86678418 949 } else {
b8e7b2fe 950 access_set_byte(env, &desta, i, pad >> 8, ra);
86678418 951 }
16f2e4b8 952 }
b8e7b2fe 953 *dest = wrap_address(env, *dest + len);
16f2e4b8 954 }
8ef7f78e 955
86678418 956 return *destlen ? 3 : cc;
d3327121
AJ
957}
958
959/* move long */
960uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
961{
70ebd9ce 962 const int mmu_idx = cpu_mmu_index(env, false);
d3327121
AJ
963 uintptr_t ra = GETPC();
964 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
965 uint64_t dest = get_address(env, r1);
966 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
967 uint64_t src = get_address(env, r2);
968 uint8_t pad = env->regs[r2 + 1] >> 24;
b6c636f2 969 S390Access srca, desta;
f1c2e27c 970 uint32_t cc, cur_len;
d3327121 971
fbc17598
DH
972 if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
973 cc = 3;
f1c2e27c
DH
974 } else if (srclen == destlen) {
975 cc = 0;
976 } else if (destlen < srclen) {
977 cc = 1;
fbc17598 978 } else {
f1c2e27c
DH
979 cc = 2;
980 }
981
982 /* We might have to zero-out some bits even if there was no action. */
983 if (unlikely(!destlen || cc == 3)) {
984 set_address_zero(env, r2, src);
985 set_address_zero(env, r1, dest);
986 return cc;
987 } else if (!srclen) {
988 set_address_zero(env, r2, src);
fbc17598 989 }
8ef7f78e 990
f1c2e27c
DH
    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
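    /*
     * The addresses and lengths in the registers are updated after every
     * chunk, so a fault raised while preparing the next chunk leaves the
     * registers describing exactly the work that remains.
     */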
995 while (destlen) {
996 cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
997 if (!srclen) {
70ebd9ce
DH
998 desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
999 ra);
1000 access_memset(env, &desta, pad, ra);
f1c2e27c
DH
1001 } else {
1002 cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
8ef7f78e 1003
b6c636f2
DH
1004 srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
1005 ra);
1006 desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
1007 ra);
1008 access_memmove(env, &desta, &srca, ra);
f1c2e27c
DH
1009 src = wrap_address(env, src + cur_len);
1010 srclen -= cur_len;
1011 env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
1012 set_address_zero(env, r2, src);
1013 }
1014 dest = wrap_address(env, dest + cur_len);
1015 destlen -= cur_len;
1016 env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
1017 set_address_zero(env, r1, dest);
1018
1019 /* TODO: Deliver interrupts. */
1020 }
8ef7f78e
BS
1021 return cc;
1022}
1023
d3327121 1024/* move long extended */
19b0516f
BS
1025uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
1026 uint32_t r3)
8ef7f78e 1027{
453e4c07 1028 uintptr_t ra = GETPC();
29a58fd8 1029 uint64_t destlen = get_length(env, r1 + 1);
a5c3cedd 1030 uint64_t dest = get_address(env, r1);
29a58fd8 1031 uint64_t srclen = get_length(env, r3 + 1);
a5c3cedd 1032 uint64_t src = get_address(env, r3);
d3327121 1033 uint8_t pad = a2;
8ef7f78e
BS
1034 uint32_t cc;
1035
16f2e4b8
AJ
1036 cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
1037
1038 set_length(env, r1 + 1, destlen);
1039 set_length(env, r3 + 1, srclen);
1040 set_address(env, r1, dest);
1041 set_address(env, r3, src);
1042
1043 return cc;
1044}
1045
1046/* move long unicode */
1047uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
1048 uint32_t r3)
1049{
1050 uintptr_t ra = GETPC();
1051 uint64_t destlen = get_length(env, r1 + 1);
1052 uint64_t dest = get_address(env, r1);
1053 uint64_t srclen = get_length(env, r3 + 1);
1054 uint64_t src = get_address(env, r3);
1055 uint16_t pad = a2;
1056 uint32_t cc;
1057
1058 cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);
8ef7f78e 1059
d3327121
AJ
1060 set_length(env, r1 + 1, destlen);
1061 set_length(env, r3 + 1, srclen);
a65047af
AJ
1062 set_address(env, r1, dest);
1063 set_address(env, r3, src);
8ef7f78e
BS
1064
1065 return cc;
1066}
1067
5c2b48a8
AJ
1068/* compare logical long helper */
1069static inline uint32_t do_clcl(CPUS390XState *env,
1070 uint64_t *src1, uint64_t *src1len,
1071 uint64_t *src3, uint64_t *src3len,
31006af3
AJ
1072 uint16_t pad, uint64_t limit,
1073 int wordsize, uintptr_t ra)
5c2b48a8
AJ
1074{
1075 uint64_t len = MAX(*src1len, *src3len);
8ef7f78e
BS
1076 uint32_t cc = 0;
1077
31006af3
AJ
1078 check_alignment(env, *src1len | *src3len, wordsize, ra);
1079
84aa07f1 1080 if (!len) {
8ef7f78e
BS
1081 return cc;
1082 }
1083
84aa07f1 1084 /* Lest we fail to service interrupts in a timely manner, limit the
5c2b48a8
AJ
1085 amount of work we're willing to do. */
1086 if (len > limit) {
1087 len = limit;
84aa07f1 1088 cc = 3;
8ef7f78e
BS
1089 }
1090
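    /* An exhausted operand compares as if it were extended with the pad
       character. */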
31006af3
AJ
1091 for (; len; len -= wordsize) {
1092 uint16_t v1 = pad;
1093 uint16_t v3 = pad;
84aa07f1 1094
5c2b48a8 1095 if (*src1len) {
31006af3 1096 v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
84aa07f1 1097 }
5c2b48a8 1098 if (*src3len) {
31006af3 1099 v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
84aa07f1
AJ
1100 }
1101
1102 if (v1 != v3) {
1103 cc = (v1 < v3) ? 1 : 2;
8ef7f78e
BS
1104 break;
1105 }
84aa07f1 1106
5c2b48a8 1107 if (*src1len) {
31006af3
AJ
1108 *src1 += wordsize;
1109 *src1len -= wordsize;
84aa07f1 1110 }
5c2b48a8 1111 if (*src3len) {
31006af3
AJ
1112 *src3 += wordsize;
1113 *src3len -= wordsize;
84aa07f1 1114 }
8ef7f78e
BS
1115 }
1116
5c2b48a8
AJ
1117 return cc;
1118}
1119
1120
1121/* compare logical long */
1122uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
1123{
1124 uintptr_t ra = GETPC();
1125 uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
1126 uint64_t src1 = get_address(env, r1);
1127 uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
1128 uint64_t src3 = get_address(env, r2);
1129 uint8_t pad = env->regs[r2 + 1] >> 24;
1130 uint32_t cc;
1131
31006af3 1132 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);
5c2b48a8
AJ
1133
1134 env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
1135 env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
1136 set_address(env, r1, src1);
1137 set_address(env, r2, src3);
1138
1139 return cc;
1140}
1141
1142/* compare logical long extended memcompare insn with padding */
1143uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
1144 uint32_t r3)
1145{
1146 uintptr_t ra = GETPC();
1147 uint64_t src1len = get_length(env, r1 + 1);
1148 uint64_t src1 = get_address(env, r1);
1149 uint64_t src3len = get_length(env, r3 + 1);
1150 uint64_t src3 = get_address(env, r3);
1151 uint8_t pad = a2;
1152 uint32_t cc;
1153
31006af3
AJ
1154 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);
1155
1156 set_length(env, r1 + 1, src1len);
1157 set_length(env, r3 + 1, src3len);
1158 set_address(env, r1, src1);
1159 set_address(env, r3, src3);
1160
1161 return cc;
1162}
1163
1164/* compare logical long unicode memcompare insn with padding */
1165uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
1166 uint32_t r3)
1167{
1168 uintptr_t ra = GETPC();
1169 uint64_t src1len = get_length(env, r1 + 1);
1170 uint64_t src1 = get_address(env, r1);
1171 uint64_t src3len = get_length(env, r3 + 1);
1172 uint64_t src3 = get_address(env, r3);
1173 uint16_t pad = a2;
1174 uint32_t cc = 0;
1175
1176 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);
5c2b48a8 1177
84aa07f1
AJ
1178 set_length(env, r1 + 1, src1len);
1179 set_length(env, r3 + 1, src3len);
1180 set_address(env, r1, src1);
1181 set_address(env, r3, src3);
8ef7f78e
BS
1182
1183 return cc;
1184}
1185
1186/* checksum */
374724f9
RH
1187uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
1188 uint64_t src, uint64_t src_len)
8ef7f78e 1189{
498644e9 1190 uintptr_t ra = GETPC();
374724f9
RH
1191 uint64_t max_len, len;
1192 uint64_t cksm = (uint32_t)r1;
8ef7f78e 1193
374724f9 1194 /* Lest we fail to service interrupts in a timely manner, limit the
e03ba136 1195 amount of work we're willing to do. For now, let's cap at 8k. */
374724f9 1196 max_len = (src_len > 0x2000 ? 0x2000 : src_len);
8ef7f78e 1197
374724f9
RH
1198 /* Process full words as available. */
1199 for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
498644e9 1200 cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
8ef7f78e
BS
1201 }
1202
374724f9 1203 switch (max_len - len) {
8ef7f78e 1204 case 1:
498644e9 1205 cksm += cpu_ldub_data_ra(env, src, ra) << 24;
374724f9 1206 len += 1;
8ef7f78e
BS
1207 break;
1208 case 2:
498644e9 1209 cksm += cpu_lduw_data_ra(env, src, ra) << 16;
374724f9 1210 len += 2;
8ef7f78e
BS
1211 break;
1212 case 3:
498644e9
RH
1213 cksm += cpu_lduw_data_ra(env, src, ra) << 16;
1214 cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
374724f9 1215 len += 3;
8ef7f78e
BS
1216 break;
1217 }
1218
374724f9
RH
1219 /* Fold the carry from the checksum. Note that we can see carry-out
1220 during folding more than once (but probably not more than twice). */
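    /* End-around carry, e.g. 0x100000003 folds to 0x00000004. */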
1221 while (cksm > 0xffffffffull) {
1222 cksm = (uint32_t)cksm + (cksm >> 32);
1223 }
1224
1225 /* Indicate whether or not we've processed everything. */
1226 env->cc_op = (len == src_len ? 0 : 3);
8ef7f78e 1227
374724f9
RH
1228 /* Return both cksm and processed length. */
1229 env->retxl = cksm;
1230 return len;
8ef7f78e
BS
1231}
1232
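/*
 * PACK converts a zoned decimal operand into packed decimal, working from
 * right to left: the rightmost byte only has its nibbles swapped (moving the
 * zoned sign into the packed sign position), then the numeric nibbles of the
 * remaining bytes are packed two per byte; e.g. zoned F1 F2 C3 (+123) becomes
 * packed 12 3C.
 */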
1233void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
1234{
1235 uintptr_t ra = GETPC();
1236 int len_dest = len >> 4;
1237 int len_src = len & 0xf;
1238 uint8_t b;
1239
1240 dest += len_dest;
1241 src += len_src;
1242
1243 /* last byte is special, it only flips the nibbles */
1244 b = cpu_ldub_data_ra(env, src, ra);
1245 cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
1246 src--;
1247 len_src--;
1248
1249 /* now pack every value */
3cea0927 1250 while (len_dest > 0) {
76c57490
AJ
1251 b = 0;
1252
3cea0927 1253 if (len_src >= 0) {
76c57490
AJ
1254 b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
1255 src--;
1256 len_src--;
1257 }
3cea0927 1258 if (len_src >= 0) {
76c57490
AJ
1259 b |= cpu_ldub_data_ra(env, src, ra) << 4;
1260 src--;
1261 len_src--;
1262 }
1263
1264 len_dest--;
1265 dest--;
1266 cpu_stb_data_ra(env, dest, b, ra);
1267 }
1268}
1269
4e256bef
AJ
1270static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
1271 uint32_t srclen, int ssize, uintptr_t ra)
3bd3d6d3 1272{
3bd3d6d3
AJ
1273 int i;
1274 /* The destination operand is always 16 bytes long. */
1275 const int destlen = 16;
1276
1277 /* The operands are processed from right to left. */
1278 src += srclen - 1;
1279 dest += destlen - 1;
1280
1281 for (i = 0; i < destlen; i++) {
1282 uint8_t b = 0;
1283
1284 /* Start with a positive sign */
1285 if (i == 0) {
1286 b = 0xc;
4e256bef 1287 } else if (srclen > ssize) {
3bd3d6d3 1288 b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
4e256bef
AJ
1289 src -= ssize;
1290 srclen -= ssize;
3bd3d6d3
AJ
1291 }
1292
4e256bef 1293 if (srclen > ssize) {
3bd3d6d3 1294 b |= cpu_ldub_data_ra(env, src, ra) << 4;
4e256bef
AJ
1295 src -= ssize;
1296 srclen -= ssize;
3bd3d6d3
AJ
1297 }
1298
1299 cpu_stb_data_ra(env, dest, b, ra);
1300 dest--;
1301 }
1302}
1303
4e256bef
AJ
1304
1305void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
1306 uint32_t srclen)
1307{
1308 do_pkau(env, dest, src, srclen, 1, GETPC());
1309}
1310
1311void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
1312 uint32_t srclen)
1313{
1314 do_pkau(env, dest, src, srclen, 2, GETPC());
1315}
1316
19b0516f
BS
1317void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
1318 uint64_t src)
8ef7f78e 1319{
84e1b98b 1320 uintptr_t ra = GETPC();
8ef7f78e
BS
1321 int len_dest = len >> 4;
1322 int len_src = len & 0xf;
1323 uint8_t b;
1324 int second_nibble = 0;
1325
1326 dest += len_dest;
1327 src += len_src;
1328
1329 /* last byte is special, it only flips the nibbles */
84e1b98b
RH
1330 b = cpu_ldub_data_ra(env, src, ra);
1331 cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
8ef7f78e
BS
1332 src--;
1333 len_src--;
1334
1335 /* now pad every nibble with 0xf0 */
1336
1337 while (len_dest > 0) {
1338 uint8_t cur_byte = 0;
1339
1340 if (len_src > 0) {
84e1b98b 1341 cur_byte = cpu_ldub_data_ra(env, src, ra);
8ef7f78e
BS
1342 }
1343
1344 len_dest--;
1345 dest--;
1346
1347 /* only advance one nibble at a time */
1348 if (second_nibble) {
1349 cur_byte >>= 4;
1350 len_src--;
1351 src--;
1352 }
1353 second_nibble = !second_nibble;
1354
1355 /* digit */
1356 cur_byte = (cur_byte & 0xf);
1357 /* zone bits */
1358 cur_byte |= 0xf0;
1359
84e1b98b 1360 cpu_stb_data_ra(env, dest, cur_byte, ra);
8ef7f78e
BS
1361 }
1362}
1363
15417787
AJ
1364static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
1365 uint32_t destlen, int dsize, uint64_t src,
1366 uintptr_t ra)
1a35f08a 1367{
1a35f08a
AJ
1368 int i;
1369 uint32_t cc;
1370 uint8_t b;
1371 /* The source operand is always 16 bytes long. */
1372 const int srclen = 16;
1373
1374 /* The operands are processed from right to left. */
1375 src += srclen - 1;
15417787 1376 dest += destlen - dsize;
1a35f08a
AJ
1377
1378 /* Check for the sign. */
1379 b = cpu_ldub_data_ra(env, src, ra);
1380 src--;
1381 switch (b & 0xf) {
1382 case 0xa:
1383 case 0xc:
1384 case 0xe ... 0xf:
1385 cc = 0; /* plus */
1386 break;
1387 case 0xb:
1388 case 0xd:
1389 cc = 1; /* minus */
1390 break;
1391 default:
1392 case 0x0 ... 0x9:
1393 cc = 3; /* invalid */
1394 break;
1395 }
1396
1397 /* Now pad every nibble with 0x30, advancing one nibble at a time. */
15417787
AJ
1398 for (i = 0; i < destlen; i += dsize) {
1399 if (i == (31 * dsize)) {
1400 /* If length is 32/64 bytes, the leftmost byte is 0. */
1a35f08a 1401 b = 0;
15417787 1402 } else if (i % (2 * dsize)) {
1a35f08a
AJ
1403 b = cpu_ldub_data_ra(env, src, ra);
1404 src--;
1405 } else {
1406 b >>= 4;
1407 }
15417787
AJ
1408 cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
1409 dest -= dsize;
1a35f08a
AJ
1410 }
1411
1412 return cc;
1413}
1414
15417787
AJ
1415uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
1416 uint64_t src)
1417{
1418 return do_unpkau(env, dest, destlen, 1, src, GETPC());
1419}
1420
1421uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
1422 uint64_t src)
1423{
1424 return do_unpkau(env, dest, destlen, 2, src, GETPC());
1425}
1426
5d4a655a
AJ
1427uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
1428{
1429 uintptr_t ra = GETPC();
1430 uint32_t cc = 0;
1431 int i;
1432
1433 for (i = 0; i < destlen; i++) {
1434 uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
1435 /* digit */
1436 cc |= (b & 0xf0) > 0x90 ? 2 : 0;
1437
1438 if (i == (destlen - 1)) {
1439 /* sign */
1440 cc |= (b & 0xf) < 0xa ? 1 : 0;
1441 } else {
1442 /* digit */
1443 cc |= (b & 0xf) > 0x9 ? 2 : 0;
1444 }
1445 }
1446
1447 return cc;
1448}
1449
d376f123
RH
1450static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
1451 uint64_t trans, uintptr_t ra)
8ef7f78e 1452{
981a8ea0 1453 uint32_t i;
8ef7f78e
BS
1454
1455 for (i = 0; i <= len; i++) {
981a8ea0
RH
1456 uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
1457 uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
1458 cpu_stb_data_ra(env, array + i, new_byte, ra);
8ef7f78e 1459 }
d376f123
RH
1460
1461 return env->cc_op;
8ef7f78e
BS
1462}
1463
981a8ea0
RH
1464void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
1465 uint64_t trans)
1466{
d376f123 1467 do_helper_tr(env, len, array, trans, GETPC());
981a8ea0
RH
1468}
1469
3f4de675
AJ
1470uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
1471 uint64_t len, uint64_t trans)
1472{
d46cd62f 1473 uintptr_t ra = GETPC();
3f4de675
AJ
1474 uint8_t end = env->regs[0] & 0xff;
1475 uint64_t l = len;
1476 uint64_t i;
d46cd62f 1477 uint32_t cc = 0;
3f4de675
AJ
1478
1479 if (!(env->psw.mask & PSW_MASK_64)) {
1480 array &= 0x7fffffff;
1481 l = (uint32_t)l;
1482 }
1483
1484 /* Lest we fail to service interrupts in a timely manner, limit the
1485 amount of work we're willing to do. For now, let's cap at 8k. */
1486 if (l > 0x2000) {
1487 l = 0x2000;
d46cd62f 1488 cc = 3;
3f4de675
AJ
1489 }
1490
1491 for (i = 0; i < l; i++) {
1492 uint8_t byte, new_byte;
1493
d46cd62f 1494 byte = cpu_ldub_data_ra(env, array + i, ra);
3f4de675
AJ
1495
1496 if (byte == end) {
d46cd62f 1497 cc = 1;
3f4de675
AJ
1498 break;
1499 }
1500
d46cd62f
RH
1501 new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
1502 cpu_stb_data_ra(env, array + i, new_byte, ra);
3f4de675
AJ
1503 }
1504
d46cd62f 1505 env->cc_op = cc;
3f4de675
AJ
1506 env->retxl = len - i;
1507 return array + i;
1508}
1509
b213c9f5
RH
1510static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
1511 uint64_t array, uint64_t trans,
1512 int inc, uintptr_t ra)
54f00775 1513{
b213c9f5 1514 int i;
54f00775
AJ
1515
1516 for (i = 0; i <= len; i++) {
b213c9f5 1517 uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
2c7e5f8c 1518 uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);
54f00775
AJ
1519
1520 if (sbyte != 0) {
b213c9f5 1521 set_address(env, 1, array + i * inc);
2c7e5f8c
RH
1522 env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
1523 return (i == len) ? 2 : 1;
54f00775
AJ
1524 }
1525 }
1526
2c7e5f8c
RH
1527 return 0;
1528}
1529
ad8c851d
PZ
1530static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
1531 uint64_t array, uint64_t trans,
1532 uintptr_t ra)
1533{
1534 return do_helper_trt(env, len, array, trans, 1, ra);
1535}
1536
2c7e5f8c
RH
1537uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
1538 uint64_t trans)
1539{
b213c9f5
RH
1540 return do_helper_trt(env, len, array, trans, 1, GETPC());
1541}
1542
ad8c851d
PZ
1543static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
1544 uint64_t array, uint64_t trans,
1545 uintptr_t ra)
1546{
1547 return do_helper_trt(env, len, array, trans, -1, ra);
1548}
1549
b213c9f5
RH
1550uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
1551 uint64_t trans)
1552{
1553 return do_helper_trt(env, len, array, trans, -1, GETPC());
54f00775
AJ
1554}
1555
4065ae76
AJ
1556/* Translate one/two to one/two */
1557uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
1558 uint32_t tst, uint32_t sizes)
1559{
1560 uintptr_t ra = GETPC();
1561 int dsize = (sizes & 1) ? 1 : 2;
1562 int ssize = (sizes & 2) ? 1 : 2;
3c39c800 1563 uint64_t tbl = get_address(env, 1);
4065ae76
AJ
1564 uint64_t dst = get_address(env, r1);
1565 uint64_t len = get_length(env, r1 + 1);
1566 uint64_t src = get_address(env, r2);
1567 uint32_t cc = 3;
1568 int i;
1569
3c39c800
RH
1570 /* The lower address bits of TBL are ignored. For TROO, TROT, it's
1571 the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
1572 the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
1573 if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
1574 tbl &= -4096;
1575 } else {
1576 tbl &= -8;
1577 }
1578
4065ae76
AJ
1579 check_alignment(env, len, ssize, ra);
1580
1581 /* Lest we fail to service interrupts in a timely manner, */
1582 /* limit the amount of work we're willing to do. */
1583 for (i = 0; i < 0x2000; i++) {
1584 uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
1585 uint64_t tble = tbl + (sval * dsize);
1586 uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
1587 if (dval == tst) {
1588 cc = 1;
1589 break;
1590 }
1591 cpu_stsize_data_ra(env, dst, dval, dsize, ra);
1592
1593 len -= ssize;
1594 src += ssize;
1595 dst += dsize;
1596
1597 if (len == 0) {
1598 cc = 0;
1599 break;
1600 }
1601 }
1602
1603 set_address(env, r1, dst);
1604 set_length(env, r1 + 1, len);
1605 set_address(env, r2, src);
1606
1607 return cc;
1608}
1609
0c9fa168
RH
1610void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
1611 uint32_t r1, uint32_t r3)
303a9ab8
RH
1612{
1613 uintptr_t ra = GETPC();
1614 Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
1615 Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
1616 Int128 oldv;
0c9fa168 1617 uint64_t oldh, oldl;
303a9ab8
RH
1618 bool fail;
1619
0c9fa168 1620 check_alignment(env, addr, 16, ra);
c0080f1b 1621
0c9fa168
RH
1622 oldh = cpu_ldq_data_ra(env, addr + 0, ra);
1623 oldl = cpu_ldq_data_ra(env, addr + 8, ra);
303a9ab8 1624
0c9fa168
RH
1625 oldv = int128_make128(oldl, oldh);
1626 fail = !int128_eq(oldv, cmpv);
1627 if (fail) {
1628 newv = oldv;
303a9ab8
RH
1629 }
1630
0c9fa168
RH
1631 cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
1632 cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
1633
303a9ab8
RH
1634 env->cc_op = fail;
1635 env->regs[r1] = int128_gethi(oldv);
1636 env->regs[r1 + 1] = int128_getlo(oldv);
1637}
1638
6476615d
EC
1639void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
1640 uint32_t r1, uint32_t r3)
1641{
0c9fa168
RH
1642 uintptr_t ra = GETPC();
1643 Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
1644 Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
1645 int mem_idx;
1646 TCGMemOpIdx oi;
1647 Int128 oldv;
1648 bool fail;
1649
830bf10c 1650 assert(HAVE_CMPXCHG128);
0c9fa168
RH
1651
1652 mem_idx = cpu_mmu_index(env, false);
1653 oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
1654 oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
1655 fail = !int128_eq(oldv, cmpv);
1656
1657 env->cc_op = fail;
1658 env->regs[r1] = int128_gethi(oldv);
1659 env->regs[r1 + 1] = int128_getlo(oldv);
6476615d
EC
1660}
1661
1662static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
1663 uint64_t a2, bool parallel)
c67ba303 1664{
c67ba303 1665 uint32_t mem_idx = cpu_mmu_index(env, false);
c67ba303
RH
1666 uintptr_t ra = GETPC();
1667 uint32_t fc = extract32(env->regs[0], 0, 8);
1668 uint32_t sc = extract32(env->regs[0], 8, 8);
1669 uint64_t pl = get_address(env, 1) & -16;
1670 uint64_t svh, svl;
1671 uint32_t cc;
1672
1673 /* Sanity check the function code and storage characteristic. */
1674 if (fc > 1 || sc > 3) {
1675 if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
1676 goto spec_exception;
1677 }
1678 if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
1679 goto spec_exception;
1680 }
1681 }
1682
1683 /* Sanity check the alignments. */
dc95b31d 1684 if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
c67ba303
RH
1685 goto spec_exception;
1686 }
1687
1688 /* Sanity check writability of the store address. */
9cd9cdae 1689 probe_write(env, a2, 1 << sc, mem_idx, ra);
c67ba303 1690
5e95612e
RH
1691 /*
1692 * Note that the compare-and-swap is atomic, and the store is atomic,
1693 * but the complete operation is not. Therefore we do not need to
1694 * assert serial context in order to implement this. That said,
1695 * restart early if we can't support either operation that is supposed
1696 * to be atomic.
1697 */
6476615d 1698 if (parallel) {
5e95612e
RH
1699 uint32_t max = 2;
1700#ifdef CONFIG_ATOMIC64
1701 max = 3;
c67ba303 1702#endif
5e95612e
RH
1703 if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
1704 (HAVE_ATOMIC128 ? 0 : sc > max)) {
29a0af61 1705 cpu_loop_exit_atomic(env_cpu(env), ra);
c67ba303
RH
1706 }
1707 }
1708
1709 /* All loads happen before all stores. For simplicity, load the entire
1710 store value area from the parameter list. */
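    /* As used here, bytes 0-15 of the parameter list hold the swap value and
       bytes 16-31 the store value; only the parts selected by fc/sc are used. */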
1711 svh = cpu_ldq_data_ra(env, pl + 16, ra);
1712 svl = cpu_ldq_data_ra(env, pl + 24, ra);
1713
1714 switch (fc) {
1715 case 0:
1716 {
1717 uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
1718 uint32_t cv = env->regs[r3];
1719 uint32_t ov;
1720
6476615d 1721 if (parallel) {
c67ba303
RH
1722#ifdef CONFIG_USER_ONLY
1723 uint32_t *haddr = g2h(a1);
1724 ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
1725#else
1726 TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
1727 ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
1728#endif
1729 } else {
1730 ov = cpu_ldl_data_ra(env, a1, ra);
1731 cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
1732 }
1733 cc = (ov != cv);
1734 env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
1735 }
1736 break;
1737
1738 case 1:
1739 {
1740 uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
1741 uint64_t cv = env->regs[r3];
1742 uint64_t ov;
1743
6476615d 1744 if (parallel) {
c67ba303
RH
1745#ifdef CONFIG_ATOMIC64
1746# ifdef CONFIG_USER_ONLY
1747 uint64_t *haddr = g2h(a1);
1748 ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
1749# else
1750 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
1751 ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
1752# endif
1753#else
6476615d 1754 /* Note that we asserted !parallel above. */
c67ba303
RH
1755 g_assert_not_reached();
1756#endif
1757 } else {
1758 ov = cpu_ldq_data_ra(env, a1, ra);
1759 cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
1760 }
1761 cc = (ov != cv);
1762 env->regs[r3] = ov;
1763 }
1764 break;
1765
1766 case 2:
1767 {
1768 uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
1769 uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
1770 Int128 nv = int128_make128(nvl, nvh);
1771 Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
1772 Int128 ov;
1773
5e95612e 1774 if (!parallel) {
c67ba303
RH
1775 uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
1776 uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);
1777
1778 ov = int128_make128(ol, oh);
1779 cc = !int128_eq(ov, cv);
1780 if (cc) {
1781 nv = ov;
1782 }
1783
1784 cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
1785 cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
5e95612e
RH
1786 } else if (HAVE_CMPXCHG128) {
1787 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
1788 ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
1789 cc = !int128_eq(ov, cv);
1790 } else {
1791 /* Note that we asserted !parallel above. */
1792 g_assert_not_reached();
c67ba303
RH
1793 }
1794
1795 env->regs[r3 + 0] = int128_gethi(ov);
1796 env->regs[r3 + 1] = int128_getlo(ov);
1797 }
1798 break;
1799
1800 default:
1801 g_assert_not_reached();
1802 }
1803
1804 /* Store only if the comparison succeeded. Note that above we use a pair
1805 of 64-bit big-endian loads, so for sc < 3 we must extract the value
1806 from the most-significant bits of svh. */
1807 if (cc == 0) {
1808 switch (sc) {
1809 case 0:
1810 cpu_stb_data_ra(env, a2, svh >> 56, ra);
1811 break;
1812 case 1:
1813 cpu_stw_data_ra(env, a2, svh >> 48, ra);
1814 break;
1815 case 2:
1816 cpu_stl_data_ra(env, a2, svh >> 32, ra);
1817 break;
1818 case 3:
1819 cpu_stq_data_ra(env, a2, svh, ra);
1820 break;
1821 case 4:
5e95612e
RH
1822 if (!parallel) {
1823 cpu_stq_data_ra(env, a2 + 0, svh, ra);
1824 cpu_stq_data_ra(env, a2 + 8, svl, ra);
1825 } else if (HAVE_ATOMIC128) {
c67ba303
RH
1826 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
1827 Int128 sv = int128_make128(svl, svh);
1828 helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
5e95612e 1829 } else {
6476615d 1830 /* Unreachable: the check above forces a serial context when !HAVE_ATOMIC128. */
c67ba303 1831 g_assert_not_reached();
c67ba303 1832 }
de4e05d1 1833 break;
c67ba303
RH
1834 default:
1835 g_assert_not_reached();
1836 }
1837 }
1838
1839 return cc;
1840
1841 spec_exception:
8d2f850a 1842 s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
c67ba303
RH
1843 g_assert_not_reached();
1844}
1845
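/* Entry points for COMPARE AND SWAP AND STORE.  The _parallel variant is
   presumably selected by the translator when the TB runs in a parallel
   context, so the compare-and-swap and the store go through the host's
   atomic helpers; the serial variant can use plain loads and stores. */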
6476615d
EC
1846uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
1847{
1848 return do_csst(env, r3, a1, a2, false);
1849}
1850
1851uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
1852 uint64_t a2)
1853{
1854 return do_csst(env, r3, a1, a2, true);
1855}
1856
8ef7f78e 1857#if !defined(CONFIG_USER_ONLY)
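/* LOAD CONTROL: load control registers r1..r3 (wrapping modulo 16) from
   consecutive doublewords at a2.  CR9-CR11 hold the PER controls and other
   control registers (e.g. CR1/CR7/CR13) hold ASCEs used for address
   translation, hence the watchpoint recomputation and full TLB flush below. */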
19b0516f 1858void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 1859{
97ae2149 1860 uintptr_t ra = GETPC();
311918b9 1861 bool PERchanged = false;
8ef7f78e 1862 uint64_t src = a2;
97ae2149 1863 uint32_t i;
8ef7f78e 1864
21fc97c5
DH
1865 if (src & 0x7) {
1866 s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
1867 }
1868
8ef7f78e 1869 for (i = r1;; i = (i + 1) % 16) {
97ae2149 1870 uint64_t val = cpu_ldq_data_ra(env, src, ra);
311918b9
AJ
1871 if (env->cregs[i] != val && i >= 9 && i <= 11) {
1872 PERchanged = true;
1873 }
1874 env->cregs[i] = val;
8ef7f78e 1875 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
97ae2149 1876 i, src, val);
8ef7f78e
BS
1877 src += sizeof(uint64_t);
1878
1879 if (i == r3) {
1880 break;
1881 }
1882 }
1883
311918b9 1884 if (PERchanged && env->psw.mask & PSW_MASK_PER) {
dc79e928 1885 s390_cpu_recompute_watchpoints(env_cpu(env));
311918b9
AJ
1886 }
1887
dc79e928 1888 tlb_flush(env_cpu(env));
8ef7f78e
BS
1889}
1890
19b0516f 1891void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 1892{
1b642a73 1893 uintptr_t ra = GETPC();
311918b9 1894 bool PERchanged = false;
8ef7f78e 1895 uint64_t src = a2;
1b642a73 1896 uint32_t i;
8ef7f78e 1897
21fc97c5
DH
1898 if (src & 0x3) {
1899 s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
1900 }
1901
8ef7f78e 1902 for (i = r1;; i = (i + 1) % 16) {
1b642a73 1903 uint32_t val = cpu_ldl_data_ra(env, src, ra);
311918b9
AJ
1904 if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
1905 PERchanged = true;
1906 }
1b642a73
RH
1907 env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
1908 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
8ef7f78e
BS
1909 src += sizeof(uint32_t);
1910
1911 if (i == r3) {
1912 break;
1913 }
1914 }
1915
311918b9 1916 if (PERchanged && env->psw.mask & PSW_MASK_PER) {
dc79e928 1917 s390_cpu_recompute_watchpoints(env_cpu(env));
311918b9
AJ
1918 }
1919
dc79e928 1920 tlb_flush(env_cpu(env));
8ef7f78e
BS
1921}
1922
19b0516f 1923void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 1924{
75d6240c 1925 uintptr_t ra = GETPC();
8ef7f78e 1926 uint64_t dest = a2;
75d6240c 1927 uint32_t i;
8ef7f78e 1928
21fc97c5
DH
1929 if (dest & 0x7) {
1930 s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
1931 }
1932
8ef7f78e 1933 for (i = r1;; i = (i + 1) % 16) {
75d6240c 1934 cpu_stq_data_ra(env, dest, env->cregs[i], ra);
8ef7f78e
BS
1935 dest += sizeof(uint64_t);
1936
1937 if (i == r3) {
1938 break;
1939 }
1940 }
1941}
1942
19b0516f 1943void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
8ef7f78e 1944{
75d6240c 1945 uintptr_t ra = GETPC();
8ef7f78e 1946 uint64_t dest = a2;
75d6240c 1947 uint32_t i;
8ef7f78e 1948
21fc97c5
DH
1949 if (dest & 0x3) {
1950 s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
1951 }
1952
8ef7f78e 1953 for (i = r1;; i = (i + 1) % 16) {
75d6240c 1954 cpu_stl_data_ra(env, dest, env->cregs[i], ra);
8ef7f78e
BS
1955 dest += sizeof(uint32_t);
1956
1957 if (i == r3) {
1958 break;
1959 }
1960 }
1961}
1962
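/* TEST BLOCK, as modelled here: clear the 4 KiB block at the given real
   address and report cc 0 ("block usable"); any access exception is raised
   by the stores themselves. */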
f79f1ca4
TH
1963uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
1964{
aef2b01a 1965 uintptr_t ra = GETPC();
f79f1ca4
TH
1966 int i;
1967
e26131c9 1968 real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
f79f1ca4 1969
f79f1ca4 1970 for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
e26131c9 1971 cpu_stq_real_ra(env, real_addr + i, 0, ra);
f79f1ca4
TH
1972 }
1973
1974 return 0;
1975}
1976
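/* TEST PROTECTION.  The cc values produced below are: 0 - fetch and store
   permitted, 1 - fetch permitted but store not permitted, 2 - neither
   permitted, 3 - translation not available. */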
bb879430 1977uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
8ef7f78e 1978{
dc79e928
RH
1979 S390CPU *cpu = env_archcpu(env);
1980 CPUState *cs = env_cpu(env);
bb879430
DH
1981
1982 /*
1983 * TODO: we currently don't handle all access protection types
1984 * (including access-list and key-controlled) as well as AR mode.
1985 */
1986 if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
1987 /* Fetching permitted; storing permitted */
1988 return 0;
1989 }
b5e85329
DH
1990
1991 if (env->int_pgm_code == PGM_PROTECTION) {
1992 /* retry if reading is possible */
bed04a2b 1993 cs->exception_index = -1;
b5e85329
DH
1994 if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
1995 /* Fetching permitted; storing not permitted */
1996 return 1;
1997 }
1998 }
1999
bb879430
DH
2000 switch (env->int_pgm_code) {
2001 case PGM_PROTECTION:
bb879430 2002 /* Fetching not permitted; storing not permitted */
bed04a2b 2003 cs->exception_index = -1;
bb879430 2004 return 2;
b5e85329
DH
2005 case PGM_ADDRESSING:
2006 case PGM_TRANS_SPEC:
2007 /* exceptions forwarded to the guest */
2008 s390_cpu_virt_mem_handle_exc(cpu, GETPC());
2009 return 0;
bb879430 2010 }
b5e85329
DH
2011
2012 /* Translation not available */
bed04a2b 2013 cs->exception_index = -1;
b5e85329 2014 return 3;
8ef7f78e
BS
2015}
2016
2017/* insert storage key extended */
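/* The storage key byte combines the 4-bit access-control key with the
   fetch-protection (F), reference (R) and change (C) bits; ISKE returns it
   unchanged for the page containing the wrapped address in r2 (the R and C
   bits are the SK_R/SK_C masks used by rrbe below). */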
19b0516f 2018uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
8ef7f78e 2019{
0f5f6691
JH
2020 static S390SKeysState *ss;
2021 static S390SKeysClass *skeyclass;
a5c3cedd 2022 uint64_t addr = wrap_address(env, r2);
0f5f6691 2023 uint8_t key;
8ef7f78e
BS
2024
2025 if (addr > ram_size) {
2026 return 0;
2027 }
2028
0f5f6691
JH
2029 if (unlikely(!ss)) {
2030 ss = s390_get_skeys_device();
2031 skeyclass = S390_SKEYS_GET_CLASS(ss);
2032 }
2033
2034 if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
2035 return 0;
2036 }
2037 return key;
8ef7f78e
BS
2038}
2039
2040/* set storage key extended */
2bbde27f 2041void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
8ef7f78e 2042{
0f5f6691
JH
2043 static S390SKeysState *ss;
2044 static S390SKeysClass *skeyclass;
a5c3cedd 2045 uint64_t addr = wrap_address(env, r2);
0f5f6691 2046 uint8_t key;
8ef7f78e
BS
2047
2048 if (addr > ram_size) {
2049 return;
2050 }
2051
0f5f6691
JH
2052 if (unlikely(!ss)) {
2053 ss = s390_get_skeys_device();
2054 skeyclass = S390_SKEYS_GET_CLASS(ss);
2055 }
2056
2057 key = (uint8_t) r1;
2058 skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
5b773a11
DH
2059 /*
 2060 * As we can only flush by virtual address, and not all the entries
 2061 * that map a given physical address, we have to flush the whole TLB.
2062 */
2063 tlb_flush_all_cpus_synced(env_cpu(env));
8ef7f78e
BS
2064}
2065
2066/* reset reference bit extended */
5cc69c54 2067uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
8ef7f78e 2068{
0f5f6691
JH
2069 static S390SKeysState *ss;
2070 static S390SKeysClass *skeyclass;
2071 uint8_t re, key;
8ef7f78e
BS
2072
2073 if (r2 > ram_size) {
2074 return 0;
2075 }
2076
0f5f6691
JH
2077 if (unlikely(!ss)) {
2078 ss = s390_get_skeys_device();
2079 skeyclass = S390_SKEYS_GET_CLASS(ss);
2080 }
2081
2082 if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
2083 return 0;
2084 }
2085
8ef7f78e 2086 re = key & (SK_R | SK_C);
0f5f6691
JH
2087 key &= ~SK_R;
2088
2089 if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
2090 return 0;
2091 }
5b773a11
DH
2092 /*
 2093 * As we can only flush by virtual address, and not all the entries
 2094 * that map a given physical address, we have to flush the whole TLB.
2095 */
2096 tlb_flush_all_cpus_synced(env_cpu(env));
8ef7f78e
BS
2097
2098 /*
2099 * cc
2100 *
2101 * 0 Reference bit zero; change bit zero
2102 * 1 Reference bit zero; change bit one
2103 * 2 Reference bit one; change bit zero
2104 * 3 Reference bit one; change bit one
2105 */
2106
2107 return re >> 1;
2108}
2109
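/* MOVE TO SECONDARY (mvcs) and MOVE TO PRIMARY (mvcp) copy at most 256 bytes
   between the primary and secondary address spaces.  A larger length is
   capped at 256 and reported with cc 3; access-key checking is not yet
   implemented (see the TODOs below). */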
a3084e80 2110uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
8ef7f78e 2111{
43df3e71 2112 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
efb1a76e 2113 S390Access srca, desta;
b90fb26b 2114 uintptr_t ra = GETPC();
efb1a76e 2115 int cc = 0;
8ef7f78e 2116
a3084e80
AJ
2117 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2118 __func__, l, a1, a2);
2119
43df3e71
DH
2120 if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
2121 psw_as == AS_HOME || psw_as == AS_ACCREG) {
2122 s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
2123 }
2124
373290d8 2125 l = wrap_length32(env, l);
a3084e80 2126 if (l > 256) {
8ef7f78e
BS
2127 /* max 256 */
2128 l = 256;
2129 cc = 3;
373290d8
DH
2130 } else if (!l) {
2131 return cc;
8ef7f78e
BS
2132 }
2133
efb1a76e
DH
2134 /* TODO: Access key handling */
2135 srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
2136 desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
2137 access_memmove(env, &desta, &srca, ra);
8ef7f78e
BS
2138 return cc;
2139}
2140
a3084e80 2141uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
8ef7f78e 2142{
43df3e71 2143 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
efb1a76e 2144 S390Access srca, desta;
b90fb26b 2145 uintptr_t ra = GETPC();
efb1a76e 2146 int cc = 0;
a3084e80 2147
8ef7f78e
BS
2148 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2149 __func__, l, a1, a2);
2150
43df3e71
DH
2151 if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
2152 psw_as == AS_HOME || psw_as == AS_ACCREG) {
2153 s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
2154 }
2155
373290d8 2156 l = wrap_length32(env, l);
a3084e80
AJ
2157 if (l > 256) {
2158 /* max 256 */
2159 l = 256;
2160 cc = 3;
373290d8
DH
2161 } else if (!l) {
2162 return cc;
a3084e80 2163 }
8ef7f78e 2164
efb1a76e
DH
2165 /* TODO: Access key handling */
2166 srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
2167 desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
2168 access_memmove(env, &desta, &srca, ra);
a3084e80 2169 return cc;
8ef7f78e
BS
2170}
2171
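/* INVALIDATE DAT TABLE ENTRY.  r1 holds an ASCE-like table designation,
   r2 holds the effective address plus, in its low bits, the number of
   additional entries (low 11 bits) and the operation selector (bit 0x800)
   tested below.  Each translation level indexes 11 bits of the virtual
   address, which is where the 53/42/31/20 shifts come from. */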
be7f28de
DH
2172void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
2173{
dc79e928 2174 CPUState *cs = env_cpu(env);
be7f28de
DH
2175 const uintptr_t ra = GETPC();
2176 uint64_t table, entry, raddr;
2177 uint16_t entries, i, index = 0;
2178
2179 if (r2 & 0xff000) {
8d2f850a 2180 s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
be7f28de
DH
2181 }
2182
2183 if (!(r2 & 0x800)) {
2184 /* invalidation-and-clearing operation */
adab99be 2185 table = r1 & ASCE_ORIGIN;
be7f28de
DH
2186 entries = (r2 & 0x7ff) + 1;
2187
adab99be
TH
2188 switch (r1 & ASCE_TYPE_MASK) {
2189 case ASCE_TYPE_REGION1:
be7f28de
DH
2190 index = (r2 >> 53) & 0x7ff;
2191 break;
adab99be 2192 case ASCE_TYPE_REGION2:
be7f28de
DH
2193 index = (r2 >> 42) & 0x7ff;
2194 break;
adab99be 2195 case ASCE_TYPE_REGION3:
be7f28de
DH
2196 index = (r2 >> 31) & 0x7ff;
2197 break;
adab99be 2198 case ASCE_TYPE_SEGMENT:
be7f28de
DH
2199 index = (r2 >> 20) & 0x7ff;
2200 break;
2201 }
2202 for (i = 0; i < entries; i++) {
 2203 /* Addresses are not wrapped in 24/31-bit mode, but the table index is. */
2204 raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
8eb82de9 2205 entry = cpu_ldq_real_ra(env, raddr, ra);
adab99be 2206 if (!(entry & REGION_ENTRY_INV)) {
be7f28de 2207 /* we are allowed to not store if already invalid */
adab99be 2208 entry |= REGION_ENTRY_INV;
8eb82de9 2209 cpu_stq_real_ra(env, raddr, entry, ra);
be7f28de
DH
2210 }
2211 }
2212 }
2213
 2214 /* We simply flush the complete TLB, so we can ignore r3. */
2215 if (m4 & 1) {
2216 tlb_flush(cs);
2217 } else {
2218 tlb_flush_all_cpus_synced(cs);
2219 }
2220}
2221
8ef7f78e 2222/* invalidate pte */
1f58720c
AJ
2223void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
2224 uint32_t m4)
8ef7f78e 2225{
dc79e928 2226 CPUState *cs = env_cpu(env);
8eb82de9 2227 const uintptr_t ra = GETPC();
8ef7f78e 2228 uint64_t page = vaddr & TARGET_PAGE_MASK;
8a4719f5 2229 uint64_t pte_addr, pte;
8ef7f78e 2230
8a4719f5 2231 /* Compute the page table entry address */
adab99be 2232 pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
1f58720c 2233 pte_addr += (vaddr & VADDR_PX) >> 9;
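    /* I.e. the page index (vaddr bits 12..19, per VADDR_PX) scaled by the
       8-byte PTE size: ((vaddr >> 12) & 0xff) * 8. */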
8a4719f5
AJ
2234
2235 /* Mark the page table entry as invalid */
8eb82de9 2236 pte = cpu_ldq_real_ra(env, pte_addr, ra);
adab99be 2237 pte |= PAGE_INVALID;
8eb82de9 2238 cpu_stq_real_ra(env, pte_addr, pte, ra);
8ef7f78e
BS
2239
2240 /* XXX we exploit the fact that Linux passes the exact virtual
2241 address here - it's not obliged to! */
1f58720c 2242 if (m4 & 1) {
97b95aae
DH
2243 if (vaddr & ~VADDR_PX) {
2244 tlb_flush_page(cs, page);
2245 /* XXX 31-bit hack */
2246 tlb_flush_page(cs, page ^ 0x80000000);
2247 } else {
2248 /* looks like we don't have a valid virtual address */
2249 tlb_flush(cs);
2250 }
8ef7f78e 2251 } else {
97b95aae
DH
2252 if (vaddr & ~VADDR_PX) {
2253 tlb_flush_page_all_cpus_synced(cs, page);
2254 /* XXX 31-bit hack */
2255 tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
2256 } else {
2257 /* looks like we don't have a valid virtual address */
2258 tlb_flush_all_cpus_synced(cs);
2259 }
8ef7f78e
BS
2260 }
2261}
2262
2263/* flush local tlb */
19b0516f 2264void HELPER(ptlb)(CPUS390XState *env)
8ef7f78e 2265{
dc79e928 2266 tlb_flush(env_cpu(env));
8ef7f78e
BS
2267}
2268
31a18b45
RH
2269/* flush global tlb */
2270void HELPER(purge)(CPUS390XState *env)
2271{
dc79e928 2272 tlb_flush_all_cpus_synced(env_cpu(env));
31a18b45
RH
2273}
2274
9c3fd85b
RH
2275/* load using real address */
2276uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
2277{
34499dad 2278 return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
9c3fd85b
RH
2279}
2280
2281uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
2282{
34499dad 2283 return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
9c3fd85b
RH
2284}
2285
8ef7f78e 2286/* store using real address */
204504e2 2287void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
8ef7f78e 2288{
4ae43341 2289 cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());
2f543949
AJ
2290
2291 if ((env->psw.mask & PSW_MASK_PER) &&
2292 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
2293 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
2294 /* PSW is saved just before calling the helper. */
2295 env->per_address = env->psw.addr;
2296 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
2297 }
8ef7f78e
BS
2298}
2299
9c3fd85b
RH
2300void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
2301{
4ae43341 2302 cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());
2f543949
AJ
2303
2304 if ((env->psw.mask & PSW_MASK_PER) &&
2305 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
2306 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
2307 /* PSW is saved just before calling the helper. */
2308 env->per_address = env->psw.addr;
2309 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
2310 }
9c3fd85b
RH
2311}
2312
8ef7f78e 2313/* load real address */
d8fe4a9c 2314uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
8ef7f78e 2315{
dc79e928 2316 CPUState *cs = env_cpu(env);
8ef7f78e 2317 uint32_t cc = 0;
8ef7f78e
BS
2318 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2319 uint64_t ret;
b157fbe6 2320 int old_exc, flags;
8ef7f78e
BS
2321
2322 /* XXX incomplete - has more corner cases */
2323 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
8d2f850a 2324 s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
8ef7f78e
BS
2325 }
2326
b157fbe6 2327 old_exc = cs->exception_index;
e3e09d87 2328 if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
8ef7f78e
BS
2329 cc = 3;
2330 }
27103424 2331 if (cs->exception_index == EXCP_PGM) {
8ef7f78e
BS
2332 ret = env->int_pgm_code | 0x80000000;
2333 } else {
2334 ret |= addr & ~TARGET_PAGE_MASK;
2335 }
27103424 2336 cs->exception_index = old_exc;
8ef7f78e 2337
d8fe4a9c
RH
2338 env->cc_op = cc;
2339 return ret;
8ef7f78e 2340}
8ef7f78e 2341#endif
a5cfc223 2342
e22dfdb2 2343/* load pair from quadword */
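/* LPQ/STPQ operate on 16-byte aligned quadwords.  The _parallel variants
   below rely on the host's 128-bit atomic helpers; the asserts document the
   expectation that they are only reached when HAVE_ATOMIC128, with the
   translator otherwise falling back to a serial context. */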
0c9fa168 2344uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
e22dfdb2
AJ
2345{
2346 uintptr_t ra = GETPC();
2347 uint64_t hi, lo;
2348
0c9fa168
RH
2349 check_alignment(env, addr, 16, ra);
2350 hi = cpu_ldq_data_ra(env, addr + 0, ra);
2351 lo = cpu_ldq_data_ra(env, addr + 8, ra);
2352
2353 env->retxl = lo;
2354 return hi;
2355}
2356
2357uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
2358{
2359 uintptr_t ra = GETPC();
2360 uint64_t hi, lo;
830bf10c
RH
2361 int mem_idx;
2362 TCGMemOpIdx oi;
2363 Int128 v;
0c9fa168 2364
830bf10c
RH
2365 assert(HAVE_ATOMIC128);
2366
2367 mem_idx = cpu_mmu_index(env, false);
2368 oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
2369 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
2370 hi = int128_gethi(v);
2371 lo = int128_getlo(v);
e22dfdb2
AJ
2372
2373 env->retxl = lo;
2374 return hi;
2375}
2376
0c9fa168
RH
2377/* store pair to quadword */
2378void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
2379 uint64_t low, uint64_t high)
6476615d 2380{
0c9fa168 2381 uintptr_t ra = GETPC();
6476615d 2382
0c9fa168
RH
2383 check_alignment(env, addr, 16, ra);
2384 cpu_stq_data_ra(env, addr + 0, high, ra);
2385 cpu_stq_data_ra(env, addr + 8, low, ra);
6476615d
EC
2386}
2387
0c9fa168
RH
2388void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
2389 uint64_t low, uint64_t high)
c21b610f
AJ
2390{
2391 uintptr_t ra = GETPC();
830bf10c
RH
2392 int mem_idx;
2393 TCGMemOpIdx oi;
2394 Int128 v;
c21b610f 2395
830bf10c
RH
2396 assert(HAVE_ATOMIC128);
2397
2398 mem_idx = cpu_mmu_index(env, false);
2399 oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
2400 v = int128_make128(low, high);
2401 helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
c21b610f
AJ
2402}
2403
303c681a
RH
2404/* Execute instruction. This instruction executes an insn modified with
2405 the contents of r1. It does not change the executed instruction in memory;
2406 it does not change the program counter.
2407
2408 Perform this by recording the modified instruction in env->ex_value.
2409 This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
a5cfc223 2410*/
83500793 2411void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
a5cfc223 2412{
83500793
RH
2413 uint64_t insn = cpu_lduw_code(env, addr);
2414 uint8_t opc = insn >> 8;
2415
2416 /* Or in the contents of R1[56:63]. */
2417 insn |= r1 & 0xff;
2418
2419 /* Load the rest of the instruction. */
2420 insn <<= 48;
2421 switch (get_ilen(opc)) {
2422 case 2:
2423 break;
2424 case 4:
2425 insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
2426 break;
2427 case 6:
2428 insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
2429 break;
2430 default:
2431 g_assert_not_reached();
2432 }
2433
d376f123
RH
 2434 /* By far the most common cases can be sped up by avoiding a new TB. */
2435 if ((opc & 0xf0) == 0xd0) {
2436 typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
2437 uint64_t, uintptr_t);
2438 static const dx_helper dx[16] = {
ad8c851d 2439 [0x0] = do_helper_trt_bkwd,
d376f123
RH
2440 [0x2] = do_helper_mvc,
2441 [0x4] = do_helper_nc,
2442 [0x5] = do_helper_clc,
2443 [0x6] = do_helper_oc,
2444 [0x7] = do_helper_xc,
2445 [0xc] = do_helper_tr,
ad8c851d 2446 [0xd] = do_helper_trt_fwd,
d376f123
RH
2447 };
2448 dx_helper helper = dx[opc & 0xf];
2449
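        /* The SS-format insn is left-justified in the 64-bit value built
           above: opcode in bits 63..56, L in 55..48, B1 in 47..44, D1 in
           43..32, B2 in 31..28, D2 in 27..16, matching the extract64
           offsets below. */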
2450 if (helper) {
2451 uint32_t l = extract64(insn, 48, 8);
2452 uint32_t b1 = extract64(insn, 44, 4);
2453 uint32_t d1 = extract64(insn, 32, 12);
2454 uint32_t b2 = extract64(insn, 28, 4);
2455 uint32_t d2 = extract64(insn, 16, 12);
a5c3cedd
AJ
2456 uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
2457 uint64_t a2 = wrap_address(env, env->regs[b2] + d2);
d376f123
RH
2458
2459 env->cc_op = helper(env, l, a1, a2, 0);
2460 env->psw.addr += ilen;
2461 return;
2462 }
2463 } else if (opc == 0x0a) {
2464 env->int_svc_code = extract64(insn, 48, 8);
2465 env->int_svc_ilen = ilen;
2466 helper_exception(env, EXCP_SVC);
2467 g_assert_not_reached();
2468 }
2469
303c681a
RH
2470 /* Record the insn we want to execute as well as the ilen to use
2471 during the execution of the target insn. This will also ensure
2472 that ex_value is non-zero, which flags that we are in a state
2473 that requires such execution. */
2474 env->ex_value = insn | ilen;
a5cfc223 2475}
3e7e5e0b
DH
2476
2477uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
2478 uint64_t len)
2479{
2480 const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
2481 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
2482 const uint64_t r0 = env->regs[0];
2483 const uintptr_t ra = GETPC();
3e7e5e0b
DH
2484 uint8_t dest_key, dest_as, dest_k, dest_a;
2485 uint8_t src_key, src_as, src_k, src_a;
2486 uint64_t val;
2487 int cc = 0;
2488
2489 HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
2490 __func__, dest, src, len);
2491
2492 if (!(env->psw.mask & PSW_MASK_DAT)) {
8d2f850a 2493 s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
3e7e5e0b
DH
2494 }
2495
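    /* R0 supplies the two operand-access controls: OAC1 for the first
       operand (dest) in bits 31..16 and OAC2 for the second operand (src)
       in bits 15..0, as masked below.  Within each 16-bit OAC: access key
       in the top four bits, address space in bits 7..6, and the key/AS
       validity flags in bits 1 and 0. */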
2496 /* OAC (operand access control) for the first operand -> dest */
2497 val = (r0 & 0xffff0000ULL) >> 16;
2498 dest_key = (val >> 12) & 0xf;
2499 dest_as = (val >> 6) & 0x3;
2500 dest_k = (val >> 1) & 0x1;
2501 dest_a = val & 0x1;
2502
2503 /* OAC (operand access control) for the second operand -> src */
2504 val = (r0 & 0x0000ffffULL);
2505 src_key = (val >> 12) & 0xf;
2506 src_as = (val >> 6) & 0x3;
2507 src_k = (val >> 1) & 0x1;
2508 src_a = val & 0x1;
2509
2510 if (!dest_k) {
2511 dest_key = psw_key;
2512 }
2513 if (!src_k) {
2514 src_key = psw_key;
2515 }
2516 if (!dest_a) {
2517 dest_as = psw_as;
2518 }
2519 if (!src_a) {
2520 src_as = psw_as;
2521 }
2522
2523 if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
8d2f850a 2524 s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
3e7e5e0b
DH
2525 }
2526 if (!(env->cregs[0] & CR0_SECONDARY) &&
2527 (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
8d2f850a 2528 s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
3e7e5e0b
DH
2529 }
2530 if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
8d2f850a 2531 s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
3e7e5e0b
DH
2532 }
2533
a7627565 2534 len = wrap_length32(env, len);
3e7e5e0b
DH
2535 if (len > 4096) {
2536 cc = 3;
2537 len = 4096;
2538 }
2539
2540 /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
2541 if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
2542 (env->psw.mask & PSW_MASK_PSTATE)) {
2543 qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
2544 __func__);
8d2f850a 2545 s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
3e7e5e0b
DH
2546 }
2547
b6c636f2
DH
2548 /* FIXME: Access using correct keys and AR-mode */
2549 if (len) {
2550 S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
2551 mmu_idx_from_as(src_as), ra);
2552 S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
2553 mmu_idx_from_as(dest_as), ra);
2554
2555 access_memmove(env, &desta, &srca, ra);
2556 }
3e7e5e0b
DH
2557
2558 return cc;
2559}
941ef3db
RH
2560
2561/* Decode a Unicode character. A return value < 0 indicates success, storing
2562 the UTF-32 result into OCHAR and the input length into OLEN. A return
2563 value >= 0 indicates failure, and the CC value to be returned. */
2564typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
2565 uint64_t ilen, bool enh_check, uintptr_t ra,
2566 uint32_t *ochar, uint32_t *olen);
2567
2568/* Encode a Unicode character. A return value < 0 indicates success, storing
2569 the bytes into ADDR and the output length into OLEN. A return value >= 0
2570 indicates failure, and the CC value to be returned. */
2571typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
2572 uint64_t ilen, uintptr_t ra, uint32_t c,
2573 uint32_t *olen);
2574
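/* For reference, the UTF-8 byte patterns handled below:
 *   1 byte : 0xxxxxxx                             U+0000..U+007F
 *   2 bytes: 110xxxxx 10xxxxxx                    U+0080..U+07FF
 *   3 bytes: 1110xxxx 10xxxxxx 10xxxxxx           U+0800..U+FFFF
 *   4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx  U+10000..U+10FFFF
 * The enhanced check also rejects over-long encodings and the UTF-16
 * surrogate range U+D800..U+DFFF. */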
2575static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2576 bool enh_check, uintptr_t ra,
2577 uint32_t *ochar, uint32_t *olen)
2578{
2579 uint8_t s0, s1, s2, s3;
2580 uint32_t c, l;
2581
2582 if (ilen < 1) {
2583 return 0;
2584 }
2585 s0 = cpu_ldub_data_ra(env, addr, ra);
2586 if (s0 <= 0x7f) {
2587 /* one byte character */
2588 l = 1;
2589 c = s0;
2590 } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
2591 /* invalid character */
2592 return 2;
2593 } else if (s0 <= 0xdf) {
2594 /* two byte character */
2595 l = 2;
2596 if (ilen < 2) {
2597 return 0;
2598 }
2599 s1 = cpu_ldub_data_ra(env, addr + 1, ra);
2600 c = s0 & 0x1f;
2601 c = (c << 6) | (s1 & 0x3f);
2602 if (enh_check && (s1 & 0xc0) != 0x80) {
2603 return 2;
2604 }
2605 } else if (s0 <= 0xef) {
2606 /* three byte character */
2607 l = 3;
2608 if (ilen < 3) {
2609 return 0;
2610 }
2611 s1 = cpu_ldub_data_ra(env, addr + 1, ra);
2612 s2 = cpu_ldub_data_ra(env, addr + 2, ra);
2613 c = s0 & 0x0f;
2614 c = (c << 6) | (s1 & 0x3f);
2615 c = (c << 6) | (s2 & 0x3f);
2616 /* Fold the byte-by-byte range descriptions in the PoO into
2617 tests against the complete value. It disallows encodings
2618 that could be smaller, and the UTF-16 surrogates. */
2619 if (enh_check
2620 && ((s1 & 0xc0) != 0x80
2621 || (s2 & 0xc0) != 0x80
 2622 || c < 0x800
2623 || (c >= 0xd800 && c <= 0xdfff))) {
2624 return 2;
2625 }
2626 } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
2627 /* four byte character */
2628 l = 4;
2629 if (ilen < 4) {
2630 return 0;
2631 }
2632 s1 = cpu_ldub_data_ra(env, addr + 1, ra);
2633 s2 = cpu_ldub_data_ra(env, addr + 2, ra);
2634 s3 = cpu_ldub_data_ra(env, addr + 3, ra);
2635 c = s0 & 0x07;
2636 c = (c << 6) | (s1 & 0x3f);
2637 c = (c << 6) | (s2 & 0x3f);
2638 c = (c << 6) | (s3 & 0x3f);
2639 /* See above. */
2640 if (enh_check
2641 && ((s1 & 0xc0) != 0x80
2642 || (s2 & 0xc0) != 0x80
2643 || (s3 & 0xc0) != 0x80
2644 || c < 0x010000
2645 || c > 0x10ffff)) {
2646 return 2;
2647 }
2648 } else {
2649 /* invalid character */
2650 return 2;
2651 }
2652
2653 *ochar = c;
2654 *olen = l;
2655 return -1;
2656}
2657
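/* UTF-16 surrogate pairs encode c = 0x10000 + ((hi & 0x3ff) << 10)
   + (lo & 0x3ff).  The "+ 1" applied to the four bits extracted from the
   high surrogate below folds the 0x10000 offset back in; e.g. the pair
   0xd801/0xdc37 decodes to U+10437. */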
2658static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2659 bool enh_check, uintptr_t ra,
2660 uint32_t *ochar, uint32_t *olen)
2661{
2662 uint16_t s0, s1;
2663 uint32_t c, l;
2664
2665 if (ilen < 2) {
2666 return 0;
2667 }
2668 s0 = cpu_lduw_data_ra(env, addr, ra);
2669 if ((s0 & 0xfc00) != 0xd800) {
2670 /* one word character */
2671 l = 2;
2672 c = s0;
2673 } else {
2674 /* two word character */
2675 l = 4;
2676 if (ilen < 4) {
2677 return 0;
2678 }
2679 s1 = cpu_lduw_data_ra(env, addr + 2, ra);
2680 c = extract32(s0, 6, 4) + 1;
2681 c = (c << 6) | (s0 & 0x3f);
2682 c = (c << 10) | (s1 & 0x3ff);
2683 if (enh_check && (s1 & 0xfc00) != 0xdc00) {
2684 /* invalid surrogate character */
2685 return 2;
2686 }
2687 }
2688
2689 *ochar = c;
2690 *olen = l;
2691 return -1;
2692}
2693
2694static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2695 bool enh_check, uintptr_t ra,
2696 uint32_t *ochar, uint32_t *olen)
2697{
2698 uint32_t c;
2699
2700 if (ilen < 4) {
2701 return 0;
2702 }
2703 c = cpu_ldl_data_ra(env, addr, ra);
2704 if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
2705 /* invalid unicode character */
2706 return 2;
2707 }
2708
2709 *ochar = c;
2710 *olen = 4;
2711 return -1;
2712}
2713
2714static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2715 uintptr_t ra, uint32_t c, uint32_t *olen)
2716{
2717 uint8_t d[4];
2718 uint32_t l, i;
2719
2720 if (c <= 0x7f) {
2721 /* one byte character */
2722 l = 1;
2723 d[0] = c;
2724 } else if (c <= 0x7ff) {
2725 /* two byte character */
2726 l = 2;
2727 d[1] = 0x80 | extract32(c, 0, 6);
2728 d[0] = 0xc0 | extract32(c, 6, 5);
2729 } else if (c <= 0xffff) {
2730 /* three byte character */
2731 l = 3;
2732 d[2] = 0x80 | extract32(c, 0, 6);
2733 d[1] = 0x80 | extract32(c, 6, 6);
2734 d[0] = 0xe0 | extract32(c, 12, 4);
2735 } else {
2736 /* four byte character */
2737 l = 4;
2738 d[3] = 0x80 | extract32(c, 0, 6);
2739 d[2] = 0x80 | extract32(c, 6, 6);
2740 d[1] = 0x80 | extract32(c, 12, 6);
2741 d[0] = 0xf0 | extract32(c, 18, 3);
2742 }
2743
2744 if (ilen < l) {
2745 return 1;
2746 }
2747 for (i = 0; i < l; ++i) {
2748 cpu_stb_data_ra(env, addr + i, d[i], ra);
2749 }
2750
2751 *olen = l;
2752 return -1;
2753}
2754
2755static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2756 uintptr_t ra, uint32_t c, uint32_t *olen)
2757{
2758 uint16_t d0, d1;
2759
2760 if (c <= 0xffff) {
2761 /* one word character */
2762 if (ilen < 2) {
2763 return 1;
2764 }
2765 cpu_stw_data_ra(env, addr, c, ra);
2766 *olen = 2;
2767 } else {
2768 /* two word character */
2769 if (ilen < 4) {
2770 return 1;
2771 }
2772 d1 = 0xdc00 | extract32(c, 0, 10);
2773 d0 = 0xd800 | extract32(c, 10, 6);
2774 d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
2775 cpu_stw_data_ra(env, addr + 0, d0, ra);
2776 cpu_stw_data_ra(env, addr + 2, d1, ra);
2777 *olen = 4;
2778 }
2779
2780 return -1;
2781}
2782
2783static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
2784 uintptr_t ra, uint32_t c, uint32_t *olen)
2785{
2786 if (ilen < 4) {
2787 return 1;
2788 }
2789 cpu_stl_data_ra(env, addr, c, ra);
2790 *olen = 4;
2791 return -1;
2792}
2793
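/* Common driver for the CUxx helpers.  Resulting cc: 0 - source operand
   exhausted, 1 - destination operand too short, 2 - invalid character in
   the source, 3 - the 256-character cap below was hit (the instruction is
   interruptible and is re-executed by the guest to continue). */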
2794static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
2795 uint32_t r2, uint32_t m3, uintptr_t ra,
2796 decode_unicode_fn decode,
2797 encode_unicode_fn encode)
2798{
2799 uint64_t dst = get_address(env, r1);
2800 uint64_t dlen = get_length(env, r1 + 1);
2801 uint64_t src = get_address(env, r2);
2802 uint64_t slen = get_length(env, r2 + 1);
2803 bool enh_check = m3 & 1;
2804 int cc, i;
2805
2806 /* Lest we fail to service interrupts in a timely manner, limit the
2807 amount of work we're willing to do. For now, let's cap at 256. */
2808 for (i = 0; i < 256; ++i) {
2809 uint32_t c, ilen, olen;
2810
2811 cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
2812 if (unlikely(cc >= 0)) {
2813 break;
2814 }
2815 cc = encode(env, dst, dlen, ra, c, &olen);
2816 if (unlikely(cc >= 0)) {
2817 break;
2818 }
2819
2820 src += ilen;
2821 slen -= ilen;
2822 dst += olen;
2823 dlen -= olen;
2824 cc = 3;
2825 }
2826
2827 set_address(env, r1, dst);
2828 set_length(env, r1 + 1, dlen);
2829 set_address(env, r2, src);
2830 set_length(env, r2 + 1, slen);
2831
2832 return cc;
2833}
2834
2835uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2836{
2837 return convert_unicode(env, r1, r2, m3, GETPC(),
2838 decode_utf8, encode_utf16);
2839}
2840
2841uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2842{
2843 return convert_unicode(env, r1, r2, m3, GETPC(),
2844 decode_utf8, encode_utf32);
2845}
2846
2847uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2848{
2849 return convert_unicode(env, r1, r2, m3, GETPC(),
2850 decode_utf16, encode_utf8);
2851}
2852
2853uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2854{
2855 return convert_unicode(env, r1, r2, m3, GETPC(),
2856 decode_utf16, encode_utf32);
2857}
2858
2859uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2860{
2861 return convert_unicode(env, r1, r2, m3, GETPC(),
2862 decode_utf32, encode_utf8);
2863}
2864
2865uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
2866{
2867 return convert_unicode(env, r1, r2, m3, GETPC(),
2868 decode_utf32, encode_utf16);
2869}
c5a7392c
DH
2870
2871void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
2872 uintptr_t ra)
2873{
c5a7392c
DH
2874 /* test the actual access, not just any access to the page due to LAP */
2875 while (len) {
46750128 2876 const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
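        /* -(addr | TARGET_PAGE_MASK) == TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK),
           i.e. the number of bytes from addr to the end of its page. */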
c5a7392c
DH
2877 const uint64_t curlen = MIN(pagelen, len);
2878
2879 probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
2880 addr = wrap_address(env, addr + curlen);
2881 len -= curlen;
2882 }
c5a7392c
DH
2883}
2884
2885void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
2886{
2887 probe_write_access(env, addr, len, GETPC());
2888}