/*
 * Hardware-defined SIE (Start Interpretive Execution) control block for
 * s390 KVM guests; field offsets are fixed by the architecture (see the
 * offset comments).
 *
 * NOTE(review): this span is a patch/diff excerpt, not plain source —
 * lines prefixed with '-'/'+' are removed/added hunk lines. The hunk
 * widens the guest prefix bitfield from 18 to 19 bits by absorbing the
 * adjacent 1-bit reserved field, keeping the word layout (1+19+12 = 32
 * bits) intact. The '#endif' below has no matching '#if' in this view,
 * and intermediate struct members appear to be elided.
 */
struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
__u32 : 1; /* 0x0004 */
- __u32 prefix : 18;
- __u32 : 1;
+ __u32 prefix : 19;
__u32 ibc : 12;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
#endif
}
/*
 * NOTE(review): diff hunk — the stored-prefix shift drops from 13 to 12,
 * matching the bitfield widening from 18 to 19 bits elsewhere in this
 * patch (19 bits << 12 still covers the same 31-bit prefix address
 * range). The new masks bound the 19-bit stored value per architecture
 * mode; presumably z/Arch reserves the lowest stored bit (0x7fffe) while
 * ESA/390 allows all 19 bits (0x7ffff) — TODO confirm against the
 * z/Architecture Principles of Operation (SET PREFIX semantics).
 */
-#define GUEST_PREFIX_SHIFT 13
+#define GUEST_PREFIX_SHIFT 12
+#define GUEST_PREFIX_MASK_ZARCH 0x7fffe
+#define GUEST_PREFIX_MASK_ESA 0x7ffff
/*
 * kvm_s390_get_prefix(): return the guest's prefix as a full address by
 * shifting the stored SIE-block bitfield left by GUEST_PREFIX_SHIFT.
 *
 * NOTE(review): this span is a garbled extraction — everything after the
 * return statement belongs to the separate kvm_s390_set_prefix():
 * the trace event, the store of (prefix >> GUEST_PREFIX_SHIFT), the '+'
 * hunk line masking the stored value with GUEST_PREFIX_MASK_ZARCH, and
 * the TLB-flush / guest-prefix-refresh requests. As written those lines
 * are unreachable and reference an undeclared 'prefix' variable; the two
 * functions must be re-split when this patch is applied.
 */
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
/* --- below: fused-in body of kvm_s390_set_prefix() --- */
VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
prefix);
vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
/* hunk: clamp the stored prefix to the architecturally valid bits */
+ vcpu->arch.sie_block->prefix &= GUEST_PREFIX_MASK_ZARCH;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}
/*
 * NOTE(review): interior fragment of the vsie shadow-SCB setup path
 * (presumably shadow_scb() in arch/s390/kvm/vsie.c — the enclosing
 * function's header and tail are outside this view). '-'/'+' prefixed
 * lines are diff hunk lines: the hunk drops 'const' from new_prefix so
 * the guest-supplied (scb_o) prefix can be sanitized with the
 * architecture-appropriate mask before it is used to shadow the prefix
 * pages.
 */
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
/* READ_ONCE does not work on bitfields - use a temporary variable */
const uint32_t __new_prefix = scb_o->prefix;
/* hunk: remove const so the masking below is possible */
- const uint32_t new_prefix = READ_ONCE(__new_prefix);
+ uint32_t new_prefix = READ_ONCE(__new_prefix);
const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0;
scb_s->icpua = scb_o->icpua;
/* hunk: bound the untrusted prefix per guest architecture mode */
+ if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_ZARCH))
+ new_prefix &= GUEST_PREFIX_MASK_ESA;
+ else
+ new_prefix &= GUEST_PREFIX_MASK_ZARCH;
+
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
/* if the hva of the prefix changes, we have to remap the prefix */