/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
-void __trace_hcall_exit(long opcode, unsigned long retval,
- unsigned long *retbuf);
+void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
/* OPAL tracing */
#ifdef HAVE_JUMP_LABEL
extern struct static_key opal_tracepoint_key;
void emulation_assist_interrupt(struct pt_regs *regs);
/* signals, syscalls and interrupts */
-#ifdef CONFIG_PPC64
-int sys_swapcontext(struct ucontext __user *old_ctx,
- struct ucontext __user *new_ctx,
- long ctx_size, long r6, long r7, long r8, struct pt_regs *regs);
-#else
long sys_swapcontext(struct ucontext __user *old_ctx,
struct ucontext __user *new_ctx,
- int ctx_size, int r6, int r7, int r8, struct pt_regs *regs);
-int sys_debug_setcontext(struct ucontext __user *ctx,
- int ndbg, struct sig_dbg_op __user *dbg,
- int r6, int r7, int r8,
- struct pt_regs *regs);
+ long ctx_size);
+#ifdef CONFIG_PPC32
+long sys_debug_setcontext(struct ucontext __user *ctx,
+ int ndbg, struct sig_dbg_op __user *dbg);
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
+ /* Transactional memory related */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);
+
+ struct kvm_vcpu;
+ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
-#include <asm/compat.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
OFFSET(PACAKMSR, paca_struct, kernel_msr);
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
+ OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
- OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
+ OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
#ifdef CONFIG_ALTIVEC
OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
#endif
- OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
- OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
- OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+ OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+ OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+ OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
- OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+ OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
#else /* CONFIG_PPC_BOOK3S */
OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
- OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
- OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
- OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
- OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+ OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+ OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+ OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+ OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
npte = 1ul << (order - 4);
/* Allocate reverse map array */
- rev = vmalloc(sizeof(struct revmap_entry) * npte);
+ rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
if (!rev) {
if (cma)
kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
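The array_size() helper (from <linux/overflow.h>) saturates to SIZE_MAX when the multiplication overflows, so vmalloc() fails cleanly instead of returning an undersized buffer. A minimal sketch of the pattern, with alloc_revmap() as a hypothetical wrapper:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* hypothetical wrapper illustrating the overflow-safe allocation */
static void *alloc_revmap(unsigned long npte, size_t entry_size)
{
	/* array_size() returns npte * entry_size, or SIZE_MAX on overflow */
	return vmalloc(array_size(npte, entry_size));
}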
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
+ if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
+ return -EINVAL;
+
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
host_lpid = mfspr(SPRN_LPID);
rsvd_lpid = LPID_RSVD;
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+ /*
+ * RWMR values for POWER8. These control the rate at which PURR
+ * and SPURR count and should be set according to the number of
+ * online threads in the vcore being run.
+ */
+ #define RWMR_RPA_P8_1THREAD 0x164520C62609AECA
+ #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9
+ #define RWMR_RPA_P8_3THREAD 0x164520C62609AECA
+ #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9
+ #define RWMR_RPA_P8_5THREAD 0x164520C62609AECA
+ #define RWMR_RPA_P8_6THREAD 0x164520C62609AECA
+ #define RWMR_RPA_P8_7THREAD 0x164520C62609AECA
+ #define RWMR_RPA_P8_8THREAD 0x164520C62609AECA
+
+ static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
+ RWMR_RPA_P8_1THREAD,
+ RWMR_RPA_P8_1THREAD,
+ RWMR_RPA_P8_2THREAD,
+ RWMR_RPA_P8_3THREAD,
+ RWMR_RPA_P8_4THREAD,
+ RWMR_RPA_P8_5THREAD,
+ RWMR_RPA_P8_6THREAD,
+ RWMR_RPA_P8_7THREAD,
+ RWMR_RPA_P8_8THREAD,
+ };
+
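The table is indexed directly by the vcore's online thread count; index 0 reuses the 1-thread value so a count of zero is still safe. A hypothetical helper (not part of the patch) showing the intended lookup, mirroring the selection logic added to kvmppc_run_core below:

static unsigned long p8_rwmr_for(int n_online)
{
	/* out-of-range counts keep the conservative 8-thread default */
	if (n_online < 0 || n_online > MAX_SMT_THREADS)
		return RWMR_RPA_P8_8THREAD;
	return p8_rwmr_values[n_online];
}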
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
int *ip)
{
pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
- vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+ vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
for (r = 0; r < 16; ++r)
pr_err("r%2d = %.16lx r%d = %.16lx\n",
r, kvmppc_get_gpr(vcpu, r),
r+16, kvmppc_get_gpr(vcpu, r+16));
pr_err("ctr = %.16lx lr = %.16lx\n",
- vcpu->arch.ctr, vcpu->arch.lr);
+ vcpu->arch.regs.ctr, vcpu->arch.regs.link);
pr_err("srr0 = %.16llx srr1 = %.16llx\n",
vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
- vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+ vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
pr_err("fault dar = %.16lx dsisr = %.8x\n",
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
*val = get_reg_val(id, vcpu->arch.dec_expires +
vcpu->arch.vcore->tb_offset);
break;
+ case KVM_REG_PPC_ONLINE:
+ *val = get_reg_val(id, vcpu->arch.online);
+ break;
default:
r = -EINVAL;
break;
vcpu->arch.dec_expires = set_reg_val(id, *val) -
vcpu->arch.vcore->tb_offset;
break;
+ case KVM_REG_PPC_ONLINE:
+ i = set_reg_val(id, *val);
+ if (i && !vcpu->arch.online)
+ atomic_inc(&vcpu->arch.vcore->online_count);
+ else if (!i && vcpu->arch.online)
+ atomic_dec(&vcpu->arch.vcore->online_count);
+ vcpu->arch.online = i;
+ break;
default:
r = -EINVAL;
break;
}
}
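For the new KVM_REG_PPC_ONLINE register handled above, userspace drives the online count through the one-reg API. A minimal sketch, assuming a kernel with this patch and an already-created vcpu_fd; error handling omitted:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int mark_vcpu_online(int vcpu_fd, uint32_t online)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ONLINE,
		.addr = (uintptr_t)&online,
	};

	/* returns 0 on success, -1 with errno set otherwise */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}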
+ /*
+ * On POWER8, set RWMR register.
+ * Since it only affects PURR and SPURR, it doesn't affect
+ * the host, so we don't save/restore the host value.
+ */
+ if (is_power8) {
+ unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
+ int n_online = atomic_read(&vc->online_count);
+
+ /*
+ * Use the 8-thread value if we're doing split-core
+ * or if the vcore's online count looks bogus.
+ */
+ if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
+ n_online >= 1 && n_online <= MAX_SMT_THREADS)
+ rwmr_val = p8_rwmr_values[n_online];
+ mtspr(SPRN_RWMR, rwmr_val);
+ }
+
/* Start all the threads */
active = 0;
for (sub = 0; sub < core_info.n_subcores; ++sub) {
for (sub = 0; sub < core_info.n_subcores; ++sub)
spin_unlock(&core_info.vc[sub]->lock);
+ if (kvm_is_radix(vc->kvm)) {
+ int tmp = pcpu;
+
+ /*
+ * Do we need to flush the process scoped TLB for the LPAR?
+ *
+ * On POWER9, individual threads can come in here, but the
+ * TLB is shared between the 4 threads in a core, hence
+ * invalidating on one thread invalidates for all.
+ * Thus we make all 4 threads use the same bit here.
+ *
+ * Hash must be flushed in realmode in order to use tlbiel.
+ */
+ mtspr(SPRN_LPID, vc->kvm->arch.lpid);
+ isync();
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ tmp &= ~0x3UL;
+
+ if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
+ radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
+ }
+ }
+
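Since the four threads of a POWER9 core share the TLB, the flush bookkeeping rounds the cpu number down to the first thread of the core. A standalone illustration of the masking done above (tlb_flush_bit is a hypothetical name):

/* illustration only: threads 4,5,6,7 all collapse onto bit 4 */
static inline int tlb_flush_bit(int pcpu, int is_power9)
{
	return is_power9 ? (pcpu & ~0x3) : pcpu;
}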
/*
* Interrupts will be enabled once we get into the guest,
* so tell lockdep that we're about to enable interrupts.
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+ this_cpu_disable_ftrace();
+
trap = __kvmppc_vcore_entry();
+ this_cpu_enable_ftrace();
+
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
trace_hardirqs_off();
}
#endif
+ /*
+ * Force online to 1 for the sake of old userspace which doesn't
+ * set it.
+ */
+ if (!vcpu->arch.online) {
+ atomic_inc(&vcpu->arch.vcore->online_count);
+ vcpu->arch.online = 1;
+ }
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
unsigned long npages)
{
- slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+ slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
if (!slot->arch.rmap)
return -ENOMEM;
*/
snprintf(buf, sizeof(buf), "vm%d", current->pid);
kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
- if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
- kvmppc_mmu_debugfs_init(kvm);
+ kvmppc_mmu_debugfs_init(kvm);
return 0;
}
extsw reg, reg; \
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
-
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
ld r2,PACATOC(r13)
+ li r0,0
+ stb r0,PACA_FTRACE_ENABLED(r13)
+
li r0,KVM_HWTHREAD_IN_KVM
stb r0,HSTATE_HWTHREAD_STATE(r13)
/* Primary thread switches to guest partition. */
cmpwi r6,0
bne 10f
+
+ /* Radix has already switched LPID and flushed core TLB */
+ bne cr7, 22f
+
lwz r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
mtspr SPRN_LPID,r7
isync
- /* See if we need to flush the TLB */
+ /* See if we need to flush the TLB. Hash has to be done in RM */
lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
/*
li r7,0x800 /* IS field = 0b10 */
ptesync
li r0,0 /* RS for P9 version of tlbiel */
- bne cr7, 29f
28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
addi r7,r7,0x1000
bdnz 28b
- b 30f
- 29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */
- addi r7,r7,0x1000
- bdnz 29b
- 30: ptesync
+ ptesync
23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
andc r7,r7,r8
stdcx. r7,0,r6
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/
- bl kvmppc_restore_tm
+ mr r3, r4
+ ld r4, VCPU_MSR(r3)
+ bl kvmppc_restore_tm_hv
+ ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/
- bl kvmppc_save_tm
+ mr r3, r9
+ ld r4, VCPU_MSR(r3)
+ bl kvmppc_save_tm_hv
+ ld r9, HSTATE_KVM_VCPU(r13)
91:
#endif
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/
- ld r9, HSTATE_KVM_VCPU(r13)
- bl kvmppc_save_tm
+ ld r3, HSTATE_KVM_VCPU(r13)
+ ld r4, VCPU_MSR(r3)
+ bl kvmppc_save_tm_hv
91:
#endif
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
*/
- bl kvmppc_restore_tm
+ mr r3, r4
+ ld r4, VCPU_MSR(r3)
+ bl kvmppc_restore_tm_hv
+ ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Save transactional state and TM-related registers.
- * Called with r9 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct and r4 containing
+ * the guest MSR value.
* This can modify all checkpointed registers, but
- * restores r1, r2 and r9 (vcpu pointer) before exit.
+ * restores r1 and r2 before exit.
*/
- kvmppc_save_tm:
+ kvmppc_save_tm_hv:
+ /* See if we need to handle fake suspend mode */
+ BEGIN_FTR_SECTION
+ b __kvmppc_save_tm
+ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+
+ lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
+ cmpwi r0, 0
+ beq __kvmppc_save_tm
+
+ /* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -PPC_MIN_STKFRM(r1)
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r8
- ld r5, VCPU_MSR(r9)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq 1f /* TM not active in guest. */
-
- std r1, HSTATE_HOST_R1(r13)
- li r3, TM_CAUSE_KVM_RESCHED
-
- BEGIN_FTR_SECTION
- lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
- cmpwi r0, 0
- beq 3f
rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
beq 4f
- BEGIN_FTR_SECTION_NESTED(96)
+ BEGIN_FTR_SECTION
bl pnv_power9_force_smt4_catch
- END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
- b 6f
- 3:
- /* Emulation of the treclaim instruction needs TEXASR before treclaim */
- mfspr r6, SPRN_TEXASR
- std r6, VCPU_ORIG_TEXASR(r9)
- 6:
- END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Clear the MSR RI since r1, r13 may be foobar. */
li r5, 0
mtmsrd r5, 1
- /* All GPRs are volatile at this point. */
+ /* We have to treclaim here because that's the only way to do S->N */
+ li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
- /* Temporarily store r13 and r9 so we have some regs to play with */
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- std r9, PACATMSCRATCH(r13)
-
- /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
- BEGIN_FTR_SECTION
- lbz r9, HSTATE_FAKE_SUSPEND(r13)
- cmpwi r9, 0
- beq 2f
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
* we already have it), therefore we can now use any volatile GPR.
*/
- /* Reload stack pointer and TOC. */
+ /* Reload PACA pointer, stack pointer and TOC. */
+ GET_PACA(r13)
ld r1, HSTATE_HOST_R1(r13)
ld r2, PACATOC(r13)
+
/* Set MSR RI now we have r1 and r13 back. */
li r5, MSR_RI
mtmsrd r5, 1
+
HMT_MEDIUM
ld r6, HSTATE_DSCR(r13)
mtspr SPRN_DSCR, r6
li r0, PSSCR_FAKE_SUSPEND
andc r3, r3, r0
mtspr SPRN_PSSCR, r3
- ld r9, HSTATE_KVM_VCPU(r13)
- /* Don't save TEXASR, use value from last exit in real suspend state */
- b 11f
- 2:
- END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+ /* Don't save TEXASR, use value from last exit in real suspend state */
ld r9, HSTATE_KVM_VCPU(r13)
-
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
- HMT_MEDIUM
- mfspr r30, SPRN_DSCR
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
- .rept 29
- .if (reg != 9) && (reg != 13)
- std reg, VCPU_GPRS_TM(reg)(r9)
- .endif
- reg = reg + 1
- .endr
- /* ... now save r13 */
- GET_SCRATCH0(r4)
- std r4, VCPU_GPRS_TM(13)(r9)
- /* ... and save r9 */
- ld r4, PACATMSCRATCH(r13)
- std r4, VCPU_GPRS_TM(9)(r9)
-
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
- mflr r5
- mfcr r6
- mfctr r7
- mfspr r8, SPRN_AMR
- mfspr r10, SPRN_TAR
- mfxer r11
- std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
- std r7, VCPU_CTR_TM(r9)
- std r8, VCPU_AMR_TM(r9)
- std r10, VCPU_TAR_TM(r9)
- std r11, VCPU_XER_TM(r9)
-
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
- /* Save FP/VSX. */
- addi r3, r9, VCPU_FPRS_TM
- bl store_fp_state
- addi r3, r9, VCPU_VRS_TM
- bl store_vr_state
- mfspr r6, SPRN_VRSAVE
- stw r6, VCPU_VRSAVE_TM(r9)
- 1:
- /*
- * We need to save these SPRs after the treclaim so that the software
- * error code is recorded correctly in the TEXASR. Also the user may
- * change these outside of a transaction, so they must always be
- * context switched.
- */
- mfspr r7, SPRN_TEXASR
- std r7, VCPU_TEXASR(r9)
- 11:
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
std r5, VCPU_TFHAR(r9)
/*
* Restore transactional state and TM-related registers.
- * Called with r4 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct
+ * and r4 containing the guest MSR value.
* This potentially modifies all checkpointed registers.
- * It restores r1, r2, r4 from the PACA.
+ * It restores r1 and r2 from the PACA.
*/
- kvmppc_restore_tm:
+ kvmppc_restore_tm_hv:
+ /*
+ * If we are doing TM emulation for the guest on a POWER9 DD2,
+ * then we don't actually do a trechkpt -- we either set up
+ * fake-suspend mode, or emulate a TM rollback.
+ */
+ BEGIN_FTR_SECTION
+ b __kvmppc_restore_tm
+ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
mflr r0
std r0, PPC_LR_STKOFF(r1)
- /* Turn on TM/FP/VSX/VMX so we can restore them. */
+ li r0, 0
+ stb r0, HSTATE_FAKE_SUSPEND(r13)
+
+ /* Turn on TM so we can restore TM SPRs */
mfmsr r5
- li r6, MSR_TM >> 32
- sldi r6, r6, 32
- or r5, r5, r6
- ori r5, r5, MSR_FP
- oris r5, r5, (MSR_VEC | MSR_VSX)@h
+ li r0, 1
+ rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r5
/*
* The user may change these outside of a transaction, so they must
* always be context switched.
*/
- ld r5, VCPU_TFHAR(r4)
- ld r6, VCPU_TFIAR(r4)
- ld r7, VCPU_TEXASR(r4)
+ ld r5, VCPU_TFHAR(r3)
+ ld r6, VCPU_TFIAR(r3)
+ ld r7, VCPU_TEXASR(r3)
mtspr SPRN_TFHAR, r5
mtspr SPRN_TFIAR, r6
mtspr SPRN_TEXASR, r7
- li r0, 0
- stb r0, HSTATE_FAKE_SUSPEND(r13)
- ld r5, VCPU_MSR(r4)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
beqlr /* TM not active in guest */
- std r1, HSTATE_HOST_R1(r13)
- /* Make sure the failure summary is set, otherwise we'll program check
- * when we trechkpt. It's possible that this might have been not set
- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
- * host.
- */
+ /* Make sure the failure summary is set */
oris r7, r7, (TEXASR_FS)@h
mtspr SPRN_TEXASR, r7
- /*
- * If we are doing TM emulation for the guest on a POWER9 DD2,
- * then we don't actually do a trechkpt -- we either set up
- * fake-suspend mode, or emulate a TM rollback.
- */
- BEGIN_FTR_SECTION
- b .Ldo_tm_fake_load
- END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
-
- /*
- * We need to load up the checkpointed state for the guest.
- * We need to do this early as it will blow away any GPRs, VSRs and
- * some SPRs.
- */
-
- mr r31, r4
- addi r3, r31, VCPU_FPRS_TM
- bl load_fp_state
- addi r3, r31, VCPU_VRS_TM
- bl load_vr_state
- mr r4, r31
- lwz r7, VCPU_VRSAVE_TM(r4)
- mtspr SPRN_VRSAVE, r7
-
- ld r5, VCPU_LR_TM(r4)
- lwz r6, VCPU_CR_TM(r4)
- ld r7, VCPU_CTR_TM(r4)
- ld r8, VCPU_AMR_TM(r4)
- ld r9, VCPU_TAR_TM(r4)
- ld r10, VCPU_XER_TM(r4)
- mtlr r5
- mtcr r6
- mtctr r7
- mtspr SPRN_AMR, r8
- mtspr SPRN_TAR, r9
- mtxer r10
-
- /*
- * Load up PPR and DSCR values but don't put them in the actual SPRs
- * till the last moment to avoid running with userspace PPR and DSCR for
- * too long.
- */
- ld r29, VCPU_DSCR_TM(r4)
- ld r30, VCPU_PPR_TM(r4)
-
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* Load GPRs r0-r28 */
- reg = 0
- .rept 29
- ld reg, VCPU_GPRS_TM(reg)(r31)
- reg = reg + 1
- .endr
-
- mtspr SPRN_DSCR, r29
- mtspr SPRN_PPR, r30
-
- /* Load final GPRs */
- ld 29, VCPU_GPRS_TM(29)(r31)
- ld 30, VCPU_GPRS_TM(30)(r31)
- ld 31, VCPU_GPRS_TM(31)(r31)
-
- /* TM checkpointed state is now setup. All GPRs are now volatile. */
- TRECHKPT
-
- /* Now let's get back the state we need. */
- HMT_MEDIUM
- GET_PACA(r13)
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATMSCRATCH(r13)
-
- /* Set the MSR RI since we have our registers back. */
- li r5, MSR_RI
- mtmsrd r5, 1
- 9:
- ld r0, PPC_LR_STKOFF(r1)
- mtlr r0
- blr
-
- .Ldo_tm_fake_load:
cmpwi r5, 1 /* check for suspended state */
bgt 10f
stb r5, HSTATE_FAKE_SUSPEND(r13)
- b 9b /* and return */
+ b 9f /* and return */
10: stdu r1, -PPC_MIN_STKFRM(r1)
/* guest is in transactional state, so simulate rollback */
- mr r3, r4
bl kvmhv_emulate_tm_rollback
nop
- ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
addi r1, r1, PPC_MIN_STKFRM
- b 9b
- #endif
+ 9: ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
* We come here if we get any exception or interrupt while we are
bcl 20, 31, .+4
5: mflr r3
addi r3, r3, 9f - 5b
+ li r4, -1
+ rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
ld r4, PACAKMSR(r13)
mtspr SPRN_SRR0, r3
mtspr SPRN_SRR1, r4
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_CAP_PPC_HTM:
- r = hv_enabled &&
- (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
+ r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
+ (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
break;
#endif
default:
}
}
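With this change, PR guests on hardware with userspace HTM also report the capability, not just HV guests. Userspace probes it with KVM_CHECK_EXTENSION; a sketch assuming an open /dev/kvm fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int guest_htm_available(int kvm_fd)
{
	/* > 0 means the capability is present */
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM) > 0;
}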
+ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
+ u32 gpr)
+ {
+ union kvmppc_one_reg val;
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vsx32val[0] = gpr;
+ val.vsx32val[1] = gpr;
+ val.vsx32val[2] = gpr;
+ val.vsx32val[3] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ val.vsx32val[0] = gpr;
+ val.vsx32val[1] = gpr;
+ VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
+ VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+ }
+ }
+
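The word-dump case replicates one 32-bit value into every lane of the 128-bit image (or both doublewords of the FP/VSX pair). A standalone, host-side illustration of the splat, using a local stand-in for kvmppc_one_reg:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

union reg128 {			/* stand-in for the kvmppc_one_reg lanes */
	uint32_t w[4];
	uint64_t d[2];
};

int main(void)
{
	union reg128 v;
	uint32_t gpr = 0x11223344;

	for (int i = 0; i < 4; i++)
		v.w[i] = gpr;	/* vsx32val[0..3] = gpr */

	/* both doublewords now read 0x1122334411223344 */
	printf("%016" PRIx64 " %016" PRIx64 "\n", v.d[0], v.d[1]);
	return 0;
}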
static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
u32 gpr32)
{
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
+ static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
+ int index, int element_size)
+ {
+ int offset;
+ int elts = sizeof(vector128)/element_size;
+
+ if ((index < 0) || (index >= elts))
+ return -1;
+
+ if (kvmppc_need_byteswap(vcpu))
+ offset = elts - index - 1;
+ else
+ offset = index;
+
+ return offset;
+ }
+
+ static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
+ int index)
+ {
+ return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
+ }
+
+ static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
+ int index)
+ {
+ return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
+ }
+
+ static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
+ int index)
+ {
+ return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
+ }
+
+ static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
+ int index)
+ {
+ return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
+ }
+
+
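These helpers return the element index unchanged when guest and host byte orders match, and reflect it across the register otherwise. A standalone sketch of the arithmetic (need_byteswap stands in for kvmppc_need_byteswap()):

static int vmx_elem_offset(int index, int element_size, int need_byteswap)
{
	int elts = 16 / element_size;	/* sizeof(vector128) == 16 */

	if (index < 0 || index >= elts)
		return -1;
	/* e.g. word element 0 of a byteswapped guest lands at offset 3 */
	return need_byteswap ? elts - index - 1 : index;
}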
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
- u64 gpr)
+ u64 gpr)
{
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vmx_dword_offset(vcpu,
+ vcpu->arch.mmio_vmx_offset);
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- u32 hi, lo;
- u32 di;
- #ifdef __BIG_ENDIAN
- hi = gpr >> 32;
- lo = gpr & 0xffffffff;
- #else
- lo = gpr >> 32;
- hi = gpr & 0xffffffff;
- #endif
+ if (offset == -1)
+ return;
+
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsxval[offset] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ }
+
+ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
+ u32 gpr32)
+ {
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vmx_word_offset(vcpu,
+ vcpu->arch.mmio_vmx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
- if (di > 1)
+ if (offset == -1)
return;
- if (vcpu->arch.mmio_host_swabbed)
- di = 1 - di;
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsx32val[offset] = gpr32;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ }
+
+ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
+ u16 gpr16)
+ {
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vmx_hword_offset(vcpu,
+ vcpu->arch.mmio_vmx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (offset == -1)
+ return;
+
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsx16val[offset] = gpr16;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ }
+
+ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
+ u8 gpr8)
+ {
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vmx_byte_offset(vcpu,
+ vcpu->arch.mmio_vmx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (offset == -1)
+ return;
- VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
- VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsx8val[offset] = gpr8;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
break;
case KVM_MMIO_REG_FPR:
+ if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+ vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
+
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
break;
#ifdef CONFIG_PPC_BOOK3S
#endif
#ifdef CONFIG_VSX
case KVM_MMIO_REG_VSX:
- if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+ if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+ vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
+
+ if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
kvmppc_set_vsr_dword(vcpu, gpr);
- else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+ else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
kvmppc_set_vsr_word(vcpu, gpr);
- else if (vcpu->arch.mmio_vsx_copy_type ==
+ else if (vcpu->arch.mmio_copy_type ==
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
kvmppc_set_vsr_dword_dump(vcpu, gpr);
+ else if (vcpu->arch.mmio_copy_type ==
+ KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
+ kvmppc_set_vsr_word_dump(vcpu, gpr);
break;
#endif
#ifdef CONFIG_ALTIVEC
case KVM_MMIO_REG_VMX:
- kvmppc_set_vmx_dword(vcpu, gpr);
+ if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+ vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
+
+ if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
+ kvmppc_set_vmx_dword(vcpu, gpr);
+ else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
+ kvmppc_set_vmx_word(vcpu, gpr);
+ else if (vcpu->arch.mmio_copy_type ==
+ KVMPPC_VMX_COPY_HWORD)
+ kvmppc_set_vmx_hword(vcpu, gpr);
+ else if (vcpu->arch.mmio_copy_type ==
+ KVMPPC_VMX_COPY_BYTE)
+ kvmppc_set_vmx_byte(vcpu, gpr);
break;
#endif
default:
u32 dword_offset, word_offset;
union kvmppc_one_reg reg;
int vsx_offset = 0;
- int copy_type = vcpu->arch.mmio_vsx_copy_type;
+ int copy_type = vcpu->arch.mmio_copy_type;
int result = 0;
switch (copy_type) {
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
- /* handle quadword load access in two halves */
- int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rt, int is_default_endian)
+ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_default_endian)
{
enum emulation_result emulated = EMULATE_DONE;
+ if (vcpu->arch.mmio_vmx_copy_nums > 2)
+ return EMULATE_FAIL;
+
while (vcpu->arch.mmio_vmx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+ emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
is_default_endian, 0);
if (emulated != EMULATE_DONE)
vcpu->arch.paddr_accessed += run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
+ vcpu->arch.mmio_vmx_offset++;
}
return emulated;
}
- static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
- vector128 vrs = VCPU_VSX_VR(vcpu, rs);
- u32 di;
- u64 w0, w1;
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+ int result = 0;
- di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
- if (di > 1)
+ vmx_offset =
+ kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+ if (vmx_offset == -1)
return -1;
- if (vcpu->arch.mmio_host_swabbed)
- di = 1 - di;
+ reg.vval = VCPU_VSX_VR(vcpu, index);
+ *val = reg.vsxval[vmx_offset];
- w0 = vrs.u[di * 2];
- w1 = vrs.u[di * 2 + 1];
+ return result;
+ }
- #ifdef __BIG_ENDIAN
- *val = (w0 << 32) | w1;
- #else
- *val = (w1 << 32) | w0;
- #endif
- return 0;
+ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+ int result = 0;
+
+ vmx_offset =
+ kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+ if (vmx_offset == -1)
+ return -1;
+
+ reg.vval = VCPU_VSX_VR(vcpu, index);
+ *val = reg.vsx32val[vmx_offset];
+
+ return result;
+ }
+
+ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+ int result = 0;
+
+ vmx_offset =
+ kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+ if (vmx_offset == -1)
+ return -1;
+
+ reg.vval = VCPU_VSX_VR(vcpu, index);
+ *val = reg.vsx16val[vmx_offset];
+
+ return result;
}
- /* handle quadword store in two halves */
- int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rs, int is_default_endian)
+ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+ int result = 0;
+
+ vmx_offset =
+ kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+ if (vmx_offset == -1)
+ return -1;
+
+ reg.vval = VCPU_VSX_VR(vcpu, index);
+ *val = reg.vsx8val[vmx_offset];
+
+ return result;
+ }
+
+ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rs, unsigned int bytes, int is_default_endian)
{
u64 val = 0;
+ unsigned int index = rs & KVM_MMIO_REG_MASK;
enum emulation_result emulated = EMULATE_DONE;
+ if (vcpu->arch.mmio_vmx_copy_nums > 2)
+ return EMULATE_FAIL;
+
vcpu->arch.io_gpr = rs;
while (vcpu->arch.mmio_vmx_copy_nums) {
- if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+ switch (vcpu->arch.mmio_copy_type) {
+ case KVMPPC_VMX_COPY_DWORD:
+ if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
+ return EMULATE_FAIL;
+
+ break;
+ case KVMPPC_VMX_COPY_WORD:
+ if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
+ return EMULATE_FAIL;
+ break;
+ case KVMPPC_VMX_COPY_HWORD:
+ if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
+ return EMULATE_FAIL;
+ break;
+ case KVMPPC_VMX_COPY_BYTE:
+ if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
+ return EMULATE_FAIL;
+ break;
+ default:
return EMULATE_FAIL;
+ }
- emulated = kvmppc_handle_store(run, vcpu, val, 8,
+ emulated = kvmppc_handle_store(run, vcpu, val, bytes,
is_default_endian);
if (emulated != EMULATE_DONE)
break;
vcpu->arch.paddr_accessed += run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
+ vcpu->arch.mmio_vmx_offset++;
}
return emulated;
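A 16-byte stvx is expected to reach this loop as two 8-byte MMIO stores: the instruction emulator sets the copy type and counters, then the loop above pulls successive lanes via mmio_vmx_offset. A sketch of that setup (field assignments are illustrative, mirroring what the emulator does for the KVMPPC_VMX_COPY_DWORD case):

/* illustrative setup for a 16-byte store split into 2 x 8 bytes */
static void setup_vmx_dword_store(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmio_copy_type = KVMPPC_VMX_COPY_DWORD;
	vcpu->arch.mmio_vmx_copy_nums = 2;	/* two MMIO transactions */
	vcpu->arch.mmio_vmx_offset = 0;		/* start at lane 0 */
}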
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_load128_by2x64(run, vcpu,
- vcpu->arch.io_gpr, 1);
+ emulated = kvmppc_handle_vmx_load(run, vcpu,
+ vcpu->arch.io_gpr, run->mmio.len, 1);
} else {
- emulated = kvmppc_handle_store128_by2x64(run, vcpu,
- vcpu->arch.io_gpr, 1);
+ emulated = kvmppc_handle_vmx_store(run, vcpu,
+ vcpu->arch.io_gpr, run->mmio.len, 1);
}
switch (emulated) {
}
#endif
#ifdef CONFIG_ALTIVEC
- if (vcpu->arch.mmio_vmx_copy_nums > 0)
+ if (vcpu->arch.mmio_vmx_copy_nums > 0) {
vcpu->arch.mmio_vmx_copy_nums--;
+ vcpu->arch.mmio_vmx_offset++;
+ }
if (vcpu->arch.mmio_vmx_copy_nums > 0) {
r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
void __user *argp = (void __user *)arg;
long r;
- vcpu_load(vcpu);
-
switch (ioctl) {
case KVM_ENABLE_CAP:
{
struct kvm_enable_cap cap;
r = -EFAULT;
+ vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ vcpu_put(vcpu);
break;
}
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
+ vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+ vcpu_put(vcpu);
break;
}
#endif
}
out:
- vcpu_put(vcpu);
return r;
}
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}