+From 48eaef0518a565d3852e301c860e1af6a6db5a84 Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@linux-m68k.org>
+Date: Mon, 30 Dec 2013 15:36:56 +0100
+Subject: KVM: PPC: Book3S HV: use xics_wake_cpu only when defined
+
+From: Andreas Schwab <schwab@linux-m68k.org>
+
+commit 48eaef0518a565d3852e301c860e1af6a6db5a84 upstream.
+
+Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
+Signed-off-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -82,10 +82,13 @@ void kvmppc_fast_vcpu_kick(struct kvm_vc
+
+ /* CPU points to the first thread of the core */
+ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
++#ifdef CONFIG_KVM_XICS
+ int real_cpu = cpu + vcpu->arch.ptid;
+ if (paca[real_cpu].kvm_hstate.xics_phys)
+ xics_wake_cpu(real_cpu);
+- else if (cpu_online(cpu))
++ else
++#endif
++ if (cpu_online(cpu))
+ smp_send_reschedule(cpu);
+ }
+ put_cpu();
+@@ -1090,7 +1093,9 @@ static void kvmppc_start_thread(struct k
+ smp_wmb();
+ #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+ if (vcpu->arch.ptid) {
++#ifdef CONFIG_KVM_XICS
+ xics_wake_cpu(cpu);
++#endif
+ ++vc->n_woken;
+ }
+ #endif
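
The else/#endif/if arrangement above keeps the two wake-up paths in one
chain when CONFIG_KVM_XICS is set and degrades to a plain cpu_online()
check when it is not. A minimal, self-contained userspace sketch of that
pattern (illustrative only, not kernel code; XICS_SUPPORT is a stand-in
for CONFIG_KVM_XICS):

/*
 * With XICS_SUPPORT defined, the two branches form a single
 * if/else-if chain; with it undefined, only the "if (online)"
 * branch is compiled in.
 */
#include <stdio.h>

#define XICS_SUPPORT 1	/* comment out to mimic CONFIG_KVM_XICS=n */

static void kick(int has_xics_phys, int online)
{
#ifdef XICS_SUPPORT
	if (has_xics_phys)
		printf("wake via XICS IPI\n");
	else
#endif
	if (online)
		printf("wake via smp_send_reschedule()\n");
}

int main(void)
{
	kick(1, 1);	/* XICS path, when compiled in */
	kick(0, 1);	/* falls back to the reschedule IPI */
	return 0;
}
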
+From 70713fe315ed14cd1bb07d1a7f33e973d136ae3d Mon Sep 17 00:00:00 2001
+From: Mihai Caraman <mihai.caraman@freescale.com>
+Date: Thu, 9 Jan 2014 17:01:05 +0200
+Subject: KVM: PPC: e500: Fix bad address type in deliver_tlb_miss()
+
+From: Mihai Caraman <mihai.caraman@freescale.com>
+
+commit 70713fe315ed14cd1bb07d1a7f33e973d136ae3d upstream.
+
+Use gva_t instead of unsigned int for eaddr in deliver_tlb_miss().
+
+Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
+Signed-off-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/e500_mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/e500_mmu.c
++++ b/arch/powerpc/kvm/e500_mmu.c
+@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct
+ }
+
+ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
+- unsigned int eaddr, int as)
++ gva_t eaddr, int as)
+ {
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ unsigned int victim, tsized;
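
The type change matters because a guest effective address can exceed 32
bits on 64-bit configurations, and an unsigned int parameter silently
truncates it. A self-contained userspace sketch of that truncation
(illustrative only, not kernel code; gva_t is modelled here as a 64-bit
type, which is an assumption about the target):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;		/* stand-in for the kernel's gva_t */

static void miss_narrow(unsigned int eaddr)
{
	/* the upper 32 bits of the address are silently dropped */
	printf("unsigned int eaddr: %#llx\n", (unsigned long long)eaddr);
}

static void miss_wide(gva_t eaddr)
{
	/* the full effective address is preserved */
	printf("gva_t eaddr:        %#llx\n", (unsigned long long)eaddr);
}

int main(void)
{
	gva_t ea = 0x00000001deadbeefULL;

	miss_narrow(ea);	/* prints 0xdeadbeef  */
	miss_wide(ea);		/* prints 0x1deadbeef */
	return 0;
}
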
+From 57737c49dd72c96cfbcd4f66559f3ffc399aeb4f Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 31 Jan 2014 21:33:17 +0100
+Subject: parisc: fix cache-flushing
+
+From: Helge Deller <deller@gmx.de>
+
+commit 57737c49dd72c96cfbcd4f66559f3ffc399aeb4f upstream.
+
+Commit f8dae00684d678afa13041ef170cecfd1297ed40 ("parisc: Ensure full
+cache coherency for kmap/kunmap") caused negative caching side effects,
+e.g. hanging processes using expect and too many inequivalent alias
+messages from flush_dcache_page() on Debian 5 systems.
+
+This patch partly reverts it and has been in production use on our
+Debian buildd make servers for a week without any major problems.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/cacheflush.h | 2 --
+ arch/parisc/include/asm/page.h | 3 ++-
+ arch/parisc/kernel/cache.c | 14 ++++++++++++++
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -132,7 +132,6 @@ void mark_rodata_ro(void);
+ static inline void *kmap(struct page *page)
+ {
+ might_sleep();
+- flush_dcache_page(page);
+ return page_address(page);
+ }
+
+@@ -144,7 +143,6 @@ static inline void kunmap(struct page *p
+ static inline void *kmap_atomic(struct page *page)
+ {
+ pagefault_disable();
+- flush_dcache_page(page);
+ return page_address(page);
+ }
+
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -29,7 +29,8 @@ struct page;
+ void clear_page_asm(void *page);
+ void copy_page_asm(void *to, void *from);
+ #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+-#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
++void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
++ struct page *pg);
+
+ /* #define CONFIG_PARISC_TMPALIAS */
+
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -388,6 +388,20 @@ void flush_kernel_dcache_page_addr(void
+ }
+ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
++void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
++ struct page *pg)
++{
++ /* Copy using kernel mapping. No coherency is needed (all in
++ kunmap) for the `to' page. However, the `from' page needs to
++ be flushed through a mapping equivalent to the user mapping
++ before it can be accessed through the kernel mapping. */
++ preempt_disable();
++ flush_dcache_page_asm(__pa(vfrom), vaddr);
++ preempt_enable();
++ copy_page_asm(vto, vfrom);
++}
++EXPORT_SYMBOL(copy_user_page);
++
+ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ {
+ unsigned long flags;
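
The comment in the new copy_user_page() carries the rule being
reinstated here: the destination page gets its coherency handling at
kunmap time, while the source page must be flushed through a mapping
equivalent to the user mapping before it is read through the kernel
mapping. A userspace stand-in sketching that shape (illustrative only,
not kernel or parisc code; flush_dcache_alias() is a hypothetical stub
for flush_dcache_page_asm(__pa(vfrom), vaddr)):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* hypothetical stub: flush the source through a user-equivalent alias */
static void flush_dcache_alias(const void *vfrom, unsigned long vaddr)
{
	printf("flush source page through user alias 0x%lx\n", vaddr);
	(void)vfrom;
}

static void copy_user_page_sketch(void *vto, const void *vfrom,
				  unsigned long vaddr)
{
	/* destination coherency is deferred; only the source needs a
	 * flush before it is read through the kernel mapping */
	flush_dcache_alias(vfrom, vaddr);
	memcpy(vto, vfrom, PAGE_SIZE);
}

int main(void)
{
	static char from[PAGE_SIZE] = "source page", to[PAGE_SIZE];

	copy_user_page_sketch(to, from, 0x10000000UL);
	printf("copied: %s\n", to);
	return 0;
}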