--- /dev/null
+From 12f267a20aecf8b84a2a9069b9011f1661c779b4 Mon Sep 17 00:00:00 2001
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+Date: Wed, 17 Apr 2013 15:58:33 -0700
+Subject: hfsplus: fix potential overflow in hfsplus_file_truncate()
+
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+
+commit 12f267a20aecf8b84a2a9069b9011f1661c779b4 upstream.
+
+Change a u32 to loff_t in hfsplus_file_truncate(). The local variable
+'size' holds inode->i_size (an loff_t) and is passed as the offset to
+pagecache_write_begin(), so a u32 silently truncates sizes of 4 GiB and
+above.
+
+Signed-off-by: Vyacheslav Dubeyko <slava@dubeyko.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Hin-Tak Leung <htl10@users.sourceforge.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hfsplus/extents.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -517,7 +517,7 @@ void hfsplus_file_truncate(struct inode
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
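For illustration, a minimal userspace sketch of the overflow this patch
fixes (demo-only code, not from the kernel; loff_t_demo stands in for the
kernel's 64-bit loff_t):

#include <stdint.h>
#include <stdio.h>

typedef int64_t loff_t_demo;	/* stand-in for the kernel's loff_t */

int main(void)
{
	loff_t_demo i_size = 5368709121LL;	/* 5 GiB + 1 byte */

	/* Old code: u32 size = inode->i_size; wraps above 4 GiB. */
	uint32_t wrapped = (uint32_t)i_size;
	/* Fixed code: loff_t size = inode->i_size; keeps the full value. */
	loff_t_demo kept = i_size;

	printf("u32:    %u\n", wrapped);	/* 1073741825 -- wrong offset */
	printf("loff_t: %lld\n", (long long)kept);
	return 0;
}

Since pagecache_write_begin() is then called at offset 'size', the
truncated value makes the write_begin/write_end pair operate at the wrong
offset for any file of 4 GiB or more.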
--- /dev/null
+From 0b79459b482e85cb7426aa7da683a9f2c97aeae1 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Wed, 20 Feb 2013 14:48:10 -0800
+Subject: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
+
+From: Andy Honig <ahonig@google.com>
+
+commit 0b79459b482e85cb7426aa7da683a9f2c97aeae1 upstream.
+
+There is a potential use-after-free issue in the handling of
+MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in movable or removable
+memory, such as a frame buffer, then KVM might continue to write to that
+address even after it is removed via KVM_SET_USER_MEMORY_REGION. KVM pins
+the page in memory, so it is unlikely to cause an issue, but if the
+user-space component repurposes the memory previously used for the guest,
+then the guest will be able to corrupt that memory.
+
+Tested: Tested against kvmclock unit test
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 4 ++--
+ arch/x86/kvm/x86.c | 39 ++++++++++++++-------------------------
+ 2 files changed, 16 insertions(+), 27 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -407,8 +407,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+
+ struct {
+ u64 msr_val;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1114,7 +1114,6 @@ static int kvm_guest_time_update(struct
+ {
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+- void *shared_kaddr;
+ unsigned long this_tsc_khz;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;
+@@ -1150,7 +1149,7 @@ static int kvm_guest_time_update(struct
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1208,14 +1207,9 @@ static int kvm_guest_time_update(struct
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page);
+-
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1504,10 +1498,7 @@ static int kvm_pv_enable_async_pf(struct
+
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1602,6 +1593,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1611,21 +1603,17 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+ /* Check that the address is 32-byte aligned. */
+- if (vcpu->arch.time_offset &
+- (sizeof(struct pvclock_vcpu_time_info) - 1))
++ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+-
+- if (is_error_page(vcpu->arch.time_page)) {
+- kvm_release_page_clean(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+ break;
+ }
+ case MSR_KVM_ASYNC_PF_EN:
+@@ -6172,6 +6160,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+ goto fail_free_mce_banks;
+
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+ kvm_pmu_init(vcpu);
+
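To show why the cached-HVA approach is safe across memory-slot updates,
here is a hypothetical userspace model (demo names only, not KVM's actual
structures): the cache records a memslot generation when initialized and
revalidates it before every write, mirroring how kvm_write_guest_cached()
rechecks the mapping instead of holding on to a pinned page.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct memslots_demo {			/* stands in for KVM's memslots */
	uint64_t generation;		/* bumped on every slot update */
	uint8_t *base;			/* host mapping of guest memory */
};

struct hva_cache_demo {			/* stands in for gfn_to_hva_cache */
	uint64_t generation;		/* memslot generation when cached */
	uint8_t *hva;			/* cached host virtual address */
	uint64_t offset;
};

static void cache_init(struct hva_cache_demo *c,
		       const struct memslots_demo *slots, uint64_t gpa_off)
{
	c->generation = slots->generation;
	c->offset = gpa_off;
	c->hva = slots->base + gpa_off;
}

/* Analogue of kvm_write_guest_cached(): revalidate, then copy. */
static void write_cached(struct hva_cache_demo *c,
			 const struct memslots_demo *slots,
			 const void *data, size_t len)
{
	if (c->generation != slots->generation)
		cache_init(c, slots, c->offset);	/* mapping moved: refresh */
	memcpy(c->hva, data, len);
}

int main(void)
{
	uint8_t mem_a[64] = {0}, mem_b[64] = {0};
	struct memslots_demo slots = { .generation = 1, .base = mem_a };
	struct hva_cache_demo cache;
	uint32_t clock = 0x1234;

	cache_init(&cache, &slots, 8);

	/* Userspace remaps guest memory (KVM_SET_USER_MEMORY_REGION analogue). */
	slots.base = mem_b;
	slots.generation++;

	write_cached(&cache, &slots, &clock, sizeof(clock));
	printf("write landed in the %s mapping\n",
	       cache.hva == mem_b + 8 ? "current" : "stale");
	return 0;
}

The real gfn_to_hva_cache lookup can also fail outright, which is why the
patch tracks pv_time_enabled separately instead of relying on a non-NULL
page pointer.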
--- /dev/null
+From c300aa64ddf57d9c5d9c898a64b36877345dd4a9 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Mon, 11 Mar 2013 09:34:52 -0700
+Subject: KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
+
+From: Andy Honig <ahonig@google.com>
+
+commit c300aa64ddf57d9c5d9c898a64b36877345dd4a9 upstream.
+
+If the guest sets the GPA of the time_page so that the request to update
+the time straddles a page, then KVM will write onto an incorrect page.
+The write is done by using kmap_atomic() to get a pointer to the page for
+the time structure and then performing a memcpy to that page, starting at
+an offset that the guest controls. Well-behaved guests always provide a
+32-byte-aligned address; however, a malicious guest could use this to
+corrupt host kernel memory.
+
+Tested: Tested against kvmclock unit test.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1614,6 +1614,11 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ /* ...but clean it before doing the actual write */
+ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
++ /* Check that the address is 32-byte aligned. */
++ if (vcpu->arch.time_offset &
++ (sizeof(struct pvclock_vcpu_time_info) - 1))
++ break;
++
+ vcpu->arch.time_page =
+ gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+
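A quick userspace sketch (assumed 4096-byte pages; demo constants, not
kernel code) of the straddle this check prevents: pvclock_vcpu_time_info
is 32 bytes and 4096 is a multiple of 32, so a 32-byte-aligned record can
never cross a page boundary, while an unaligned one can, and the
kmap_atomic()+memcpy() path only has that one page mapped.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096ULL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))
#define DEMO_PVTI_SIZE	32ULL	/* sizeof(struct pvclock_vcpu_time_info) */

int main(void)
{
	/* MSR value: page at 0x1000000, in-page offset 4090, enable bit set. */
	uint64_t msr_val = 0x1000000ULL + 4090 + 1;
	uint64_t offset = msr_val & ~(DEMO_PAGE_MASK | 1);

	if (offset & (DEMO_PVTI_SIZE - 1))	/* the check this patch adds */
		printf("rejected: offset %llu + 32 ends at %llu, past the page\n",
		       (unsigned long long)offset,
		       (unsigned long long)(offset + DEMO_PVTI_SIZE));
	return 0;
}

With offset 4090, the 32-byte copy would run to byte 4122 of a 4096-byte
mapping; the added alignment check rejects the MSR write before that can
happen.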