--- /dev/null
+From 12f267a20aecf8b84a2a9069b9011f1661c779b4 Mon Sep 17 00:00:00 2001
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+Date: Wed, 17 Apr 2013 15:58:33 -0700
+Subject: hfsplus: fix potential overflow in hfsplus_file_truncate()
+
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+
+commit 12f267a20aecf8b84a2a9069b9011f1661c779b4 upstream.
+
+Change a u32 to loff_t in hfsplus_file_truncate().
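+
+loff_t is 64 bits wide while u32 is not, so assigning inode->i_size to a
+u32 silently wraps for files of 4 GiB and larger, and the offset passed
+to pagecache_write_begin() is then wrong.  A minimal userspace sketch of
+the wraparound (illustrative values, not the kernel code):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int64_t i_size = 5LL * 1024 * 1024 * 1024; /* a 5 GiB file */
+		uint32_t as_u32 = (uint32_t)i_size;        /* old: u32 size */
+		int64_t as_loff_t = i_size;                /* new: loff_t size */
+
+		printf("i_size    = %lld\n", (long long)i_size);
+		printf("as u32    = %u (wrapped)\n", as_u32);
+		printf("as loff_t = %lld\n", (long long)as_loff_t);
+		return 0;
+	}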
+
+Signed-off-by: Vyacheslav Dubeyko <slava@dubeyko.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Hin-Tak Leung <htl10@users.sourceforge.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hfsplus/extents.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
--- /dev/null
+From 8f964525a121f2ff2df948dac908dcc65be21b5b Mon Sep 17 00:00:00 2001
+From: Andrew Honig <ahonig@google.com>
+Date: Fri, 29 Mar 2013 09:35:21 -0700
+Subject: KVM: Allow cross page reads and writes from cached translations.
+
+From: Andrew Honig <ahonig@google.com>
+
+commit 8f964525a121f2ff2df948dac908dcc65be21b5b upstream.
+
+This patch adds support for reads and writes that cross a page boundary
+to the kvm_gfn_to_hva_cache functions.  If the range falls within a
+single memslot, then this will be a fast operation.  If the range is
+split between two memslots, then the slower kvm_read_guest and
+kvm_write_guest paths are used.
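+
+The fast path applies only when the whole (gpa, len) range maps into one
+contiguous run of pages within a memslot.  A minimal userspace sketch of
+the page-span arithmetic the reworked kvm_gfn_to_hva_cache_init()
+performs (PAGE_SHIFT hard-coded for illustration):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define PAGE_SHIFT 12
+
+	int main(void)
+	{
+		uint64_t gpa = 0xffc;  /* 4 bytes before a page boundary */
+		unsigned long len = 8; /* ...so this access crosses pages */
+
+		uint64_t start_gfn = gpa >> PAGE_SHIFT;
+		uint64_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+		uint64_t nr_pages_needed = end_gfn - start_gfn + 1;
+
+		printf("gfns %llu..%llu, %llu page(s) needed\n",
+		       (unsigned long long)start_gfn,
+		       (unsigned long long)end_gfn,
+		       (unsigned long long)nr_pages_needed);
+		return 0;
+	}
+
+When the memslot provides enough contiguous pages (nr_pages_avail >=
+nr_pages_needed) the cached hva is used directly; otherwise ghc->memslot
+is set to NULL and kvm_read_guest_cached/kvm_write_guest_cached fall back
+to the slow path.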
+
+Tested: Tested against kvm_clock unit tests.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Gleb Natapov <gleb@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c | 2 -
+ arch/x86/kvm/x86.c | 13 +++++-------
+ include/linux/kvm_host.h | 2 -
+ include/linux/kvm_types.h | 1
+ virt/kvm/kvm_main.c | 47 ++++++++++++++++++++++++++++++++++++----------
+ 5 files changed, 46 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1781,7 +1781,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_v
+ if (!pv_eoi_enabled(vcpu))
+ return 0;
+ return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+- addr);
++ addr, sizeof(u8));
+ }
+
+ void kvm_lapic_init(void)
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1825,7 +1825,8 @@ static int kvm_pv_enable_async_pf(struct
+ return 0;
+ }
+
+- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
++ sizeof(u32)))
+ return 1;
+
+ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+@@ -1953,12 +1954,9 @@ int kvm_set_msr_common(struct kvm_vcpu *
+
+ gpa_offset = data & ~(PAGE_MASK | 1);
+
+- /* Check that the address is 32-byte aligned. */
+- if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+- break;
+-
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+- &vcpu->arch.pv_time, data & ~1ULL))
++ &vcpu->arch.pv_time, data & ~1ULL,
++ sizeof(struct pvclock_vcpu_time_info)))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
+@@ -1978,7 +1976,8 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ return 1;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+- data & KVM_STEAL_VALID_BITS))
++ data & KVM_STEAL_VALID_BITS,
++ sizeof(struct kvm_steal_time)))
+ return 1;
+
+ vcpu->arch.st.msr_val = data;
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -511,7 +511,7 @@ int kvm_write_guest(struct kvm *kvm, gpa
+ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa);
++ gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+--- a/include/linux/kvm_types.h
++++ b/include/linux/kvm_types.h
+@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long hva;
++ unsigned long len;
+ struct kvm_memory_slot *memslot;
+ };
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1476,21 +1476,38 @@ int kvm_write_guest(struct kvm *kvm, gpa
+ }
+
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa)
++ gpa_t gpa, unsigned long len)
+ {
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int offset = offset_in_page(gpa);
+- gfn_t gfn = gpa >> PAGE_SHIFT;
++ gfn_t start_gfn = gpa >> PAGE_SHIFT;
++ gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
++ gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
++ gfn_t nr_pages_avail;
+
+ ghc->gpa = gpa;
+ ghc->generation = slots->generation;
+- ghc->memslot = gfn_to_memslot(kvm, gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+- if (!kvm_is_error_hva(ghc->hva))
++ ghc->len = len;
++ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva += offset;
+- else
+- return -EFAULT;
+-
++ } else {
++ /*
++ * If the requested region crosses two memslots, we still
++ * verify that the entire region is valid here.
++ */
++ while (start_gfn <= end_gfn) {
++ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
++ &nr_pages_avail);
++ if (kvm_is_error_hva(ghc->hva))
++ return -EFAULT;
++ start_gfn += nr_pages_avail;
++ }
++ /* Use the slow path for cross page reads and writes. */
++ ghc->memslot = NULL;
++ }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+@@ -1501,8 +1518,13 @@ int kvm_write_guest_cached(struct kvm *k
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_write_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+@@ -1522,8 +1544,13 @@ int kvm_read_guest_cached(struct kvm *kv
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_read_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
--- /dev/null
+From a2c118bfab8bc6b8bb213abfc35201e441693d55 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Wed, 20 Feb 2013 14:49:16 -0800
+Subject: KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
+
+From: Andy Honig <ahonig@google.com>
+
+commit a2c118bfab8bc6b8bb213abfc35201e441693d55 upstream.
+
+If the guest specifies an IOAPIC_REG_SELECT with an invalid value and
+follows that with a read of the IOAPIC_REG_WINDOW, KVM does not properly
+validate that request.  ioapic_read_indirect contains an
+ASSERT(redir_index < IOAPIC_NUM_PINS), but the ASSERT has no effect in
+non-debug builds.  In recent kernels this allows a guest to cause a
+kernel oops by reading invalid memory.  In older kernels (pre-3.3) this
+allows a guest to read from large ranges of host memory.
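+
+An ASSERT() compiles away in non-debug builds, so the guest-controlled
+index must be range-checked at runtime.  A minimal userspace sketch of
+the fixed read path (table size and values are illustrative):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define IOAPIC_NUM_PINS 24
+
+	static uint64_t redirtbl[IOAPIC_NUM_PINS];
+
+	static uint64_t read_redir_entry(uint32_t redir_index)
+	{
+		if (redir_index < IOAPIC_NUM_PINS)
+			return redirtbl[redir_index];
+		return ~0ULL; /* invalid select: no out-of-bounds read */
+	}
+
+	int main(void)
+	{
+		printf("index 3:   0x%llx\n",
+		       (unsigned long long)read_redir_entry(3));
+		printf("index 200: 0x%llx\n",
+		       (unsigned long long)read_redir_entry(200));
+		return 0;
+	}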
+
+Tested: Tested against apic unit tests.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/ioapic.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirec
+ u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+ u64 redir_content;
+
+- ASSERT(redir_index < IOAPIC_NUM_PINS);
++ if (redir_index < IOAPIC_NUM_PINS)
++ redir_content =
++ ioapic->redirtbl[redir_index].bits;
++ else
++ redir_content = ~0ULL;
+
+- redir_content = ioapic->redirtbl[redir_index].bits;
+ result = (ioapic->ioregsel & 0x1) ?
+ (redir_content >> 32) & 0xffffffff :
+ redir_content & 0xffffffff;
--- /dev/null
+From 0b79459b482e85cb7426aa7da683a9f2c97aeae1 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Wed, 20 Feb 2013 14:48:10 -0800
+Subject: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
+
+From: Andy Honig <ahonig@google.com>
+
+commit 0b79459b482e85cb7426aa7da683a9f2c97aeae1 upstream.
+
+There is a potential use-after-free issue in the handling of
+MSR_KVM_SYSTEM_TIME.  If the guest specifies a GPA in movable or
+removable memory, such as a frame buffer, then KVM might continue to
+write to that address even after it is removed via
+KVM_SET_USER_MEMORY_REGION.  KVM pins the page in memory, so it is
+unlikely to cause an issue, but if the userspace component re-purposes
+the memory previously used for the guest, then the guest will be able
+to corrupt that memory.
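+
+MSR_KVM_SYSTEM_TIME packs its payload into one 64-bit write: bit 0 is the
+enable flag and the remaining bits carry the guest physical address of
+the pvclock structure.  A minimal userspace sketch of the decoding the
+patched kvm_set_msr_common() performs (the MSR value is hypothetical):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define PAGE_MASK (~0xfffULL) /* 4 KiB pages for illustration */
+
+	int main(void)
+	{
+		uint64_t data = 0x12345040ULL | 1; /* guest MSR write */
+
+		uint64_t enabled = data & 1;
+		uint64_t gpa = data & ~1ULL;
+		uint64_t gpa_offset = data & ~(PAGE_MASK | 1);
+
+		printf("enabled=%llu gpa=0x%llx offset=0x%llx\n",
+		       (unsigned long long)enabled,
+		       (unsigned long long)gpa,
+		       (unsigned long long)gpa_offset);
+		return 0;
+	}
+
+The gpa is now handed to kvm_gfn_to_hva_cache_init() instead of being
+pinned with gfn_to_page(), so a later memslot change is caught by the
+cache's generation check rather than leaving a stale mapping behind.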
+
+Tested: Tested against kvmclock unit test
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 4 +--
+ arch/x86/kvm/x86.c | 47 +++++++++++++++++-----------------------
+ 2 files changed, 22 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+ /* set guest stopped flag in pvclock flags field */
+ bool pvclock_set_guest_stopped_request;
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct
+ unsigned long flags, this_tsc_khz;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+ struct kvm_arch *ka = &v->kvm->arch;
+- void *shared_kaddr;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp, host_tsc;
+- struct pvclock_vcpu_time_info *guest_hv_clock;
++ struct pvclock_vcpu_time_info guest_hv_clock;
+ u8 pvclock_flags;
+ bool use_master_clock;
+
+@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page);
+-
+- guest_hv_clock = shared_kaddr + vcpu->time_offset;
++ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
++ &guest_hv_clock, sizeof(guest_hv_clock))))
++ return 0;
+
+ /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
+- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
++ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
+
+ if (vcpu->pvclock_set_guest_stopped_request) {
+ pvclock_flags |= PVCLOCK_GUEST_STOPPED;
+@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct
+
+ vcpu->hv_clock.flags = pvclock_flags;
+
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1839,10 +1835,7 @@ static int kvm_pv_enable_async_pf(struct
+
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1948,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1957,19 +1951,17 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+ /* Check that the address is 32-byte aligned. */
+- if (vcpu->arch.time_offset &
+- (sizeof(struct pvclock_vcpu_time_info) - 1))
++ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+-
+- if (is_error_page(vcpu->arch.time_page))
+- vcpu->arch.time_page = NULL;
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+
+ break;
+ }
+@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(s
+ */
+ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
+ {
+- if (!vcpu->arch.time_page)
++ if (!vcpu->arch.pv_time_enabled)
+ return -EINVAL;
+ vcpu->arch.pvclock_set_guest_stopped_request = true;
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+@@ -6666,6 +6658,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *
+ goto fail_free_wbinvd_dirty_mask;
+
+ vcpu->arch.ia32_tsc_adjust_msr = 0x0;
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+ kvm_pmu_init(vcpu);
+
--- /dev/null
+From c300aa64ddf57d9c5d9c898a64b36877345dd4a9 Mon Sep 17 00:00:00 2001
+From: Andy Honig <ahonig@google.com>
+Date: Mon, 11 Mar 2013 09:34:52 -0700
+Subject: KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
+
+From: Andy Honig <ahonig@google.com>
+
+commit c300aa64ddf57d9c5d9c898a64b36877345dd4a9 upstream.
+
+If the guest sets the GPA of the time_page so that the request to update
+the time straddles a page, then KVM will write onto an incorrect page.
+The write is done by using kmap_atomic to get a pointer to the page for
+the time structure and then performing a memcpy to that page starting at
+an offset that the guest controls.  Well-behaved guests always provide a
+32-byte aligned address, however a malicious guest could use this to
+corrupt host kernel memory.
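+
+Because sizeof(struct pvclock_vcpu_time_info) is a power of two (32
+bytes), "offset & (size - 1)" is non-zero exactly when the offset is not
+32-byte aligned, and a 32-byte aligned structure can never straddle a
+4 KiB page.  A minimal userspace sketch of the added check (structure
+size hard-coded for illustration):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define PVCLOCK_SIZE 32u /* sizeof(struct pvclock_vcpu_time_info) */
+
+	static int offset_is_valid(uint64_t time_offset)
+	{
+		return (time_offset & (PVCLOCK_SIZE - 1)) == 0;
+	}
+
+	int main(void)
+	{
+		printf("offset 0x040: %s\n",
+		       offset_is_valid(0x040) ? "ok" : "rejected");
+		printf("offset 0xff1: %s\n",
+		       offset_is_valid(0xff1) ? "ok" : "rejected");
+		return 0;
+	}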
+
+Tested: Tested against kvmclock unit test.
+
+Signed-off-by: Andrew Honig <ahonig@google.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1960,6 +1960,11 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ /* ...but clean it before doing the actual write */
+ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
++ /* Check that the address is 32-byte aligned. */
++ if (vcpu->arch.time_offset &
++ (sizeof(struct pvclock_vcpu_time_info) - 1))
++ break;
++
+ vcpu->arch.time_page =
+ gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+
--- /dev/null
+From c8dc9c654794a765ca61baed07f84ed8aaa7ca8c Mon Sep 17 00:00:00 2001
+From: Joe Lawrence <Joe.Lawrence@stratus.com>
+Date: Thu, 21 Feb 2013 13:28:09 +1100
+Subject: md: raid1,10: Handle REQ_WRITE_SAME flag in write bios
+
+From: Joe Lawrence <Joe.Lawrence@stratus.com>
+
+commit c8dc9c654794a765ca61baed07f84ed8aaa7ca8c upstream.
+
+Set the mddev queue's max_write_same_sectors to its chunk_sectors value
+(before disk_stack_limits merges the underlying disk limits).  With that
+in place, be sure to handle writes coming down from the block layer that
+have the REQ_WRITE_SAME flag set.  That flag needs to be copied into any
+newly cloned write bio.
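+
+A minimal userspace sketch of the flag plumbing: the bits of interest are
+masked out of the incoming bio's bi_rw once, then OR-ed into every cloned
+per-device write bio, so REQ_WRITE_SAME now survives the clone (the bit
+values below are illustrative, not the kernel's):
+
+	#include <stdio.h>
+
+	#define WRITE          (1UL << 0)
+	#define REQ_SYNC       (1UL << 1)
+	#define REQ_DISCARD    (1UL << 2)
+	#define REQ_WRITE_SAME (1UL << 3)
+
+	int main(void)
+	{
+		unsigned long bi_rw = WRITE | REQ_WRITE_SAME; /* incoming */
+
+		unsigned long do_sync = bi_rw & REQ_SYNC;
+		unsigned long do_discard = bi_rw & REQ_DISCARD;
+		unsigned long do_same = bi_rw & REQ_WRITE_SAME;
+
+		unsigned long mbio_rw =
+			WRITE | do_sync | do_discard | do_same;
+
+		printf("cloned bi_rw = 0x%lx (WRITE_SAME %s)\n", mbio_rw,
+		       (mbio_rw & REQ_WRITE_SAME) ? "kept" : "lost");
+		return 0;
+	}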
+
+Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
+Acked-by: "Martin K. Petersen" <martin.petersen@oracle.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 7 ++++++-
+ drivers/md/raid10.c | 9 +++++++--
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1001,6 +1001,7 @@ static void make_request(struct mddev *m
+ const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+ const unsigned long do_discard = (bio->bi_rw
+ & (REQ_DISCARD | REQ_SECURE));
++ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+ struct md_rdev *blocked_rdev;
+ struct blk_plug_cb *cb;
+ struct raid1_plug_cb *plug = NULL;
+@@ -1302,7 +1303,8 @@ read_again:
+ conf->mirrors[i].rdev->data_offset);
+ mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ mbio->bi_end_io = raid1_end_write_request;
+- mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
++ mbio->bi_rw =
++ WRITE | do_flush_fua | do_sync | do_discard | do_same;
+ mbio->bi_private = r1_bio;
+
+ atomic_inc(&r1_bio->remaining);
+@@ -2819,6 +2821,9 @@ static int run(struct mddev *mddev)
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
++ if (mddev->queue)
++ blk_queue_max_write_same_sectors(mddev->queue,
++ mddev->chunk_sectors);
+ rdev_for_each(rdev, mddev) {
+ if (!mddev->gendisk)
+ continue;
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1106,6 +1106,7 @@ static void make_request(struct mddev *m
+ const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+ const unsigned long do_discard = (bio->bi_rw
+ & (REQ_DISCARD | REQ_SECURE));
++ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+ unsigned long flags;
+ struct md_rdev *blocked_rdev;
+ struct blk_plug_cb *cb;
+@@ -1461,7 +1462,8 @@ retry_write:
+ rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
++ mbio->bi_rw =
++ WRITE | do_sync | do_fua | do_discard | do_same;
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
+@@ -1503,7 +1505,8 @@ retry_write:
+ r10_bio, rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
++ mbio->bi_rw =
++ WRITE | do_sync | do_fua | do_discard | do_same;
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
+@@ -3570,6 +3573,8 @@ static int run(struct mddev *mddev)
+ if (mddev->queue) {
+ blk_queue_max_discard_sectors(mddev->queue,
+ mddev->chunk_sectors);
++ blk_queue_max_write_same_sectors(mddev->queue,
++ mddev->chunk_sectors);
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
hugetlbfs-add-swap-entry-check-in-follow_hugetlb_page.patch
fs-binfmt_elf.c-fix-hugetlb-memory-check-in-vma_dump_size.patch
kernel-signal.c-stop-info-leak-via-the-tkill-and-the-tgkill-syscalls.patch
+hfsplus-fix-potential-overflow-in-hfsplus_file_truncate.patch
+md-raid1-10-handle-req_write_same-flag-in-write-bios.patch
+kvm-x86-fix-for-buffer-overflow-in-handling-of-msr_kvm_system_time-cve-2013-1796.patch
+kvm-x86-convert-msr_kvm_system_time-to-use-gfn_to_hva_cache-functions-cve-2013-1797.patch
+kvm-fix-bounds-checking-in-ioapic-indirect-register-reads-cve-2013-1798.patch
+kvm-allow-cross-page-reads-and-writes-from-cached-translations.patch