--- /dev/null
+From 79367a65743975e5cac8d24d08eccc7fdae832b0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Jun 2018 16:43:02 +0200
+Subject: KVM: x86: introduce linear_{read,write}_system
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 79367a65743975e5cac8d24d08eccc7fdae832b0 upstream.
+
+Wrap the common invocation of ctxt->ops->read_std and ctxt->ops->write_std, so
+as to have a smaller patch when the functions grow another argument.
+
+Fixes: 129a72a0d3c8 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c | 64 ++++++++++++++++++++++++-------------------------
+ 1 file changed, 32 insertions(+), 32 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -790,6 +790,19 @@ static inline int jmp_rel(struct x86_emu
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+
++static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
++ void *data, unsigned size)
++{
++ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++}
++
++static int linear_write_system(struct x86_emulate_ctxt *ctxt,
++ ulong linear, void *data,
++ unsigned int size)
++{
++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++}
++
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+@@ -1488,8 +1501,7 @@ static int read_interrupt_descriptor(str
+ return emulate_gp(ctxt, index << 3 | 0x2);
+
+ addr = dt.address + index * 8;
+- return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+- &ctxt->exception);
++ return linear_read_system(ctxt, addr, desc, sizeof *desc);
+ }
+
+ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+@@ -1552,8 +1564,7 @@ static int read_segment_descriptor(struc
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
+- &ctxt->exception);
++ return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
+ }
+
+ /* allowed just for 8 bytes segments */
+@@ -1567,8 +1578,7 @@ static int write_segment_descriptor(stru
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+- &ctxt->exception);
++ return linear_write_system(ctxt, addr, desc, sizeof *desc);
+ }
+
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+@@ -1729,8 +1739,7 @@ static int __load_segment_descriptor(str
+ return ret;
+ }
+ } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+- ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+- sizeof(base3), &ctxt->exception);
++ ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ if (is_noncanonical_address(get_desc_base(&seg_desc) |
+@@ -2043,11 +2052,11 @@ static int __emulate_int_real(struct x86
+ eip_addr = dt.address + (irq << 2);
+ cs_addr = dt.address + (irq << 2) + 2;
+
+- rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
++ rc = linear_read_system(ctxt, cs_addr, &cs, 2);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
++ rc = linear_read_system(ctxt, eip_addr, &eip, 2);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+@@ -3025,35 +3034,30 @@ static int task_switch_16(struct x86_emu
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+ {
+- const struct x86_emulate_ops *ops = ctxt->ops;
+ struct tss_segment_16 tss_seg;
+ int ret;
+ u32 new_tss_base = get_desc_base(new_desc);
+
+- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ save_state_to_tss16(ctxt, &tss_seg);
+
+- ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+- ret = ops->write_std(ctxt, new_tss_base,
+- &tss_seg.prev_task_link,
+- sizeof tss_seg.prev_task_link,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, new_tss_base,
++ &tss_seg.prev_task_link,
++ sizeof tss_seg.prev_task_link);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+@@ -3169,38 +3173,34 @@ static int task_switch_32(struct x86_emu
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+ {
+- const struct x86_emulate_ops *ops = ctxt->ops;
+ struct tss_segment_32 tss_seg;
+ int ret;
+ u32 new_tss_base = get_desc_base(new_desc);
+ u32 eip_offset = offsetof(struct tss_segment_32, eip);
+ u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
+
+- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ save_state_to_tss32(ctxt, &tss_seg);
+
+ /* Only GP registers and segment selectors are saved */
+- ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+- ldt_sel_offset - eip_offset, &ctxt->exception);
++ ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
++ ldt_sel_offset - eip_offset);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+- ret = ops->write_std(ctxt, new_tss_base,
+- &tss_seg.prev_task_link,
+- sizeof tss_seg.prev_task_link,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, new_tss_base,
++ &tss_seg.prev_task_link,
++ sizeof tss_seg.prev_task_link);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
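
The value of the two helpers shows up in the follow-up hardening patch, which adds a
"system access" flag to the ->read_std()/->write_std() callbacks: with the wrappers in
place only these two functions need the extra argument, while the call sites converted
above stay untouched. A rough sketch of the eventual shape (the final boolean argument
belongs to the later patch, not to this one):

        static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                                      void *data, unsigned size)
        {
                /* true = system (privileged) access, matching the old behaviour */
                return ctxt->ops->read_std(ctxt, linear, data, size,
                                           &ctxt->exception, true);
        }

        static int linear_write_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                                       void *data, unsigned int size)
        {
                return ctxt->ops->write_std(ctxt, linear, data, size,
                                            &ctxt->exception, true);
        }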
--- /dev/null
+From ce14e868a54edeb2e30cb7a7b104a2fc4b9d76ca Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Jun 2018 17:37:49 +0200
+Subject: KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and kvm_write_guest_virt_system
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ce14e868a54edeb2e30cb7a7b104a2fc4b9d76ca upstream.
+
+In the next patch the emulator's .read_std and .write_std callbacks will
+grow another argument, which is not needed in kvm_read_guest_virt and
+kvm_write_guest_virt_system's callers. Since we have to make separate
+functions, let's give the currently existing names a nicer interface, too.
+
+Fixes: 129a72a0d3c8 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 23 ++++++++++-------------
+ arch/x86/kvm/x86.c | 39 ++++++++++++++++++++++++++-------------
+ arch/x86/kvm/x86.h | 4 ++--
+ 3 files changed, 38 insertions(+), 28 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6692,8 +6692,7 @@ static int nested_vmx_check_vmptr(struct
+ vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ return 1;
+
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+- sizeof(vmptr), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7211,8 +7210,8 @@ static int handle_vmread(struct kvm_vcpu
+ vmx_instruction_info, true, &gva))
+ return 1;
+ /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+- kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+- &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
++ kvm_write_guest_virt_system(vcpu, gva, &field_value,
++ (is_long_mode(vcpu) ? 8 : 4), NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+@@ -7247,8 +7246,8 @@ static int handle_vmwrite(struct kvm_vcp
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+- &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &field_value,
++ (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7338,9 +7337,9 @@ static int handle_vmptrst(struct kvm_vcp
+ vmx_instruction_info, true, &vmcs_gva))
+ return 1;
+ /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+- if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+- (void *)&to_vmx(vcpu)->nested.current_vmptr,
+- sizeof(u64), &e)) {
++ if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
++ (void *)&to_vmx(vcpu)->nested.current_vmptr,
++ sizeof(u64), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7394,8 +7393,7 @@ static int handle_invept(struct kvm_vcpu
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+- sizeof(operand), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7454,8 +7452,7 @@ static int handle_invvpid(struct kvm_vcp
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
+- sizeof(u32), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4245,11 +4245,10 @@ static int kvm_fetch_guest_virt(struct x
+ return X86EMUL_CONTINUE;
+ }
+
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception)
+ {
+- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+@@ -4257,9 +4256,9 @@ int kvm_read_guest_virt(struct x86_emula
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+
+-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+- gva_t addr, void *val, unsigned int bytes,
+- struct x86_exception *exception)
++static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val, unsigned int bytes,
++ struct x86_exception *exception)
+ {
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+@@ -4274,18 +4273,16 @@ static int kvm_read_guest_phys_system(st
+ return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+ }
+
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+- gva_t addr, void *val,
+- unsigned int bytes,
+- struct x86_exception *exception)
++static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 access,
++ struct x86_exception *exception)
+ {
+- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
+ while (bytes) {
+ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+- PFERR_WRITE_MASK,
++ access,
+ exception);
+ unsigned offset = addr & (PAGE_SIZE-1);
+ unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+@@ -4306,6 +4303,22 @@ int kvm_write_guest_virt_system(struct x
+ out:
+ return r;
+ }
++
++static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
++ unsigned int bytes, struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++ PFERR_WRITE_MASK, exception);
++}
++
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
++ unsigned int bytes, struct x86_exception *exception)
++{
++ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++ PFERR_WRITE_MASK, exception);
++}
+ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+
+ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+@@ -5025,8 +5038,8 @@ static void emulator_set_hflags(struct x
+ static const struct x86_emulate_ops emulate_ops = {
+ .read_gpr = emulator_read_gpr,
+ .write_gpr = emulator_write_gpr,
+- .read_std = kvm_read_guest_virt_system,
+- .write_std = kvm_write_guest_virt_system,
++ .read_std = emulator_read_std,
++ .write_std = emulator_write_std,
+ .read_phys = kvm_read_guest_phys_system,
+ .fetch = kvm_fetch_guest_virt,
+ .read_emulated = emulator_read_emulated,
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct
+
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
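
With this split, the emulator keeps ctxt-based callbacks (emulator_read_std and
emulator_write_std), while non-emulator callers such as the nested-VMX exit handlers
pass the vcpu directly. A condensed sketch of the resulting call pattern, based on the
handle_invept()/handle_invvpid() hunks above (the operand type is simplified here, and
gva is assumed to have been resolved earlier via get_vmx_mem_address()):

        struct x86_exception e;
        u64 operand;

        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }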
--- /dev/null
+From aa2f80e752c75e593b3820f42c416ed9458fa73e Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Thu, 10 May 2018 08:41:13 +0200
+Subject: serial: samsung: fix maxburst parameter for DMA transactions
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit aa2f80e752c75e593b3820f42c416ed9458fa73e upstream.
+
+The best granularity of residue that DMA engine can report is in the BURST
+units, so the serial driver must use MAXBURST = 1 and DMA_SLAVE_BUSWIDTH_1_BYTE
+if it relies on exact number of bytes transferred by DMA engine.
+
+Fixes: 62c37eedb74c ("serial: samsung: add dma reqest/release functions")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/samsung.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -860,15 +860,12 @@ static int s3c24xx_serial_request_dma(st
+ dma->rx_conf.direction = DMA_DEV_TO_MEM;
+ dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
+- dma->rx_conf.src_maxburst = 16;
++ dma->rx_conf.src_maxburst = 1;
+
+ dma->tx_conf.direction = DMA_MEM_TO_DEV;
+ dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
+- if (dma_get_cache_alignment() >= 16)
+- dma->tx_conf.dst_maxburst = 16;
+- else
+- dma->tx_conf.dst_maxburst = 1;
++ dma->tx_conf.dst_maxburst = 1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
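
The reasoning behind the change: dmaengine drivers only guarantee residue reporting at
burst granularity, so with a 16-byte maxburst the serial driver's byte counting could be
off by up to 15 bytes. The resulting RX configuration, shown here in an equivalent
designated-initializer form rather than the field-by-field assignments the driver
actually uses:

        struct dma_slave_config rx_conf = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .src_addr       = p->port.mapbase + S3C2410_URXH,
                .src_maxburst   = 1,    /* was 16; keeps the residue byte-accurate */
        };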
x86-crypto-x86-fpu-remove-x86_feature_eager_fpu-ifdef-from-the-crc32c-code.patch
gpio-no-null-owner.patch
clarify-and-fix-max_lfs_filesize-macros.patch
+kvm-x86-introduce-linear_-read-write-_system.patch
+kvm-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-and-kvm_write_guest_virt_system.patch
+usb-storage-add-support-for-fl_always_sync-flag-in-the-uas-driver.patch
+usb-storage-add-compatibility-quirk-flags-for-g-technologies-g-drive.patch
+serial-samsung-fix-maxburst-parameter-for-dma-transactions.patch
+vmw_balloon-fixing-double-free-when-batching-mode-is-off.patch
--- /dev/null
+From ca7d9515d0e6825351ce106066cea1f60e40b1c8 Mon Sep 17 00:00:00 2001
+From: Alexander Kappner <agk@godking.net>
+Date: Fri, 18 May 2018 21:50:16 -0700
+Subject: usb-storage: Add compatibility quirk flags for G-Technologies G-Drive
+
+From: Alexander Kappner <agk@godking.net>
+
+commit ca7d9515d0e6825351ce106066cea1f60e40b1c8 upstream.
+
+The "G-Drive" (sold by G-Technology) external USB 3.0 drive
+ hangs on write access under UAS and usb-storage:
+
+[ 136.079121] sd 15:0:0:0: [sdi] tag#0 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE
+[ 136.079144] sd 15:0:0:0: [sdi] tag#0 Sense Key : Illegal Request [current]
+[ 136.079152] sd 15:0:0:0: [sdi] tag#0 Add. Sense: Invalid field in cdb
+[ 136.079176] sd 15:0:0:0: [sdi] tag#0 CDB: Write(16) 8a 08 00 00 00 00 00 00 00 00 00 00 00 08 00 00
+[ 136.079180] print_req_error: critical target error, dev sdi, sector 0
+[ 136.079183] Buffer I/O error on dev sdi, logical block 0, lost sync page write
+[ 136.173148] EXT4-fs (sdi): mounted filesystem with ordered data mode. Opts: (null)
+[ 140.583998] sd 15:0:0:0: [sdi] tag#0 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE
+[ 140.584010] sd 15:0:0:0: [sdi] tag#0 Sense Key : Illegal Request [current]
+[ 140.584016] sd 15:0:0:0: [sdi] tag#0 Add. Sense: Invalid field in cdb
+[ 140.584022] sd 15:0:0:0: [sdi] tag#0 CDB: Write(16) 8a 08 00 00 00 00 e8 c4 00 18 00 00 00 08 00 00
+[ 140.584025] print_req_error: critical target error, dev sdi, sector 3905159192
+[ 140.584044] print_req_error: critical target error, dev sdi, sector 3905159192
+[ 140.584052] Aborting journal on device sdi-8.
+
+The proposed patch adds compatibility quirks. Because the drive requires two
+quirks (one to work with UAS, and another to work with usb-storage), the entries
+are added under unusual_devs.h and not just unusual_uas.h, so that kernels
+compiled without UAS still receive the quirk. With the patch, the drive works
+reliably on both UAS and usb-storage (tested on an NEC Corporation uPD720200
+USB 3.0 host controller).
+
+Signed-off-by: Alexander Kappner <agk@godking.net>
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/storage/unusual_devs.h | 9 +++++++++
+ drivers/usb/storage/unusual_uas.h | 9 +++++++++
+ 2 files changed, 18 insertions(+)
+
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2213,6 +2213,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x
+ "Micro Mini 1GB",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <agk@godking.net>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++ "SimpleTech",
++ "External HDD",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_ALWAYS_SYNC),
++
+ /*
+ * Nick Bowler <nbowler@elliptictech.com>
+ * SCSI stack spams (otherwise harmless) error messages.
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -182,3 +182,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x99
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
++
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <agk@godking.net>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++ "SimpleTech",
++ "External HDD",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_ALWAYS_SYNC),
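
For reference, the new entry uses the same UNUSUAL_DEV() layout as the surrounding
ones; annotated below (the comments are editorial, not part of the patch):

        UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999, /* VID, PID, bcdDevice min..max */
                "SimpleTech",                       /* vendor string  */
                "External HDD",                     /* product string */
                USB_SC_DEVICE, USB_PR_DEVICE,       /* subclass/protocol from the device */
                NULL,                               /* no init function */
                US_FL_ALWAYS_SYNC),                 /* the quirk being applied */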
--- /dev/null
+From 8c4e97ddfe73a0958bb0abf7e6a3bc4cc3e04936 Mon Sep 17 00:00:00 2001
+From: Alexander Kappner <agk@godking.net>
+Date: Fri, 18 May 2018 21:50:15 -0700
+Subject: usb-storage: Add support for FL_ALWAYS_SYNC flag in the UAS driver
+
+From: Alexander Kappner <agk@godking.net>
+
+commit 8c4e97ddfe73a0958bb0abf7e6a3bc4cc3e04936 upstream.
+
+The ALWAYS_SYNC flag is currently honored by the usb-storage driver but not UAS
+and is required to work around devices that become unstable upon being
+queried for cache. This code is taken straight from:
+drivers/usb/storage/scsiglue.c:284
+
+Signed-off-by: Alexander Kappner <agk@godking.net>
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Cc: stable <stable@vger.kernel.org>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/storage/uas.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -811,6 +811,12 @@ static int uas_slave_configure(struct sc
+ if (devinfo->flags & US_FL_BROKEN_FUA)
+ sdev->broken_fua = 1;
+
++ /* UAS also needs to support FL_ALWAYS_SYNC */
++ if (devinfo->flags & US_FL_ALWAYS_SYNC) {
++ sdev->skip_ms_page_3f = 1;
++ sdev->skip_ms_page_8 = 1;
++ sdev->wce_default_on = 1;
++ }
+ scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+ return 0;
+ }
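
The three scsi_device flags mirror what usb-storage already does in slave_configure()
(drivers/usb/storage/scsiglue.c), which the commit message cites; roughly:

        if (us->fflags & US_FL_ALWAYS_SYNC) {
                sdev->skip_ms_page_3f = 1;   /* don't MODE SENSE all pages (0x3f)  */
                sdev->skip_ms_page_8 = 1;    /* don't MODE SENSE the caching page  */
                sdev->wce_default_on = 1;    /* assume write cache on, so sd still
                                                sends SYNCHRONIZE CACHE            */
        }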
--- /dev/null
+From b23220fe054e92f616b82450fae8cd3ab176cc60 Mon Sep 17 00:00:00 2001
+From: Gil Kupfer <gilkup@gmail.com>
+Date: Fri, 1 Jun 2018 00:47:47 -0700
+Subject: vmw_balloon: fixing double free when batching mode is off
+
+From: Gil Kupfer <gilkup@gmail.com>
+
+commit b23220fe054e92f616b82450fae8cd3ab176cc60 upstream.
+
+The balloon.page field is used for two different purposes if batching is
+on or off. If batching is on, the field point to the page which is used
+to communicate with with the hypervisor. If it is off, balloon.page
+points to the page that is about to be (un)locked.
+
+Unfortunately, this dual-purpose of the field introduced a bug: when the
+balloon is popped (e.g., when the machine is reset or the balloon driver
+is explicitly removed), the balloon driver frees, unconditionally, the
+page that is held in balloon.page. As a result, if batching is
+disabled, this leads to double freeing the last page that is sent to the
+hypervisor.
+
+The following error occurs during rmmod when kernel checkers are on, and
+the balloon is not empty:
+
+[ 42.307653] ------------[ cut here ]------------
+[ 42.307657] Kernel BUG at ffffffffba1e4b28 [verbose debug info unavailable]
+[ 42.307720] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC
+[ 42.312512] Modules linked in: vmw_vsock_vmci_transport vsock ppdev joydev vmw_balloon(-) input_leds serio_raw vmw_vmci parport_pc shpchp parport i2c_piix4 nfit mac_hid autofs4 vmwgfx drm_kms_helper hid_generic syscopyarea sysfillrect usbhid sysimgblt fb_sys_fops hid ttm mptspi scsi_transport_spi ahci mptscsih drm psmouse vmxnet3 libahci mptbase pata_acpi
+[ 42.312766] CPU: 10 PID: 1527 Comm: rmmod Not tainted 4.12.0+ #5
+[ 42.312803] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 09/30/2016
+[ 42.313042] task: ffff9bf9680f8000 task.stack: ffffbfefc1638000
+[ 42.313290] RIP: 0010:__free_pages+0x38/0x40
+[ 42.313510] RSP: 0018:ffffbfefc163be98 EFLAGS: 00010246
+[ 42.313731] RAX: 000000000000003e RBX: ffffffffc02b9720 RCX: 0000000000000006
+[ 42.313972] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff9bf97e08e0a0
+[ 42.314201] RBP: ffffbfefc163be98 R08: 0000000000000000 R09: 0000000000000000
+[ 42.314435] R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffc02b97e4
+[ 42.314505] R13: ffffffffc02b9748 R14: ffffffffc02b9728 R15: 0000000000000200
+[ 42.314550] FS: 00007f3af5fec700(0000) GS:ffff9bf97e080000(0000) knlGS:0000000000000000
+[ 42.314599] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 42.314635] CR2: 00007f44f6f4ab24 CR3: 00000003a7d12000 CR4: 00000000000006e0
+[ 42.314864] Call Trace:
+[ 42.315774] vmballoon_pop+0x102/0x130 [vmw_balloon]
+[ 42.315816] vmballoon_exit+0x42/0xd64 [vmw_balloon]
+[ 42.315853] SyS_delete_module+0x1e2/0x250
+[ 42.315891] entry_SYSCALL_64_fastpath+0x23/0xc2
+[ 42.315924] RIP: 0033:0x7f3af5b0e8e7
+[ 42.315949] RSP: 002b:00007fffe6ce0148 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
+[ 42.315996] RAX: ffffffffffffffda RBX: 000055be676401e0 RCX: 00007f3af5b0e8e7
+[ 42.316951] RDX: 000000000000000a RSI: 0000000000000800 RDI: 000055be67640248
+[ 42.317887] RBP: 0000000000000003 R08: 0000000000000000 R09: 1999999999999999
+[ 42.318845] R10: 0000000000000883 R11: 0000000000000206 R12: 00007fffe6cdf130
+[ 42.319755] R13: 0000000000000000 R14: 0000000000000000 R15: 000055be676401e0
+[ 42.320606] Code: c0 74 1c f0 ff 4f 1c 74 02 5d c3 85 f6 74 07 e8 0f d8 ff ff 5d c3 31 f6 e8 c6 fb ff ff 5d c3 48 c7 c6 c8 0f c5 ba e8 58 be 02 00 <0f> 0b 66 0f 1f 44 00 00 66 66 66 66 90 48 85 ff 75 01 c3 55 48
+[ 42.323462] RIP: __free_pages+0x38/0x40 RSP: ffffbfefc163be98
+[ 42.325735] ---[ end trace 872e008e33f81508 ]---
+
+To solve the bug, we eliminate the dual purpose of balloon.page.
+
+Fixes: f220a80f0c2e ("VMware balloon: add batching to the vmw_balloon.")
+Cc: stable@vger.kernel.org
+Reported-by: Oleksandr Natalenko <onatalen@redhat.com>
+Signed-off-by: Gil Kupfer <gilkup@gmail.com>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Tested-by: Oleksandr Natalenko <oleksandr@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballo
+ }
+ }
+
+- if (b->batch_page) {
+- vunmap(b->batch_page);
+- b->batch_page = NULL;
+- }
+-
+- if (b->page) {
+- __free_page(b->page);
+- b->page = NULL;
+- }
++ /* Clearing the batch_page unconditionally has no adverse effect */
++ free_page((unsigned long)b->batch_page);
++ b->batch_page = NULL;
+ }
+
+ /*
+@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmball
+
+ static bool vmballoon_init_batching(struct vmballoon *b)
+ {
+- b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
+- if (!b->page)
+- return false;
++ struct page *page;
+
+- b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
+- if (!b->batch_page) {
+- __free_page(b->page);
++ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!page)
+ return false;
+- }
+
++ b->batch_page = page_address(page);
+ return true;
+ }
+
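
A note on why the unconditional free in vmballoon_pop() is safe when batching is off:
free_page() is a thin wrapper around free_pages(addr, 0), which returns immediately for
a zero address, so a NULL batch_page is simply a no-op. That is what the "no adverse
effect" comment in the hunk refers to; in sketch form:

        free_page((unsigned long)b->batch_page);  /* no-op when batch_page == NULL */
        b->batch_page = NULL;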