From: Greg Kroah-Hartman
Date: Mon, 20 May 2019 12:13:53 +0000 (+0200)
Subject: 4.4-stable patches
X-Git-Tag: v4.9.178~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=fbc4be2a73a2b71f6c0033bfd3952b107506984c;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
    alsa-hda-realtek-fix-for-lenovo-b50-70-inverted-internal-microphone-bug.patch
    ext4-zero-out-the-unused-memory-region-in-the-extent-tree-block.patch
    fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-wb-switches-going-into-workqueue-when-umount.patch
    kvm-x86-skip-efer-vs.-guest-cpuid-checks-for-host-initiated-writes.patch
    writeback-synchronize-sync-2-against-cgroup-writeback-membership-switches.patch
---

diff --git a/queue-4.4/alsa-hda-realtek-fix-for-lenovo-b50-70-inverted-internal-microphone-bug.patch b/queue-4.4/alsa-hda-realtek-fix-for-lenovo-b50-70-inverted-internal-microphone-bug.patch
new file mode 100644
index 00000000000..fea22588b69
--- /dev/null
+++ b/queue-4.4/alsa-hda-realtek-fix-for-lenovo-b50-70-inverted-internal-microphone-bug.patch
@@ -0,0 +1,41 @@
+From 56df90b631fc027fe28b70d41352d820797239bb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Wadowski?=
+Date: Tue, 14 May 2019 16:58:00 +0200
+Subject: ALSA: hda/realtek - Fix for Lenovo B50-70 inverted internal microphone bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Wadowski
+
+commit 56df90b631fc027fe28b70d41352d820797239bb upstream.
+
+Add a patch for the Realtek codec in the Lenovo B50-70 that fixes the
+inverted internal microphone channel.
+Device IdeaPad Y410P has the same PCI SSID as Lenovo B50-70,
+but the first one was about fixing the noise and it didn't seem to help in a
+later kernel version.
+So I replaced the IdeaPad Y410P device description with B50-70 and applied
+the inverted microphone fix.
+
+Bugzilla: https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1524215
+Signed-off-by: Michał Wadowski
+Cc:
+Signed-off-by: Takashi Iwai
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ sound/pci/hda/patch_realtek.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5778,7 +5778,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+- SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
diff --git a/queue-4.4/ext4-zero-out-the-unused-memory-region-in-the-extent-tree-block.patch b/queue-4.4/ext4-zero-out-the-unused-memory-region-in-the-extent-tree-block.patch
new file mode 100644
index 00000000000..f6af383fc43
--- /dev/null
+++ b/queue-4.4/ext4-zero-out-the-unused-memory-region-in-the-extent-tree-block.patch
@@ -0,0 +1,82 @@
+From 592acbf16821288ecdc4192c47e3774a4c48bb64 Mon Sep 17 00:00:00 2001
+From: Sriram Rajagopalan
+Date: Fri, 10 May 2019 19:28:06 -0400
+Subject: ext4: zero out the unused memory region in the extent tree block
+
+From: Sriram Rajagopalan
+
+commit 592acbf16821288ecdc4192c47e3774a4c48bb64 upstream.
+
+This commit zeroes out the unused memory region in the buffer_head
+corresponding to the extent metablock after writing the extent header
+and the corresponding extent node entries.
+
+This is done to prevent random uninitialized data from getting into
+the filesystem when the extent block is synced.
+
+This fixes CVE-2019-11833.
+
+Signed-off-by: Sriram Rajagopalan
+Signed-off-by: Theodore Ts'o
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/extents.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1049,6 +1049,7 @@ static int ext4_ext_split(handle_t *hand
+ __le32 border;
+ ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
+ int err = 0;
++ size_t ext_size = 0;
+
+ /* make decision: where to split? */
+ /* FIXME: now decision is simplest: at current extent */
+@@ -1140,6 +1141,10 @@ static int ext4_ext_split(handle_t *hand
+ le16_add_cpu(&neh->eh_entries, m);
+ }
+
++ /* zero out unused area in the extent block */
++ ext_size = sizeof(struct ext4_extent_header) +
++ sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
++ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
+ ext4_extent_block_csum_set(inode, neh);
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+@@ -1219,6 +1224,11 @@ static int ext4_ext_split(handle_t *hand
+ sizeof(struct ext4_extent_idx) * m);
+ le16_add_cpu(&neh->eh_entries, m);
+ }
++ /* zero out unused area in the extent block */
++ ext_size = sizeof(struct ext4_extent_header) +
++ (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
++ memset(bh->b_data + ext_size, 0,
++ inode->i_sb->s_blocksize - ext_size);
+ ext4_extent_block_csum_set(inode, neh);
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+@@ -1284,6 +1294,7 @@ static int ext4_ext_grow_indepth(handle_
+ ext4_fsblk_t newblock, goal = 0;
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ int err = 0;
++ size_t ext_size = 0;
+
+ /* Try to prepend new index to old one */
+ if (ext_depth(inode))
+@@ -1309,9 +1320,11 @@ static int ext4_ext_grow_indepth(handle_
+ goto out;
+ }
+
++ ext_size = sizeof(EXT4_I(inode)->i_data);
+ /* move top-level index/leaf into new block */
+- memmove(bh->b_data, EXT4_I(inode)->i_data,
+- sizeof(EXT4_I(inode)->i_data));
++ memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
++ /* zero out unused area in the extent block */
++ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
+
+ /* set size of new block */
+ neh = ext_block_hdr(bh);
diff --git a/queue-4.4/fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-wb-switches-going-into-workqueue-when-umount.patch b/queue-4.4/fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-wb-switches-going-into-workqueue-when-umount.patch
new file mode 100644
index 00000000000..7e246b7fc6e
--- /dev/null
+++ b/queue-4.4/fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-wb-switches-going-into-workqueue-when-umount.patch
@@ -0,0 +1,75 @@
+From ec084de929e419e51bcdafaafe567d9e7d0273b7 Mon Sep 17 00:00:00 2001
+From: Jiufei Xue
+Date: Fri, 17 May 2019 14:31:44 -0700
+Subject: fs/writeback.c: use rcu_barrier() to wait for inflight wb switches going into workqueue when umount
+
+From: Jiufei Xue
+
+commit ec084de929e419e51bcdafaafe567d9e7d0273b7 upstream.
+
+synchronize_rcu() didn't wait for call_rcu() callbacks, so inode wb
+switch may not go to the workqueue after synchronize_rcu(). Thus
+previously scheduled switches were not finished even after flushing the
+workqueue, which will cause a NULL pointer dereference as shown below.
+
+ VFS: Busy inodes after unmount of vdd. Self-destruct in 5 seconds. Have a nice day...
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000278
+ evict+0xb3/0x180
+ iput+0x1b0/0x230
+ inode_switch_wbs_work_fn+0x3c0/0x6a0
+ worker_thread+0x4e/0x490
+ ? process_one_work+0x410/0x410
+ kthread+0xe6/0x100
+ ret_from_fork+0x39/0x50
+
+Replace the synchronize_rcu() call with a rcu_barrier() to wait for all
+pending callbacks to finish. And increment isw_nr_in_flight after call_rcu()
+in inode_switch_wbs(), which makes more sense.
+
+Link: http://lkml.kernel.org/r/20190429024108.54150-1-jiufei.xue@linux.alibaba.com
+Signed-off-by: Jiufei Xue
+Acked-by: Tejun Heo
+Suggested-by: Tejun Heo
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/fs-writeback.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inod
+ ihold(inode);
+ isw->inode = inode;
+
+- atomic_inc(&isw_nr_in_flight);
+-
+ /*
+ * In addition to synchronizing among switchers, I_WB_SWITCH tells
+ * the RCU protected stat update paths to grab the mapping's
+@@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inod
+ * Let's continue after I_WB_SWITCH is guaranteed to be visible.
+ */
+ call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
++
++ atomic_inc(&isw_nr_in_flight);
++
+ goto out_unlock;
+
+ out_free:
+@@ -910,7 +911,11 @@ restart:
+ void cgroup_writeback_umount(void)
+ {
+ if (atomic_read(&isw_nr_in_flight)) {
+- synchronize_rcu();
++ /*
++ * Use rcu_barrier() to wait for all pending callbacks to
++ * ensure that all in-flight wb switches are in the workqueue.
++ */
++ rcu_barrier();
+ flush_workqueue(isw_wq);
+ }
+ }
diff --git a/queue-4.4/kvm-x86-skip-efer-vs.-guest-cpuid-checks-for-host-initiated-writes.patch b/queue-4.4/kvm-x86-skip-efer-vs.-guest-cpuid-checks-for-host-initiated-writes.patch
new file mode 100644
index 00000000000..69548d6b21c
--- /dev/null
+++ b/queue-4.4/kvm-x86-skip-efer-vs.-guest-cpuid-checks-for-host-initiated-writes.patch
@@ -0,0 +1,97 @@
+From 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson
+Date: Tue, 2 Apr 2019 08:19:15 -0700
+Subject: KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes
+
+From: Sean Christopherson
+
+commit 11988499e62b310f3bf6f6d0a807a06d3f9ccc96 upstream.
+
+KVM allows userspace to violate consistency checks related to the
+guest's CPUID model to some degree. Generally speaking, userspace has
+carte blanche when it comes to guest state so long as jamming invalid
+state won't negatively affect the host.
+
+Currently this seems to be a non-issue as most of the interesting
+EFER checks are missing, e.g. NX and LME, but those will be added
+shortly. Proactively exempt userspace from the CPUID checks so as not
+to break userspace.
+
+Note, the efer_reserved_bits check still applies to userspace writes as
+that mask reflects the host's capabilities, e.g. KVM shouldn't allow a
+guest to run with NX=1 if it has been disabled in the host.
+
+Fixes: d80174745ba39 ("KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kvm/x86.c | 33 ++++++++++++++++++++++-----------
+ 1 file changed, 22 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -990,11 +990,8 @@ static u32 emulated_msrs[] = {
+
+ static unsigned num_emulated_msrs;
+
+-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
++static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+- if (efer & efer_reserved_bits)
+- return false;
+-
+ if (efer & EFER_FFXSR) {
+ struct kvm_cpuid_entry2 *feat;
+
+@@ -1012,19 +1009,33 @@ bool kvm_valid_efer(struct kvm_vcp
+ }
+
+ return true;
++
++}
++bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++ if (efer & efer_reserved_bits)
++ return false;
++
++ return __kvm_valid_efer(vcpu, efer);
+ }
+ EXPORT_SYMBOL_GPL(kvm_valid_efer);
+
+-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
++static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+ u64 old_efer = vcpu->arch.efer;
++ u64 efer = msr_info->data;
+
+- if (!kvm_valid_efer(vcpu, efer))
+- return 1;
++ if (efer & efer_reserved_bits)
++ return false;
+
+- if (is_paging(vcpu)
+- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+- return 1;
++ if (!msr_info->host_initiated) {
++ if (!__kvm_valid_efer(vcpu, efer))
++ return 1;
++
++ if (is_paging(vcpu) &&
++ (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
++ return 1;
++ }
+
+ efer &= ~EFER_LMA;
+ efer |= vcpu->arch.efer & EFER_LMA;
+@@ -2055,7 +2066,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ break;
+
+ case MSR_EFER:
+- return set_efer(vcpu, data);
++ return set_efer(vcpu, msr_info);
+ case MSR_K7_HWCR:
+ data &= ~(u64)0x40; /* ignore flush filter disable */
+ data &= ~(u64)0x100; /* ignore ignne emulation enable */
diff --git a/queue-4.4/series b/queue-4.4/series
index c80390410e1..23f3d680e07 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -24,3 +24,8 @@ crypto-gcm-fix-incompatibility-between-gcm-and-gcm_base.patch
 crypto-chacha20poly1305-set-cra_name-correctly.patch
 crypto-salsa20-don-t-access-already-freed-walk.iv.patch
 crypto-arm-aes-neonbs-don-t-access-already-freed-walk.iv.patch
+writeback-synchronize-sync-2-against-cgroup-writeback-membership-switches.patch
+fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-wb-switches-going-into-workqueue-when-umount.patch
+ext4-zero-out-the-unused-memory-region-in-the-extent-tree-block.patch
+alsa-hda-realtek-fix-for-lenovo-b50-70-inverted-internal-microphone-bug.patch
+kvm-x86-skip-efer-vs.-guest-cpuid-checks-for-host-initiated-writes.patch
diff --git a/queue-4.4/writeback-synchronize-sync-2-against-cgroup-writeback-membership-switches.patch b/queue-4.4/writeback-synchronize-sync-2-against-cgroup-writeback-membership-switches.patch
new file mode 100644
index 00000000000..b4d54ef685a
--- /dev/null
+++ b/queue-4.4/writeback-synchronize-sync-2-against-cgroup-writeback-membership-switches.patch
@@ -0,0 +1,161 @@
+From 7fc5854f8c6efae9e7624970ab49a1eac2faefb1 Mon Sep 17 00:00:00 2001
+From: Tejun Heo
+Date: Tue, 12 Dec 2017 08:38:30 -0800
+Subject: writeback: synchronize sync(2) against cgroup writeback membership switches
+
+From: Tejun Heo
+
+commit 7fc5854f8c6efae9e7624970ab49a1eac2faefb1 upstream.
+
+sync_inodes_sb() can race against cgwb (cgroup writeback) membership
+switches and fail to write back some inodes. For example, if an inode
+switches to another wb while sync_inodes_sb() is in progress, the new
+wb might not be visible to bdi_split_work_to_wbs() at all or the inode
+might jump from a wb which hasn't issued writebacks yet to one which
+already has.
+
+This patch adds backing_dev_info->wb_switch_rwsem to synchronize cgwb
+switch path against sync_inodes_sb() so that sync_inodes_sb() is
+guaranteed to see all the target wbs and inodes can't jump wbs to
+escape syncing.
+
+v2: Fixed misplaced rwsem init. Spotted by Jiufei.
+
+Signed-off-by: Tejun Heo
+Reported-by: Jiufei Xue
+Link: http://lkml.kernel.org/r/dc694ae2-f07f-61e1-7097-7c8411cee12d@gmail.com
+Acked-by: Jan Kara
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/fs-writeback.c | 40 ++++++++++++++++++++++++++++++++++++++++++--
+ include/linux/backing-dev-defs.h | 1
+ mm/backing-dev.c | 1
+ 3 files changed, 40 insertions(+), 2 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
+ struct work_struct work;
+ };
+
++static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
++{
++ down_write(&bdi->wb_switch_rwsem);
++}
++
++static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
++{
++ up_write(&bdi->wb_switch_rwsem);
++}
++
+ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ {
+ struct inode_switch_wbs_context *isw =
+ container_of(work, struct inode_switch_wbs_context, work);
+ struct inode *inode = isw->inode;
++ struct backing_dev_info *bdi = inode_to_bdi(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct bdi_writeback *old_wb = inode->i_wb;
+ struct bdi_writeback *new_wb = isw->new_wb;
+@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(str
+ void **slot;
+
+ /*
++ * If @inode switches cgwb membership while sync_inodes_sb() is
++ * being issued, sync_inodes_sb() might miss it. Synchronize.
++ */
++ down_read(&bdi->wb_switch_rwsem);
++
++ /*
+ * By the time control reaches here, RCU grace period has passed
+ * since I_WB_SWITCH assertion and all wb stat update transactions
+ * between unlocked_inode_to_wb_begin/end() are guaranteed to be
+@@ -435,6 +452,8 @@ skip_switch:
+ spin_unlock(&new_wb->list_lock);
+ spin_unlock(&old_wb->list_lock);
+
++ up_read(&bdi->wb_switch_rwsem);
++
+ if (switched) {
+ wb_wakeup(new_wb);
+ wb_put(old_wb);
+@@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inod
+ if (inode->i_state & I_WB_SWITCH)
+ return;
+
++ /*
++ * Avoid starting new switches while sync_inodes_sb() is in
++ * progress. Otherwise, if the down_write protected issue path
++ * blocks heavily, we might end up starting a large number of
++ * switches which will block on the rwsem.
++ */
++ if (!down_read_trylock(&bdi->wb_switch_rwsem))
++ return;
++
+ isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
+ if (!isw)
+- return;
++ goto out_unlock;
+
+ /* find and pin the new wb */
+ rcu_read_lock();
+@@ -511,12 +539,14 @@ static void inode_switch_wbs(struct inod
+ * Let's continue after I_WB_SWITCH is guaranteed to be visible.
+ */
+ call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+- return;
++ goto out_unlock;
+
+ out_free:
+ if (isw->new_wb)
+ wb_put(isw->new_wb);
+ kfree(isw);
++out_unlock:
++ up_read(&bdi->wb_switch_rwsem);
+ }
+
+ /**
+@@ -896,6 +926,9 @@ fs_initcall(cgroup_writeback_init);
+
+ #else /* CONFIG_CGROUP_WRITEBACK */
+
++static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
++static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
++
+ static struct bdi_writeback *
+ locked_inode_to_wb_and_lock_list(struct inode *inode)
+ __releases(&inode->i_lock)
+@@ -2341,8 +2374,11 @@ void sync_inodes_sb(struct super_block *
+ return;
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
++ /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
++ bdi_down_write_wb_switch_rwsem(bdi);
+ bdi_split_work_to_wbs(bdi, &work, false);
+ wb_wait_for_completion(bdi, &done);
++ bdi_up_write_wb_switch_rwsem(bdi);
+
+ wait_sb_inodes(sb);
+ }
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -157,6 +157,7 @@ struct backing_dev_info {
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
+ atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
++ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
+ #else
+ struct bdi_writeback_congested *wb_congested;
+ #endif
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -669,6 +669,7 @@ static int cgwb_bdi_init(struct backing_
+ INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
+ bdi->cgwb_congested_tree = RB_ROOT;
+ atomic_set(&bdi->usage_cnt, 1);
++ init_rwsem(&bdi->wb_switch_rwsem);
+
+ ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+ if (!ret) {