--- /dev/null
+From cc9dbfa9707868fb0ca864c05e0c42d3f4d15cf2 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 13 Nov 2019 12:12:59 +0100
+Subject: ALSA: usb-audio: Fix incorrect NULL check in create_yamaha_midi_quirk()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit cc9dbfa9707868fb0ca864c05e0c42d3f4d15cf2 upstream.
+
+Commit 60849562a5db ("ALSA: usb-audio: Fix possible NULL
+dereference at create_yamaha_midi_quirk()") added NULL checks in
+create_yamaha_midi_quirk(), but with an oversight: the code is meant
+to allow either injd or outjd to be NULL, yet the second if check
+returned -ENODEV whenever either of them was NULL. Fix the check so
+that only a descriptor that is present but invalid leads to -ENODEV.
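+
+A minimal sketch of the intended semantics (kernel context assumed; the
+helper and the 'valid' callback are stand-ins, not driver code): a missing
+jack descriptor is tolerated, a present but malformed one is not.
+
+    static int check_jacks(const void *injd, const void *outjd,
+                           bool (*valid)(const void *))
+    {
+            if (!injd && !outjd)
+                    return -ENODEV;         /* neither jack described */
+            /* the broken check also hit -ENODEV when only one of the two
+             * pointers was NULL */
+            if ((injd && !valid(injd)) || (outjd && !valid(outjd)))
+                    return -ENODEV;         /* a present jack is malformed */
+            return 0;
+    }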
+
+Fixes: 60849562a5db ("ALSA: usb-audio: Fix possible NULL dereference at create_yamaha_midi_quirk()")
+Reported-by: Pavel Machek <pavel@denx.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191113111259.24123-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/quirks.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -248,8 +248,8 @@ static int create_yamaha_midi_quirk(stru
+ NULL, USB_MS_MIDI_OUT_JACK);
+ if (!injd && !outjd)
+ return -ENODEV;
+- if (!(injd && snd_usb_validate_midi_desc(injd)) ||
+- !(outjd && snd_usb_validate_midi_desc(outjd)))
++ if ((injd && !snd_usb_validate_midi_desc(injd)) ||
++ (outjd && !snd_usb_validate_midi_desc(outjd)))
+ return -ENODEV;
+ if (injd && (injd->bLength < 5 ||
+ (injd->bJackType != USB_MS_EMBEDDED &&
--- /dev/null
+From 976a68f06b2ea49e2ab67a5f84919a8b105db8be Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 14 Nov 2019 17:56:12 +0100
+Subject: ALSA: usb-audio: Fix incorrect size check for processing/extension units
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 976a68f06b2ea49e2ab67a5f84919a8b105db8be upstream.
+
+The recently introduced unit descriptor validation had a bug for
+processing and extension units: it counted the bControlSize byte twice,
+so it expected a bigger descriptor size than it should have. This seems
+to result in a probe error on a few devices.
+
+Fix the length calculation so that processing units (PU) and extension
+units (EU) are checked properly.
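+
+A back-of-the-envelope check against the hunk below, with a hypothetical
+bControlSize of 2: the old accounting required one byte more than a valid
+descriptor actually contains.
+
+    size_t m = 2;                                      /* bControlSize */
+    size_t old_need = (1 + 2 + 1 + 1) + (1 + m + 1);   /* = 9, bControlSize counted twice */
+    size_t new_need = (1 + 2 + 1)     + (1 + m + 1);   /* = 8, counted once */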
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191114165613.7422-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/validate.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -81,9 +81,9 @@ static bool validate_processing_unit(con
+ switch (v->protocol) {
+ case UAC_VERSION_1:
+ default:
+- /* bNrChannels, wChannelConfig, iChannelNames, bControlSize */
+- len += 1 + 2 + 1 + 1;
+- if (d->bLength < len) /* bControlSize */
++ /* bNrChannels, wChannelConfig, iChannelNames */
++ len += 1 + 2 + 1;
++ if (d->bLength < len + 1) /* bControlSize */
+ return false;
+ m = hdr[len];
+ len += 1 + m + 1; /* bControlSize, bmControls, iProcessing */
--- /dev/null
+From 167beb1756791e0806365a3f86a0da10d7a327ee Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sat, 9 Nov 2019 19:16:58 +0100
+Subject: ALSA: usb-audio: Fix missing error check at mixer resolution test
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 167beb1756791e0806365a3f86a0da10d7a327ee upstream.
+
+A check of the return value from get_cur_mix_raw() is missing in the
+resolution test code in get_min_max_with_quirks(), which may leave the
+variable untouched, leading to a random uninitialized value, as
+detected by the syzkaller fuzzer.
+
+Add the missing error check to fix that.
+
+Reported-and-tested-by: syzbot+abe1ab7afc62c6bb6377@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191109181658.30368-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/mixer.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1229,7 +1229,8 @@ static int get_min_max_with_quirks(struc
+ if (cval->min + cval->res < cval->max) {
+ int last_valid_res = cval->res;
+ int saved, test, check;
+- get_cur_mix_raw(cval, minchn, &saved);
++ if (get_cur_mix_raw(cval, minchn, &saved) < 0)
++ goto no_res_check;
+ for (;;) {
+ test = saved;
+ if (test < cval->max)
+@@ -1249,6 +1250,7 @@ static int get_min_max_with_quirks(struc
+ snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
+ }
+
++no_res_check:
+ cval->initialized = 1;
+ }
+
--- /dev/null
+From 528699317dd6dc722dccc11b68800cf945109390 Mon Sep 17 00:00:00 2001
+From: Henry Lin <henryl@nvidia.com>
+Date: Wed, 13 Nov 2019 10:14:19 +0800
+Subject: ALSA: usb-audio: not submit urb for stopped endpoint
+
+From: Henry Lin <henryl@nvidia.com>
+
+commit 528699317dd6dc722dccc11b68800cf945109390 upstream.
+
+While an output urb's snd_complete_urb() is executing, the endpoint may be
+stopped during the prepare callback, before prepare_outbound_urb() returns,
+resulting in the next urb being submitted to a stopped endpoint. The
+usb-audio driver cannot re-use that urb afterwards because it is still
+held by the USB stack.
+
+This change checks the EP_FLAG_RUNNING flag again after
+prepare_outbound_urb() so that snd_complete_urb() knows the endpoint has
+already stopped and does not submit the next urb. It fixes errors such as:
+
+[ 213.153103] usb 1-2: timeout: still 1 active urbs on EP #1
+[ 213.164121] usb 1-2: cannot submit urb 0, error -16: unknown error
+
+Signed-off-by: Henry Lin <henryl@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191113021420.13377-1-henryl@nvidia.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/endpoint.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -388,6 +388,9 @@ static void snd_complete_urb(struct urb
+ }
+
+ prepare_outbound_urb(ep, ctx);
++ /* can be stopped during prepare callback */
++ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++ goto exit_clear;
+ } else {
+ retire_inbound_urb(ep, ctx);
+ /* can be stopped during retire callback */
--- /dev/null
+From e6c617102c7e4ac1398cb0b98ff1f0727755b520 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 8 Nov 2019 16:11:56 +0000
+Subject: Btrfs: fix log context list corruption after rename exchange operation
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit e6c617102c7e4ac1398cb0b98ff1f0727755b520 upstream.
+
+During rename exchange we might have successfully logged the new name in
+the source root's log tree, in which case we leave our log context
+(allocated on the stack) in the root's list of log contexts. However we
+might fail to log the new name in the destination root, in which case we
+fall back to a transaction commit later and never sync the log of the
+source root, which causes the source root's log context to remain in the
+list of log contexts. This later causes invalid memory accesses because
+the context was allocated on the stack and after rename exchange finishes
+the stack gets reused and overwritten for other purposes.
+
+The kernel's linked list corruption detector (CONFIG_DEBUG_LIST=y) can
+detect this and report something like the following:
+
+ [ 691.489929] ------------[ cut here ]------------
+ [ 691.489947] list_add corruption. prev->next should be next (ffff88819c944530), but was ffff8881c23f7be4. (prev=ffff8881c23f7a38).
+ [ 691.489967] WARNING: CPU: 2 PID: 28933 at lib/list_debug.c:28 __list_add_valid+0x95/0xe0
+ (...)
+ [ 691.489998] CPU: 2 PID: 28933 Comm: fsstress Not tainted 5.4.0-rc6-btrfs-next-62 #1
+ [ 691.490001] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-0-ga698c8995f-prebuilt.qemu.org 04/01/2014
+ [ 691.490003] RIP: 0010:__list_add_valid+0x95/0xe0
+ (...)
+ [ 691.490007] RSP: 0018:ffff8881f0b3faf8 EFLAGS: 00010282
+ [ 691.490010] RAX: 0000000000000000 RBX: ffff88819c944530 RCX: 0000000000000000
+ [ 691.490011] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffffffffa2c497e0
+ [ 691.490013] RBP: ffff8881f0b3fe68 R08: ffffed103eaa4115 R09: ffffed103eaa4114
+ [ 691.490015] R10: ffff88819c944000 R11: ffffed103eaa4115 R12: 7fffffffffffffff
+ [ 691.490016] R13: ffff8881b4035610 R14: ffff8881e7b84728 R15: 1ffff1103e167f7b
+ [ 691.490019] FS: 00007f4b25ea2e80(0000) GS:ffff8881f5500000(0000) knlGS:0000000000000000
+ [ 691.490021] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 691.490022] CR2: 00007fffbb2d4eec CR3: 00000001f2a4a004 CR4: 00000000003606e0
+ [ 691.490025] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [ 691.490027] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ [ 691.490029] Call Trace:
+ [ 691.490058] btrfs_log_inode_parent+0x667/0x2730 [btrfs]
+ [ 691.490083] ? join_transaction+0x24a/0xce0 [btrfs]
+ [ 691.490107] ? btrfs_end_log_trans+0x80/0x80 [btrfs]
+ [ 691.490111] ? dget_parent+0xb8/0x460
+ [ 691.490116] ? lock_downgrade+0x6b0/0x6b0
+ [ 691.490121] ? rwlock_bug.part.0+0x90/0x90
+ [ 691.490127] ? do_raw_spin_unlock+0x142/0x220
+ [ 691.490151] btrfs_log_dentry_safe+0x65/0x90 [btrfs]
+ [ 691.490172] btrfs_sync_file+0x9f1/0xc00 [btrfs]
+ [ 691.490195] ? btrfs_file_write_iter+0x1800/0x1800 [btrfs]
+ [ 691.490198] ? rcu_read_lock_any_held.part.11+0x20/0x20
+ [ 691.490204] ? __do_sys_newstat+0x88/0xd0
+ [ 691.490207] ? cp_new_stat+0x5d0/0x5d0
+ [ 691.490218] ? do_fsync+0x38/0x60
+ [ 691.490220] do_fsync+0x38/0x60
+ [ 691.490224] __x64_sys_fdatasync+0x32/0x40
+ [ 691.490228] do_syscall_64+0x9f/0x540
+ [ 691.490233] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [ 691.490235] RIP: 0033:0x7f4b253ad5f0
+ (...)
+ [ 691.490239] RSP: 002b:00007fffbb2d6078 EFLAGS: 00000246 ORIG_RAX: 000000000000004b
+ [ 691.490242] RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f4b253ad5f0
+ [ 691.490244] RDX: 00007fffbb2d5fe0 RSI: 00007fffbb2d5fe0 RDI: 0000000000000003
+ [ 691.490245] RBP: 000000000000000d R08: 0000000000000001 R09: 00007fffbb2d608c
+ [ 691.490247] R10: 00000000000002e8 R11: 0000000000000246 R12: 00000000000001f4
+ [ 691.490248] R13: 0000000051eb851f R14: 00007fffbb2d6120 R15: 00005635a498bda0
+
+This started happening recently when running some test cases from fstests,
+such as btrfs/004, because support for rename exchange was added last week
+to fsstress in fstests.
+
+So fix this by deleting the log context for the source root from the list
+if we have logged the new name in the source root.
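+
+A condensed sketch of the hazard (generic kernel code with made-up names,
+not the btrfs functions touched below): a list node that lives on the
+stack must be unlinked on every return path before the frame dies.
+
+    struct stack_ctx {
+            struct list_head list;
+    };
+
+    static void demo(struct list_head *shared, struct mutex *lock)
+    {
+            struct stack_ctx ctx;           /* allocated in this stack frame */
+
+            mutex_lock(lock);
+            list_add_tail(&ctx.list, shared);
+            mutex_unlock(lock);
+
+            /* ... returning here without unlinking leaves 'shared' pointing
+             * into a stack frame that is about to be reused ... */
+
+            mutex_lock(lock);
+            list_del_init(&ctx.list);       /* must happen on every path */
+            mutex_unlock(lock);
+    }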
+
+Reported-by: Su Yue <Damenly_Su@gmx.com>
+Fixes: d4682ba03ef618 ("Btrfs: sync log after logging new name")
+CC: stable@vger.kernel.org # 4.19+
+Tested-by: Su Yue <Damenly_Su@gmx.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/inode.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9723,6 +9723,18 @@ out_fail:
+ commit_transaction = true;
+ }
+ if (commit_transaction) {
++ /*
++ * We may have set commit_transaction when logging the new name
++ * in the destination root, in which case we left the source
++ * root context in the list of log contextes. So make sure we
++ * remove it to avoid invalid memory accesses, since the context
++ * was allocated in our stack frame.
++ */
++ if (sync_log_root) {
++ mutex_lock(&root->log_mutex);
++ list_del_init(&ctx_root.list);
++ mutex_unlock(&root->log_mutex);
++ }
+ ret = btrfs_commit_transaction(trans);
+ } else {
+ int ret2;
+@@ -9736,6 +9748,9 @@ out_notrans:
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ up_read(&fs_info->subvol_sem);
+
++ ASSERT(list_empty(&ctx_root.list));
++ ASSERT(list_empty(&ctx_dest.list));
++
+ return ret;
+ }
+
--- /dev/null
+From 937c6b27c73e02cd4114f95f5c37ba2c29fadba1 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Wed, 9 Oct 2019 17:02:30 +0200
+Subject: cgroup: freezer: call cgroup_enter_frozen() with preemption disabled in ptrace_stop()
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 937c6b27c73e02cd4114f95f5c37ba2c29fadba1 upstream.
+
+ptrace_stop() does preempt_enable_no_resched() to avoid preemption,
+but after that cgroup_enter_frozen() does spin_lock/unlock, and this adds
+another preemption point.
+
+Reported-and-tested-by: Bruce Ashfield <bruce.ashfield@gmail.com>
+Fixes: 76f969e8948d ("cgroup: cgroup v2 freezer")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Roman Gushchin <guro@fb.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/signal.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2205,8 +2205,8 @@ static void ptrace_stop(int exit_code, i
+ */
+ preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ cgroup_enter_frozen();
++ preempt_enable_no_resched();
+ freezable_schedule();
+ cgroup_leave_frozen(true);
+ } else {
--- /dev/null
+From 2f216a8507153578efc309c821528a6b81628cd2 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Fri, 1 Nov 2019 16:20:24 +0200
+Subject: drm/i915: update rawclk also on resume
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 2f216a8507153578efc309c821528a6b81628cd2 upstream.
+
+Since CNP it's possible for rawclk to have two different values, 19.2
+and 24 MHz. If the value indicated by SFUSE_STRAP register is different
+from the power on default for PCH_RAWCLK_FREQ, we'll end up having a
+mismatch between the rawclk hardware and software states after
+suspend/resume. On previous platforms this used to work by accident,
+because the power on defaults worked just fine.
+
+Update the rawclk also on resume. The natural place to do this would be
+intel_modeset_init_hw(); however, VLV/CHV need it done before
+intel_power_domains_init_hw(). Thus put it there even if it feels
+slightly out of place.
+
+v2: Call intel_update_rawclk() in intel_power_domains_init_hw() for all
+ platforms (Ville).
+
+Reported-by: Shawn Lee <shawn.c.lee@intel.com>
+Cc: Shawn Lee <shawn.c.lee@intel.com>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Tested-by: Shawn Lee <shawn.c.lee@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191101142024.13877-1-jani.nikula@intel.com
+(cherry picked from commit 59ed05ccdded5eb18ce012eff3d01798ac8535fa)
+Cc: <stable@vger.kernel.org> # v4.15+
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_display_power.c | 3 +++
+ drivers/gpu/drm/i915/i915_drv.c | 3 ---
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -4345,6 +4345,9 @@ void intel_power_domains_init_hw(struct
+
+ power_domains->initializing = true;
+
++ /* Must happen before power domain init on VLV/CHV */
++ intel_update_rawclk(i915);
++
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_CANNONLAKE(i915)) {
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -708,9 +708,6 @@ static int i915_load_modeset_init(struct
+ if (ret)
+ goto cleanup_vga_client;
+
+- /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+- intel_update_rawclk(dev_priv);
+-
+ intel_power_domains_init_hw(dev_priv, false);
+
+ intel_csr_ucode_init(dev_priv);
--- /dev/null
+From e72b9dd6a5f17d0fb51f16f8685f3004361e83d0 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 3 Nov 2019 13:45:04 -0500
+Subject: ecryptfs_lookup_interpose(): lower_dentry->d_inode is not stable
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit e72b9dd6a5f17d0fb51f16f8685f3004361e83d0 upstream.
+
+lower_dentry can't go from positive to negative (we have it pinned),
+but it *can* go from negative to positive. So fetching ->d_inode
+into a local variable, doing a blocking allocation, checking that
+now ->d_inode is non-NULL and feeding the value we'd fetched
+earlier to a function that won't accept NULL is not a good idea.
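+
+The pattern the fix applies, condensed (same names as in the diff below,
+error handling omitted): fetch the unstable pointer exactly once and use
+only that local value afterwards.
+
+    struct inode *lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+    if (!lower_inode) {             /* dentry was negative when we looked */
+            d_add(dentry, NULL);
+            return NULL;
+    }
+    /* from here on use lower_inode; never re-read lower_dentry->d_inode */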
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ecryptfs/inode.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -311,7 +311,7 @@ static int ecryptfs_i_size_read(struct d
+ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
+ struct dentry *lower_dentry)
+ {
+- struct inode *inode, *lower_inode = d_inode(lower_dentry);
++ struct inode *inode, *lower_inode;
+ struct ecryptfs_dentry_info *dentry_info;
+ struct vfsmount *lower_mnt;
+ int rc = 0;
+@@ -331,7 +331,15 @@ static struct dentry *ecryptfs_lookup_in
+ dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.dentry = lower_dentry;
+
+- if (d_really_is_negative(lower_dentry)) {
++ /*
++ * negative dentry can go positive under us here - its parent is not
++ * locked. That's OK and that could happen just as we return from
++ * ecryptfs_lookup() anyway. Just need to be careful and fetch
++ * ->d_inode only once - it's not stable here.
++ */
++ lower_inode = READ_ONCE(lower_dentry->d_inode);
++
++ if (!lower_inode) {
+ /* We want to add because we couldn't find in lower */
+ d_add(dentry, NULL);
+ return NULL;
--- /dev/null
+From 762c69685ff7ad5ad7fee0656671e20a0c9c864d Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 3 Nov 2019 13:55:43 -0500
+Subject: ecryptfs_lookup_interpose(): lower_dentry->d_parent is not stable either
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 762c69685ff7ad5ad7fee0656671e20a0c9c864d upstream.
+
+We need to get the underlying dentry of the parent; sure, absent the races
+it is the parent of the underlying dentry, but there's nothing to prevent
+losing a timeslice to preemption in the middle of the evaluation of
+lower_dentry->d_parent->d_inode, having another process move lower_dentry
+around and have its (ex)parent not pinned anymore and freed on memory
+pressure. Then we regain the CPU and try to fetch ->d_inode from memory
+that has been freed by that point.
+
+dentry->d_parent *is* stable here - it's an argument of ->lookup() and
+we are guaranteed that it won't be moved anywhere until we feed it
+to d_add/d_splice_alias. So we safely go that way to get to its
+underlying dentry.
+
+Cc: stable@vger.kernel.org # since 2009 or so
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ecryptfs/inode.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -311,9 +311,9 @@ static int ecryptfs_i_size_read(struct d
+ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
+ struct dentry *lower_dentry)
+ {
++ struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct inode *inode, *lower_inode;
+ struct ecryptfs_dentry_info *dentry_info;
+- struct vfsmount *lower_mnt;
+ int rc = 0;
+
+ dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
+@@ -322,13 +322,12 @@ static struct dentry *ecryptfs_lookup_in
+ return ERR_PTR(-ENOMEM);
+ }
+
+- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
+ fsstack_copy_attr_atime(d_inode(dentry->d_parent),
+- d_inode(lower_dentry->d_parent));
++ d_inode(path->dentry));
+ BUG_ON(!d_count(lower_dentry));
+
+ ecryptfs_set_dentry_private(dentry, dentry_info);
+- dentry_info->lower_path.mnt = lower_mnt;
++ dentry_info->lower_path.mnt = mntget(path->mnt);
+ dentry_info->lower_path.dentry = lower_dentry;
+
+ /*
--- /dev/null
+From 7574c0db2e68c4d0bae9d415a683bdd8b2a761e9 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 13 Nov 2019 19:29:38 +0100
+Subject: i2c: acpi: Force bus speed to 400KHz if a Silead touchscreen is present
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 7574c0db2e68c4d0bae9d415a683bdd8b2a761e9 upstream.
+
+Many cheap devices use Silead touchscreen controllers. Testing has shown
+repeatedly that these touchscreen controllers work fine at 400KHz, but for
+unknown reasons do not work properly at 100KHz. This has been seen on
+both ARM and x86 devices using totally different i2c controllers.
+
+On some devices the ACPI tables list another device on the same I2C bus
+as only being capable of 100KHz; testing has shown that these other
+devices work fine at 400KHz (as can be expected of any recent I2C hw).
+
+This commit makes i2c_acpi_find_bus_speed() always return 400KHz if a
+Silead touchscreen controller is present, fixing the touchscreen on
+devices whose ACPI tables wrongly list another device on the same bus as
+only being capable of 100KHz.
+
+Specifically this fixes the touchscreen on the Jumper EZpad 6 m4 not
+working.
+
+Reported-by: youling 257 <youling257@gmail.com>
+Tested-by: youling 257 <youling257@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+[wsa: rewording warning a little]
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/i2c-core-acpi.c | 28 +++++++++++++++++++++++++++-
+ 1 file changed, 27 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
+ int index;
+ u32 speed;
+ u32 min_speed;
++ u32 force_speed;
+ };
+
+ /**
+@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_
+ return acpi_match_device(matches, &client->dev);
+ }
+
++static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
++ /*
++ * These Silead touchscreen controllers only work at 400KHz, for
++ * some reason they do not work at 100KHz. On some devices the ACPI
++ * tables list another device at their bus as only being capable
++ * of 100KHz, testing has shown that these other devices work fine
++ * at 400KHz (as can be expected of any recent i2c hw) so we force
++ * the speed of the bus to 400 KHz if a Silead device is present.
++ */
++ { "MSSL1680", 0 },
++ {}
++};
++
+ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+ {
+@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed
+ if (lookup->speed <= lookup->min_speed)
+ lookup->min_speed = lookup->speed;
+
++ if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
++ lookup->force_speed = 400000;
++
+ return AE_OK;
+ }
+
+@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct devic
+ return 0;
+ }
+
+- return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
++ if (lookup.force_speed) {
++ if (lookup.force_speed != lookup.min_speed)
++ dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
++ lookup.min_speed, lookup.force_speed);
++ return lookup.force_speed;
++ } else if (lookup.min_speed != UINT_MAX) {
++ return lookup.min_speed;
++ } else {
++ return 0;
++ }
+ }
+ EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
+
--- /dev/null
+From c2be3865a1763c4be39574937e1aae27e917af4d Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Fri, 25 Oct 2019 15:58:36 -0400
+Subject: IB/hfi1: Calculate flow weight based on QP MTU for TID RDMA
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit c2be3865a1763c4be39574937e1aae27e917af4d upstream.
+
+For a TID RDMA WRITE request, a QP on the responder side could be put into
+a queue when a hardware flow is not available. An RNR NAK will be returned
+to the requester with an RNR timeout value based on the position of the QP
+in the queue. The tid_rdma_flow_wt variable is used to calculate the
+timeout value and is determined using an MTU of 4096 at module load
+time. This can cut the timeout value to half of the desired value, leading
+to excessive RNR retries.
+
+This patch fixes the issue by calculating the flow weight with the real
+MTU assigned to the QP.
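+
+A back-of-the-envelope illustration with made-up numbers (only the ratio
+matters; the real constants live in tid_rdma.h):
+
+    u32 seg_size = 256 * 1024;                  /* one TID RDMA segment */
+    u32 max_psn  = 2048;                        /* hypothetical MAX_TID_FLOW_PSN */
+    u32 old_wt   = max_psn * 4096 / seg_size;   /* 32: fixed 4 KiB assumption */
+    u32 new_wt   = max_psn * 8192 / seg_size;   /* 64: QP actually using an 8 KiB MTU */
+    /* old_wt is half of new_wt, so the derived RNR timeout came out too
+     * short and the requester retried more often than necessary. */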
+
+Fixes: 07b923701e38 ("IB/hfi1: Add functions to receive TID RDMA WRITE request")
+Link: https://lore.kernel.org/r/20191025195836.106825.77769.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/init.c | 1 -
+ drivers/infiniband/hw/hfi1/tid_rdma.c | 13 +++++--------
+ drivers/infiniband/hw/hfi1/tid_rdma.h | 3 +--
+ 3 files changed, 6 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
+ goto bail_dev;
+ }
+
+- hfi1_compute_tid_rdma_flow_wt();
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
+ * C - Capcode
+ */
+
+-static u32 tid_rdma_flow_wt;
+-
+ static void tid_rdma_trigger_resume(struct work_struct *work);
+ static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
+ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+@@ -3380,18 +3378,17 @@ u32 hfi1_build_tid_rdma_write_req(struct
+ return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
+ }
+
+-void hfi1_compute_tid_rdma_flow_wt(void)
++static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
+ {
+ /*
+ * Heuristic for computing the RNR timeout when waiting on the flow
+ * queue. Rather than a computationaly expensive exact estimate of when
+ * a flow will be available, we assume that if a QP is at position N in
+ * the flow queue it has to wait approximately (N + 1) * (number of
+- * segments between two sync points), assuming PMTU of 4K. The rationale
+- * for this is that flows are released and recycled at each sync point.
++ * segments between two sync points). The rationale for this is that
++ * flows are released and recycled at each sync point.
+ */
+- tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
+- TID_RDMA_MAX_SEGMENT_SIZE;
++ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
+ }
+
+ static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
+@@ -3514,7 +3511,7 @@ static void hfi1_tid_write_alloc_resourc
+ if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
+ ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
+ if (ret) {
+- to_seg = tid_rdma_flow_wt *
++ to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
+ position_in_queue(qpriv,
+ &rcd->flow_queue);
+ break;
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
+@@ -17,6 +17,7 @@
+ #define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+ #define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+ #define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
++#define TID_RDMA_SEGMENT_SHIFT 18
+
+ /*
+ * Bit definitions for priv->s_flags.
+@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+
+-void hfi1_compute_tid_rdma_flow_wt(void);
+-
+ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
+
+ u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
--- /dev/null
+From a9c3c4c597704b3a1a2b9bef990e7d8a881f6533 Mon Sep 17 00:00:00 2001
+From: James Erwin <james.erwin@intel.com>
+Date: Fri, 1 Nov 2019 15:20:59 -0400
+Subject: IB/hfi1: Ensure full Gen3 speed in a Gen4 system
+
+From: James Erwin <james.erwin@intel.com>
+
+commit a9c3c4c597704b3a1a2b9bef990e7d8a881f6533 upstream.
+
+If an hfi1 card is inserted in a Gen4 system, the driver will avoid the
+Gen3 speed bump and the card will operate at half speed.
+
+This is because the driver avoids the Gen3 speed bump when the parent bus
+speed isn't identical to Gen3, 8.0GT/s. This is not compatible with Gen4
+and newer speeds.
+
+Fix by relaxing the test to explicitly look for the lower capability
+speeds, which inherently allows for Gen4 and all future speeds.
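+
+The relaxed test, sketched as a switch for clarity (the NULL check of the
+parent bridge is omitted here; the actual hunk below keeps it):
+
+    switch (dd->pcidev->bus->max_bus_speed) {
+    case PCIE_SPEED_2_5GT:                  /* Gen1 parent */
+    case PCIE_SPEED_5_0GT:                  /* Gen2 parent */
+            dd->link_gen3_capable = 0;      /* bridge really cannot do 8.0GT/s */
+            break;
+    default:                                /* Gen3, Gen4 and anything faster */
+            break;
+    }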
+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Link: https://lore.kernel.org/r/20191101192059.106248.1699.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: James Erwin <james.erwin@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/pcie.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
+ /*
+ * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
+ */
+- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
++ if (parent &&
++ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
++ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
+ dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
+ dd->link_gen3_capable = 0;
+ }
--- /dev/null
+From c1abd865bd125015783286b353abb8da51644f59 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Fri, 25 Oct 2019 15:58:30 -0400
+Subject: IB/hfi1: Ensure r_tid_ack is valid before building TID RDMA ACK packet
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit c1abd865bd125015783286b353abb8da51644f59 upstream.
+
+The index r_tid_ack is used to indicate the next TID RDMA WRITE request to
+acknowledge in the ring s_ack_queue[] on the responder side and should be
+set to a valid index other than its initial value before r_tid_tail is
+advanced to the next TID RDMA WRITE request and particularly before a TID
+RDMA ACK is built. Otherwise, a NULL pointer dereference may result:
+
+ BUG: unable to handle kernel paging request at ffff9a32d27abff8
+ IP: [<ffffffffc0d87ea6>] hfi1_make_tid_rdma_pkt+0x476/0xcb0 [hfi1]
+ PGD 2749032067 PUD 0
+ Oops: 0000 1 SMP
+ Modules linked in: osp(OE) ofd(OE) lfsck(OE) ost(OE) mgc(OE) osd_zfs(OE) lquota(OE) lustre(OE) lmv(OE) mdc(OE) lov(OE) fid(OE) fld(OE) ko2iblnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) libcfs(OE) ib_ipoib(OE) hfi1(OE) rdmavt(OE) nfsv3 nfs_acl rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache ib_isert iscsi_target_mod target_core_mod ib_ucm dm_mirror dm_region_hash dm_log mlx5_ib dm_mod zfs(POE) rpcrdma sunrpc rdma_ucm ib_uverbs opa_vnic ib_iser zunicode(POE) ib_umad zavl(POE) icp(POE) sb_edac intel_powerclamp coretemp rdma_cm intel_rapl iosf_mbi iw_cm libiscsi scsi_transport_iscsi kvm ib_cm iTCO_wdt mxm_wmi iTCO_vendor_support irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd zcommon(POE) znvpair(POE) pcspkr spl(OE) mei_me
+ sg mei ioatdma lpc_ich joydev i2c_i801 shpchp ipmi_si ipmi_devintf ipmi_msghandler wmi acpi_power_meter ip_tables xfs libcrc32c sd_mod crc_t10dif crct10dif_generic mgag200 mlx5_core drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ixgbe ahci ttm mlxfw ib_core libahci devlink mdio crct10dif_pclmul crct10dif_common drm ptp libata megaraid_sas crc32c_intel i2c_algo_bit pps_core i2c_core dca [last unloaded: rdmavt]
+ CPU: 15 PID: 68691 Comm: kworker/15:2H Kdump: loaded Tainted: P W OE ------------ 3.10.0-862.2.3.el7_lustre.x86_64 #1
+ Hardware name: Intel Corporation S2600WTT/S2600WTT, BIOS SE5C610.86B.01.01.0016.033120161139 03/31/2016
+ Workqueue: hfi0_0 _hfi1_do_tid_send [hfi1]
+ task: ffff9a01f47faf70 ti: ffff9a11776a8000 task.ti: ffff9a11776a8000
+ RIP: 0010:[<ffffffffc0d87ea6>] [<ffffffffc0d87ea6>] hfi1_make_tid_rdma_pkt+0x476/0xcb0 [hfi1]
+ RSP: 0018:ffff9a11776abd08 EFLAGS: 00010002
+ RAX: ffff9a32d27abfc0 RBX: ffff99f2d27aa000 RCX: 00000000ffffffff
+ RDX: 0000000000000000 RSI: 0000000000000220 RDI: ffff99f2ffc05300
+ RBP: ffff9a11776abd88 R08: 000000000001c310 R09: ffffffffc0d87ad4
+ R10: 0000000000000000 R11: 0000000000000000 R12: ffff9a117a423c00
+ R13: ffff9a117a423c00 R14: ffff9a03500c0000 R15: ffff9a117a423cb8
+ FS: 0000000000000000(0000) GS:ffff9a117e9c0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: ffff9a32d27abff8 CR3: 0000002748a0e000 CR4: 00000000001607e0
+ Call Trace:
+ [<ffffffffc0d88874>] _hfi1_do_tid_send+0x194/0x320 [hfi1]
+ [<ffffffffaf0b2dff>] process_one_work+0x17f/0x440
+ [<ffffffffaf0b3ac6>] worker_thread+0x126/0x3c0
+ [<ffffffffaf0b39a0>] ? manage_workers.isra.24+0x2a0/0x2a0
+ [<ffffffffaf0bae31>] kthread+0xd1/0xe0
+ [<ffffffffaf0bad60>] ? insert_kthread_work+0x40/0x40
+ [<ffffffffaf71f5f7>] ret_from_fork_nospec_begin+0x21/0x21
+ [<ffffffffaf0bad60>] ? insert_kthread_work+0x40/0x40
+ hfi1 0000:05:00.0: hfi1_0: reserved_op: opcode 0xf2, slot 2, rsv_used 1, rsv_ops 1
+ Code: 00 00 41 8b 8d d8 02 00 00 89 c8 48 89 45 b0 48 c1 65 b0 06 48 8b 83 a0 01 00 00 48 01 45 b0 48 8b 45 b0 41 80 bd 10 03 00 00 00 <48> 8b 50 38 4c 8d 7a 50 74 45 8b b2 d0 00 00 00 85 f6 0f 85 72
+ RIP [<ffffffffc0d87ea6>] hfi1_make_tid_rdma_pkt+0x476/0xcb0 [hfi1]
+ RSP <ffff9a11776abd08>
+ CR2: ffff9a32d27abff8
+
+This problem can happen if a RESYNC request is received before r_tid_ack
+is modified.
+
+This patch fixes the issue by making sure that r_tid_ack is set to a valid
+value before a TID RDMA ACK is built. Functions are defined to simplify
+the code.
+
+Fixes: 07b923701e38 ("IB/hfi1: Add functions to receive TID RDMA WRITE request")
+Fixes: 7cf0ad679de4 ("IB/hfi1: Add a function to receive TID RDMA RESYNC packet")
+Link: https://lore.kernel.org/r/20191025195830.106825.44022.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c | 44 ++++++++++++++++++++--------------
+ 1 file changed, 27 insertions(+), 17 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -136,6 +136,26 @@ static void update_r_next_psn_fecn(struc
+ struct tid_rdma_flow *flow,
+ bool fecn);
+
++static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
++{
++ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
++ priv->r_tid_ack = priv->r_tid_tail;
++}
++
++static void tid_rdma_schedule_ack(struct rvt_qp *qp)
++{
++ struct hfi1_qp_priv *priv = qp->priv;
++
++ priv->s_flags |= RVT_S_ACK_PENDING;
++ hfi1_schedule_tid_send(qp);
++}
++
++static void tid_rdma_trigger_ack(struct rvt_qp *qp)
++{
++ validate_r_tid_ack(qp->priv);
++ tid_rdma_schedule_ack(qp);
++}
++
+ static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
+ {
+ return
+@@ -2997,10 +3017,7 @@ nak_psn:
+ qpriv->s_nak_state = IB_NAK_PSN_ERROR;
+ /* We are NAK'ing the next expected PSN */
+ qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
+- qpriv->s_flags |= RVT_S_ACK_PENDING;
+- if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
+- qpriv->r_tid_ack = qpriv->r_tid_tail;
+- hfi1_schedule_tid_send(qp);
++ tid_rdma_trigger_ack(qp);
+ }
+ goto unlock;
+ }
+@@ -3518,7 +3535,7 @@ static void hfi1_tid_write_alloc_resourc
+ /*
+ * If overtaking req->acked_tail, send an RNR NAK. Because the
+ * QP is not queued in this case, and the issue can only be
+- * caused due a delay in scheduling the second leg which we
++ * caused by a delay in scheduling the second leg which we
+ * cannot estimate, we use a rather arbitrary RNR timeout of
+ * (MAX_FLOWS / 2) segments
+ */
+@@ -3526,8 +3543,7 @@ static void hfi1_tid_write_alloc_resourc
+ MAX_FLOWS)) {
+ ret = -EAGAIN;
+ to_seg = MAX_FLOWS >> 1;
+- qpriv->s_flags |= RVT_S_ACK_PENDING;
+- hfi1_schedule_tid_send(qp);
++ tid_rdma_trigger_ack(qp);
+ break;
+ }
+
+@@ -4327,8 +4343,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(str
+ trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ trace_hfi1_tid_write_rsp_rcv_data(qp);
+- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+- priv->r_tid_ack = priv->r_tid_tail;
++ validate_r_tid_ack(priv);
+
+ if (opcode == TID_OP(WRITE_DATA_LAST)) {
+ release_rdma_sge_mr(e);
+@@ -4367,8 +4382,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(str
+ }
+
+ done:
+- priv->s_flags |= RVT_S_ACK_PENDING;
+- hfi1_schedule_tid_send(qp);
++ tid_rdma_schedule_ack(qp);
+ exit:
+ priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
+ if (fecn)
+@@ -4380,10 +4394,7 @@ send_nak:
+ if (!priv->s_nak_state) {
+ priv->s_nak_state = IB_NAK_PSN_ERROR;
+ priv->s_nak_psn = flow->flow_state.r_next_psn;
+- priv->s_flags |= RVT_S_ACK_PENDING;
+- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+- priv->r_tid_ack = priv->r_tid_tail;
+- hfi1_schedule_tid_send(qp);
++ tid_rdma_trigger_ack(qp);
+ }
+ goto done;
+ }
+@@ -4931,8 +4942,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct
+ qpriv->resync = true;
+ /* RESYNC request always gets a TID RDMA ACK. */
+ qpriv->s_nak_state = 0;
+- qpriv->s_flags |= RVT_S_ACK_PENDING;
+- hfi1_schedule_tid_send(qp);
++ tid_rdma_trigger_ack(qp);
+ bail:
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
--- /dev/null
+From ce8e8087cf3b5b4f19d29248bfc7deef95525490 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Fri, 25 Oct 2019 15:58:42 -0400
+Subject: IB/hfi1: TID RDMA WRITE should not return IB_WC_RNR_RETRY_EXC_ERR
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit ce8e8087cf3b5b4f19d29248bfc7deef95525490 upstream.
+
+A normal RDMA WRITE request never returns IB_WC_RNR_RETRY_EXC_ERR to ULPs
+because it does not need a posted receive buffer on the responder side.
+Consequently, as an enhancement of the normal RDMA WRITE request inside the
+hfi1 driver, a TID RDMA WRITE request should not return such an error status
+to ULPs either, although it does receive RNR NAKs from the responder when
+TID resources are not available. This behavior is violated when
+qp->s_rnr_retry_cnt is set in the current hfi1 implementation.
+
+This patch enforces these semantics by avoiding any reaction to updates of
+the RNR QP attributes.
+
+Fixes: 3c6cb20a0d17 ("IB/hfi1: Add TID RDMA WRITE functionality into RDMA verbs")
+Link: https://lore.kernel.org/r/20191025195842.106825.71532.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/rc.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -2210,15 +2210,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aet
+ if (qp->s_flags & RVT_S_WAIT_RNR)
+ goto bail_stop;
+ rdi = ib_to_rvt(qp->ibqp.device);
+- if (qp->s_rnr_retry == 0 &&
+- !((rdi->post_parms[wqe->wr.opcode].flags &
+- RVT_OPERATION_IGN_RNR_CNT) &&
+- qp->s_rnr_retry_cnt == 0)) {
+- status = IB_WC_RNR_RETRY_EXC_ERR;
+- goto class_b;
++ if (!(rdi->post_parms[wqe->wr.opcode].flags &
++ RVT_OPERATION_IGN_RNR_CNT)) {
++ if (qp->s_rnr_retry == 0) {
++ status = IB_WC_RNR_RETRY_EXC_ERR;
++ goto class_b;
++ }
++ if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
++ qp->s_rnr_retry--;
+ }
+- if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+- qp->s_rnr_retry--;
+
+ /*
+ * The last valid PSN is the previous PSN. For TID RDMA WRITE
--- /dev/null
+From 22bb13653410424d9fce8d447506a41f8292f22f Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 4 Oct 2019 16:49:34 -0400
+Subject: IB/hfi1: Use a common pad buffer for 9B and 16B packets
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 22bb13653410424d9fce8d447506a41f8292f22f upstream.
+
+There is no reason for a different pad buffer for the two
+packet types.
+
+Expand the current buffer allocation to allow for both
+packet types.
+
+Fixes: f8195f3b14a0 ("IB/hfi1: Eliminate allocation while atomic")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Link: https://lore.kernel.org/r/20191004204934.26838.13099.stgit@awfm-01.aw.intel.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/sdma.c | 5 +++--
+ drivers/infiniband/hw/hfi1/verbs.c | 10 ++++------
+ 2 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -65,6 +65,7 @@
+ #define SDMA_DESCQ_CNT 2048
+ #define SDMA_DESC_INTR 64
+ #define INVALID_TAIL 0xffff
++#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
+
+ static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
+ module_param(sdma_descq_cnt, uint, S_IRUGO);
+@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd,
+ struct sdma_engine *sde;
+
+ if (dd->sdma_pad_dma) {
+- dma_free_coherent(&dd->pcidev->dev, 4,
++ dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
+ (void *)dd->sdma_pad_dma,
+ dd->sdma_pad_phys);
+ dd->sdma_pad_dma = NULL;
+@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u
+ }
+
+ /* Allocate memory for pad */
+- dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
++ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
+ &dd->sdma_pad_phys, GFP_KERNEL);
+ if (!dd->sdma_pad_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
+ /* Length of buffer to create verbs txreq cache name */
+ #define TXREQ_NAME_LEN 24
+
+-/* 16B trailing buffer */
+-static const u8 trail_buf[MAX_16B_PADDING];
+-
+ static uint wss_threshold = 80;
+ module_param(wss_threshold, uint, S_IRUGO);
+ MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
+@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
+
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+- ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
+- (void *)trail_buf, extra_bytes);
++ ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
++ sde->dd->sdma_pad_phys, extra_bytes);
+
+ bail_txadd:
+ return ret;
+@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *q
+ }
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+- seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
++ seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
++ extra_bytes);
+
+ seg_pio_copy_end(pbuf);
+ }
--- /dev/null
+From fa3a5a1880c91bb92594ad42dfe9eedad7996b86 Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.com>
+Date: Fri, 15 Nov 2019 11:35:05 -0800
+Subject: Input: ff-memless - kill timer in destroy()
+
+From: Oliver Neukum <oneukum@suse.com>
+
+commit fa3a5a1880c91bb92594ad42dfe9eedad7996b86 upstream.
+
+No timer must be left running when the device goes away.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Reported-and-tested-by: syzbot+b6c55daa701fc389e286@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/1573726121.17351.3.camel@suse.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/ff-memless.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/input/ff-memless.c
++++ b/drivers/input/ff-memless.c
+@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_devi
+ {
+ struct ml_device *ml = ff->private;
+
++ /*
++ * Even though we stop all playing effects when tearing down
++ * an input device (via input_device_flush() that calls into
++ * input_ff_flush() that stops and erases all effects), we
++ * do not actually stop the timer, and therefore we should
++ * do it here.
++ */
++ del_timer_sync(&ml->timer);
++
+ kfree(ml->private);
+ }
+
--- /dev/null
+From 549766ac2ac1f6c8bb85906bbcea759541bb19a2 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 12 Nov 2019 16:47:08 -0800
+Subject: Input: synaptics-rmi4 - clear IRQ enables for F54
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit 549766ac2ac1f6c8bb85906bbcea759541bb19a2 upstream.
+
+The driver for F54 just polls the status and doesn't even have an IRQ
+handler registered. Make sure to disable all F54 IRQs, so we don't crash
+the kernel on a nonexistent handler.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Link: https://lore.kernel.org/r/20191105114402.6009-1-l.stach@pengutronix.de
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f54.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/rmi4/rmi_f54.c
++++ b/drivers/input/rmi4/rmi_f54.c
+@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_fun
+ {
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
++ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+ }
--- /dev/null
+From ba60cf9f78f0d7c8e73c7390608f7f818ee68aa0 Mon Sep 17 00:00:00 2001
+From: Chuhong Yuan <hslester96@gmail.com>
+Date: Fri, 15 Nov 2019 11:32:36 -0800
+Subject: Input: synaptics-rmi4 - destroy F54 poller workqueue when removing
+
+From: Chuhong Yuan <hslester96@gmail.com>
+
+commit ba60cf9f78f0d7c8e73c7390608f7f818ee68aa0 upstream.
+
+The driver forgets to destroy the workqueue in remove(), similarly to what
+is done when probe() fails. Add a call to destroy_workqueue() to fix it.
+
+Since unregistration will wait for the work to finish, we do not need to
+cancel/flush the work instance in remove().
+
+Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191114023405.31477-1-hslester96@gmail.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f54.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/input/rmi4/rmi_f54.c
++++ b/drivers/input/rmi4/rmi_f54.c
+@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_fu
+
+ video_unregister_device(&f54->vdev);
+ v4l2_device_unregister(&f54->v4l2);
++ destroy_workqueue(f54->workqueue);
+ }
+
+ struct rmi_function_handler rmi_f54_handler = {
--- /dev/null
+From f6aabe1ff1d9d7bad0879253011216438bdb2530 Mon Sep 17 00:00:00 2001
+From: Andrew Duggan <aduggan@synaptics.com>
+Date: Mon, 4 Nov 2019 16:06:44 -0800
+Subject: Input: synaptics-rmi4 - disable the relative position IRQ in the F12 driver
+
+From: Andrew Duggan <aduggan@synaptics.com>
+
+commit f6aabe1ff1d9d7bad0879253011216438bdb2530 upstream.
+
+This patch fixes an issue seen on HID touchpads which report finger
+positions using RMI4 Function 12. The issue manifests itself as
+spurious button presses as described in:
+https://www.spinics.net/lists/linux-input/msg58618.html
+
+Commit 24d28e4f1271 ("Input: synaptics-rmi4 - convert irq distribution
+to irq_domain") switched the RMI4 driver to using an irq_domain to handle
+RMI4 function interrupts. Functions with more then one interrupt now have
+each interrupt mapped to their own IRQ and IRQ handler. The result of
+this change is that the F12 IRQ handler was now getting called twice. Once
+for the absolute data interrupt and once for the relative data interrupt.
+For HID devices, calling rmi_f12_attention() a second time causes the
+attn_data data pointer and size to be set incorrectly. When the touchpad
+button is pressed, F30 will generate an interrupt and attempt to read the
+F30 data from the invalid attn_data data pointer and report incorrect
+button events.
+
+This patch disables the F12 relative interrupt which prevents
+rmi_f12_attention() from being called twice.
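+
+The allocation trick used in the diff below, condensed (error handling and
+unrelated fields omitted): both per-function IRQ bitmasks are carved out of
+the tail of a single devm allocation.
+
+    mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+    f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+                       GFP_KERNEL);
+    if (!f12)
+            return -ENOMEM;
+
+    f12->abs_mask = (unsigned long *)((char *)f12 + sizeof(struct f12_data));
+    f12->rel_mask = (unsigned long *)((char *)f12 + sizeof(struct f12_data)
+                                      + mask_size);
+
+    set_bit(fn->irq_pos, f12->abs_mask);        /* absolute-position IRQ */
+    set_bit(fn->irq_pos + 1, f12->rel_mask);    /* relative-position IRQ */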
+
+Signed-off-by: Andrew Duggan <aduggan@synaptics.com>
+Reported-by: Simon Wood <simon@mungewell.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191025002527.3189-2-aduggan@synaptics.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f12.c | 28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/drivers/input/rmi4/rmi_f12.c
++++ b/drivers/input/rmi4/rmi_f12.c
+@@ -55,6 +55,9 @@ struct f12_data {
+
+ const struct rmi_register_desc_item *data15;
+ u16 data15_offset;
++
++ unsigned long *abs_mask;
++ unsigned long *rel_mask;
+ };
+
+ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
+@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(st
+ static int rmi_f12_config(struct rmi_function *fn)
+ {
+ struct rmi_driver *drv = fn->rmi_dev->driver;
++ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
++ struct rmi_2d_sensor *sensor;
+ int ret;
+
+- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
++ sensor = &f12->sensor;
++
++ if (!sensor->report_abs)
++ drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
++ else
++ drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
++
++ drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
+
+ ret = rmi_f12_write_control_regs(fn);
+ if (ret)
+@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_func
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ u16 data_offset = 0;
++ int mask_size;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+
++ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
++
+ ret = rmi_read(fn->rmi_dev, query_addr, &buf);
+ if (ret < 0) {
+ dev_err(&fn->dev, "Failed to read general info register: %d\n",
+@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_func
+ return -ENODEV;
+ }
+
+- f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
++ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
++ GFP_KERNEL);
+ if (!f12)
+ return -ENOMEM;
+
++ f12->abs_mask = (unsigned long *)((char *)f12
++ + sizeof(struct f12_data));
++ f12->rel_mask = (unsigned long *)((char *)f12
++ + sizeof(struct f12_data) + mask_size);
++
++ set_bit(fn->irq_pos, f12->abs_mask);
++ set_bit(fn->irq_pos + 1, f12->rel_mask);
++
+ f12->has_dribble = !!(buf & BIT(3));
+
+ if (fn->dev.of_node) {
--- /dev/null
+From 5d40d95e7e64756cc30606c2ba169271704d47cb Mon Sep 17 00:00:00 2001
+From: Andrew Duggan <aduggan@synaptics.com>
+Date: Mon, 4 Nov 2019 16:07:30 -0800
+Subject: Input: synaptics-rmi4 - do not consume more data than we have (F11, F12)
+
+From: Andrew Duggan <aduggan@synaptics.com>
+
+commit 5d40d95e7e64756cc30606c2ba169271704d47cb upstream.
+
+Currently, the rmi_f11_attention() and rmi_f12_attention() functions update
+the attn_data data pointer and size based on the expected size of the
+attention data. However, if the actual valid data in the attn buffer is
+less than the expected value, then the updated data pointer will point to
+memory beyond the end of the attn buffer. Using the calculated valid_bytes
+instead prevents this from happening.
+
+Signed-off-by: Andrew Duggan <aduggan@synaptics.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191025002527.3189-3-aduggan@synaptics.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f11.c | 4 ++--
+ drivers/input/rmi4/rmi_f12.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/input/rmi4/rmi_f11.c
++++ b/drivers/input/rmi4/rmi_f11.c
+@@ -1284,8 +1284,8 @@ static irqreturn_t rmi_f11_attention(int
+ valid_bytes = f11->sensor.attn_size;
+ memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+- drvdata->attn_data.data += f11->sensor.attn_size;
+- drvdata->attn_data.size -= f11->sensor.attn_size;
++ drvdata->attn_data.data += valid_bytes;
++ drvdata->attn_data.size -= valid_bytes;
+ } else {
+ error = rmi_read_block(rmi_dev,
+ data_base_addr, f11->sensor.data_pkt,
+--- a/drivers/input/rmi4/rmi_f12.c
++++ b/drivers/input/rmi4/rmi_f12.c
+@@ -212,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int
+ valid_bytes = sensor->attn_size;
+ memcpy(sensor->data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+- drvdata->attn_data.data += sensor->attn_size;
+- drvdata->attn_data.size -= sensor->attn_size;
++ drvdata->attn_data.data += valid_bytes;
++ drvdata->attn_data.size -= valid_bytes;
+ } else {
+ retval = rmi_read_block(rmi_dev, f12->data_addr,
+ sensor->data_pkt, sensor->pkt_size);
--- /dev/null
+From 003f01c780020daa9a06dea1db495b553a868c29 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Mon, 4 Nov 2019 15:58:34 -0800
+Subject: Input: synaptics-rmi4 - fix video buffer size
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit 003f01c780020daa9a06dea1db495b553a868c29 upstream.
+
+The video buffer used by the queue is a vb2_v4l2_buffer, not a plain
+vb2_buffer. Using the wrong type makes the allocated buffer storage too
+small, causing an out-of-bounds write when __init_vb2_v4l2_buffer
+initializes the buffer.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Fixes: 3a762dbd5347 ("[media] Input: synaptics-rmi4 - add support for F54 diagnostics")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191104114454.10500-1-l.stach@pengutronix.de
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f54.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/rmi4/rmi_f54.c
++++ b/drivers/input/rmi4/rmi_f54.c
+@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queu
+ static const struct vb2_queue rmi_f54_queue = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
+- .buf_struct_size = sizeof(struct vb2_buffer),
++ .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
+ .ops = &rmi_f54_queue_ops,
+ .mem_ops = &vb2_vmalloc_memops,
+ .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
--- /dev/null
+From 5e559561a8d7e6d4adfce6aa8fbf3daa3dec1577 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 13 Nov 2019 16:12:46 -0700
+Subject: io_uring: ensure registered buffer import returns the IO length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 5e559561a8d7e6d4adfce6aa8fbf3daa3dec1577 upstream.
+
+A test case was reported where two linked reads with registered buffers
+always failed the second link. This is because we set the expected value
+of a request in req->result, and if we don't get this result, then we
+fail the dependent links. For some reason the registered buffer import
+returned -ERROR/0, while the normal import returns -ERROR/length. This
+broke linked commands with registered buffers.
+
+Fix this by making io_import_fixed() correctly return the mapped length.
+
+Cc: stable@vger.kernel.org # v5.3
+Reported-by: 李通洲 <carter.li@eoitek.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1179,7 +1179,7 @@ static int io_import_fixed(struct io_rin
+ }
+ }
+
+- return 0;
++ return len;
+ }
+
+ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
--- /dev/null
+From 4e7120d79edb31e4ee68e6f8421448e4603be1e9 Mon Sep 17 00:00:00 2001
+From: Eric Auger <eric.auger@redhat.com>
+Date: Fri, 8 Nov 2019 16:58:03 +0100
+Subject: iommu/vt-d: Fix QI_DEV_IOTLB_PFSID and QI_DEV_EIOTLB_PFSID macros
+
+From: Eric Auger <eric.auger@redhat.com>
+
+commit 4e7120d79edb31e4ee68e6f8421448e4603be1e9 upstream.
+
+For both PASID-based-Device-TLB Invalidate Descriptor and
+Device-TLB Invalidate Descriptor, the Physical Function Source-ID
+value is split according to this layout:
+
+PFSID[3:0] is set at offset 12 and PFSID[15:4] is put at offset 52.
+Fix the part laid out at offset 52.
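+
+For illustration only (not part of the upstream change), a minimal
+user-space sketch of how the corrected macro splits a hypothetical 16-bit
+PFSID value of 0xabcd:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* Same bit layout as the fixed QI_DEV_IOTLB_PFSID() macro. */
+  #define DEMO_PFSID(pfsid) (((uint64_t)((pfsid) & 0xf) << 12) | \
+                             ((uint64_t)(((pfsid) >> 4) & 0xfff) << 52))
+
+  int main(void)
+  {
+          uint16_t pfsid = 0xabcd; /* hypothetical PF source-id */
+
+          /* PFSID[3:0] = 0xd lands at bits 15:12,
+           * PFSID[15:4] = 0xabc lands at bits 63:52. */
+          printf("0x%016llx\n", (unsigned long long)DEMO_PFSID(pfsid));
+          /* prints 0xabc000000000d000 */
+          return 0;
+  }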
+
+Fixes: 0f725561e1684 ("iommu/vt-d: Add definitions for PFSID")
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Acked-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.19+
+Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/intel-iommu.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -334,7 +334,8 @@ enum {
+ #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+ #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+ #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
++ ((u64)((pfsid >> 4) & 0xfff) << 52))
+ #define QI_DEV_IOTLB_SIZE 1
+ #define QI_DEV_IOTLB_MAX_INVS 32
+
+@@ -358,7 +359,8 @@ enum {
+ #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+ #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+ #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
++ ((u64)((pfsid >> 4) & 0xfff) << 52))
+ #define QI_DEV_EIOTLB_MAX_INVS 32
+
+ /* Page group response descriptor QW0 */
--- /dev/null
+From a78986aae9b2988f8493f9f65a587ee433e83bc3 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Mon, 11 Nov 2019 14:12:27 -0800
+Subject: KVM: MMU: Do not treat ZONE_DEVICE pages as being reserved
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit a78986aae9b2988f8493f9f65a587ee433e83bc3 upstream.
+
+Explicitly exempt ZONE_DEVICE pages from kvm_is_reserved_pfn() and
+instead manually handle ZONE_DEVICE on a case-by-case basis. For things
+like page refcounts, KVM needs to treat ZONE_DEVICE pages like normal
+pages, e.g. put pages grabbed via gup(). But for flows such as setting
+A/D bits or shifting refcounts for transparent huge pages, KVM needs to
+avoid processing ZONE_DEVICE pages as the flows in question lack the
+underlying machinery for proper handling of ZONE_DEVICE pages.
+
+This fixes a hang reported by Adam Borowski[*] in dev_pagemap_cleanup()
+when running a KVM guest backed with /dev/dax memory, as KVM straight up
+doesn't put any references to ZONE_DEVICE pages acquired by gup().
+
+Note, Dan Williams proposed an alternative solution of doing put_page()
+on ZONE_DEVICE pages immediately after gup() in order to simplify the
+auditing needed to ensure is_zone_device_page() is called if and only if
+the backing device is pinned (via gup()). But that approach would break
+kvm_vcpu_{un}map() as KVM requires the page to be pinned from map() 'til
+unmap() when accessing guest memory, unlike KVM's secondary MMU, which
+coordinates with mmu_notifier invalidations to avoid creating stale
+page references, i.e. doesn't rely on pages being pinned.
+
+[*] http://lkml.kernel.org/r/20190919115547.GA17963@angband.pl
+
+Reported-by: Adam Borowski <kilobyte@angband.pl>
+Analyzed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Dan Williams <dan.j.williams@intel.com>
+Cc: stable@vger.kernel.org
+Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings")
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 8 ++++----
+ include/linux/kvm_host.h | 1 +
+ virt/kvm/kvm_main.c | 26 +++++++++++++++++++++++---
+ 3 files changed, 28 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3352,7 +3352,7 @@ static void transparent_hugepage_adjust(
+ * here.
+ */
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
+- level == PT_PAGE_TABLE_LEVEL &&
++ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
+ PageTransCompoundMap(pfn_to_page(pfn)) &&
+ !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+ unsigned long mask;
+@@ -5961,9 +5961,9 @@ restart:
+ * the guest, and the guest page table is using 4K page size
+ * mapping if the indirect sp has level = 1.
+ */
+- if (sp->role.direct &&
+- !kvm_is_reserved_pfn(pfn) &&
+- PageTransCompoundMap(pfn_to_page(pfn))) {
++ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
++ !kvm_is_zone_device_pfn(pfn) &&
++ PageTransCompoundMap(pfn_to_page(pfn))) {
+ pte_list_remove(rmap_head, sptep);
+
+ if (kvm_available_flush_tlb_with_range())
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -966,6 +966,7 @@ int kvm_cpu_has_pending_timer(struct kvm
+ void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+ bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+
+ struct kvm_irq_ack_notifier {
+ struct hlist_node link;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -150,10 +150,30 @@ __weak int kvm_arch_mmu_notifier_invalid
+ return 0;
+ }
+
++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
++{
++ /*
++ * The metadata used by is_zone_device_page() to determine whether or
++ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
++ * the device has been pinned, e.g. by get_user_pages(). WARN if the
++ * page_count() is zero to help detect bad usage of this helper.
++ */
++ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
++ return false;
++
++ return is_zone_device_page(pfn_to_page(pfn));
++}
++
+ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+ {
++ /*
++ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
++ * perspective they are "normal" pages, albeit with slightly different
++ * usage rules.
++ */
+ if (pfn_valid(pfn))
+- return PageReserved(pfn_to_page(pfn));
++ return PageReserved(pfn_to_page(pfn)) &&
++ !kvm_is_zone_device_pfn(pfn);
+
+ return true;
+ }
+@@ -1882,7 +1902,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty)
+
+ void kvm_set_pfn_dirty(kvm_pfn_t pfn)
+ {
+- if (!kvm_is_reserved_pfn(pfn)) {
++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ SetPageDirty(page);
+@@ -1892,7 +1912,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+ void kvm_set_pfn_accessed(kvm_pfn_t pfn)
+ {
+- if (!kvm_is_reserved_pfn(pfn))
++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
+ mark_page_accessed(pfn_to_page(pfn));
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
--- /dev/null
+From 0362f326d86c645b5e96b7dbc3ee515986ed019d Mon Sep 17 00:00:00 2001
+From: Roman Gushchin <guro@fb.com>
+Date: Fri, 15 Nov 2019 17:34:46 -0800
+Subject: mm: hugetlb: switch to css_tryget() in hugetlb_cgroup_charge_cgroup()
+
+From: Roman Gushchin <guro@fb.com>
+
+commit 0362f326d86c645b5e96b7dbc3ee515986ed019d upstream.
+
+An exiting task might belong to an offline cgroup. In this case an
+attempt to grab a cgroup reference from the task can end up with an
+infinite loop in hugetlb_cgroup_charge_cgroup(), because the cgroup will
+never become online and the task will never be migrated to a live
+cgroup.
+
+Fix this by switching over to css_tryget(). As css_tryget_online()
+can't guarantee that the cgroup won't go offline, in most cases the
+check doesn't make sense. In this particular case users of
+hugetlb_cgroup_charge_cgroup() are not affected by this change.
+
+A similar problem is described by commit 18fa84a2db0e ("cgroup: Use
+css_tryget() instead of css_tryget_online() in task_get_css()").
+
+Link: http://lkml.kernel.org/r/20191106225131.3543616-2-guro@fb.com
+Signed-off-by: Roman Gushchin <guro@fb.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb_cgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/hugetlb_cgroup.c
++++ b/mm/hugetlb_cgroup.c
+@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx
+ again:
+ rcu_read_lock();
+ h_cg = hugetlb_cgroup_from_task(current);
+- if (!css_tryget_online(&h_cg->css)) {
++ if (!css_tryget(&h_cg->css)) {
+ rcu_read_unlock();
+ goto again;
+ }
--- /dev/null
+From 00d484f354d85845991b40141d40ba9e5eb60faf Mon Sep 17 00:00:00 2001
+From: Roman Gushchin <guro@fb.com>
+Date: Fri, 15 Nov 2019 17:34:43 -0800
+Subject: mm: memcg: switch to css_tryget() in get_mem_cgroup_from_mm()
+
+From: Roman Gushchin <guro@fb.com>
+
+commit 00d484f354d85845991b40141d40ba9e5eb60faf upstream.
+
+We've encountered an RCU stall in get_mem_cgroup_from_mm():
+
+ rcu: INFO: rcu_sched self-detected stall on CPU
+ rcu: 33-....: (21000 ticks this GP) idle=6c6/1/0x4000000000000002 softirq=35441/35441 fqs=5017
+ (t=21031 jiffies g=324821 q=95837) NMI backtrace for cpu 33
+ <...>
+ RIP: 0010:get_mem_cgroup_from_mm+0x2f/0x90
+ <...>
+ __memcg_kmem_charge+0x55/0x140
+ __alloc_pages_nodemask+0x267/0x320
+ pipe_write+0x1ad/0x400
+ new_sync_write+0x127/0x1c0
+ __kernel_write+0x4f/0xf0
+ dump_emit+0x91/0xc0
+ writenote+0xa0/0xc0
+ elf_core_dump+0x11af/0x1430
+ do_coredump+0xc65/0xee0
+ get_signal+0x132/0x7c0
+ do_signal+0x36/0x640
+ exit_to_usermode_loop+0x61/0xd0
+ do_syscall_64+0xd4/0x100
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+The problem is caused by an exiting task which is associated with an
+offline memcg. We're iterating over and over in the do {} while
+(!css_tryget_online()) loop, but obviously the memcg won't become online
+and the exiting task won't be migrated to a live memcg.
+
+Let's fix it by switching from css_tryget_online() to css_tryget().
+
+As css_tryget_online() cannot guarantee that the memcg won't go offline,
+the check is usually useless, except in some rare cases, for example when
+it determines whether something should be presented to a user.
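+
+As a purely illustrative aside (a toy user-space model, not the kernel
+code), the difference between the two tryget variants for a css that is
+offline but still referenced:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Hypothetical stand-in for a css with a refcount and an online flag. */
+  struct demo_css { int refcnt; bool online; };
+
+  static bool demo_tryget(struct demo_css *css)
+  {
+          if (css->refcnt <= 0)
+                  return false;
+          css->refcnt++;
+          return true;
+  }
+
+  static bool demo_tryget_online(struct demo_css *css)
+  {
+          return css->online && demo_tryget(css);
+  }
+
+  int main(void)
+  {
+          /* An exiting task can still point at an offline, referenced css. */
+          struct demo_css css = { .refcnt = 1, .online = false };
+
+          /* A do { } while (!demo_tryget_online(&css)) loop would spin
+           * forever here: the css never comes back online and the task is
+           * never re-attached. demo_tryget() succeeds on the first try. */
+          printf("tryget_online=%d tryget=%d\n",
+                 demo_tryget_online(&css), demo_tryget(&css));
+          return 0;
+  }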
+
+A similar problem is described by commit 18fa84a2db0e ("cgroup: Use
+css_tryget() instead of css_tryget_online() in task_get_css()").
+
+Johannes:
+
+: The bug aside, it doesn't matter whether the cgroup is online for the
+: callers. It used to matter when offlining needed to evacuate all charges
+: from the memcg, and so needed to prevent new ones from showing up, but we
+: don't care now.
+
+Link: http://lkml.kernel.org/r/20191106225131.3543616-1-guro@fb.com
+Signed-off-by: Roman Gushchin <guro@fb.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Michal Koutný <mkoutny@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -962,7 +962,7 @@ struct mem_cgroup *get_mem_cgroup_from_m
+ if (unlikely(!memcg))
+ memcg = root_mem_cgroup;
+ }
+- } while (!css_tryget_online(&memcg->css));
++ } while (!css_tryget(&memcg->css));
+ rcu_read_unlock();
+ return memcg;
+ }
--- /dev/null
+From 2c91f8fc6c999fe10185d8ad99fda1759f662f70 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 15 Nov 2019 17:34:57 -0800
+Subject: mm/memory_hotplug: fix try_offline_node()
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 2c91f8fc6c999fe10185d8ad99fda1759f662f70 upstream.
+
+try_offline_node() is pretty much broken right now:
+
+ - The node span is updated when onlining memory, not when adding it. We
+ ignore memory that was never onlined. Bad.
+
+ - We touch possible garbage memmaps. The pfn_to_nid(pfn) can easily
+ trigger a kernel panic. Bad for memory that is offline but also bad
+ for subsection hotadd with ZONE_DEVICE, whereby the memmap of the
+ first PFN of a section might contain garbage.
+
+ - Sections belonging to mixed nodes are not properly considered.
+
+As memory blocks might belong to multiple nodes, we would have to walk
+all pageblocks (or at least subsections) within present sections.
+However, we don't have a way to identify whether a memmap that is not
+online was initialized (relevant for ZONE_DEVICE). This makes things
+more complicated.
+
+Luckily, we can piggyback on the node span and the nid stored in memory
+blocks. Currently, the node span is grown when calling
+move_pfn_range_to_zone() - e.g., when onlining memory, and shrunk when
+removing memory, before calling try_offline_node(). Sysfs links are
+created via link_mem_sections(), e.g., during boot or when adding
+memory.
+
+If the node still spans memory or if any memory block belongs to the
+nid, we don't set the node offline. As memory blocks that span multiple
+nodes cannot get offlined, the nid stored in memory blocks is reliable
+enough (for such online memory blocks, the node still spans the memory).
+
+Introduce for_each_memory_block() to efficiently walk all memory blocks.
+
+Note: We will soon stop shrinking the ZONE_DEVICE zone and the node span
+when removing ZONE_DEVICE memory to fix similar issues (access of
+garbage memmaps) - until we have a reliable way to identify whether
+these memmaps were properly initialized. This implies later, that once
+a node had ZONE_DEVICE memory, we won't be able to set a node offline -
+which should be acceptable.
+
+Since commit f1dd2cd13c4b ("mm, memory_hotplug: do not associate
+hotadded memory to zones until online") memory that is added is not
+associated with a zone/node (memmap not initialized). The introducing
+commit 60a5a19e7419 ("memory-hotplug: remove sysfs file of node")
+already missed that we could have multiple nodes for a section and that
+the zone/node span is updated when onlining pages, not when adding them.
+
+I tested this by hotplugging two DIMMs to a memory-less and cpu-less
+NUMA node. The node is properly onlined when adding the DIMMs. When
+removing the DIMMs, the node is properly offlined.
+
+Masayoshi Mizuma reported:
+
+: Without this patch, memory hotplug fails as panic:
+:
+: BUG: kernel NULL pointer dereference, address: 0000000000000000
+: ...
+: Call Trace:
+: remove_memory_block_devices+0x81/0xc0
+: try_remove_memory+0xb4/0x130
+: __remove_memory+0xa/0x20
+: acpi_memory_device_remove+0x84/0x100
+: acpi_bus_trim+0x57/0x90
+: acpi_bus_trim+0x2e/0x90
+: acpi_device_hotplug+0x2b2/0x4d0
+: acpi_hotplug_work_fn+0x1a/0x30
+: process_one_work+0x171/0x380
+: worker_thread+0x49/0x3f0
+: kthread+0xf8/0x130
+: ret_from_fork+0x35/0x40
+
+[david@redhat.com: v3]
+ Link: http://lkml.kernel.org/r/20191102120221.7553-1-david@redhat.com
+Link: http://lkml.kernel.org/r/20191028105458.28320-1-david@redhat.com
+Fixes: 60a5a19e7419 ("memory-hotplug: remove sysfs file of node")
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online") # visible after d0dc12e86b319
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
+Cc: Tang Chen <tangchen@cn.fujitsu.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: "Rafael J. Wysocki" <rafael@kernel.org>
+Cc: Keith Busch <keith.busch@intel.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: Nayna Jain <nayna@linux.ibm.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/memory.c | 36 ++++++++++++++++++++++++++++++++++++
+ include/linux/memory.h | 1 +
+ mm/memory_hotplug.c | 47 +++++++++++++++++++++++++++++------------------
+ 3 files changed, 66 insertions(+), 18 deletions(-)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -884,3 +884,39 @@ int walk_memory_blocks(unsigned long sta
+ }
+ return ret;
+ }
++
++struct for_each_memory_block_cb_data {
++ walk_memory_blocks_func_t func;
++ void *arg;
++};
++
++static int for_each_memory_block_cb(struct device *dev, void *data)
++{
++ struct memory_block *mem = to_memory_block(dev);
++ struct for_each_memory_block_cb_data *cb_data = data;
++
++ return cb_data->func(mem, cb_data->arg);
++}
++
++/**
++ * for_each_memory_block - walk through all present memory blocks
++ *
++ * @arg: argument passed to func
++ * @func: callback for each memory block walked
++ *
++ * This function walks through all present memory blocks, calling func on
++ * each memory block.
++ *
++ * In case func() returns an error, walking is aborted and the error is
++ * returned.
++ */
++int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
++{
++ struct for_each_memory_block_cb_data cb_data = {
++ .func = func,
++ .arg = arg,
++ };
++
++ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
++ for_each_memory_block_cb);
++}
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -120,6 +120,7 @@ extern struct memory_block *find_memory_
+ typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
+ extern int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func);
++extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
+ #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
+ #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1687,6 +1687,18 @@ static int check_cpu_on_node(pg_data_t *
+ return 0;
+ }
+
++static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
++{
++ int nid = *(int *)arg;
++
++ /*
++ * If a memory block belongs to multiple nodes, the stored nid is not
++ * reliable. However, such blocks are always online (e.g., cannot get
++ * offlined) and, therefore, are still spanned by the node.
++ */
++ return mem->nid == nid ? -EEXIST : 0;
++}
++
+ /**
+ * try_offline_node
+ * @nid: the node ID
+@@ -1699,25 +1711,24 @@ static int check_cpu_on_node(pg_data_t *
+ void try_offline_node(int nid)
+ {
+ pg_data_t *pgdat = NODE_DATA(nid);
+- unsigned long start_pfn = pgdat->node_start_pfn;
+- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+- unsigned long pfn;
+-
+- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+- unsigned long section_nr = pfn_to_section_nr(pfn);
+-
+- if (!present_section_nr(section_nr))
+- continue;
+-
+- if (pfn_to_nid(pfn) != nid)
+- continue;
+-
+- /*
+- * some memory sections of this node are not removed, and we
+- * can't offline node now.
+- */
++ int rc;
++
++ /*
++ * If the node still spans pages (especially ZONE_DEVICE), don't
++ * offline it. A node spans memory after move_pfn_range_to_zone(),
++ * e.g., after the memory block was onlined.
++ */
++ if (pgdat->node_spanned_pages)
++ return;
++
++ /*
++ * Especially offline memory blocks might not be spanned by the
++ * node. They will get spanned by the node once they get onlined.
++ * However, they link to the node in sysfs and can get onlined later.
++ */
++ rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
++ if (rc)
+ return;
+- }
+
+ if (check_cpu_on_node(pgdat))
+ return;
--- /dev/null
+From a85dfc305a21acfc48fa28a0fa0a0cb6ad496120 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Fri, 15 Nov 2019 17:34:33 -0800
+Subject: mm: mempolicy: fix the wrong return value and potential pages leak of mbind
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit a85dfc305a21acfc48fa28a0fa0a0cb6ad496120 upstream.
+
+Commit d883544515aa ("mm: mempolicy: make the behavior consistent when
+MPOL_MF_MOVE* and MPOL_MF_STRICT were specified") fixed the return value
+of mbind() for a couple of corner cases. But, it altered the errno for
+some other cases; for example, mbind() should return -EFAULT when part
+or all of the memory range specified by nodemask and maxnode points
+outside your accessible address space, or when there was an unmapped hole
+in the memory range specified by addr and len.
+
+Fix this by preserving the errno returned by queue_pages_range(). Also,
+the pagelist may not be empty even though queue_pages_range() returns an
+error; put those pages back to the LRU, since mbind_range() is not called
+to actually apply the policy, so those pages should not be migrated. This
+is also the old behavior before the problematic commit.
+
+Link: http://lkml.kernel.org/r/1572454731-3925-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: d883544515aa ("mm: mempolicy: make the behavior consistent when MPOL_MF_MOVE* and MPOL_MF_STRICT were specified")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Reported-by: Li Xinhai <lixinhai.lxh@gmail.com>
+Reviewed-by: Li Xinhai <lixinhai.lxh@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org> [4.19 and 5.2+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -666,7 +666,9 @@ static int queue_pages_test_walk(unsigne
+ * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 0 - queue pages successfully or no misplaced page.
+- * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
++ * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
++ * memory range specified by nodemask and maxnode points outside
++ * your accessible address space (-EFAULT)
+ */
+ static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+@@ -1287,7 +1289,7 @@ static long do_mbind(unsigned long start
+ flags | MPOL_MF_INVERT, &pagelist);
+
+ if (ret < 0) {
+- err = -EIO;
++ err = ret;
+ goto up_out;
+ }
+
+@@ -1306,10 +1308,12 @@ static long do_mbind(unsigned long start
+
+ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+ err = -EIO;
+- } else
+- putback_movable_pages(&pagelist);
+-
++ } else {
+ up_out:
++ if (!list_empty(&pagelist))
++ putback_movable_pages(&pagelist);
++ }
++
+ up_write(&mm->mmap_sem);
+ mpol_out:
+ mpol_put(new);
--- /dev/null
+From 5df373e95689b9519b8557da7c5bd0db0856d776 Mon Sep 17 00:00:00 2001
+From: Vinayak Menon <vinmenon@codeaurora.org>
+Date: Fri, 15 Nov 2019 17:35:00 -0800
+Subject: mm/page_io.c: do not free shared swap slots
+
+From: Vinayak Menon <vinmenon@codeaurora.org>
+
+commit 5df373e95689b9519b8557da7c5bd0db0856d776 upstream.
+
+The following race is observed, due to which a process faulting on a
+swap entry finds the page neither in the swapcache nor in swap. This
+causes zram to give out a zero-filled page that gets mapped to the
+process, resulting in a user space crash later.
+
+Consider parent and child processes Pa and Pb sharing the same swap slot
+with swap_count 2. Swap is on zram with SWP_SYNCHRONOUS_IO set.
+Virtual address 'VA' of Pa and Pb points to the shared swap entry.
+
+Pa                                          Pb
+
+fault on VA                                 fault on VA
+do_swap_page                                do_swap_page
+lookup_swap_cache fails                     lookup_swap_cache fails
+                                            Pb scheduled out
+swapin_readahead (deletes zram entry)
+swap_free (makes swap_count 1)
+                                            Pb scheduled in
+                                            swap_readpage (swap_count == 1)
+                                            Takes SWP_SYNCHRONOUS_IO path
+                                            zram entry absent
+                                            zram gives a zero filled page
+
+Fix this by making sure that swap slot is freed only when swap count
+drops down to one.
+
+Link: http://lkml.kernel.org/r/1571743294-14285-1-git-send-email-vinmenon@codeaurora.org
+Fixes: aa8d22a11da9 ("mm: swap: SWP_SYNCHRONOUS_IO: skip swapcache only if swapped page has no other reference")
+Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
+Suggested-by: Minchan Kim <minchan@google.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_io.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/page_io.c
++++ b/mm/page_io.c
+@@ -73,6 +73,7 @@ static void swap_slot_free_notify(struct
+ {
+ struct swap_info_struct *sis;
+ struct gendisk *disk;
++ swp_entry_t entry;
+
+ /*
+ * There is no guarantee that the page is in swap cache - the software
+@@ -104,11 +105,10 @@ static void swap_slot_free_notify(struct
+ * we again wish to reclaim it.
+ */
+ disk = sis->bdev->bd_disk;
+- if (disk->fops->swap_slot_free_notify) {
+- swp_entry_t entry;
++ entry.val = page_private(page);
++ if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
+ unsigned long offset;
+
+- entry.val = page_private(page);
+ offset = swp_offset(entry);
+
+ SetPageDirty(page);
--- /dev/null
+From aea4df4c53f754cc229edde6c5465e481311cc49 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 15 Nov 2019 17:34:50 -0800
+Subject: mm: slub: really fix slab walking for init_on_free
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit aea4df4c53f754cc229edde6c5465e481311cc49 upstream.
+
+Commit 1b7e816fc80e ("mm: slub: Fix slab walking for init_on_free")
+fixed one problem with the slab walking but missed a key detail: When
+walking the list, the head and tail pointers need to be updated since we
+end up reversing the list as a result. Without doing this, bulk free is
+broken.
+
+One way this is exposed is a NULL pointer with slub_debug=F:
+
+ =============================================================================
+ BUG skbuff_head_cache (Tainted: G T): Object already free
+ -----------------------------------------------------------------------------
+
+ INFO: Slab 0x000000000d2d2f8f objects=16 used=3 fp=0x0000000064309071 flags=0x3fff00000000201
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ RIP: 0010:print_trailer+0x70/0x1d5
+ Call Trace:
+ <IRQ>
+ free_debug_processing.cold.37+0xc9/0x149
+ __slab_free+0x22a/0x3d0
+ kmem_cache_free_bulk+0x415/0x420
+ __kfree_skb_flush+0x30/0x40
+ net_rx_action+0x2dd/0x480
+ __do_softirq+0xf0/0x246
+ irq_exit+0x93/0xb0
+ do_IRQ+0xa0/0x110
+ common_interrupt+0xf/0xf
+ </IRQ>
+
+Given we're now almost identical to the existing debugging code which
+correctly walks the list, combine with that.
+
+Link: https://lkml.kernel.org/r/20191104170303.GA50361@gandi.net
+Link: http://lkml.kernel.org/r/20191106222208.26815-1-labbott@redhat.com
+Fixes: 1b7e816fc80e ("mm: slub: Fix slab walking for init_on_free")
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Reported-by: Thibaut Sautereau <thibaut.sautereau@clip-os.org>
+Acked-by: David Rientjes <rientjes@google.com>
+Tested-by: Alexander Potapenko <glider@google.com>
+Acked-by: Alexander Potapenko <glider@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <clipos@ssi.gouv.fr>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slub.c | 39 +++++++++------------------------------
+ 1 file changed, 9 insertions(+), 30 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1432,12 +1432,15 @@ static inline bool slab_free_freelist_ho
+ void *old_tail = *tail ? *tail : *head;
+ int rsize;
+
+- if (slab_want_init_on_free(s)) {
+- void *p = NULL;
++ /* Head and tail of the reconstructed freelist */
++ *head = NULL;
++ *tail = NULL;
++
++ do {
++ object = next;
++ next = get_freepointer(s, object);
+
+- do {
+- object = next;
+- next = get_freepointer(s, object);
++ if (slab_want_init_on_free(s)) {
+ /*
+ * Clear the object and the metadata, but don't touch
+ * the redzone.
+@@ -1447,29 +1450,8 @@ static inline bool slab_free_freelist_ho
+ : 0;
+ memset((char *)object + s->inuse, 0,
+ s->size - s->inuse - rsize);
+- set_freepointer(s, object, p);
+- p = object;
+- } while (object != old_tail);
+- }
+-
+-/*
+- * Compiler cannot detect this function can be removed if slab_free_hook()
+- * evaluates to nothing. Thus, catch all relevant config debug options here.
+- */
+-#if defined(CONFIG_LOCKDEP) || \
+- defined(CONFIG_DEBUG_KMEMLEAK) || \
+- defined(CONFIG_DEBUG_OBJECTS_FREE) || \
+- defined(CONFIG_KASAN)
+-
+- next = *head;
+
+- /* Head and tail of the reconstructed freelist */
+- *head = NULL;
+- *tail = NULL;
+-
+- do {
+- object = next;
+- next = get_freepointer(s, object);
++ }
+ /* If object's reuse doesn't have to be delayed */
+ if (!slab_free_hook(s, object)) {
+ /* Move object to the new freelist */
+@@ -1484,9 +1466,6 @@ static inline bool slab_free_freelist_ho
+ *tail = NULL;
+
+ return *head != NULL;
+-#else
+- return true;
+-#endif
+ }
+
+ static void *setup_object(struct kmem_cache *s, struct page *page,
--- /dev/null
+From fed23c5829ecab4ddc712d7b0046e59610ca3ba4 Mon Sep 17 00:00:00 2001
+From: Eugen Hristev <eugen.hristev@microchip.com>
+Date: Thu, 14 Nov 2019 12:59:26 +0000
+Subject: mmc: sdhci-of-at91: fix quirk2 overwrite
+
+From: Eugen Hristev <eugen.hristev@microchip.com>
+
+commit fed23c5829ecab4ddc712d7b0046e59610ca3ba4 upstream.
+
+The quirks2 are parsed and set (e.g. from DT) before the quirk for broken
+HS200 is set in the driver.
+The driver needs to enable just this flag, not rewrite the whole quirk set.
+
+Fixes: 7871aa60ae00 ("mmc: sdhci-of-at91: add quirk for broken HS200")
+Signed-off-by: Eugen Hristev <eugen.hristev@microchip.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-at91.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -358,7 +358,7 @@ static int sdhci_at91_probe(struct platf
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ /* HS200 is broken at this moment */
+- host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
++ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
+
+ ret = sdhci_add_host(host);
+ if (ret)
--- /dev/null
+From 40a1dcee2d1846a24619fe9ca45c661ca0db7dda Mon Sep 17 00:00:00 2001
+From: Corentin Labbe <clabbe@baylibre.com>
+Date: Sun, 10 Nov 2019 11:30:48 +0000
+Subject: net: ethernet: dwmac-sun8i: Use the correct function in exit path
+
+From: Corentin Labbe <clabbe@baylibre.com>
+
+commit 40a1dcee2d1846a24619fe9ca45c661ca0db7dda upstream.
+
+When the PHY is not powered, the probe function fails and some resources
+are still unallocated.
+Furthermore, a BUG is triggered:
+dwmac-sun8i 5020000.ethernet: EMAC reset timeout
+------------[ cut here ]------------
+kernel BUG at /linux-next/net/core/dev.c:9844!
+
+So let's use the right function (stmmac_pltfr_remove) in the error path.
+
+Fixes: 9f93ac8d4085 ("net-next: stmmac: Add dwmac-sun8i")
+Cc: <stable@vger.kernel.org> # v4.15+
+Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -1225,7 +1225,7 @@ static int sun8i_dwmac_probe(struct plat
+ dwmac_mux:
+ sun8i_dwmac_unset_syscon(gmac);
+ dwmac_exit:
+- sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
++ stmmac_pltfr_remove(pdev);
+ return ret;
+ }
+
--- /dev/null
+From 2f5841349df281ecf8f81cc82d869b8476f0db0b Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 8 Nov 2019 21:34:24 +0100
+Subject: ntp/y2038: Remove incorrect time_t truncation
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 2f5841349df281ecf8f81cc82d869b8476f0db0b upstream.
+
+A cast to 'time_t' was accidentally left in place during the
+conversion of __do_adjtimex() to 64-bit timestamps, so the
+resulting value is incorrectly truncated.
+
+Remove the cast so the 64-bit time gets propagated correctly.
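+
+For illustration only (not part of the upstream change), the kind of
+truncation the stray cast causes on a 32-bit time_t configuration,
+modeled here with an explicit 32-bit type:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          int64_t ts_sec = 0x100000000LL + 42; /* does not fit in 32 bits */
+          int32_t truncated = (int32_t)ts_sec; /* what the old cast did */
+
+          /* On typical two's-complement targets this prints
+           * full=4294967338 truncated=42 */
+          printf("full=%lld truncated=%d\n", (long long)ts_sec, truncated);
+          return 0;
+  }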
+
+Fixes: ead25417f82e ("timex: use __kernel_timex internally")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20191108203435.112759-2-arnd@arndb.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/ntp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -771,7 +771,7 @@ int __do_adjtimex(struct __kernel_timex
+ /* fill PPS status fields */
+ pps_fill_timex(txc);
+
+- txc->time.tv_sec = (time_t)ts->tv_sec;
++ txc->time.tv_sec = ts->tv_sec;
+ txc->time.tv_usec = ts->tv_nsec;
+ if (!(time_status & STA_NANO))
+ txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
--- /dev/null
+From ed77d88752aea56b33731aee42e7146379b90769 Mon Sep 17 00:00:00 2001
+From: Matt Roper <matthew.d.roper@intel.com>
+Date: Tue, 12 Nov 2019 14:47:56 -0800
+Subject: Revert "drm/i915/ehl: Update MOCS table for EHL"
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+commit ed77d88752aea56b33731aee42e7146379b90769 upstream.
+
+This reverts commit f4071997f1de016780ec6b79c63d90cd5886ee83.
+
+These extra EHL entries won't behave as expected without a bit more work
+on the kernel side so let's drop them until that kernel work has had a
+chance to land. Userspace trying to use these new entries won't get the
+advantage of the new functionality these entries are meant to provide,
+but at least it won't misbehave.
+
+When we do add these back in the future, we'll probably want to
+explicitly use separate tables for ICL and EHL so that userspace
+software that mistakenly uses these entries (which are undefined on ICL)
+sees the same behavior it sees with all the other undefined entries.
+
+Cc: Francisco Jerez <francisco.jerez.plata@intel.com>
+Cc: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: <stable@vger.kernel.org> # v5.3+
+Fixes: f4071997f1de ("drm/i915/ehl: Update MOCS table for EHL")
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191112224757.25116-1-matthew.d.roper@intel.com
+Reviewed-by: Francisco Jerez <currojerez@riseup.net>
+(cherry picked from commit 046091758b50a5fff79726a31c1391614a3d84c8)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_mocs.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -200,14 +200,6 @@ static const struct drm_i915_mocs_entry
+ MOCS_ENTRY(15, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+ L3_3_WB), \
+- /* Bypass LLC - Uncached (EHL+) */ \
+- MOCS_ENTRY(16, \
+- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+- L3_1_UC), \
+- /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+- MOCS_ENTRY(17, \
+- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+- L3_3_WB), \
+ /* Self-Snoop - L3 + LLC */ \
+ MOCS_ENTRY(18, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
dpaa2-eth-free-already-allocated-channels-on-probe-defer.patch
devlink-add-method-for-time-stamp-on-reporter-s-dump.patch
net-smc-fix-refcount-non-blocking-connect-part-2.patch
+alsa-usb-audio-fix-missing-error-check-at-mixer-resolution-test.patch
+alsa-usb-audio-not-submit-urb-for-stopped-endpoint.patch
+alsa-usb-audio-fix-incorrect-null-check-in-create_yamaha_midi_quirk.patch
+alsa-usb-audio-fix-incorrect-size-check-for-processing-extension-units.patch
+btrfs-fix-log-context-list-corruption-after-rename-exchange-operation.patch
+cgroup-freezer-call-cgroup_enter_frozen-with-preemption-disabled-in-ptrace_stop.patch
+input-ff-memless-kill-timer-in-destroy.patch
+input-synaptics-rmi4-fix-video-buffer-size.patch
+input-synaptics-rmi4-disable-the-relative-position-irq-in-the-f12-driver.patch
+input-synaptics-rmi4-do-not-consume-more-data-than-we-have-f11-f12.patch
+input-synaptics-rmi4-clear-irq-enables-for-f54.patch
+input-synaptics-rmi4-destroy-f54-poller-workqueue-when-removing.patch
+kvm-mmu-do-not-treat-zone_device-pages-as-being-reserved.patch
+ib-hfi1-ensure-r_tid_ack-is-valid-before-building-tid-rdma-ack-packet.patch
+ib-hfi1-calculate-flow-weight-based-on-qp-mtu-for-tid-rdma.patch
+ib-hfi1-tid-rdma-write-should-not-return-ib_wc_rnr_retry_exc_err.patch
+ib-hfi1-ensure-full-gen3-speed-in-a-gen4-system.patch
+ib-hfi1-use-a-common-pad-buffer-for-9b-and-16b-packets.patch
+i2c-acpi-force-bus-speed-to-400khz-if-a-silead-touchscreen-is-present.patch
+x86-quirks-disable-hpet-on-intel-coffe-lake-platforms.patch
+ecryptfs_lookup_interpose-lower_dentry-d_inode-is-not-stable.patch
+ecryptfs_lookup_interpose-lower_dentry-d_parent-is-not-stable-either.patch
+io_uring-ensure-registered-buffer-import-returns-the-io-length.patch
+drm-i915-update-rawclk-also-on-resume.patch
+revert-drm-i915-ehl-update-mocs-table-for-ehl.patch
+ntp-y2038-remove-incorrect-time_t-truncation.patch
+net-ethernet-dwmac-sun8i-use-the-correct-function-in-exit-path.patch
+iommu-vt-d-fix-qi_dev_iotlb_pfsid-and-qi_dev_eiotlb_pfsid-macros.patch
+mm-mempolicy-fix-the-wrong-return-value-and-potential-pages-leak-of-mbind.patch
+mm-memcg-switch-to-css_tryget-in-get_mem_cgroup_from_mm.patch
+mm-hugetlb-switch-to-css_tryget-in-hugetlb_cgroup_charge_cgroup.patch
+mm-slub-really-fix-slab-walking-for-init_on_free.patch
+mm-memory_hotplug-fix-try_offline_node.patch
+mm-page_io.c-do-not-free-shared-swap-slots.patch
+mmc-sdhci-of-at91-fix-quirk2-overwrite.patch
--- /dev/null
+From fc5db58539b49351e76f19817ed1102bf7c712d0 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Wed, 16 Oct 2019 18:38:16 +0800
+Subject: x86/quirks: Disable HPET on Intel Coffe Lake platforms
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit fc5db58539b49351e76f19817ed1102bf7c712d0 upstream.
+
+Some Coffee Lake platforms have a skewed HPET timer once the SoCs enter
+PC10, which in consequence marks TSC as unstable because HPET is used as
+watchdog clocksource for TSC.
+
+Harry Pan tried to work around it in the clocksource watchdog code [1]
+thereby creating a circular dependency between HPET and TSC. This also
+ignores the fact that HPET is not only unsuitable as a watchdog clocksource
+on these systems, it becomes unusable in general.
+
+Disable HPET on affected platforms.
+
+Suggested-by: Feng Tang <feng.tang@intel.com>
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203183
+Link: https://lore.kernel.org/lkml/20190516090651.1396-1-harry.pan@intel.com/ [1]
+Link: https://lkml.kernel.org/r/20191016103816.30650-1-kai.heng.feng@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/early-quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -709,6 +709,8 @@ static struct chipset early_qrk[] __init
+ */
+ { PCI_VENDOR_ID_INTEL, 0x0f00,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
++ { PCI_VENDOR_ID_INTEL, 0x3ec4,
++ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
+ {}