--- /dev/null
+From 3f12888dfae2a48741c4caa9214885b3aaf350f9 Mon Sep 17 00:00:00 2001
+From: Wenwen Wang <wang6495@umn.edu>
+Date: Sat, 5 May 2018 13:38:03 -0500
+Subject: ALSA: control: fix a redundant-copy issue
+
+From: Wenwen Wang <wang6495@umn.edu>
+
+commit 3f12888dfae2a48741c4caa9214885b3aaf350f9 upstream.
+
+In snd_ctl_elem_add_compat(), the fields of the struct 'data' need to be
+copied from the corresponding fields of the struct 'data32' in userspace.
+This is achieved by invoking the copy_from_user() and get_user() functions.
+The problem here is that the 'type' field is copied twice: once by
+copy_from_user() and once by get_user(). Given that the 'type' field is
+not used between the two copies, the second copy is completely redundant
+and should be removed for better performance and cleaner code. The double
+fetch can also lead to inconsistent data: the struct 'data32' resides in
+userspace, so a malicious userspace process can race to change the 'type'
+field between the two copies. Depending on how the data is used later,
+such an inconsistency may pose a security risk.
+
+For the above reasons, take out the second copy.
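+
+As an illustration only (not part of the fix, and using made-up names
+rather than anything from the kernel sources), the same double-fetch
+pattern can be reproduced in a small user-space program: reading a shared
+field twice allows another thread to change it in between, so a value
+that was validated on the first read may differ from the value actually
+used on the second read.
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static volatile int type;              /* stands in for data32->type */
+
+  static void *flipper(void *arg)
+  {
+          for (;;)                       /* keep changing the shared field */
+                  type = !type;
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t t;
+
+          pthread_create(&t, NULL, flipper, NULL);
+          for (;;) {
+                  int first = type;      /* first copy (validated) */
+                  int second = type;     /* second, redundant copy (used) */
+
+                  if (first != second) {
+                          printf("values diverged: %d vs %d\n", first, second);
+                          return 0;
+                  }
+          }
+  }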
+
+Signed-off-by: Wenwen Wang <wang6495@umn.edu>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/control_compat.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -400,8 +400,7 @@ static int snd_ctl_elem_add_compat(struc
+ if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
+ copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
+ goto error;
+- if (get_user(data->owner, &data32->owner) ||
+- get_user(data->type, &data32->type))
++ if (get_user(data->owner, &data32->owner))
+ goto error;
+ switch (data->type) {
+ case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
--- /dev/null
+From c8beccc19b92f5172994c0732db689c08f4f98e5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 8 May 2018 09:27:46 +0200
+Subject: ALSA: hda: Add Lenovo C50 All in one to the power_save blacklist
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit c8beccc19b92f5172994c0732db689c08f4f98e5 upstream.
+
+Power-saving is causing loud plops on the Lenovo C50 All in one, so add
+it to the blacklist.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1572975
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_intel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2072,6 +2072,8 @@ static struct snd_pci_quirk power_save_b
+ SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
++ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ {}
--- /dev/null
+From 21493316a3c4598f308d5a9fa31cc74639c4caff Mon Sep 17 00:00:00 2001
+From: Federico Cuello <fedux@fedux.com.ar>
+Date: Wed, 9 May 2018 00:13:38 +0200
+Subject: ALSA: usb: mixer: volume quirk for CM102-A+/102S+
+
+From: Federico Cuello <fedux@fedux.com.ar>
+
+commit 21493316a3c4598f308d5a9fa31cc74639c4caff upstream.
+
+Currently it's not possible to set volume lower than 26% (it just mutes).
+
+This also fixes the following warning:
+
+ Warning! Unlikely big volume range (=9472), cval->res is probably wrong.
+ [13] FU [PCM Playback Volume] ch = 2, val = -9473/-1/1
+
+and the volume now works correctly across the full range.
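+
+As a rough back-of-the-envelope check (illustration only, not taken from
+the driver): with the reported cval->min = -9473, cval->max = -1 and
+cval->res = 1, the control spans (max - min) / res = 9472 steps, which is
+exactly the "unlikely big volume range" the warning complains about.
+Clamping cval->min to -256 leaves a range of 255 steps, which the mixer
+layer can map onto the 0-100% scale sensibly.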
+
+Signed-off-by: Federico Cuello <fedux@fedux.com.ar>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/mixer.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -904,6 +904,14 @@ static void volume_control_quirks(struct
+ }
+ break;
+
++ case USB_ID(0x0d8c, 0x0103):
++ if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
++ usb_audio_info(chip,
++ "set volume quirk for CM102-A+/102S+\n");
++ cval->min = -256;
++ }
++ break;
++
+ case USB_ID(0x0471, 0x0101):
+ case USB_ID(0x0471, 0x0104):
+ case USB_ID(0x0471, 0x0105):
--- /dev/null
+From 7791e4aa59ad724e0b4c8b4dea547a5735108972 Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Thu, 25 Feb 2016 15:09:19 -0800
+Subject: cpufreq: intel_pstate: Enable HWP by default
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 7791e4aa59ad724e0b4c8b4dea547a5735108972 upstream.
+
+If the processor supports HWP, enable it by default without checking
+for the CPU model. This allows HWP to be enabled on all supported
+processors without further driver changes.
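+
+As a rough user-space illustration (not part of the driver; it assumes a
+GCC/Clang-style <cpuid.h> and that HWP is advertised in CPUID leaf 6,
+EAX bit 7, which is what the X86_FEATURE_HWP match below relies on):
+
+  #include <cpuid.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int eax, ebx, ecx, edx;
+
+          /* CPUID leaf 6: thermal and power management feature flags */
+          if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx))
+                  return 1;
+
+          /* EAX bit 7 advertises HWP (hardware-managed P-states) */
+          printf("HWP %ssupported\n", (eax & (1u << 7)) ? "" : "not ");
+          return 0;
+  }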
+
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Thomas Renninger <trenn@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 34 ++++++++++++++++++++++------------
+ 1 file changed, 22 insertions(+), 12 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1361,6 +1361,11 @@ static inline bool intel_pstate_platform
+ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+ #endif /* CONFIG_ACPI */
+
++static const struct x86_cpu_id hwp_support_ids[] __initconst = {
++ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
++ {}
++};
++
+ static int __init intel_pstate_init(void)
+ {
+ int cpu, rc = 0;
+@@ -1370,17 +1375,16 @@ static int __init intel_pstate_init(void
+ if (no_load)
+ return -ENODEV;
+
++ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
++ copy_cpu_funcs(&core_params.funcs);
++ hwp_active++;
++ goto hwp_cpu_matched;
++ }
++
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+- /*
+- * The Intel pstate driver will be ignored if the platform
+- * firmware has its own power management modes.
+- */
+- if (intel_pstate_platform_pwr_mgmt_exists())
+- return -ENODEV;
+-
+ cpu_def = (struct cpu_defaults *)id->driver_data;
+
+ copy_pid_params(&cpu_def->pid_policy);
+@@ -1389,17 +1393,20 @@ static int __init intel_pstate_init(void
+ if (intel_pstate_msrs_not_valid())
+ return -ENODEV;
+
++hwp_cpu_matched:
++ /*
++ * The Intel pstate driver will be ignored if the platform
++ * firmware has its own power management modes.
++ */
++ if (intel_pstate_platform_pwr_mgmt_exists())
++ return -ENODEV;
++
+ pr_info("Intel P-state driver initializing.\n");
+
+ all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
+ if (!all_cpu_data)
+ return -ENOMEM;
+
+- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+- pr_info("intel_pstate: HWP enabled\n");
+- hwp_active++;
+- }
+-
+ if (!hwp_active && hwp_only)
+ goto out;
+
+@@ -1410,6 +1417,9 @@ static int __init intel_pstate_init(void
+ intel_pstate_debug_expose_params();
+ intel_pstate_sysfs_expose_params();
+
++ if (hwp_active)
++ pr_info("intel_pstate: HWP enabled\n");
++
+ return rc;
+ out:
+ get_online_cpus();
--- /dev/null
+From 75274b33e779ae40a750bcb4bd0b07c4dfef4746 Mon Sep 17 00:00:00 2001
+From: Anders Roxell <anders.roxell@linaro.org>
+Date: Wed, 27 Jan 2016 20:26:54 +0100
+Subject: cpuidle: coupled: remove unused define cpuidle_coupled_lock
+
+From: Anders Roxell <anders.roxell@linaro.org>
+
+commit 75274b33e779ae40a750bcb4bd0b07c4dfef4746 upstream.
+
+This was found with the -RT patch enabled, but the fix applies to
+non-RT as well.
+
+Building multi_v7_defconfig with PREEMPT_RT_FULL=y caused the following
+compilation warning without this fix:
+../drivers/cpuidle/coupled.c:122:21: warning: 'cpuidle_coupled_lock'
+defined but not used [-Wunused-variable]
+
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpuidle/coupled.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/cpuidle/coupled.c
++++ b/drivers/cpuidle/coupled.c
+@@ -119,7 +119,6 @@ struct cpuidle_coupled {
+
+ #define CPUIDLE_COUPLED_NOT_IDLE (-1)
+
+-static DEFINE_MUTEX(cpuidle_coupled_lock);
+ static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+ /*
--- /dev/null
+From ebded02788b5d7c7600f8cff26ae07896d568649 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Tue, 15 Mar 2016 14:55:39 -0700
+Subject: mm: filemap: avoid unnecessary calls to lock_page when waiting for IO to complete during a read
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit ebded02788b5d7c7600f8cff26ae07896d568649 upstream.
+
+In the generic read paths the kernel looks up a page in the page cache
+and if it's up to date, it is used. If not, the page lock is acquired
+to wait for IO to complete and then check the page. If multiple
+processes are waiting on IO, they all serialise against the lock and
+duplicate the checks. This is unnecessary.
+
+The page lock in itself does not give any guarantees to the callers
+about the page state as it can be immediately truncated or reclaimed
+after the page is unlocked. It's sufficient to wait_on_page_locked and
+then continue if the page is up to date on wakeup.
+
+It is possible that a truncated but up-to-date page is returned, but the
+reference taken during the read prevents it from disappearing underneath
+the caller, and the data is still valid if PageUptodate.
+
+The overall impact is small as even if processes serialise on the lock,
+the lock section is tiny once the IO is complete. Profiles indicated
+that unlock_page and friends are generally a tiny portion of a
+read-intensive workload. An artificial test was created that had
+instances of dd access a cache-cold file on an ext4 filesystem and
+measure how long the read took.
+
+paralleldd
+ 4.4.0 4.4.0
+ vanilla avoidlock
+Amean Elapsd-1 5.28 ( 0.00%) 5.15 ( 2.50%)
+Amean Elapsd-4 5.29 ( 0.00%) 5.17 ( 2.12%)
+Amean Elapsd-7 5.28 ( 0.00%) 5.18 ( 1.78%)
+Amean Elapsd-12 5.20 ( 0.00%) 5.33 ( -2.50%)
+Amean Elapsd-21 5.14 ( 0.00%) 5.21 ( -1.41%)
+Amean Elapsd-30 5.30 ( 0.00%) 5.12 ( 3.38%)
+Amean Elapsd-48 5.78 ( 0.00%) 5.42 ( 6.21%)
+Amean Elapsd-79 6.78 ( 0.00%) 6.62 ( 2.46%)
+Amean Elapsd-110 9.09 ( 0.00%) 8.99 ( 1.15%)
+Amean Elapsd-128 10.60 ( 0.00%) 10.43 ( 1.66%)
+
+The impact is small but intuitively, it makes sense to avoid unnecessary
+calls to lock_page.
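+
+The control-flow change can be condensed into the following sketch (stub
+types and helpers are used so it stands alone; this is not the actual
+mm/filemap.c code):
+
+  #include <stdbool.h>
+
+  struct page { bool uptodate; bool locked; };
+
+  /* trivial single-threaded stand-ins for the real page-lock primitives */
+  static void wait_on_page_locked(struct page *p) { while (p->locked) ; }
+  static void lock_page(struct page *p)    { p->locked = true; }
+  static void unlock_page(struct page *p)  { p->locked = false; }
+  static bool PageUptodate(struct page *p) { return p->uptodate; }
+
+  static int read_path(struct page *page)
+  {
+          /* new fast path: wait for IO without taking the page lock */
+          wait_on_page_locked(page);
+          if (PageUptodate(page))
+                  return 0;              /* use the page, no lock taken */
+
+          /* slow path: only now serialise to tell the other cases apart */
+          lock_page(page);
+          /* ... truncation / IO-error handling under the lock ... */
+          unlock_page(page);
+          return -1;
+  }
+
+  int main(void)
+  {
+          struct page p = { .uptodate = true, .locked = false };
+
+          return read_path(&p);
+  }
+
+Previously every waiter called lock_page() first and re-checked the state
+under the lock, so concurrent readers of the same page serialised on the
+lock even though none of them needed it.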
+
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/filemap.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1581,6 +1581,15 @@ find_page:
+ index, last_index - index);
+ }
+ if (!PageUptodate(page)) {
++ /*
++ * See comment in do_read_cache_page on why
++ * wait_on_page_locked is used to avoid unnecessary
++ * serialisations and why it's safe.
++ */
++ wait_on_page_locked_killable(page);
++ if (PageUptodate(page))
++ goto page_ok;
++
+ if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+ !mapping->a_ops->is_partially_uptodate)
+ goto page_not_up_to_date;
+@@ -2253,12 +2262,52 @@ filler:
+ if (PageUptodate(page))
+ goto out;
+
++ /*
++ * Page is not up to date and may be locked due to one of the following
++ * case a: Page is being filled and the page lock is held
++ * case b: Read/write error clearing the page uptodate status
++ * case c: Truncation in progress (page locked)
++ * case d: Reclaim in progress
++ *
++ * Case a, the page will be up to date when the page is unlocked.
++ * There is no need to serialise on the page lock here as the page
++ * is pinned so the lock gives no additional protection. Even if
++ * the page is truncated, the data is still valid if PageUptodate as
++ * it's a read vs truncate race.
++ * Case b, the page will not be up to date
++ * Case c, the page may be truncated but in itself, the data may still
++ * be valid after IO completes as it's a read vs truncate race. The
++ * operation must restart if the page is not uptodate on unlock but
++ * otherwise serialising on page lock to stabilise the mapping gives
++ * no additional guarantees to the caller as the page lock is
++ * released before return.
++ * Case d, similar to truncation. If reclaim holds the page lock, it
++ * will be a race with remove_mapping that determines if the mapping
++ * is valid on unlock but otherwise the data is valid and there is
++ * no need to serialise with page lock.
++ *
++ * As the page lock gives no additional guarantee, we optimistically
++ * wait on the page to be unlocked and check if it's up to date and
++ * use the page if it is. Otherwise, the page lock is required to
++ * distinguish between the different cases. The motivation is that we
++ * avoid spurious serialisations and wakeups when multiple processes
++ * wait on the same page for IO to complete.
++ */
++ wait_on_page_locked(page);
++ if (PageUptodate(page))
++ goto out;
++
++ /* Distinguish between all the cases under the safety of the lock */
+ lock_page(page);
++
++ /* Case c or d, restart the operation */
+ if (!page->mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+ goto repeat;
+ }
++
++ /* Someone else locked and filled the page in a very small window */
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ goto out;
--- /dev/null
+From 32b635298ff4e991d8d8f64dc23782b02eec29c3 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Tue, 15 Mar 2016 14:55:36 -0700
+Subject: mm: filemap: remove redundant code in do_read_cache_page
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 32b635298ff4e991d8d8f64dc23782b02eec29c3 upstream.
+
+do_read_cache_page and __read_cache_page duplicate page filler code when
+filling the page for the first time. This patch simply removes the
+duplicate logic.
+
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/filemap.c | 43 ++++++++++++-------------------------------
+ 1 file changed, 12 insertions(+), 31 deletions(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2215,7 +2215,7 @@ static struct page *wait_on_page_read(st
+ return page;
+ }
+
+-static struct page *__read_cache_page(struct address_space *mapping,
++static struct page *do_read_cache_page(struct address_space *mapping,
+ pgoff_t index,
+ int (*filler)(void *, struct page *),
+ void *data,
+@@ -2237,31 +2237,19 @@ repeat:
+ /* Presumably ENOMEM for radix tree node */
+ return ERR_PTR(err);
+ }
++
++filler:
+ err = filler(data, page);
+ if (err < 0) {
+ page_cache_release(page);
+- page = ERR_PTR(err);
+- } else {
+- page = wait_on_page_read(page);
++ return ERR_PTR(err);
+ }
+- }
+- return page;
+-}
+-
+-static struct page *do_read_cache_page(struct address_space *mapping,
+- pgoff_t index,
+- int (*filler)(void *, struct page *),
+- void *data,
+- gfp_t gfp)
+
+-{
+- struct page *page;
+- int err;
+-
+-retry:
+- page = __read_cache_page(mapping, index, filler, data, gfp);
+- if (IS_ERR(page))
+- return page;
++ page = wait_on_page_read(page);
++ if (IS_ERR(page))
++ return page;
++ goto out;
++ }
+ if (PageUptodate(page))
+ goto out;
+
+@@ -2269,21 +2257,14 @@ retry:
+ if (!page->mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+- goto retry;
++ goto repeat;
+ }
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ goto out;
+ }
+- err = filler(data, page);
+- if (err < 0) {
+- page_cache_release(page);
+- return ERR_PTR(err);
+- } else {
+- page = wait_on_page_read(page);
+- if (IS_ERR(page))
+- return page;
+- }
++ goto filler;
++
+ out:
+ mark_page_accessed(page);
+ return page;
--- /dev/null
+From 349524bc0da698ec77f2057cf4a4948eb6349265 Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Wed, 10 Jan 2018 17:10:12 +1100
+Subject: powerpc: Don't preempt_disable() in show_cpuinfo()
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 349524bc0da698ec77f2057cf4a4948eb6349265 upstream.
+
+This causes warnings from the cpufreq mutex code. This is also rather
+unnecessary and ineffective. If we really want to prevent concurrent
+unplug, we could take the unplug read lock but I don't see this being
+critical.
+
+Fixes: cd77b5ce208c ("powerpc/powernv/cpufreq: Fix the frequency read by /proc/cpuinfo")
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/setup-common.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -217,14 +217,6 @@ static int show_cpuinfo(struct seq_file
+ unsigned short maj;
+ unsigned short min;
+
+- /* We only show online cpus: disable preempt (overzealous, I
+- * knew) to prevent cpu going down. */
+- preempt_disable();
+- if (!cpu_online(cpu_id)) {
+- preempt_enable();
+- return 0;
+- }
+-
+ #ifdef CONFIG_SMP
+ pvr = per_cpu(cpu_pvr, cpu_id);
+ #else
+@@ -329,9 +321,6 @@ static int show_cpuinfo(struct seq_file
+ #ifdef CONFIG_SMP
+ seq_printf(m, "\n");
+ #endif
+-
+- preempt_enable();
+-
+ /* If this is the last cpu, print the summary */
+ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+ show_cpuinfo_summary(m);
--- /dev/null
+From 786842b62f81f20d14894925e8c225328ee8144b Mon Sep 17 00:00:00 2001
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+Date: Wed, 9 Dec 2015 17:18:18 +1100
+Subject: powerpc/powernv: panic() on OPAL < V3
+
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+
+commit 786842b62f81f20d14894925e8c225328ee8144b upstream.
+
+The OpenPower Abstraction Layer firmware went through a couple
+of iterations in the lab before being released. What we now know
+as OPAL advertises itself as OPALv3.
+
+OPALv2 and OPALv1 never made it outside the lab, and the possibility
+of anyone at all ever building a mainline kernel today and expecting
+it to boot on such hardware is zero.
+
+Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -103,11 +103,8 @@ int __init early_init_dt_scan_opal(unsig
+ powerpc_firmware_features |= FW_FEATURE_OPALv2;
+ powerpc_firmware_features |= FW_FEATURE_OPALv3;
+ pr_info("OPAL V3 detected !\n");
+- } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+- powerpc_firmware_features |= FW_FEATURE_OPALv2;
+- pr_info("OPAL V2 detected !\n");
+ } else {
+- pr_info("OPAL V1 detected !\n");
++ panic("OPAL != V3 detected, no longer supported.\n");
+ }
+
+ /* Reinit all cores with the right endian */
--- /dev/null
+From e4d54f71d29997344b4c4c8d47708240f9f23a5c Mon Sep 17 00:00:00 2001
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+Date: Wed, 9 Dec 2015 17:18:20 +1100
+Subject: powerpc/powernv: remove FW_FEATURE_OPALv3 and just use FW_FEATURE_OPAL
+
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+
+commit e4d54f71d29997344b4c4c8d47708240f9f23a5c upstream.
+
+Long ago, only in the lab, there were OPALv1 and OPALv2. Now there is
+just OPALv3, with nobody ever expecting anything on pre-OPALv3 to
+be cared about or supported by mainline kernels.
+
+So, let's remove FW_FEATURE_OPALv3 and instead use FW_FEATURE_OPAL
+exclusively.
+
+Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/firmware.h | 3 -
+ arch/powerpc/platforms/powernv/eeh-powernv.c | 4 -
+ arch/powerpc/platforms/powernv/idle.c | 2
+ arch/powerpc/platforms/powernv/opal-xscom.c | 2
+ arch/powerpc/platforms/powernv/opal.c | 25 ++++-----
+ arch/powerpc/platforms/powernv/pci-ioda.c | 2
+ arch/powerpc/platforms/powernv/setup.c | 8 +-
+ arch/powerpc/platforms/powernv/smp.c | 74 +++++++++++----------------
+ drivers/cpufreq/powernv-cpufreq.c | 2
+ drivers/cpuidle/cpuidle-powernv.c | 2
+ 10 files changed, 54 insertions(+), 70 deletions(-)
+
+--- a/arch/powerpc/include/asm/firmware.h
++++ b/arch/powerpc/include/asm/firmware.h
+@@ -51,7 +51,6 @@
+ #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+ #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+ #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+-#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
+
+ #ifndef __ASSEMBLY__
+
+@@ -69,7 +68,7 @@ enum {
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv3,
++ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
+ FW_FEATURE_POWERNV_ALWAYS = 0,
+ FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
+@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+
+- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
+- pr_warn("%s: OPALv3 is required !\n",
++ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
++ pr_warn("%s: OPAL is required !\n",
+ __func__);
+ return -EINVAL;
+ }
+--- a/arch/powerpc/platforms/powernv/idle.c
++++ b/arch/powerpc/platforms/powernv/idle.c
+@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(v
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ goto out;
+
+- if (!firmware_has_feature(FW_FEATURE_OPALv3))
++ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ goto out;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+--- a/arch/powerpc/platforms/powernv/opal-xscom.c
++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
+@@ -126,7 +126,7 @@ static const struct scom_controller opal
+
+ static int opal_xscom_init(void)
+ {
+- if (firmware_has_feature(FW_FEATURE_OPALv3))
++ if (firmware_has_feature(FW_FEATURE_OPAL))
+ scom_init(&opal_scom_controller);
+ return 0;
+ }
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -98,10 +98,9 @@ int __init early_init_dt_scan_opal(unsig
+ pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
+ opal.size, sizep, runtimesz);
+
+- powerpc_firmware_features |= FW_FEATURE_OPAL;
+ if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+- powerpc_firmware_features |= FW_FEATURE_OPALv3;
+- pr_info("OPAL V3 detected !\n");
++ powerpc_firmware_features |= FW_FEATURE_OPAL;
++ pr_info("OPAL detected !\n");
+ } else {
+ panic("OPAL != V3 detected, no longer supported.\n");
+ }
+@@ -348,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, con
+ * enough room and be done with it
+ */
+ spin_lock_irqsave(&opal_write_lock, flags);
+- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+- rc = opal_console_write_buffer_space(vtermno, &olen);
+- len = be64_to_cpu(olen);
+- if (rc || len < total_len) {
+- spin_unlock_irqrestore(&opal_write_lock, flags);
+- /* Closed -> drop characters */
+- if (rc)
+- return total_len;
+- opal_poll_events(NULL);
+- return -EAGAIN;
+- }
++ rc = opal_console_write_buffer_space(vtermno, &olen);
++ len = be64_to_cpu(olen);
++ if (rc || len < total_len) {
++ spin_unlock_irqrestore(&opal_write_lock, flags);
++ /* Closed -> drop characters */
++ if (rc)
++ return total_len;
++ opal_poll_events(NULL);
++ return -EAGAIN;
+ }
+
+ /* We still try to handle partial completions, though they
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -344,7 +344,7 @@ static void __init pnv_ioda_parse_m64_wi
+ return;
+ }
+
+- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
++ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+ pr_info(" Firmware too old to support M64 window\n");
+ return;
+ }
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -140,8 +140,8 @@ static void pnv_show_cpuinfo(struct seq_
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ seq_printf(m, "machine\t\t: PowerNV %s\n", model);
+- if (firmware_has_feature(FW_FEATURE_OPALv3))
+- seq_printf(m, "firmware\t: OPAL v3\n");
++ if (firmware_has_feature(FW_FEATURE_OPAL))
++ seq_printf(m, "firmware\t: OPAL\n");
+ else
+ seq_printf(m, "firmware\t: BML\n");
+ of_node_put(root);
+@@ -270,9 +270,9 @@ static void pnv_kexec_cpu_down(int crash
+ {
+ xics_kexec_teardown_cpu(secondary);
+
+- /* On OPAL v3, we return all CPUs to firmware */
++ /* On OPAL, we return all CPUs to firmware */
+
+- if (!firmware_has_feature(FW_FEATURE_OPALv3))
++ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
+
+ if (secondary) {
+--- a/arch/powerpc/platforms/powernv/smp.c
++++ b/arch/powerpc/platforms/powernv/smp.c
+@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
+ unsigned long start_here =
+ __pa(ppc_function_entry(generic_secondary_smp_init));
+ long rc;
++ uint8_t status;
+
+ BUG_ON(nr < 0 || nr >= NR_CPUS);
+
+ /*
+- * If we already started or OPALv3 is not supported, we just
++ * If we already started or OPAL is not supported, we just
+ * kick the CPU via the PACA
+ */
+- if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv3))
++ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
+ goto kick;
+
+ /*
+@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
+ * first time. OPAL v3 allows us to query OPAL to know if it
+ * has the CPUs, so we do that
+ */
+- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+- uint8_t status;
+-
+- rc = opal_query_cpu_status(pcpu, &status);
+- if (rc != OPAL_SUCCESS) {
+- pr_warn("OPAL Error %ld querying CPU %d state\n",
+- rc, nr);
+- return -ENODEV;
+- }
++ rc = opal_query_cpu_status(pcpu, &status);
++ if (rc != OPAL_SUCCESS) {
++ pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
++ return -ENODEV;
++ }
+
+- /*
+- * Already started, just kick it, probably coming from
+- * kexec and spinning
+- */
+- if (status == OPAL_THREAD_STARTED)
+- goto kick;
++ /*
++ * Already started, just kick it, probably coming from
++ * kexec and spinning
++ */
++ if (status == OPAL_THREAD_STARTED)
++ goto kick;
+
+- /*
+- * Available/inactive, let's kick it
+- */
+- if (status == OPAL_THREAD_INACTIVE) {
+- pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+- nr, pcpu);
+- rc = opal_start_cpu(pcpu, start_here);
+- if (rc != OPAL_SUCCESS) {
+- pr_warn("OPAL Error %ld starting CPU %d\n",
+- rc, nr);
+- return -ENODEV;
+- }
+- } else {
+- /*
+- * An unavailable CPU (or any other unknown status)
+- * shouldn't be started. It should also
+- * not be in the possible map but currently it can
+- * happen
+- */
+- pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+- " (status %d)...\n", nr, pcpu, status);
++ /*
++ * Available/inactive, let's kick it
++ */
++ if (status == OPAL_THREAD_INACTIVE) {
++ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
++ rc = opal_start_cpu(pcpu, start_here);
++ if (rc != OPAL_SUCCESS) {
++ pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
+ return -ENODEV;
+ }
+ } else {
+ /*
+- * On OPAL v2, we just kick it and hope for the best,
+- * we must not test the error from opal_start_cpu() or
+- * we would fail to get CPUs from kexec.
++ * An unavailable CPU (or any other unknown status)
++ * shouldn't be started. It should also
++ * not be in the possible map but currently it can
++ * happen
+ */
+- opal_start_cpu(pcpu, start_here);
++ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
++ " (status %d)...\n", nr, pcpu, status);
++ return -ENODEV;
+ }
+- kick:
++
++kick:
+ return smp_generic_kick_cpu(nr);
+ }
+
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -592,7 +592,7 @@ static int __init powernv_cpufreq_init(v
+ int rc = 0;
+
+ /* Don't probe on pseries (guest) platforms */
+- if (!firmware_has_feature(FW_FEATURE_OPALv3))
++ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ /* Discover pstates from device tree and init */
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -282,7 +282,7 @@ static int powernv_idle_probe(void)
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
++ if (firmware_has_feature(FW_FEATURE_OPAL)) {
+ cpuidle_state_table = powernv_states;
+ /* Device tree can indicate more idle states */
+ max_idle_state = powernv_add_idle_states();
--- /dev/null
+From 7261aafc095763b119136a562540dea7b1ccf657 Mon Sep 17 00:00:00 2001
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+Date: Wed, 9 Dec 2015 17:18:19 +1100
+Subject: powerpc/powernv: Remove OPALv2 firmware define and references
+
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+
+commit 7261aafc095763b119136a562540dea7b1ccf657 upstream.
+
+OPALv2 only ever existed in the lab and didn't escape to the world.
+All OPAL systems in the wild are OPALv3.
+
+The probability of there being an OPALv2 system still powered on
+anywhere inside IBM is approximately zero, let alone anyone
+expecting to run mainline kernels.
+
+So, start to remove references to OPALv2.
+
+Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/firmware.h | 4 +---
+ arch/powerpc/platforms/powernv/opal.c | 8 ++------
+ arch/powerpc/platforms/powernv/setup.c | 4 ----
+ arch/powerpc/platforms/powernv/smp.c | 4 ++--
+ 4 files changed, 5 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/include/asm/firmware.h
++++ b/arch/powerpc/include/asm/firmware.h
+@@ -47,7 +47,6 @@
+ #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
+ #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
+ #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
+-#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
+ #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
+ #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+ #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+@@ -70,8 +69,7 @@ enum {
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+- FW_FEATURE_OPALv3,
++ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv3,
+ FW_FEATURE_POWERNV_ALWAYS = 0,
+ FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -100,7 +100,6 @@ int __init early_init_dt_scan_opal(unsig
+
+ powerpc_firmware_features |= FW_FEATURE_OPAL;
+ if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+- powerpc_firmware_features |= FW_FEATURE_OPALv2;
+ powerpc_firmware_features |= FW_FEATURE_OPALv3;
+ pr_info("OPAL V3 detected !\n");
+ } else {
+@@ -349,7 +348,7 @@ int opal_put_chars(uint32_t vtermno, con
+ * enough room and be done with it
+ */
+ spin_lock_irqsave(&opal_write_lock, flags);
+- if (firmware_has_feature(FW_FEATURE_OPALv2)) {
++ if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ rc = opal_console_write_buffer_space(vtermno, &olen);
+ len = be64_to_cpu(olen);
+ if (rc || len < total_len) {
+@@ -693,10 +692,7 @@ static int __init opal_init(void)
+ }
+
+ /* Register OPAL consoles if any ports */
+- if (firmware_has_feature(FW_FEATURE_OPALv2))
+- consoles = of_find_node_by_path("/ibm,opal/consoles");
+- else
+- consoles = of_node_get(opal_node);
++ consoles = of_find_node_by_path("/ibm,opal/consoles");
+ if (consoles) {
+ for_each_child_of_node(consoles, np) {
+ if (strcmp(np->name, "serial"))
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -142,10 +142,6 @@ static void pnv_show_cpuinfo(struct seq_
+ seq_printf(m, "machine\t\t: PowerNV %s\n", model);
+ if (firmware_has_feature(FW_FEATURE_OPALv3))
+ seq_printf(m, "firmware\t: OPAL v3\n");
+- else if (firmware_has_feature(FW_FEATURE_OPALv2))
+- seq_printf(m, "firmware\t: OPAL v2\n");
+- else if (firmware_has_feature(FW_FEATURE_OPAL))
+- seq_printf(m, "firmware\t: OPAL v1\n");
+ else
+ seq_printf(m, "firmware\t: BML\n");
+ of_node_put(root);
+--- a/arch/powerpc/platforms/powernv/smp.c
++++ b/arch/powerpc/platforms/powernv/smp.c
+@@ -65,10 +65,10 @@ static int pnv_smp_kick_cpu(int nr)
+ BUG_ON(nr < 0 || nr >= NR_CPUS);
+
+ /*
+- * If we already started or OPALv2 is not supported, we just
++ * If we already started or OPALv3 is not supported, we just
+ * kick the CPU via the PACA
+ */
+- if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
++ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv3))
+ goto kick;
+
+ /*
--- /dev/null
+From 84ad5802a33a4964a49b8f7d24d80a214a096b19 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 14 Jan 2016 15:20:18 -0800
+Subject: proc: meminfo: estimate available memory more conservatively
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 84ad5802a33a4964a49b8f7d24d80a214a096b19 upstream.
+
+The MemAvailable item in /proc/meminfo is meant to give users a hint of
+how much memory is allocatable without causing swapping, so it excludes
+the zones' low watermarks as unavailable to userspace.
+
+However, for a userspace allocation, kswapd will actually reclaim until
+the free pages hit a combination of the high watermark and the page
+allocator's lowmem protection that keeps a certain amount of DMA and
+DMA32 memory from userspace as well.
+
+Subtract the full amount we know to be unavailable to userspace from the
+number of free pages when calculating MemAvailable.
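+
+As a rough worked example (the numbers are invented purely for
+illustration): on a system with 80,000 free pages, a summed low watermark
+of 20,000 pages and totalreserve_pages of 35,000 (high watermarks plus
+lowmem reserves), the old estimate reported 80,000 - 20,000 = 60,000
+pages as available, while the new one reports 80,000 - 35,000 = 45,000,
+which is closer to what userspace can actually allocate before kswapd
+starts reclaiming.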
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/meminfo.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/fs/proc/meminfo.c
++++ b/fs/proc/meminfo.c
+@@ -57,11 +57,8 @@ static int meminfo_proc_show(struct seq_
+ /*
+ * Estimate the amount of memory available for userspace allocations,
+ * without causing swapping.
+- *
+- * Free memory cannot be taken below the low watermark, before the
+- * system starts swapping.
+ */
+- available = i.freeram - wmark_low;
++ available = i.freeram - totalreserve_pages;
+
+ /*
+ * Not all the page cache can be freed, otherwise the system will
pipe-cap-initial-pipe-capacity-according-to-pipe-max-size-limit.patch
futex-futex_wake_op-fix-sign_extend32-sign-bits.patch
kernel-exit.c-avoid-undefined-behaviour-when-calling-wait4.patch
+usbip-usbip_host-refine-probe-and-disconnect-debug-msgs-to-be-useful.patch
+usbip-usbip_host-delete-device-from-busid_table-after-rebind.patch
+usbip-usbip_host-run-rebind-from-exit-when-module-is-removed.patch
+usbip-usbip_host-fix-null-ptr-deref-and-use-after-free-errors.patch
+usbip-usbip_host-fix-bad-unlock-balance-during-stub_probe.patch
+alsa-usb-mixer-volume-quirk-for-cm102-a-102s.patch
+alsa-hda-add-lenovo-c50-all-in-one-to-the-power_save-blacklist.patch
+alsa-control-fix-a-redundant-copy-issue.patch
+spi-pxa2xx-allow-64-bit-dma.patch
+powerpc-powernv-panic-on-opal-v3.patch
+powerpc-powernv-remove-opalv2-firmware-define-and-references.patch
+powerpc-powernv-remove-fw_feature_opalv3-and-just-use-fw_feature_opal.patch
+cpuidle-coupled-remove-unused-define-cpuidle_coupled_lock.patch
+powerpc-don-t-preempt_disable-in-show_cpuinfo.patch
+vmscan-do-not-force-scan-file-lru-if-its-absolute-size-is-small.patch
+proc-meminfo-estimate-available-memory-more-conservatively.patch
+mm-filemap-remove-redundant-code-in-do_read_cache_page.patch
+mm-filemap-avoid-unnecessary-calls-to-lock_page-when-waiting-for-io-to-complete-during-a-read.patch
+signals-avoid-unnecessary-taking-of-sighand-siglock.patch
+cpufreq-intel_pstate-enable-hwp-by-default.patch
+tracing-x86-xen-remove-zero-data-size-trace-events-trace_xen_mmu_flush_tlb-_all.patch
--- /dev/null
+From c7be96af89d4b53211862d8599b2430e8900ed92 Mon Sep 17 00:00:00 2001
+From: Waiman Long <Waiman.Long@hpe.com>
+Date: Wed, 14 Dec 2016 15:04:10 -0800
+Subject: signals: avoid unnecessary taking of sighand->siglock
+
+From: Waiman Long <Waiman.Long@hpe.com>
+
+commit c7be96af89d4b53211862d8599b2430e8900ed92 upstream.
+
+When running a certain database workload on a high-end system with many
+CPUs, it was found that spinlock contention in the sigprocmask syscalls
+became a significant portion of the overall CPU cycles, as shown below.
+
+ 9.30% 9.30% 905387 dataserver /proc/kcore 0x7fff8163f4d2
+ [k] _raw_spin_lock_irq
+ |
+ ---_raw_spin_lock_irq
+ |
+ |--99.34%-- __set_current_blocked
+ | sigprocmask
+ | sys_rt_sigprocmask
+ | system_call_fastpath
+ | |
+ | |--50.63%-- __swapcontext
+ | | |
+ | | |--99.91%-- upsleepgeneric
+ | |
+ | |--49.36%-- __setcontext
+ | | ktskRun
+
+Looking further into the swapcontext function in glibc, it was found that
+the function always calls sigprocmask() without checking whether the
+signal mask has actually changed.
+
+A check was added to the __set_current_blocked() function to avoid taking
+the sighand->siglock spinlock if there is no change in the signal mask.
+This will prevent unneeded spinlock contention when many threads are
+trying to call sigprocmask().
+
+With this patch applied, the spinlock contention in sigprocmask() was
+gone.
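+
+A compressed user-space analogue of the idea (hypothetical names, and a
+pthread mutex standing in for sighand->siglock; the kernel uses the new
+sigequalsets() helper rather than memcmp()):
+
+  #include <pthread.h>
+  #include <signal.h>
+  #include <string.h>
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static sigset_t blocked;                /* stands in for current->blocked */
+
+  static void set_current_blocked(const sigset_t *newset)
+  {
+          /* fast path: nothing changed, skip the contended lock entirely */
+          if (memcmp(&blocked, newset, sizeof(*newset)) == 0)
+                  return;
+
+          pthread_mutex_lock(&lock);
+          blocked = *newset;
+          pthread_mutex_unlock(&lock);
+  }
+
+  int main(void)
+  {
+          sigset_t set;
+
+          sigemptyset(&set);
+          sigaddset(&set, SIGUSR1);
+          set_current_blocked(&set);      /* takes the lock once */
+          set_current_blocked(&set);      /* second call returns early */
+          return 0;
+  }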
+
+Link: http://lkml.kernel.org/r/1474979209-11867-1-git-send-email-Waiman.Long@hpe.com
+Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Stas Sergeev <stsp@list.ru>
+Cc: Scott J Norton <scott.norton@hpe.com>
+Cc: Douglas Hatch <doug.hatch@hpe.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/signal.h | 17 +++++++++++++++++
+ kernel/signal.c | 7 +++++++
+ 2 files changed, 24 insertions(+)
+
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t
+ }
+ }
+
++static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
++{
++ switch (_NSIG_WORDS) {
++ case 4:
++ return (set1->sig[3] == set2->sig[3]) &&
++ (set1->sig[2] == set2->sig[2]) &&
++ (set1->sig[1] == set2->sig[1]) &&
++ (set1->sig[0] == set2->sig[0]);
++ case 2:
++ return (set1->sig[1] == set2->sig[1]) &&
++ (set1->sig[0] == set2->sig[0]);
++ case 1:
++ return set1->sig[0] == set2->sig[0];
++ }
++ return 0;
++}
++
+ #define sigmask(sig) (1UL << ((sig) - 1))
+
+ #ifndef __HAVE_ARCH_SIG_SETOPS
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2495,6 +2495,13 @@ void __set_current_blocked(const sigset_
+ {
+ struct task_struct *tsk = current;
+
++ /*
++ * In case the signal mask hasn't changed, there is nothing we need
++ * to do. The current->blocked shouldn't be modified by another task.
++ */
++ if (sigequalsets(&tsk->blocked, newset))
++ return;
++
+ spin_lock_irq(&tsk->sighand->siglock);
+ __set_task_blocked(tsk, newset);
+ spin_unlock_irq(&tsk->sighand->siglock);
--- /dev/null
+From efc4a13724b852ddaa3358402a8dec024ffbcb17 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Thu, 19 Apr 2018 19:53:32 +0300
+Subject: spi: pxa2xx: Allow 64-bit DMA
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit efc4a13724b852ddaa3358402a8dec024ffbcb17 upstream.
+
+Currently only a 32-bit device address is supported for DMA. However,
+starting from the Intel Sunrisepoint PCH the DMA address of the device
+FIFO can be 64-bit.
+
+Change the respective variable to phys_addr_t to match the DMA engine's
+expectations.
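+
+A tiny stand-alone demonstration of why the type matters (the address
+value is made up for the example): storing a 64-bit bus address in a u32
+silently drops the upper half.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* hypothetical device FIFO address above 4 GiB */
+          uint64_t fifo_addr = 0x40fe103000ULL;
+          uint32_t truncated = (uint32_t)fifo_addr;
+
+          printf("full: %#llx  truncated: %#x\n",
+                 (unsigned long long)fifo_addr, truncated);
+          return 0;
+  }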
+
+Fixes: 34cadd9c1bcb ("spi: pxa2xx: Add support for Intel Sunrisepoint")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-pxa2xx.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-pxa2xx.h
++++ b/drivers/spi/spi-pxa2xx.h
+@@ -38,7 +38,7 @@ struct driver_data {
+
+ /* SSP register addresses */
+ void __iomem *ioaddr;
+- u32 ssdr_physical;
++ phys_addr_t ssdr_physical;
+
+ /* SSP masks*/
+ u32 dma_cr1;
--- /dev/null
+From 45dd9b0666a162f8e4be76096716670cf1741f0e Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Wed, 9 May 2018 14:36:09 -0400
+Subject: tracing/x86/xen: Remove zero data size trace events trace_xen_mmu_flush_tlb{_all}
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 45dd9b0666a162f8e4be76096716670cf1741f0e upstream.
+
+Doing an audit of trace events, I discovered two trace events in the xen
+subsystem that use a hack to create zero data size trace events. This is not
+what trace events are for. Trace events add memory footprint overhead, and
+if all you need to do is see if a function is hit or not, simply make that
+function noinline and use function tracer filtering.
+
+Worse yet, the hack used was:
+
+ __array(char, x, 0)
+
+This creates a static string of zero length. Ftrace assumes that such a
+construct is a dynamic, nul-terminated string, which is not the case with
+these tracepoints and can cause problems in various parts of ftrace.
+
+Nuke the trace events!
+
+Link: http://lkml.kernel.org/r/20180509144605.5a220327@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 95a7d76897c1e ("xen/mmu: Use Xen specific TLB flush instead of the generic one.")
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/mmu.c | 4 ----
+ include/trace/events/xen.h | 16 ----------------
+ 2 files changed, 20 deletions(-)
+
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1316,8 +1316,6 @@ void xen_flush_tlb_all(void)
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+- trace_xen_mmu_flush_tlb_all(0);
+-
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+@@ -1335,8 +1333,6 @@ static void xen_flush_tlb(void)
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+- trace_xen_mmu_flush_tlb(0);
+-
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -377,22 +377,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+
+-TRACE_EVENT(xen_mmu_flush_tlb_all,
+- TP_PROTO(int x),
+- TP_ARGS(x),
+- TP_STRUCT__entry(__array(char, x, 0)),
+- TP_fast_assign((void)x),
+- TP_printk("%s", "")
+- );
+-
+-TRACE_EVENT(xen_mmu_flush_tlb,
+- TP_PROTO(int x),
+- TP_ARGS(x),
+- TP_STRUCT__entry(__array(char, x, 0)),
+- TP_fast_assign((void)x),
+- TP_printk("%s", "")
+- );
+-
+ TRACE_EVENT(xen_mmu_flush_tlb_single,
+ TP_PROTO(unsigned long addr),
+ TP_ARGS(addr),
--- /dev/null
+From 1e180f167d4e413afccbbb4a421b48b2de832549 Mon Sep 17 00:00:00 2001
+From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
+Date: Mon, 30 Apr 2018 16:17:19 -0600
+Subject: usbip: usbip_host: delete device from busid_table after rebind
+
+From: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+
+commit 1e180f167d4e413afccbbb4a421b48b2de832549 upstream.
+
+The device is left in the busid_table after unbind and rebind. Rebind
+initiates a USB bus scan and the original driver claims the device.
+After the rescan the device should be deleted from the busid_table as
+it no longer belongs to usbip_host.
+
+Fix it to delete the device after device_attach() succeeds.
+
+Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -201,6 +201,9 @@ static ssize_t rebind_store(struct devic
+ if (!bid)
+ return -ENODEV;
+
++ /* mark the device for deletion so probe ignores it during rescan */
++ bid->status = STUB_BUSID_OTHER;
++
+ /* device_attach() callers should hold parent lock for USB */
+ if (bid->udev->dev.parent)
+ device_lock(bid->udev->dev.parent);
+@@ -212,6 +215,9 @@ static ssize_t rebind_store(struct devic
+ return ret;
+ }
+
++ /* delete device from busid_table */
++ del_match_busid((char *) buf);
++
+ return count;
+ }
+
--- /dev/null
+From c171654caa875919be3c533d3518da8be5be966e Mon Sep 17 00:00:00 2001
+From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
+Date: Tue, 15 May 2018 17:57:23 -0600
+Subject: usbip: usbip_host: fix bad unlock balance during stub_probe()
+
+From: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+
+commit c171654caa875919be3c533d3518da8be5be966e upstream.
+
+stub_probe() calls put_busid_priv() in an error path when the device isn't
+found in the busid_table. Fix it by making put_busid_priv() safe to be
+called with a null struct bus_id_priv pointer.
+
+This problem happens when "usbip bind" is run without the usbip_host
+driver loaded and modprobe is run afterwards. The first failed bind
+attempt unbinds the device from the original driver; when usbip_host is
+then modprobed, stub_probe() runs, doesn't find the device in its busid
+table, and calls put_busid_priv() with a null bus_id_priv pointer.
+
+usbip-host 3-10.2: 3-10.2 is not in match_busid table... skip!
+
+[ 367.359679] =====================================
+[ 367.359681] WARNING: bad unlock balance detected!
+[ 367.359683] 4.17.0-rc4+ #5 Not tainted
+[ 367.359685] -------------------------------------
+[ 367.359688] modprobe/2768 is trying to release lock (
+[ 367.359689]
+==================================================================
+[ 367.359696] BUG: KASAN: null-ptr-deref in print_unlock_imbalance_bug+0x99/0x110
+[ 367.359699] Read of size 8 at addr 0000000000000058 by task modprobe/2768
+
+[ 367.359705] CPU: 4 PID: 2768 Comm: modprobe Not tainted 4.17.0-rc4+ #5
+
+Fixes: 22076557b07c ("usbip: usbip_host: fix NULL-ptr deref and use-after-free errors") in usb-linus
+Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -96,7 +96,8 @@ struct bus_id_priv *get_busid_priv(const
+
+ void put_busid_priv(struct bus_id_priv *bid)
+ {
+- spin_unlock(&bid->busid_lock);
++ if (bid)
++ spin_unlock(&bid->busid_lock);
+ }
+
+ static int add_match_busid(char *busid)
--- /dev/null
+From 22076557b07c12086eeb16b8ce2b0b735f7a27e7 Mon Sep 17 00:00:00 2001
+From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
+Date: Mon, 14 May 2018 20:49:58 -0600
+Subject: usbip: usbip_host: fix NULL-ptr deref and use-after-free errors
+
+From: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+
+commit 22076557b07c12086eeb16b8ce2b0b735f7a27e7 upstream.
+
+usbip_host updates the device status without holding a lock in the stub
+probe, disconnect and rebind code paths. When multiple requests to import
+a device are received, these unprotected code paths step all over each
+other and the driver fails with NULL-ptr deref and use-after-free errors.
+
+The driver uses a table lock to protect the busid array for adding and
+deleting busids to the table. However, the probe, disconnect and rebind
+paths get the busid table entry and update the status without holding
+the busid table lock. Add a new finer grain lock to protect the busid
+entry. This new lock will be held to search and update the busid entry
+fields from get_busid_idx(), add_match_busid() and del_match_busid().
+
+match_busid_show() does the same to access the busid entry fields.
+
+get_busid_priv() is changed to return the pointer to the busid entry while
+holding the busid lock. stub_probe(), stub_disconnect() and
+stub_device_rebind() call put_busid_priv() to release the busid lock
+before returning. This change fixes the unprotected code paths,
+eliminating the race conditions in updating the busid entries.
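+
+The two-level scheme can be sketched in a stand-alone form as follows
+(pthread mutexes instead of spinlocks, and the names are invented for the
+example): the table lock only covers the lookup, while the per-entry lock
+is returned held so the caller's updates stay consistent until it calls
+the put helper.
+
+  #include <pthread.h>
+  #include <string.h>
+
+  #define MAX_BUSID 16
+
+  struct bus_id_priv {
+          char name[32];
+          int status;
+          pthread_mutex_t lock;           /* new: per-entry lock */
+  };
+
+  static struct bus_id_priv busid_table[MAX_BUSID];
+  static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  /* returns with the entry lock held; caller must call put_busid_priv() */
+  static struct bus_id_priv *get_busid_priv(const char *busid)
+  {
+          struct bus_id_priv *bid = NULL;
+          int i;
+
+          pthread_mutex_lock(&table_lock); /* protects the table itself */
+          for (i = 0; i < MAX_BUSID; i++) {
+                  if (strcmp(busid_table[i].name, busid) == 0) {
+                          bid = &busid_table[i];
+                          pthread_mutex_lock(&bid->lock);
+                          break;
+                  }
+          }
+          pthread_mutex_unlock(&table_lock);
+          return bid;
+  }
+
+  static void put_busid_priv(struct bus_id_priv *bid)
+  {
+          if (bid)
+                  pthread_mutex_unlock(&bid->lock);
+  }
+
+  int main(void)
+  {
+          struct bus_id_priv *bid;
+          int i;
+
+          for (i = 0; i < MAX_BUSID; i++)
+                  pthread_mutex_init(&busid_table[i].lock, NULL);
+          strcpy(busid_table[0].name, "1-1");
+
+          bid = get_busid_priv("1-1");    /* lookup, entry lock now held */
+          if (bid)
+                  bid->status = 1;        /* update is race-free */
+          put_busid_priv(bid);            /* drop the entry lock */
+          return 0;
+  }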
+
+Reported-by: Jakub Jirasek
+Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub.h | 2 ++
+ drivers/usb/usbip/stub_dev.c | 33 +++++++++++++++++++++++----------
+ drivers/usb/usbip/stub_main.c | 40 +++++++++++++++++++++++++++++++++++-----
+ 3 files changed, 60 insertions(+), 15 deletions(-)
+
+--- a/drivers/usb/usbip/stub.h
++++ b/drivers/usb/usbip/stub.h
+@@ -88,6 +88,7 @@ struct bus_id_priv {
+ struct stub_device *sdev;
+ struct usb_device *udev;
+ char shutdown_busid;
++ spinlock_t busid_lock;
+ };
+
+ /* stub_priv is allocated from stub_priv_cache */
+@@ -98,6 +99,7 @@ extern struct usb_device_driver stub_dri
+
+ /* stub_main.c */
+ struct bus_id_priv *get_busid_priv(const char *busid);
++void put_busid_priv(struct bus_id_priv *bid);
+ int del_match_busid(char *busid);
+ void stub_device_cleanup_urbs(struct stub_device *sdev);
+
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -314,7 +314,7 @@ static int stub_probe(struct usb_device
+ struct stub_device *sdev = NULL;
+ const char *udev_busid = dev_name(&udev->dev);
+ struct bus_id_priv *busid_priv;
+- int rc;
++ int rc = 0;
+
+ dev_dbg(&udev->dev, "Enter probe\n");
+
+@@ -331,13 +331,15 @@ static int stub_probe(struct usb_device
+ * other matched drivers by the driver core.
+ * See driver_probe_device() in driver/base/dd.c
+ */
+- return -ENODEV;
++ rc = -ENODEV;
++ goto call_put_busid_priv;
+ }
+
+ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
+ dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
+ udev_busid);
+- return -ENODEV;
++ rc = -ENODEV;
++ goto call_put_busid_priv;
+ }
+
+ if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
+@@ -345,13 +347,16 @@ static int stub_probe(struct usb_device
+ "%s is attached on vhci_hcd... skip!\n",
+ udev_busid);
+
+- return -ENODEV;
++ rc = -ENODEV;
++ goto call_put_busid_priv;
+ }
+
+ /* ok, this is my device */
+ sdev = stub_device_alloc(udev);
+- if (!sdev)
+- return -ENOMEM;
++ if (!sdev) {
++ rc = -ENOMEM;
++ goto call_put_busid_priv;
++ }
+
+ dev_info(&udev->dev,
+ "usbip-host: register new device (bus %u dev %u)\n",
+@@ -383,7 +388,9 @@ static int stub_probe(struct usb_device
+ }
+ busid_priv->status = STUB_BUSID_ALLOC;
+
+- return 0;
++ rc = 0;
++ goto call_put_busid_priv;
++
+ err_files:
+ usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+@@ -394,6 +401,9 @@ err_port:
+
+ busid_priv->sdev = NULL;
+ stub_device_free(sdev);
++
++call_put_busid_priv:
++ put_busid_priv(busid_priv);
+ return rc;
+ }
+
+@@ -432,7 +442,7 @@ static void stub_disconnect(struct usb_d
+ /* get stub_device */
+ if (!sdev) {
+ dev_err(&udev->dev, "could not get device");
+- return;
++ goto call_put_busid_priv;
+ }
+
+ dev_set_drvdata(&udev->dev, NULL);
+@@ -447,12 +457,12 @@ static void stub_disconnect(struct usb_d
+ (struct usb_dev_state *) udev);
+ if (rc) {
+ dev_dbg(&udev->dev, "unable to release port\n");
+- return;
++ goto call_put_busid_priv;
+ }
+
+ /* If usb reset is called from event handler */
+ if (busid_priv->sdev->ud.eh == current)
+- return;
++ goto call_put_busid_priv;
+
+ /* shutdown the current connection */
+ shutdown_busid(busid_priv);
+@@ -465,6 +475,9 @@ static void stub_disconnect(struct usb_d
+
+ if (busid_priv->status == STUB_BUSID_ALLOC)
+ busid_priv->status = STUB_BUSID_ADDED;
++
++call_put_busid_priv:
++ put_busid_priv(busid_priv);
+ }
+
+ #ifdef CONFIG_PM
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -40,6 +40,8 @@ static spinlock_t busid_table_lock;
+
+ static void init_busid_table(void)
+ {
++ int i;
++
+ /*
+ * This also sets the bus_table[i].status to
+ * STUB_BUSID_OTHER, which is 0.
+@@ -47,6 +49,9 @@ static void init_busid_table(void)
+ memset(busid_table, 0, sizeof(busid_table));
+
+ spin_lock_init(&busid_table_lock);
++
++ for (i = 0; i < MAX_BUSID; i++)
++ spin_lock_init(&busid_table[i].busid_lock);
+ }
+
+ /*
+@@ -58,15 +63,20 @@ static int get_busid_idx(const char *bus
+ int i;
+ int idx = -1;
+
+- for (i = 0; i < MAX_BUSID; i++)
++ for (i = 0; i < MAX_BUSID; i++) {
++ spin_lock(&busid_table[i].busid_lock);
+ if (busid_table[i].name[0])
+ if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
+ idx = i;
++ spin_unlock(&busid_table[i].busid_lock);
+ break;
+ }
++ spin_unlock(&busid_table[i].busid_lock);
++ }
+ return idx;
+ }
+
++/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
+ struct bus_id_priv *get_busid_priv(const char *busid)
+ {
+ int idx;
+@@ -74,13 +84,21 @@ struct bus_id_priv *get_busid_priv(const
+
+ spin_lock(&busid_table_lock);
+ idx = get_busid_idx(busid);
+- if (idx >= 0)
++ if (idx >= 0) {
+ bid = &(busid_table[idx]);
++ /* get busid_lock before returning */
++ spin_lock(&bid->busid_lock);
++ }
+ spin_unlock(&busid_table_lock);
+
+ return bid;
+ }
+
++void put_busid_priv(struct bus_id_priv *bid)
++{
++ spin_unlock(&bid->busid_lock);
++}
++
+ static int add_match_busid(char *busid)
+ {
+ int i;
+@@ -93,15 +111,19 @@ static int add_match_busid(char *busid)
+ goto out;
+ }
+
+- for (i = 0; i < MAX_BUSID; i++)
++ for (i = 0; i < MAX_BUSID; i++) {
++ spin_lock(&busid_table[i].busid_lock);
+ if (!busid_table[i].name[0]) {
+ strlcpy(busid_table[i].name, busid, BUSID_SIZE);
+ if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
+ (busid_table[i].status != STUB_BUSID_REMOV))
+ busid_table[i].status = STUB_BUSID_ADDED;
+ ret = 0;
++ spin_unlock(&busid_table[i].busid_lock);
+ break;
+ }
++ spin_unlock(&busid_table[i].busid_lock);
++ }
+
+ out:
+ spin_unlock(&busid_table_lock);
+@@ -122,6 +144,8 @@ int del_match_busid(char *busid)
+ /* found */
+ ret = 0;
+
++ spin_lock(&busid_table[idx].busid_lock);
++
+ if (busid_table[idx].status == STUB_BUSID_OTHER)
+ memset(busid_table[idx].name, 0, BUSID_SIZE);
+
+@@ -129,6 +153,7 @@ int del_match_busid(char *busid)
+ (busid_table[idx].status != STUB_BUSID_ADDED))
+ busid_table[idx].status = STUB_BUSID_REMOV;
+
++ spin_unlock(&busid_table[idx].busid_lock);
+ out:
+ spin_unlock(&busid_table_lock);
+
+@@ -141,9 +166,12 @@ static ssize_t show_match_busid(struct d
+ char *out = buf;
+
+ spin_lock(&busid_table_lock);
+- for (i = 0; i < MAX_BUSID; i++)
++ for (i = 0; i < MAX_BUSID; i++) {
++ spin_lock(&busid_table[i].busid_lock);
+ if (busid_table[i].name[0])
+ out += sprintf(out, "%s ", busid_table[i].name);
++ spin_unlock(&busid_table[i].busid_lock);
++ }
+ spin_unlock(&busid_table_lock);
+ out += sprintf(out, "\n");
+
+@@ -219,7 +247,7 @@ static void stub_device_rebind(void)
+ }
+ spin_unlock(&busid_table_lock);
+
+- /* now run rebind */
++ /* now run rebind - no need to hold locks. driver files are removed */
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+@@ -249,6 +277,8 @@ static ssize_t rebind_store(struct devic
+
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
++ /* release the busid lock */
++ put_busid_priv(bid);
+
+ ret = do_rebind((char *) buf, bid);
+ if (ret < 0)
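
For reference, the calling convention these hunks establish: get_busid_priv()
now returns with the entry's busid_lock held, and every caller (including the
error paths, via the call_put_busid_priv label above) must release it with
put_busid_priv(). A minimal sketch of a caller, with a hypothetical function
name, assuming the helpers shown above:

    /*
     * Illustrative only: pairing get_busid_priv() with put_busid_priv().
     * get_busid_priv() returns NULL without holding any lock, so only a
     * non-NULL result needs to be released.
     */
    static int example_claim(struct usb_device *udev)
    {
            const char *udev_busid = dev_name(&udev->dev);
            struct bus_id_priv *busid_priv;
            int rc = 0;

            busid_priv = get_busid_priv(udev_busid); /* busid_lock now held */
            if (!busid_priv)
                    return -ENODEV;

            /* ... work on busid_priv while the per-entry lock is held ... */

            put_busid_priv(busid_priv); /* release before returning */
            return rc;
    }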
--- /dev/null
+From 28b68acc4a88dcf91fd1dcf2577371dc9bf574cc Mon Sep 17 00:00:00 2001
+From: Shuah Khan <shuahkh@osg.samsung.com>
+Date: Wed, 11 Apr 2018 18:13:30 -0600
+Subject: usbip: usbip_host: refine probe and disconnect debug msgs to be useful
+
+From: Shuah Khan <shuahkh@osg.samsung.com>
+
+commit 28b68acc4a88dcf91fd1dcf2577371dc9bf574cc upstream.
+
+Refine the probe and disconnect debug messages so they are useful and
+state what is in progress.
+
+Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub_dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -316,7 +316,7 @@ static int stub_probe(struct usb_device
+ struct bus_id_priv *busid_priv;
+ int rc;
+
+- dev_dbg(&udev->dev, "Enter\n");
++ dev_dbg(&udev->dev, "Enter probe\n");
+
+ /* check we should claim or not by busid_table */
+ busid_priv = get_busid_priv(udev_busid);
+@@ -419,7 +419,7 @@ static void stub_disconnect(struct usb_d
+ struct bus_id_priv *busid_priv;
+ int rc;
+
+- dev_dbg(&udev->dev, "Enter\n");
++ dev_dbg(&udev->dev, "Enter disconnect\n");
+
+ busid_priv = get_busid_priv(udev_busid);
+ if (!busid_priv) {
--- /dev/null
+From 7510df3f29d44685bab7b1918b61a8ccd57126a9 Mon Sep 17 00:00:00 2001
+From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
+Date: Mon, 30 Apr 2018 16:17:20 -0600
+Subject: usbip: usbip_host: run rebind from exit when module is removed
+
+From: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+
+commit 7510df3f29d44685bab7b1918b61a8ccd57126a9 upstream.
+
+After the usbip_host module is removed, the devices it releases are left
+without a driver. For example, if a keyboard or a mass storage device is
+bound to usbip_host when the module is removed, that device is no longer
+bound to any driver.
+
+Fix this by running device_attach() from the module exit routine to
+restore the devices to their original drivers. This includes cleanup
+changes and moving the device_attach() code to a common routine called
+from rebind_store() and usbip_host_exit().
+
+Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub_dev.c | 6 ----
+ drivers/usb/usbip/stub_main.c | 60 +++++++++++++++++++++++++++++++++++-------
+ 2 files changed, 52 insertions(+), 14 deletions(-)
+
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -463,12 +463,8 @@ static void stub_disconnect(struct usb_d
+ busid_priv->sdev = NULL;
+ stub_device_free(sdev);
+
+- if (busid_priv->status == STUB_BUSID_ALLOC) {
++ if (busid_priv->status == STUB_BUSID_ALLOC)
+ busid_priv->status = STUB_BUSID_ADDED;
+- } else {
+- busid_priv->status = STUB_BUSID_OTHER;
+- del_match_busid((char *)udev_busid);
+- }
+ }
+
+ #ifdef CONFIG_PM
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -28,6 +28,7 @@
+ #define DRIVER_DESC "USB/IP Host Driver"
+
+ struct kmem_cache *stub_priv_cache;
++
+ /*
+ * busid_tables defines matching busids that usbip can grab. A user can change
+ * dynamically what device is locally used and what device is exported to a
+@@ -184,6 +185,51 @@ static ssize_t store_match_busid(struct
+ static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
+ store_match_busid);
+
++static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
++{
++ int ret;
++
++ /* device_attach() callers should hold parent lock for USB */
++ if (busid_priv->udev->dev.parent)
++ device_lock(busid_priv->udev->dev.parent);
++ ret = device_attach(&busid_priv->udev->dev);
++ if (busid_priv->udev->dev.parent)
++ device_unlock(busid_priv->udev->dev.parent);
++ if (ret < 0) {
++ dev_err(&busid_priv->udev->dev, "rebind failed\n");
++ return ret;
++ }
++ return 0;
++}
++
++static void stub_device_rebind(void)
++{
++#if IS_MODULE(CONFIG_USBIP_HOST)
++ struct bus_id_priv *busid_priv;
++ int i;
++
++ /* update status to STUB_BUSID_OTHER so probe ignores the device */
++ spin_lock(&busid_table_lock);
++ for (i = 0; i < MAX_BUSID; i++) {
++ if (busid_table[i].name[0] &&
++ busid_table[i].shutdown_busid) {
++ busid_priv = &(busid_table[i]);
++ busid_priv->status = STUB_BUSID_OTHER;
++ }
++ }
++ spin_unlock(&busid_table_lock);
++
++ /* now run rebind */
++ for (i = 0; i < MAX_BUSID; i++) {
++ if (busid_table[i].name[0] &&
++ busid_table[i].shutdown_busid) {
++ busid_priv = &(busid_table[i]);
++ do_rebind(busid_table[i].name, busid_priv);
++ }
++ }
++#endif
++}
++
+ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ size_t count)
+ {
+@@ -204,16 +250,9 @@ static ssize_t rebind_store(struct devic
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
+
+- /* device_attach() callers should hold parent lock for USB */
+- if (bid->udev->dev.parent)
+- device_lock(bid->udev->dev.parent);
+- ret = device_attach(&bid->udev->dev);
+- if (bid->udev->dev.parent)
+- device_unlock(bid->udev->dev.parent);
+- if (ret < 0) {
+- dev_err(&bid->udev->dev, "rebind failed\n");
++ ret = do_rebind((char *) buf, bid);
++ if (ret < 0)
+ return ret;
+- }
+
+ /* delete device from busid_table */
+ del_match_busid((char *) buf);
+@@ -339,6 +378,9 @@ static void __exit usbip_host_exit(void)
+ */
+ usb_deregister_device_driver(&stub_driver);
+
++ /* initiate scan to attach devices */
++ stub_device_rebind();
++
+ kmem_cache_destroy(stub_priv_cache);
+ }
+
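
The pattern captured by do_rebind() above is the USB rule that device_attach()
callers must hold the parent device lock. Shown in isolation as a rough sketch
(the function name here is illustrative, not from the tree):

    /*
     * Sketch: reattach a USB device to whatever driver matches it now.
     * The parent may be NULL, so the lock is taken conditionally.
     */
    static int reattach_usb_device(struct usb_device *udev)
    {
            int ret;

            if (udev->dev.parent)
                    device_lock(udev->dev.parent);
            ret = device_attach(&udev->dev);
            if (udev->dev.parent)
                    device_unlock(udev->dev.parent);

            /* device_attach() returns 1 on bind, 0 on no match, <0 on error */
            return ret < 0 ? ret : 0;
    }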
--- /dev/null
+From 316bda0e6cc5f36f94b4af8bded16d642c90ad75 Mon Sep 17 00:00:00 2001
+From: Vladimir Davydov <vdavydov@virtuozzo.com>
+Date: Thu, 14 Jan 2016 15:19:38 -0800
+Subject: vmscan: do not force-scan file lru if its absolute size is small
+
+From: Vladimir Davydov <vdavydov@virtuozzo.com>
+
+commit 316bda0e6cc5f36f94b4af8bded16d642c90ad75 upstream.
+
+We assume there is enough inactive page cache if the size of the inactive
+file lru is greater than the size of the active file lru, in which case we
+force-scan the file lru and ignore anonymous pages. While this logic works
+fine when there are plenty of page cache pages, it fails if the size of the
+file lru is small (several MB): in this case (lru_size >> prio) will be 0
+for normal scan priorities. As a result, if the inactive file lru happens
+to be larger than the active file lru, anonymous pages of a cgroup will
+never get evicted unless the system experiences severe memory pressure,
+even if there are gigabytes of unused anonymous memory there. This is
+unfair to other cgroups, whose workloads might be page cache oriented.
+
+This patch fixes the problem by refining the "enough inactive page cache"
+check: it now requires not only that the inactive lru size is greater than
+the active lru size, but also that we will actually scan something from the
+cgroup at the current scan priority. If these conditions do not hold, we
+proceed to SCAN_FRACT as usual.
+
+Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2057,10 +2057,16 @@ static void get_scan_count(struct lruvec
+ }
+
+ /*
+- * There is enough inactive page cache, do not reclaim
+- * anything from the anonymous working set right now.
++ * If there is enough inactive page cache, i.e. if the size of the
++ * inactive list is greater than that of the active list *and* the
++ * inactive list actually has some pages to scan on this priority, we
++ * do not reclaim anything from the anonymous working set right now.
++ * Without the second condition we could end up never scanning an
++ * lruvec even if it has plenty of old anonymous pages unless the
++ * system is under heavy pressure.
+ */
+- if (!inactive_file_is_low(lruvec)) {
++ if (!inactive_file_is_low(lruvec) &&
++ get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
+ scan_balance = SCAN_FILE;
+ goto out;
+ }
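
The shift in the new condition is what makes the check priority-aware: at scan
priority prio, reclaim looks at roughly lru_size >> prio pages, so a file lru
of only a few megabytes contributes nothing at the default priority. A
standalone toy calculation (illustrative values, not kernel code):

    /* Toy arithmetic behind the (lru_size >> prio) check: 512 pages
     * (~2 MB with 4 KB pages) shifted by DEF_PRIORITY (12) gives 0,
     * so the new condition falls through to SCAN_FRACT instead of
     * forcing SCAN_FILE and starving the anonymous lru.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long inactive_file_pages = 512; /* ~2 MB of page cache */
            int priority = 12;                       /* DEF_PRIORITY */

            printf("pages to scan at priority %d: %lu\n",
                   priority, inactive_file_pages >> priority);
            return 0;
    }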