fixes for 5.4
author     Sasha Levin <sashal@kernel.org>
           Mon, 3 Feb 2020 22:42:38 +0000 (17:42 -0500)
committer  Sasha Levin <sashal@kernel.org>
           Mon, 3 Feb 2020 22:42:38 +0000 (17:42 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.4/asoc-topology-fix-soc_tplg_fe_link_create-link-dobj-.patch [new file with mode: 0644]
queue-5.4/cpuidle-teo-avoid-using-early-hits-incorrectly.patch [new file with mode: 0644]
queue-5.4/dm-thin-fix-use-after-free-in-metadata_pre_commit_ca.patch [new file with mode: 0644]
queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-.patch [new file with mode: 0644]
queue-5.4/flow_dissector-fix-to-use-new-variables-for-port-ran.patch [new file with mode: 0644]
queue-5.4/mm-migrate.c-also-overwrite-error-when-it-is-bigger-.patch [new file with mode: 0644]
queue-5.4/perf-report-fix-no-libunwind-compiled-warning-break-.patch [new file with mode: 0644]
queue-5.4/revert-rsi-fix-potential-null-dereference-in-rsi_pro.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/tracing-uprobe-fix-to-make-trace_uprobe_filter-align.patch [new file with mode: 0644]

diff --git a/queue-5.4/asoc-topology-fix-soc_tplg_fe_link_create-link-dobj-.patch b/queue-5.4/asoc-topology-fix-soc_tplg_fe_link_create-link-dobj-.patch
new file mode 100644 (file)
index 0000000..c7e9299
--- /dev/null
@@ -0,0 +1,67 @@
+From 1eb7e6766ba1fc7d272406fb4a9574787902c982 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 20:07:52 +0100
+Subject: ASoC: topology: fix soc_tplg_fe_link_create() - link->dobj
+ initialization order
+
+From: Jaroslav Kysela <perex@perex.cz>
+
+[ Upstream commit 8ce1cbd6ce0b1bda0c980c64fee4c1e1378355f1 ]
+
+The change which added the return value check for the snd_soc_add_dai_link()
+call in soc_tplg_fe_link_create() moved the snd_soc_add_dai_link() call before
+the link->dobj member initialization.
+
+While it does not affect the latest kernels, the old soc-core.c code
+in the stable kernels is affected. The snd_soc_add_dai_link() function uses
+the link->dobj.type member to check if the link structure is valid.
+
+Reorder the link->dobj initialization to make things work again.
+It's harmless for the recent code (and the structure should be properly
+initialized before other calls anyway).
+
+The problem is in stable linux-5.4.y since version 5.4.11 when the
+upstream commit 76d270364932 was applied.
+
+Fixes: 76d270364932 ("ASoC: topology: Check return value for snd_soc_add_dai_link()")
+Cc: Dragos Tarcatu <dragos_tarcatu@mentor.com>
+Cc: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Cc: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jaroslav Kysela <perex@perex.cz>
+Link: https://lore.kernel.org/r/20200122190752.3081016-1-perex@perex.cz
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-topology.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 7ccbca47240d7..fef01e1dd15c5 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1891,6 +1891,10 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
+       link->num_codecs = 1;
+       link->num_platforms = 1;
++      link->dobj.index = tplg->index;
++      link->dobj.ops = tplg->ops;
++      link->dobj.type = SND_SOC_DOBJ_DAI_LINK;
++
+       if (strlen(pcm->pcm_name)) {
+               link->name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+               link->stream_name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+@@ -1927,9 +1931,6 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
+               goto err;
+       }
+-      link->dobj.index = tplg->index;
+-      link->dobj.ops = tplg->ops;
+-      link->dobj.type = SND_SOC_DOBJ_DAI_LINK;
+       list_add(&link->dobj.list, &tplg->comp->dobj_list);
+       return 0;
+-- 
+2.20.1
+
diff --git a/queue-5.4/cpuidle-teo-avoid-using-early-hits-incorrectly.patch b/queue-5.4/cpuidle-teo-avoid-using-early-hits-incorrectly.patch
new file mode 100644 (file)
index 0000000..99ddf68
--- /dev/null
@@ -0,0 +1,94 @@
+From 2f5ef1ce6eb968f01df8d4598a1a72d675871306 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Nov 2019 01:03:24 +0100
+Subject: cpuidle: teo: Avoid using "early hits" incorrectly
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 63f202e5edf161c2ccffa286a9a701e995427b15 ]
+
+If the current state with the maximum "early hits" metric in
+teo_select() is also the one "matching" the expected idle duration,
+it will be used as the candidate one for selection even if its
+"misses" metric is greater than its "hits" metric, which is not
+correct.
+
+In that case, the candidate state should be shallower than the
+current one and its "early hits" metric should be the maximum
+among the idle states shallower than the current one.
+
+To make that happen, modify teo_select() to save the index of
+the state whose "early hits" metric is the maximum for the
+range of states below the current one and go back to that state
+if it turns out that the current one should be rejected.
+
+Fixes: 159e48560f51 ("cpuidle: teo: Fix "early hits" handling for disabled idle states")
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpuidle/governors/teo.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
+index 703047434ee19..c71773c88890b 100644
+--- a/drivers/cpuidle/governors/teo.c
++++ b/drivers/cpuidle/governors/teo.c
+@@ -234,7 +234,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+       struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+       int latency_req = cpuidle_governor_latency_req(dev->cpu);
+       unsigned int duration_us, hits, misses, early_hits;
+-      int max_early_idx, constraint_idx, idx, i;
++      int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
+       ktime_t delta_tick;
+       if (dev->last_state_idx >= 0) {
+@@ -251,6 +251,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+       misses = 0;
+       early_hits = 0;
+       max_early_idx = -1;
++      prev_max_early_idx = -1;
+       constraint_idx = drv->state_count;
+       idx = -1;
+@@ -303,6 +304,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                        */
+                       if (!(tick_nohz_tick_stopped() &&
+                             drv->states[idx].target_residency < TICK_USEC)) {
++                              prev_max_early_idx = max_early_idx;
+                               early_hits = cpu_data->states[i].early_hits;
+                               max_early_idx = idx;
+                       }
+@@ -329,6 +331,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+               if (early_hits < cpu_data->states[i].early_hits &&
+                   !(tick_nohz_tick_stopped() &&
+                     drv->states[i].target_residency < TICK_USEC)) {
++                      prev_max_early_idx = max_early_idx;
+                       early_hits = cpu_data->states[i].early_hits;
+                       max_early_idx = i;
+               }
+@@ -342,9 +345,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+        * "early hits" metric, but if that cannot be determined, just use the
+        * state selected so far.
+        */
+-      if (hits <= misses && max_early_idx >= 0) {
+-              idx = max_early_idx;
+-              duration_us = drv->states[idx].target_residency;
++      if (hits <= misses) {
++              /*
++               * The current candidate state is not suitable, so take the one
++               * whose "early hits" metric is the maximum for the range of
++               * shallower states.
++               */
++              if (idx == max_early_idx)
++                      max_early_idx = prev_max_early_idx;
++
++              if (max_early_idx >= 0) {
++                      idx = max_early_idx;
++                      duration_us = drv->states[idx].target_residency;
++              }
+       }
+       /*
+-- 
+2.20.1
+
diff --git a/queue-5.4/dm-thin-fix-use-after-free-in-metadata_pre_commit_ca.patch b/queue-5.4/dm-thin-fix-use-after-free-in-metadata_pre_commit_ca.patch
new file mode 100644 (file)
index 0000000..d8b2600
--- /dev/null
@@ -0,0 +1,70 @@
+From b15220ca2557345b348b6ad5064115dbd13671a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2020 12:29:04 -0500
+Subject: dm thin: fix use-after-free in metadata_pre_commit_callback
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+[ Upstream commit a4a8d286586d4b28c8517a51db8d86954aadc74b ]
+
+dm-thin uses struct pool to hold the state of the pool. There may be
+multiple pool_c's pointing to a given pool; each pool_c represents a
+loaded target. pool_c's may be created and destroyed arbitrarily and the
+pool contains a reference count of pool_c's pointing to it.
+
+Since commit 694cfe7f31db3 ("dm thin: Flush data device before
+committing metadata") a pointer to pool_c is passed to
+dm_pool_register_pre_commit_callback and this function stores it in
+pmd->pre_commit_context. If this pool_c is freed, but pool is not
+(because there is another pool_c referencing it), we end up in a
+situation where pmd->pre_commit_context structure points to freed
+pool_c. It causes a crash in metadata_pre_commit_callback.
+
+Fix this by moving the dm_pool_register_pre_commit_callback() from
+pool_ctr() to pool_preresume(). This way the in-core thin-pool metadata
+is only ever armed with callback data whose lifetime matches the
+active thin-pool target.
+
+It should be noted that this fix preserves the ability to load a
+thin-pool table that uses a different data block device (that contains
+the same data) -- though it is unclear if that capability is still
+useful and/or needed.
+
+Fixes: 694cfe7f31db3 ("dm thin: Flush data device before committing metadata")
+Cc: stable@vger.kernel.org
+Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-thin.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 1696bfd23ad12..69201bdf7f4c6 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3420,10 +3420,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+       if (r)
+               goto out_flags_changed;
+-      dm_pool_register_pre_commit_callback(pt->pool->pmd,
+-                                           metadata_pre_commit_callback,
+-                                           pt);
+-
+       pt->callbacks.congested_fn = pool_is_congested;
+       dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+@@ -3587,6 +3583,9 @@ static int pool_preresume(struct dm_target *ti)
+       if (r)
+               return r;
++      dm_pool_register_pre_commit_callback(pool->pmd,
++                                           metadata_pre_commit_callback, pt);
++
+       r = maybe_resize_data_dev(ti, &need_commit1);
+       if (r)
+               return r;
+-- 
+2.20.1
+
diff --git a/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-.patch b/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-.patch
new file mode 100644 (file)
index 0000000..9f7bd60
--- /dev/null
@@ -0,0 +1,131 @@
+From fa46024e864ac17a8ac8ec7dcfb126d10419ffcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2019 17:38:58 +0100
+Subject: dma-direct: exclude dma_direct_map_resource from the min_low_pfn
+ check
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 ]
+
+The valid memory address check in dma_capable only makes sense when mapping
+normal memory, not when using dma_map_resource to map a device resource.
+Add a new boolean argument to dma_capable to exclude that check for the
+dma_map_resource case.
+
+Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
+Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/amd_gart_64.c | 4 ++--
+ drivers/xen/swiotlb-xen.c     | 4 ++--
+ include/linux/dma-direct.h    | 5 +++--
+ kernel/dma/direct.c           | 4 ++--
+ kernel/dma/swiotlb.c          | 2 +-
+ 5 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
+index a6ac3712db8bc..5cfab41e8509c 100644
+--- a/arch/x86/kernel/amd_gart_64.c
++++ b/arch/x86/kernel/amd_gart_64.c
+@@ -185,13 +185,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
+ static inline int
+ need_iommu(struct device *dev, unsigned long addr, size_t size)
+ {
+-      return force_iommu || !dma_capable(dev, addr, size);
++      return force_iommu || !dma_capable(dev, addr, size, true);
+ }
+ static inline int
+ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
+ {
+-      return !dma_capable(dev, addr, size);
++      return !dma_capable(dev, addr, size, true);
+ }
+ /* Map a single continuous physical area into the IOMMU.
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index bd3a10dfac157..8123cccec40dd 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+        * we can safely return the device addr and not worry about bounce
+        * buffering it.
+        */
+-      if (dma_capable(dev, dev_addr, size) &&
++      if (dma_capable(dev, dev_addr, size, true) &&
+           !range_straddles_page_boundary(phys, size) &&
+               !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
+               swiotlb_force != SWIOTLB_FORCE)
+@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+       /*
+        * Ensure that the address returned is DMA'ble
+        */
+-      if (unlikely(!dma_capable(dev, dev_addr, size))) {
++      if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
+               swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+                               attrs | DMA_ATTR_SKIP_CPU_SYNC);
+               return DMA_MAPPING_ERROR;
+diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
+index 6a18a97b76a87..076c2ecb7fa06 100644
+--- a/include/linux/dma-direct.h
++++ b/include/linux/dma-direct.h
+@@ -25,14 +25,15 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+       return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+ }
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
++              bool is_ram)
+ {
+       dma_addr_t end = addr + size - 1;
+       if (!dev->dma_mask)
+               return false;
+-      if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
++      if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+               return false;
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 867fd72cb2605..c4bf29361f75e 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -326,7 +326,7 @@ static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
+               size_t size)
+ {
+       return swiotlb_force != SWIOTLB_FORCE &&
+-              dma_capable(dev, dma_addr, size);
++              dma_capable(dev, dma_addr, size, true);
+ }
+ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+@@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ {
+       dma_addr_t dma_addr = paddr;
+-      if (unlikely(!dma_capable(dev, dma_addr, size))) {
++      if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
+               report_addr(dev, dma_addr, size);
+               return DMA_MAPPING_ERROR;
+       }
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 673a2cdb2656b..9280d6f8271ed 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+       /* Ensure that the address returned is DMA'ble */
+       *dma_addr = __phys_to_dma(dev, *phys);
+-      if (unlikely(!dma_capable(dev, *dma_addr, size))) {
++      if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
+               swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+                       attrs | DMA_ATTR_SKIP_CPU_SYNC);
+               return false;
+-- 
+2.20.1
+
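The new boolean is a policy switch: the min_low_pfn lower bound only makes sense when the mapping target is RAM, so dma_map_resource() passes false and skips it. Below is a standalone userspace sketch of that shape, with an assumed 32-bit mask and RAM start address; it simplifies away dev->dma_mask, bus_dma_limit and phys_to_dma(), so it is an illustration rather than the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_MASK      0xffffffffULL  /* assumed 32-bit device DMA mask */
#define RAM_START_DMA 0x100000ULL    /* stand-in for phys_to_dma(dev, PFN_PHYS(min_low_pfn)) */

/* mirrors the shape of the patched dma_capable(): is_ram gates the low-address check */
static bool dma_capable(uint64_t addr, uint64_t size, bool is_ram)
{
	uint64_t end = addr + size - 1;

	if (end > DMA_MASK)
		return false;
	/* only RAM addresses below the first usable page are bogus */
	if (is_ram && addr < RAM_START_DMA)
		return false;
	return true;
}

int main(void)
{
	/* an MMIO resource below the first RAM page maps fine ... */
	printf("resource @0x9000 -> %d\n", dma_capable(0x9000, 0x1000, false));
	/* ... while the same range treated as RAM is rejected */
	printf("ram      @0x9000 -> %d\n", dma_capable(0x9000, 0x1000, true));
	return 0;
}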
diff --git a/queue-5.4/flow_dissector-fix-to-use-new-variables-for-port-ran.patch b/queue-5.4/flow_dissector-fix-to-use-new-variables-for-port-ran.patch
new file mode 100644 (file)
index 0000000..4dfd68f
--- /dev/null
@@ -0,0 +1,62 @@
+From d60069e724c6521618064131ddbd2d8cce6fce60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jan 2020 16:05:32 +0900
+Subject: flow_dissector: Fix to use new variables for port ranges in bpf hook
+
+From: Yoshiki Komachi <komachi.yoshiki@gmail.com>
+
+[ Upstream commit 59fb9b62fb6c929a756563152a89f39b07cf8893 ]
+
+This patch applies the new flag (FLOW_DISSECTOR_KEY_PORTS_RANGE) and
+field (tp_range) to the BPF flow dissector to generate appropriate flow
+keys when packets are classified by the specified port ranges.
+
+Fixes: 8ffb055beae5 ("cls_flower: Fix the behavior using port ranges with hw-offload")
+Signed-off-by: Yoshiki Komachi <komachi.yoshiki@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Petar Penkov <ppenkov@google.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20200117070533.402240-2-komachi.yoshiki@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/flow_dissector.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 1292f3f0f93f7..96b2566c298dd 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -758,10 +758,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+                                    struct flow_dissector *flow_dissector,
+                                    void *target_container)
+ {
++      struct flow_dissector_key_ports *key_ports = NULL;
+       struct flow_dissector_key_control *key_control;
+       struct flow_dissector_key_basic *key_basic;
+       struct flow_dissector_key_addrs *key_addrs;
+-      struct flow_dissector_key_ports *key_ports;
+       struct flow_dissector_key_tags *key_tags;
+       key_control = skb_flow_dissector_target(flow_dissector,
+@@ -800,10 +800,17 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+               key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       }
+-      if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
++      if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
+               key_ports = skb_flow_dissector_target(flow_dissector,
+                                                     FLOW_DISSECTOR_KEY_PORTS,
+                                                     target_container);
++      else if (dissector_uses_key(flow_dissector,
++                                  FLOW_DISSECTOR_KEY_PORTS_RANGE))
++              key_ports = skb_flow_dissector_target(flow_dissector,
++                                                    FLOW_DISSECTOR_KEY_PORTS_RANGE,
++                                                    target_container);
++
++      if (key_ports) {
+               key_ports->src = flow_keys->sport;
+               key_ports->dst = flow_keys->dport;
+       }
+-- 
+2.20.1
+
diff --git a/queue-5.4/mm-migrate.c-also-overwrite-error-when-it-is-bigger-.patch b/queue-5.4/mm-migrate.c-also-overwrite-error-when-it-is-bigger-.patch
new file mode 100644 (file)
index 0000000..3948df8
--- /dev/null
@@ -0,0 +1,53 @@
+From 691a9a93ee62ca22a24bfb3148efa25926e6ed2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jan 2020 22:11:14 -0800
+Subject: mm/migrate.c: also overwrite error when it is bigger than zero
+
+From: Wei Yang <richardw.yang@linux.intel.com>
+
+[ Upstream commit dfe9aa23cab7880a794db9eb2d176c00ed064eb6 ]
+
+If we get here after successfully adding a page to the list, err would be 1
+to indicate the page is queued in the list.
+
+Current code has two problems:
+
+  * on success, 0 is not returned
+  * on error, if add_page_for_migration() returns 1 and the following err1
+    from do_move_pages_to_node() is set, err1 is not returned since err
+    is 1
+
+And these behaviors break the user interface.
+
+Link: http://lkml.kernel.org/r/20200119065753.21694-1-richardw.yang@linux.intel.com
+Fixes: e0153fc2c760 ("mm: move_pages: return valid node id in status if the page is already on the target node").
+Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
+Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/migrate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 45d3303e00226..6956627ebf8b3 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1680,7 +1680,7 @@ out_flush:
+       err1 = do_move_pages_to_node(mm, &pagelist, current_node);
+       if (!err1)
+               err1 = store_status(status, start, current_node, i - start);
+-      if (!err)
++      if (err >= 0)
+               err = err1;
+ out:
+       return err;
+-- 
+2.20.1
+
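The subtlety is that err uses a positive value (1) to mean "page queued", so it is not a plain 0/negative error code. Here is a tiny standalone toy, with a hypothetical finish() helper rather than the kernel code, applying the corrected err >= 0 rule; the comments note what the old !err test would have done.

#include <stdio.h>

/* hypothetical stand-in for the tail of the move-pages loop */
static int finish(int err, int err1)
{
	/* old code: if (!err) err = err1;  -- never taken when err == 1 */
	if (err >= 0)          /* new code: also overwrite when err > 0 */
		err = err1;
	return err;
}

int main(void)
{
	/* err == 1 means "queued"; err1 is the result of the final flush */
	printf("queued, flush ok    -> %d (expect 0)\n",   finish(1, 0));
	printf("queued, flush fails -> %d (expect -14)\n", finish(1, -14));
	printf("earlier hard error  -> %d (stays -22)\n",  finish(-22, -5));
	return 0;
}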
diff --git a/queue-5.4/perf-report-fix-no-libunwind-compiled-warning-break-.patch b/queue-5.4/perf-report-fix-no-libunwind-compiled-warning-break-.patch
new file mode 100644 (file)
index 0000000..ef966e6
--- /dev/null
@@ -0,0 +1,58 @@
+From 25c8959a2d5344adc62ae061da34a4f5b03ab44f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2020 03:17:45 +0800
+Subject: perf report: Fix no libunwind compiled warning break s390 issue
+
+From: Jin Yao <yao.jin@linux.intel.com>
+
+[ Upstream commit c3314a74f86dc00827e0945c8e5039fc3aebaa3c ]
+
+Commit 800d3f561659 ("perf report: Add warning when libunwind not
+compiled in") breaks the s390 platform. S390 uses libdw-dwarf-unwind for
+call chain unwinding and had no support for libunwind.
+
+So the warning "Please install libunwind development packages during the
+perf build." caused the confusion even if the call-graph is displayed
+correctly.
+
+This patch adds checking for HAVE_DWARF_SUPPORT, which is set when
+libdw-dwarf-unwind is compiled in.
+
+Fixes: 800d3f561659 ("perf report: Add warning when libunwind not compiled in")
+Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
+Reviewed-by: Thomas Richter <tmricht@linux.ibm.com>
+Tested-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Jin Yao <yao.jin@intel.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200107191745.18415-1-yao.jin@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-report.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index ea3f0745d7ad6..6407dff405d96 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -399,10 +399,10 @@ static int report__setup_sample_type(struct report *rep)
+                               PERF_SAMPLE_BRANCH_ANY))
+               rep->nonany_branch_mode = true;
+-#ifndef HAVE_LIBUNWIND_SUPPORT
++#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
+       if (dwarf_callchain_users) {
+-              ui__warning("Please install libunwind development packages "
+-                          "during the perf build.\n");
++              ui__warning("Please install libunwind or libdw "
++                          "development packages during the perf build.\n");
+       }
+ #endif
+-- 
+2.20.1
+
diff --git a/queue-5.4/revert-rsi-fix-potential-null-dereference-in-rsi_pro.patch b/queue-5.4/revert-rsi-fix-potential-null-dereference-in-rsi_pro.patch
new file mode 100644 (file)
index 0000000..b3020de
--- /dev/null
@@ -0,0 +1,40 @@
+From c295f877fe865ee7d1a9c025325747b0350c5b42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2019 16:44:21 +0200
+Subject: Revert "rsi: fix potential null dereference in rsi_probe()"
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit c5dcf8f0e850a504235a0af51f73d51b6ddc0933 ]
+
+This reverts commit f170d44bc4ec2feae5f6206980e7ae7fbf0432a0.
+
+USB core will never call a USB-driver probe function with a NULL
+device-id pointer.
+
+Reverting before removing the existing checks in order to document this
+and prevent the offending commit from being "autoselected" for stable.
+
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
+index d6cdabef264db..4b9e406b84612 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
+@@ -805,7 +805,7 @@ static int rsi_probe(struct usb_interface *pfunction,
+               adapter->device_model = RSI_DEV_9116;
+       } else {
+               rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
+-                      __func__, id ? id->idProduct : 0x0);
++                      __func__, id->idProduct);
+               goto err1;
+       }
+-- 
+2.20.1
+
diff --git a/queue-5.4/series b/queue-5.4/series
index 345e3d39ea4fffc9671e31f6644d2d2cf5837c08..e7bd55ca354ded36d45f250da8d4281bb1b84692 100644 (file)
--- a/queue-5.4/series
@@ -89,3 +89,12 @@ netfilter-conntrack-sctp-use-distinct-states-for-new.patch
 netfilter-nf_tables_offload-fix-check-the-chain-offl.patch
 net-fix-skb-csum-update-in-inet_proto_csum_replace16.patch
 btrfs-do-not-zero-f_bavail-if-we-have-available-space.patch
+cpuidle-teo-avoid-using-early-hits-incorrectly.patch
+flow_dissector-fix-to-use-new-variables-for-port-ran.patch
+dm-thin-fix-use-after-free-in-metadata_pre_commit_ca.patch
+perf-report-fix-no-libunwind-compiled-warning-break-.patch
+dma-direct-exclude-dma_direct_map_resource-from-the-.patch
+mm-migrate.c-also-overwrite-error-when-it-is-bigger-.patch
+asoc-topology-fix-soc_tplg_fe_link_create-link-dobj-.patch
+revert-rsi-fix-potential-null-dereference-in-rsi_pro.patch
+tracing-uprobe-fix-to-make-trace_uprobe_filter-align.patch
diff --git a/queue-5.4/tracing-uprobe-fix-to-make-trace_uprobe_filter-align.patch b/queue-5.4/tracing-uprobe-fix-to-make-trace_uprobe_filter-align.patch
new file mode 100644 (file)
index 0000000..5653e43
--- /dev/null
@@ -0,0 +1,207 @@
+From 9a898cedc0168fe7658af271a2f1bdf4a8bf71ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 12:23:25 +0900
+Subject: tracing/uprobe: Fix to make trace_uprobe_filter alignment safe
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit b61387cb732cf283d318b2165c44913525fe545f ]
+
+Commit 99c9a923e97a ("tracing/uprobe: Fix double perf_event
+linking on multiprobe uprobe") moved trace_uprobe_filter on
+trace_probe_event. However, since it introduced a flexible
+data structure with char array and type casting, the
+alignment of trace_uprobe_filter can be broken.
+
+This changes the type of the array to the trace_uprobe_filter
+data structure to fix it.
+
+Link: http://lore.kernel.org/r/20200120124022.GA14897@hirez.programming.kicks-ass.net
+Link: http://lkml.kernel.org/r/157966340499.5107.10978352478952144902.stgit@devnote2
+
+Fixes: 99c9a923e97a ("tracing/uprobe: Fix double perf_event linking on multiprobe uprobe")
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_kprobe.c |  2 +-
+ kernel/trace/trace_probe.c  |  9 ++++++---
+ kernel/trace/trace_probe.h  | 10 ++++++++--
+ kernel/trace/trace_uprobe.c | 29 +++++++----------------------
+ 4 files changed, 22 insertions(+), 28 deletions(-)
+
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 3e5f9c7d939cc..3f54dc2f6e1c6 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
+       INIT_HLIST_NODE(&tk->rp.kp.hlist);
+       INIT_LIST_HEAD(&tk->rp.kp.list);
+-      ret = trace_probe_init(&tk->tp, event, group, 0);
++      ret = trace_probe_init(&tk->tp, event, group, false);
+       if (ret < 0)
+               goto error;
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index bba18cf44a30e..9ae87be422f2a 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -984,16 +984,19 @@ void trace_probe_cleanup(struct trace_probe *tp)
+ }
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+-                   const char *group, size_t event_data_size)
++                   const char *group, bool alloc_filter)
+ {
+       struct trace_event_call *call;
++      size_t size = sizeof(struct trace_probe_event);
+       int ret = 0;
+       if (!event || !group)
+               return -EINVAL;
+-      tp->event = kzalloc(sizeof(struct trace_probe_event) + event_data_size,
+-                          GFP_KERNEL);
++      if (alloc_filter)
++              size += sizeof(struct trace_uprobe_filter);
++
++      tp->event = kzalloc(size, GFP_KERNEL);
+       if (!tp->event)
+               return -ENOMEM;
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 03e4e180058d5..a0ff9e200ef6f 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -223,6 +223,12 @@ struct probe_arg {
+       const struct fetch_type *type;  /* Type of this argument */
+ };
++struct trace_uprobe_filter {
++      rwlock_t                rwlock;
++      int                     nr_systemwide;
++      struct list_head        perf_events;
++};
++
+ /* Event call and class holder */
+ struct trace_probe_event {
+       unsigned int                    flags;  /* For TP_FLAG_* */
+@@ -230,7 +236,7 @@ struct trace_probe_event {
+       struct trace_event_call         call;
+       struct list_head                files;
+       struct list_head                probes;
+-      char                            data[0];
++      struct trace_uprobe_filter      filter[0];
+ };
+ struct trace_probe {
+@@ -323,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
+ }
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+-                   const char *group, size_t event_data_size);
++                   const char *group, bool alloc_filter);
+ void trace_probe_cleanup(struct trace_probe *tp);
+ int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
+ void trace_probe_unlink(struct trace_probe *tp);
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index f66e202fec13a..2619bc5ed520c 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -34,12 +34,6 @@ struct uprobe_trace_entry_head {
+ #define DATAOF_TRACE_ENTRY(entry, is_return)          \
+       ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
+-struct trace_uprobe_filter {
+-      rwlock_t                rwlock;
+-      int                     nr_systemwide;
+-      struct list_head        perf_events;
+-};
+-
+ static int trace_uprobe_create(int argc, const char **argv);
+ static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
+ static int trace_uprobe_release(struct dyn_event *ev);
+@@ -263,14 +257,6 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+ }
+ NOKPROBE_SYMBOL(process_fetch_insn)
+-static struct trace_uprobe_filter *
+-trace_uprobe_get_filter(struct trace_uprobe *tu)
+-{
+-      struct trace_probe_event *event = tu->tp.event;
+-
+-      return (struct trace_uprobe_filter *)&event->data[0];
+-}
+-
+ static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
+ {
+       rwlock_init(&filter->rwlock);
+@@ -358,8 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
+       if (!tu)
+               return ERR_PTR(-ENOMEM);
+-      ret = trace_probe_init(&tu->tp, event, group,
+-                              sizeof(struct trace_uprobe_filter));
++      ret = trace_probe_init(&tu->tp, event, group, true);
+       if (ret < 0)
+               goto error;
+@@ -367,7 +352,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
+       tu->consumer.handler = uprobe_dispatcher;
+       if (is_ret)
+               tu->consumer.ret_handler = uretprobe_dispatcher;
+-      init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
++      init_trace_uprobe_filter(tu->tp.event->filter);
+       return tu;
+ error:
+@@ -1076,7 +1061,7 @@ static void __probe_event_disable(struct trace_probe *tp)
+       struct trace_uprobe *tu;
+       tu = container_of(tp, struct trace_uprobe, tp);
+-      WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
++      WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+               tu = container_of(pos, struct trace_uprobe, tp);
+@@ -1117,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call,
+       }
+       tu = container_of(tp, struct trace_uprobe, tp);
+-      WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
++      WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+       if (enabled)
+               return 0;
+@@ -1281,7 +1266,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
+               return -ENODEV;
+       tu = container_of(tp, struct trace_uprobe, tp);
+-      if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
++      if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+               return 0;
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+@@ -1306,7 +1291,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
+               return -ENODEV;
+       tu = container_of(tp, struct trace_uprobe, tp);
+-      if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
++      if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+               return 0;
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+@@ -1328,7 +1313,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
+       int ret;
+       tu = container_of(uc, struct trace_uprobe, consumer);
+-      filter = trace_uprobe_get_filter(tu);
++      filter = tu->tp.event->filter;
+       read_lock(&filter->rwlock);
+       ret = __uprobe_perf_filter(filter, mm);
+-- 
+2.20.1
+
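The alignment point generalizes beyond the kernel. The small standalone C11 program below (illustrative only: the struct names and the long member standing in for rwlock_t are assumptions) shows that a trailing array declared with the struct's own type lands at a properly aligned offset, while a char array that is later cast carries no such guarantee.

#include <stdio.h>
#include <stddef.h>

struct filter {                 /* stand-in for trace_uprobe_filter */
	long lock;              /* rwlock_t wants natural alignment */
	int nr_systemwide;
};

struct event_char {             /* old layout: untyped trailing array + cast */
	int flags;
	char data[];            /* the kernel used char data[0] */
};

struct event_typed {            /* new layout: typed trailing array */
	int flags;
	struct filter filter[];
};

int main(void)
{
	printf("alignof(struct filter)        = %zu\n", _Alignof(struct filter));
	printf("offsetof(event_char,  data)   = %zu\n", offsetof(struct event_char, data));
	printf("offsetof(event_typed, filter) = %zu\n", offsetof(struct event_typed, filter));
	/*
	 * data sits at a char-aligned offset, so casting &data[0] to
	 * struct filter * may yield a misaligned pointer; the typed
	 * flexible array is padded up to _Alignof(struct filter).
	 */
	return 0;
}

On a typical x86-64 build (gcc -std=c11) the first and third values match while the second is smaller, which is exactly the gap the patch closes.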