From: Greg Kroah-Hartman
Date: Thu, 11 Oct 2018 15:07:34 +0000 (+0200)
Subject: 4.14-stable patches
X-Git-Tag: v3.18.124~8
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e351d934706d71ada1a684d4097d383ba515017e;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
    ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch
    rds-rds_ib_recv_alloc_cache-should-call-alloc_percpu_gfp-instead.patch
    virtio_balloon-fix-deadlock-on-oom.patch
    virtio_balloon-fix-increment-of-vb-num_pfns-in-fill_balloon.patch
---

diff --git a/queue-4.14/ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch b/queue-4.14/ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch
new file mode 100644
index 00000000000..c9dbb7d4004
--- /dev/null
+++ b/queue-4.14/ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch
@@ -0,0 +1,61 @@
+From c8291988806407e02a01b4b15b4504eafbcc04e0 Mon Sep 17 00:00:00 2001
+From: Zhi Chen
+Date: Mon, 18 Jun 2018 17:00:39 +0300
+Subject: ath10k: fix scan crash due to incorrect length calculation
+
+From: Zhi Chen
+
+commit c8291988806407e02a01b4b15b4504eafbcc04e0 upstream.
+
+Length of WMI scan message was not calculated correctly. The allocated
+buffer was smaller than what we expected. So WMI message corrupted
+skb_info, which is at the end of skb->data. This fix takes TLV header
+into account even if the element is zero-length.
+
+Crash log:
+    [49.629986] Unhandled kernel unaligned access[#1]:
+    [49.634932] CPU: 0 PID: 1176 Comm: logd Not tainted 4.4.60 #180
+    [49.641040] task: 83051460 ti: 8329c000 task.ti: 8329c000
+    [49.646608] $ 0 : 00000000 00000001 80984a80 00000000
+    [49.652038] $ 4 : 45259e89 8046d484 8046df30 8024ba70
+    [49.657468] $ 8 : 00000000 804cc4c0 00000001 20306320
+    [49.662898] $12 : 33322037 000110f2 00000000 31203930
+    [49.668327] $16 : 82792b40 80984a80 00000001 804207fc
+    [49.673757] $20 : 00000000 0000012c 00000040 80470000
+    [49.679186] $24 : 00000000 8024af7c
+    [49.684617] $28 : 8329c000 8329db88 00000001 802c58d0
+    [49.690046] Hi : 00000000
+    [49.693022] Lo : 453c0000
+    [49.696013] epc : 800efae4 put_page+0x0/0x58
+    [49.700615] ra : 802c58d0 skb_release_data+0x148/0x1d4
+    [49.706184] Status: 1000fc03 KERNEL EXL IE
+    [49.710531] Cause : 00800010 (ExcCode 04)
+    [49.714669] BadVA : 45259e89
+    [49.717644] PrId : 00019374 (MIPS 24Kc)
+
+Signed-off-by: Zhi Chen
+Signed-off-by: Kalle Valo
+Cc: Brian Norris
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct
+ 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+ 	ie_len = roundup(arg->ie_len, 4);
+ 	len = (sizeof(*tlv) + sizeof(*cmd)) +
+-	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
+-	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
+-	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
+-	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
++	      sizeof(*tlv) + chan_len +
++	      sizeof(*tlv) + ssid_len +
++	      sizeof(*tlv) + bssid_len +
++	      sizeof(*tlv) + ie_len;
+ 
+ 	skb = ath10k_wmi_alloc_skb(ar, len);
+ 	if (!skb)
diff --git a/queue-4.14/rds-rds_ib_recv_alloc_cache-should-call-alloc_percpu_gfp-instead.patch b/queue-4.14/rds-rds_ib_recv_alloc_cache-should-call-alloc_percpu_gfp-instead.patch
new file mode 100644
index 00000000000..afc674c8629
--- /dev/null
+++ b/queue-4.14/rds-rds_ib_recv_alloc_cache-should-call-alloc_percpu_gfp-instead.patch
@@ -0,0 +1,83 @@
+From f394ad28feffbeebab77c8bf9a203bd49b957c9a Mon Sep 17 00:00:00 2001
+From: Ka-Cheong Poon
+Date: Mon, 30 Jul 2018 22:48:41 -0700
+Subject: rds: rds_ib_recv_alloc_cache() should call alloc_percpu_gfp() instead
+
+From: Ka-Cheong Poon
+
+commit f394ad28feffbeebab77c8bf9a203bd49b957c9a upstream.
+
+Currently, rds_ib_conn_alloc() calls rds_ib_recv_alloc_caches()
+without passing along the gfp_t flag. But rds_ib_recv_alloc_caches()
+and rds_ib_recv_alloc_cache() should take a gfp_t parameter so that
+rds_ib_recv_alloc_cache() can call alloc_percpu_gfp() using the
+correct flag instead of calling alloc_percpu().
+
+Signed-off-by: Ka-Cheong Poon
+Acked-by: Santosh Shilimkar
+Signed-off-by: David S. Miller
+Cc: Håkon Bugge
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/rds/ib.h      |  2 +-
+ net/rds/ib_cm.c   |  2 +-
+ net/rds/ib_recv.c | 10 +++++-----
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/net/rds/ib.h
++++ b/net/rds/ib.h
+@@ -373,7 +373,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib
+ int rds_ib_recv_init(void);
+ void rds_ib_recv_exit(void);
+ int rds_ib_recv_path(struct rds_conn_path *conn);
+-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
++int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
+ void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
+ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
+ void rds_ib_inc_free(struct rds_incoming *inc);
+--- a/net/rds/ib_cm.c
++++ b/net/rds/ib_cm.c
+@@ -946,7 +946,7 @@ int rds_ib_conn_alloc(struct rds_connect
+ 	if (!ic)
+ 		return -ENOMEM;
+ 
+-	ret = rds_ib_recv_alloc_caches(ic);
++	ret = rds_ib_recv_alloc_caches(ic, gfp);
+ 	if (ret) {
+ 		kfree(ic);
+ 		return ret;
+--- a/net/rds/ib_recv.c
++++ b/net/rds/ib_recv.c
+@@ -98,12 +98,12 @@ static void rds_ib_cache_xfer_to_ready(s
+ 	}
+ }
+ 
+-static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
++static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
+ {
+ 	struct rds_ib_cache_head *head;
+ 	int cpu;
+ 
+-	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
++	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
+ 	if (!cache->percpu)
+ 		return -ENOMEM;
+ 
+@@ -118,13 +118,13 @@ static int rds_ib_recv_alloc_cache(struc
+ 	return 0;
+ }
+ 
+-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
++int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
+ {
+ 	int ret;
+ 
+-	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
++	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
+ 	if (!ret) {
+-		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
++		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
+ 		if (ret)
+ 			free_percpu(ic->i_cache_incs.percpu);
+ 	}
diff --git a/queue-4.14/series b/queue-4.14/series
index 58fb5283d75..163506db743 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -39,3 +39,7 @@ perf-utils-move-is_directory-to-path.h.patch
 f2fs-fix-invalid-memory-access.patch
 ucma-fix-a-use-after-free-in-ucma_resolve_ip.patch
 ubifs-check-for-name-being-null-while-mounting.patch
+rds-rds_ib_recv_alloc_cache-should-call-alloc_percpu_gfp-instead.patch
+virtio_balloon-fix-deadlock-on-oom.patch
+virtio_balloon-fix-increment-of-vb-num_pfns-in-fill_balloon.patch
+ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch
diff --git a/queue-4.14/virtio_balloon-fix-deadlock-on-oom.patch b/queue-4.14/virtio_balloon-fix-deadlock-on-oom.patch
new file mode 100644
index 00000000000..a407574342d
--- /dev/null
+++ b/queue-4.14/virtio_balloon-fix-deadlock-on-oom.patch
@@ -0,0 +1,212 @@
+From c7cdff0e864713a089d7cb3a2b1136ba9a54881a Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin"
+Date: Fri, 13 Oct 2017 16:11:48 +0300
+Subject: virtio_balloon: fix deadlock on OOM
+
+From: Michael S. Tsirkin
+
+commit c7cdff0e864713a089d7cb3a2b1136ba9a54881a upstream.
+
+fill_balloon doing memory allocations under balloon_lock
+can cause a deadlock when leak_balloon is called from
+virtballoon_oom_notify and tries to take same lock.
+
+To fix, split page allocation and enqueue and do allocations outside the lock.
+
+Here's a detailed analysis of the deadlock by Tetsuo Handa:
+
+In leak_balloon(), mutex_lock(&vb->balloon_lock) is called in order to
+serialize against fill_balloon(). But in fill_balloon(),
+alloc_page(GFP_HIGHUSER[_MOVABLE] | __GFP_NOMEMALLOC | __GFP_NORETRY) is
+called with vb->balloon_lock mutex held. Since GFP_HIGHUSER[_MOVABLE]
+implies __GFP_DIRECT_RECLAIM | __GFP_IO | __GFP_FS, despite __GFP_NORETRY
+is specified, this allocation attempt might indirectly depend on somebody
+else's __GFP_DIRECT_RECLAIM memory allocation. And such indirect
+__GFP_DIRECT_RECLAIM memory allocation might call leak_balloon() via
+virtballoon_oom_notify() via blocking_notifier_call_chain() callback via
+out_of_memory() when it reached __alloc_pages_may_oom() and held oom_lock
+mutex. Since vb->balloon_lock mutex is already held by fill_balloon(), it
+will cause OOM lockup.
+
+  Thread1                                       Thread2
+   fill_balloon()
+    takes a balloon_lock
+    balloon_page_enqueue()
+     alloc_page(GFP_HIGHUSER_MOVABLE)
+      direct reclaim (__GFP_FS context)        takes a fs lock
+       waits for that fs lock                  alloc_page(GFP_NOFS)
+                                                __alloc_pages_may_oom()
+                                                 takes the oom_lock
+                                                  out_of_memory()
+                                                   blocking_notifier_call_chain()
+                                                    leak_balloon()
+                                                     tries to take that balloon_lock and deadlocks
+
+Reported-by: Tetsuo Handa
+Cc: Michal Hocko
+Cc: Wei Wang
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: Sudip Mukherjee
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/virtio/virtio_balloon.c    | 24 +++++++++++++++++++-----
+ include/linux/balloon_compaction.h | 35 ++++++++++++++++++++++++++++++++++-
+ mm/balloon_compaction.c            | 28 +++++++++++++++++++++-------
+ 3 files changed, 74 insertions(+), 13 deletions(-)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -143,16 +143,17 @@ static void set_page_pfns(struct virtio_
+ 
+ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+ {
+-	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+ 	unsigned num_allocated_pages;
++	unsigned num_pfns;
++	struct page *page;
++	LIST_HEAD(pages);
+ 
+ 	/* We can only do one array worth at a time. */
+ 	num = min(num, ARRAY_SIZE(vb->pfns));
+ 
+-	mutex_lock(&vb->balloon_lock);
+-	for (vb->num_pfns = 0; vb->num_pfns < num;
+-	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+-		struct page *page = balloon_page_enqueue(vb_dev_info);
++	for (num_pfns = 0; num_pfns < num;
++	     num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
++		struct page *page = balloon_page_alloc();
+ 
+ 		if (!page) {
+ 			dev_info_ratelimited(&vb->vdev->dev,
+@@ -162,6 +163,19 @@ static unsigned fill_balloon(struct virt
+ 			msleep(200);
+ 			break;
+ 		}
++
++		balloon_page_push(&pages, page);
++	}
++
++	mutex_lock(&vb->balloon_lock);
++
++	vb->num_pfns = 0;
++
++	while ((page = balloon_page_pop(&pages))) {
++		balloon_page_enqueue(&vb->vb_dev_info, page);
++
++		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
++
+ 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 		if (!virtio_has_feature(vb->vdev,
+--- a/include/linux/balloon_compaction.h
++++ b/include/linux/balloon_compaction.h
+@@ -50,6 +50,7 @@
+ #include <linux/gfp.h>
+ #include <linux/err.h>
+ #include <linux/fs.h>
++#include <linux/list.h>
+ 
+ /*
+  * Balloon device information descriptor.
+@@ -67,7 +68,9 @@ struct balloon_dev_info {
+ 	struct inode *inode;
+ };
+ 
+-extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
++extern struct page *balloon_page_alloc(void);
++extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
++				 struct page *page);
+ extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+ 
+ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
+@@ -193,4 +196,34 @@ static inline gfp_t balloon_mapping_gfp_
+ }
+ 
+ #endif /* CONFIG_BALLOON_COMPACTION */
++
++/*
++ * balloon_page_push - insert a page into a page list.
++ * @head : pointer to list
++ * @page : page to be added
++ *
++ * Caller must ensure the page is private and protect the list.
++ */
++static inline void balloon_page_push(struct list_head *pages, struct page *page)
++{
++	list_add(&page->lru, pages);
++}
++
++/*
++ * balloon_page_pop - remove a page from a page list.
++ * @head : pointer to list
++ * @page : page to be added
++ *
++ * Caller must ensure the page is private and protect the list.
++ */
++static inline struct page *balloon_page_pop(struct list_head *pages)
++{
++	struct page *page = list_first_entry_or_null(pages, struct page, lru);
++
++	if (!page)
++		return NULL;
++
++	list_del(&page->lru);
++	return page;
++}
+ #endif /* _LINUX_BALLOON_COMPACTION_H */
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -11,22 +11,37 @@
+ #include <linux/balloon_compaction.h>
+ 
+ /*
++ * balloon_page_alloc - allocates a new page for insertion into the balloon
++ *			page list.
++ *
++ * Driver must call it to properly allocate a new enlisted balloon page.
++ * Driver must call balloon_page_enqueue before definitively removing it from
++ * the guest system. This function returns the page address for the recently
++ * allocated page or NULL in the case we fail to allocate a new page this turn.
++ */
++struct page *balloon_page_alloc(void)
++{
++	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
++				       __GFP_NOMEMALLOC | __GFP_NORETRY);
++	return page;
++}
++EXPORT_SYMBOL_GPL(balloon_page_alloc);
++
++/*
+  * balloon_page_enqueue - allocates a new page and inserts it into the balloon
+  *			  page list.
+  * @b_dev_info: balloon device descriptor where we will insert a new page to
++ * @page: new page to enqueue - allocated using balloon_page_alloc.
+  *
+- * Driver must call it to properly allocate a new enlisted balloon page
++ * Driver must call it to properly enqueue a new allocated balloon page
+  * before definitively removing it from the guest system.
+  * This function returns the page address for the recently enqueued page or
+  * NULL in the case we fail to allocate a new page this turn.
+  */
+-struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
++void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
++			  struct page *page)
+ {
+ 	unsigned long flags;
+-	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
+-				       __GFP_NOMEMALLOC | __GFP_NORETRY);
+-	if (!page)
+-		return NULL;
+ 
+ 	/*
+ 	 * Block others from accessing the 'page' when we get around to
+@@ -39,7 +54,6 @@ struct page *balloon_page_enqueue(struct
+ 	__count_vm_event(BALLOON_INFLATE);
+ 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 	unlock_page(page);
+-	return page;
+ }
+ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
+ 
diff --git a/queue-4.14/virtio_balloon-fix-increment-of-vb-num_pfns-in-fill_balloon.patch b/queue-4.14/virtio_balloon-fix-increment-of-vb-num_pfns-in-fill_balloon.patch
new file mode 100644
index 00000000000..9d09bc91fff
--- /dev/null
+++ b/queue-4.14/virtio_balloon-fix-increment-of-vb-num_pfns-in-fill_balloon.patch
@@ -0,0 +1,47 @@
+From d9e427f6ab8142d6868eb719e6a7851aafea56b6 Mon Sep 17 00:00:00 2001
+From: Jan Stancek
+Date: Fri, 1 Dec 2017 10:50:28 +0100
+Subject: virtio_balloon: fix increment of vb->num_pfns in fill_balloon()
+
+From: Jan Stancek
+
+commit d9e427f6ab8142d6868eb719e6a7851aafea56b6 upstream.
+
+commit c7cdff0e8647 ("virtio_balloon: fix deadlock on OOM")
+changed code to increment vb->num_pfns before call to
+set_page_pfns(), which used to happen only after.
+
+This patch fixes boot hang for me on ppc64le KVM guests.
+
+Fixes: c7cdff0e8647 ("virtio_balloon: fix deadlock on OOM")
+Cc: Michael S. Tsirkin
+Cc: Tetsuo Handa
+Cc: Michal Hocko
+Cc: Wei Wang
+Cc: stable@vger.kernel.org
+Signed-off-by: Jan Stancek
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: Sudip Mukherjee
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/virtio/virtio_balloon.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virt
+ 	while ((page = balloon_page_pop(&pages))) {
+ 		balloon_page_enqueue(&vb->vb_dev_info, page);
+ 
+-		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
+-
+ 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 		if (!virtio_has_feature(vb->vdev,
+ 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ 			adjust_managed_page_count(page, -1);
++		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 	}
+ 
+ 	num_allocated_pages = vb->num_pfns;