From: Greg Kroah-Hartman
Date: Thu, 2 May 2019 12:12:54 +0000 (+0200)
Subject: 4.14-stable patches
X-Git-Tag: v4.9.173~12
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a459c59833e7d1a9cf6078bef9d4ef364438d82e;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
	mm-add-try_get_page-helper-function.patch
	mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
	mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
	usbnet-ipheth-fix-potential-null-pointer-dereference-in-ipheth_carrier_set.patch
	usbnet-ipheth-prevent-tx-queue-timeouts-when-device-not-ready.patch
---

diff --git a/queue-4.14/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch b/queue-4.14/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
new file mode 100644
index 00000000000..357c26bd7b0
--- /dev/null
+++ b/queue-4.14/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
@@ -0,0 +1,158 @@
+From 15fab63e1e57be9fdb5eec1bbc5916e9825e9acb Mon Sep 17 00:00:00 2001
+From: Matthew Wilcox
+Date: Fri, 5 Apr 2019 14:02:10 -0700
+Subject: fs: prevent page refcount overflow in pipe_buf_get
+
+From: Matthew Wilcox
+
+commit 15fab63e1e57be9fdb5eec1bbc5916e9825e9acb upstream.
+
+Change pipe_buf_get() to return a bool indicating whether it succeeded
+in raising the refcount of the page (if the thing in the pipe is a page).
+This removes another mechanism for overflowing the page refcount.  All
+callers converted to handle a failure.
+
+Reported-by: Jann Horn
+Signed-off-by: Matthew Wilcox
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/fuse/dev.c             |   12 ++++++------
+ fs/pipe.c                 |    4 ++--
+ fs/splice.c               |   12 ++++++++++--
+ include/linux/pipe_fs_i.h |   10 ++++++----
+ kernel/trace/trace.c      |    4 ++++
+ 5 files changed, 28 insertions(+), 14 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1981,10 +1981,8 @@ static ssize_t fuse_dev_splice_write(str
+ 		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+ 
+ 	ret = -EINVAL;
+-	if (rem < len) {
+-		pipe_unlock(pipe);
+-		goto out;
+-	}
++	if (rem < len)
++		goto out_free;
+ 
+ 	rem = len;
+ 	while (rem) {
+@@ -2002,7 +2000,9 @@ static ssize_t fuse_dev_splice_write(str
+ 			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+ 			pipe->nrbufs--;
+ 		} else {
+-			pipe_buf_get(pipe, ibuf);
++			if (!pipe_buf_get(pipe, ibuf))
++				goto out_free;
++
+ 			*obuf = *ibuf;
+ 			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ 			obuf->len = rem;
+@@ -2025,11 +2025,11 @@ static ssize_t fuse_dev_splice_write(str
+ 	ret = fuse_dev_do_write(fud, &cs, len);
+ 
+ 	pipe_lock(pipe);
++out_free:
+ 	for (idx = 0; idx < nbuf; idx++)
+ 		pipe_buf_release(pipe, &bufs[idx]);
+ 	pipe_unlock(pipe);
+ 
+-out:
+ 	kfree(bufs);
+ 	return ret;
+ }
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -194,9 +194,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
+  * in the tee() system call, when we duplicate the buffers in one
+  * pipe into another.
+  */
+-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
++bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+ {
+-	get_page(buf->page);
++	return try_get_page(buf->page);
+ }
+ EXPORT_SYMBOL(generic_pipe_buf_get);
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1571,7 +1571,11 @@ retry:
+ 			 * Get a reference to this pipe buffer,
+ 			 * so we can copy the contents over.
+ 			 */
+-			pipe_buf_get(ipipe, ibuf);
++			if (!pipe_buf_get(ipipe, ibuf)) {
++				if (ret == 0)
++					ret = -EFAULT;
++				break;
++			}
+ 			*obuf = *ibuf;
+ 
+ 			/*
+@@ -1645,7 +1649,11 @@ static int link_pipe(struct pipe_inode_i
+ 		 * Get a reference to this pipe buffer,
+ 		 * so we can copy the contents over.
+ 		 */
+-		pipe_buf_get(ipipe, ibuf);
++		if (!pipe_buf_get(ipipe, ibuf)) {
++			if (ret == 0)
++				ret = -EFAULT;
++			break;
++		}
+ 
+ 		obuf = opipe->bufs + nbuf;
+ 		*obuf = *ibuf;
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -108,18 +108,20 @@ struct pipe_buf_operations {
+ 	/*
+ 	 * Get a reference to the pipe buffer.
+ 	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
++	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+ 
+ /**
+  * pipe_buf_get - get a reference to a pipe_buffer
+  * @pipe: the pipe that the buffer belongs to
+  * @buf: the buffer to get a reference to
++ *
++ * Return: %true if the reference was successfully obtained.
+  */
+-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
++static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
+ 				struct pipe_buffer *buf)
+ {
+-	buf->ops->get(pipe, buf);
++	return buf->ops->get(pipe, buf);
+ }
+ 
+ /**
+@@ -179,7 +181,7 @@ struct pipe_inode_info *alloc_pipe_info(
+ void free_pipe_info(struct pipe_inode_info *);
+ 
+ /* Generic pipe buffer ops functions */
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
++bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6744,7 +6744,11 @@ static void buffer_pipe_buf_get(struct p
+ {
+ 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+ 
++	if (refcount_read(&ref->refcount) > INT_MAX/2)
++		return false;
++
+ 	refcount_inc(&ref->refcount);
++	return true;
+ }
+ 
+ /* Pipe buffer operations for a buffer. */
diff --git a/queue-4.14/mm-add-try_get_page-helper-function.patch b/queue-4.14/mm-add-try_get_page-helper-function.patch
new file mode 100644
index 00000000000..996a8225d02
--- /dev/null
+++ b/queue-4.14/mm-add-try_get_page-helper-function.patch
@@ -0,0 +1,56 @@
+From 88b1a17dfc3ed7728316478fae0f5ad508f50397 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 11 Apr 2019 10:14:59 -0700
+Subject: mm: add 'try_get_page()' helper function
+
+From: Linus Torvalds
+
+commit 88b1a17dfc3ed7728316478fae0f5ad508f50397 upstream.
+
+This is the same as the traditional 'get_page()' function, but instead
+of unconditionally incrementing the reference count of the page, it only
+does so if the count was "safe". It returns whether the reference count
+was incremented (and is marked __must_check, since the caller obviously
+has to be aware of it).
+
+Also like 'get_page()', you can't use this function unless you already
+had a reference to the page. The intent is that you can use this
+exactly like get_page(), but in situations where you want to limit the
+maximum reference count.
+
+The code currently does an unconditional WARN_ON_ONCE() if we ever hit
+the reference count issues (either zero or negative), as a notification
+that the conditional non-increment actually happened.
+
+NOTE! The count access for the "safety" check is inherently racy, but
+that doesn't matter since the buffer we use is basically half the range
+of the reference count (ie we look at the sign of the count).
+
+Acked-by: Matthew Wilcox
+Cc: Jann Horn
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/mm.h |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -839,6 +839,15 @@ static inline void get_page(struct page
+ 	page_ref_inc(page);
+ }
+ 
++static inline __must_check bool try_get_page(struct page *page)
++{
++	page = compound_head(page);
++	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
++		return false;
++	page_ref_inc(page);
++	return true;
++}
++
+ static inline void put_page(struct page *page)
+ {
+ 	page = compound_head(page);
diff --git a/queue-4.14/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch b/queue-4.14/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
new file mode 100644
index 00000000000..4fc5cc05a97
--- /dev/null
+++ b/queue-4.14/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
@@ -0,0 +1,52 @@
+From f958d7b528b1b40c44cfda5eabe2d82760d868c3 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 11 Apr 2019 10:06:20 -0700
+Subject: mm: make page ref count overflow check tighter and more explicit
+
+From: Linus Torvalds
+
+commit f958d7b528b1b40c44cfda5eabe2d82760d868c3 upstream.
+
+We have a VM_BUG_ON() to check that the page reference count doesn't
+underflow (or get close to overflow) by checking the sign of the count.
+
+That's all fine, but we actually want to allow people to use a "get page
+ref unless it's already very high" helper function, and we want that one
+to use the sign of the page ref (without triggering this VM_BUG_ON).
+
+Change the VM_BUG_ON to only check for small underflows (or _very_ close
+to overflowing), and ignore overflows which have strayed into negative
+territory.
+
+Acked-by: Matthew Wilcox
+Cc: Jann Horn
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/mm.h |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -824,6 +824,10 @@ static inline bool is_device_public_page
+ #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+ 
+ 
++/* 127: arbitrary random number, small enough to assemble well */
++#define page_ref_zero_or_close_to_overflow(page) \
++	((unsigned int) page_ref_count(page) + 127u <= 127u)
++
+ static inline void get_page(struct page *page)
+ {
+ 	page = compound_head(page);
+@@ -831,7 +835,7 @@ static inline void get_page(struct page
+ 	/*
+ 	 * Getting a normal page or the head of a compound page
+ 	 * requires to already have an elevated page->_refcount.
+ 	 */
+-	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
++	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+ 	page_ref_inc(page);
+ }
+ 
diff --git a/queue-4.14/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch b/queue-4.14/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
new file mode 100644
index 00000000000..da0a05af468
--- /dev/null
+++ b/queue-4.14/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
@@ -0,0 +1,153 @@
+From 8fde12ca79aff9b5ba951fce1a2641901b8d8e64 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 11 Apr 2019 10:49:19 -0700
+Subject: mm: prevent get_user_pages() from overflowing page refcount
+
+From: Linus Torvalds
+
+commit 8fde12ca79aff9b5ba951fce1a2641901b8d8e64 upstream.
+
+If the page refcount wraps around past zero, it will be freed while
+there are still four billion references to it. One of the possible
+avenues for an attacker to try to make this happen is by doing direct IO
+on a page multiple times. This patch makes get_user_pages() refuse to
+take a new page reference if there are already more than two billion
+references to the page.
+
+Reported-by: Jann Horn
+Acked-by: Matthew Wilcox
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/gup.c     |   45 ++++++++++++++++++++++++++++++++++-----------
+ mm/hugetlb.c |   13 +++++++++++++
+ 2 files changed, 47 insertions(+), 11 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -153,7 +153,10 @@ retry:
+ 	}
+ 
+ 	if (flags & FOLL_GET) {
+-		get_page(page);
++		if (unlikely(!try_get_page(page))) {
++			page = ERR_PTR(-ENOMEM);
++			goto out;
++		}
+ 
+ 		/* drop the pgmap reference now that we hold the page */
+ 		if (pgmap) {
+@@ -280,7 +283,10 @@ retry_locked:
+ 		if (pmd_trans_unstable(pmd))
+ 			ret = -EBUSY;
+ 	} else {
+-		get_page(page);
++		if (unlikely(!try_get_page(page))) {
++			spin_unlock(ptl);
++			return ERR_PTR(-ENOMEM);
++		}
+ 		spin_unlock(ptl);
+ 		lock_page(page);
+ 		ret = split_huge_page(page);
+@@ -464,7 +470,10 @@ static int get_gate_page(struct mm_struc
+ 		if (is_device_public_page(*page))
+ 			goto unmap;
+ 	}
+-	get_page(*page);
++	if (unlikely(!try_get_page(*page))) {
++		ret = -ENOMEM;
++		goto unmap;
++	}
+ out:
+ 	ret = 0;
+ unmap:
+@@ -1365,6 +1374,20 @@ static void undo_dev_pagemap(int *nr, in
+ 	}
+ }
+ 
++/*
++ * Return the compound head page with ref appropriately incremented,
++ * or NULL if that failed.
++ */
++static inline struct page *try_get_compound_head(struct page *page, int refs)
++{
++	struct page *head = compound_head(page);
++	if (WARN_ON_ONCE(page_ref_count(head) < 0))
++		return NULL;
++	if (unlikely(!page_cache_add_speculative(head, refs)))
++		return NULL;
++	return head;
++}
++
+ #ifdef __HAVE_ARCH_PTE_SPECIAL
+ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
+ 			 int write, struct page **pages, int *nr)
+@@ -1399,9 +1422,9 @@ static int gup_pte_range(pmd_t pmd, unsi
+ 
+ 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ 		page = pte_page(pte);
+-		head = compound_head(page);
+ 
+-		if (!page_cache_get_speculative(head))
++		head = try_get_compound_head(page, 1);
++		if (!head)
+ 			goto pte_unmap;
+ 
+ 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+@@ -1537,8 +1560,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_
+ 		refs++;
+ 	} while (addr += PAGE_SIZE, addr != end);
+ 
+-	head = compound_head(pmd_page(orig));
+-	if (!page_cache_add_speculative(head, refs)) {
++	head = try_get_compound_head(pmd_page(orig), refs);
++	if (!head) {
+ 		*nr -= refs;
+ 		return 0;
+ 	}
+@@ -1575,8 +1598,8 @@ static int gup_huge_pud(pud_t orig, pud_
+ 		refs++;
+ 	} while (addr += PAGE_SIZE, addr != end);
+ 
+-	head = compound_head(pud_page(orig));
+-	if (!page_cache_add_speculative(head, refs)) {
++	head = try_get_compound_head(pud_page(orig), refs);
++	if (!head) {
+ 		*nr -= refs;
+ 		return 0;
+ 	}
+@@ -1612,8 +1635,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_
+ 		refs++;
+ 	} while (addr += PAGE_SIZE, addr != end);
+ 
+-	head = compound_head(pgd_page(orig));
+-	if (!page_cache_add_speculative(head, refs)) {
++	head = try_get_compound_head(pgd_page(orig), refs);
++	if (!head) {
+ 		*nr -= refs;
+ 		return 0;
+ 	}
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4255,6 +4255,19 @@ long follow_hugetlb_page(struct mm_struc
+ 
+ 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
+ 		page = pte_page(huge_ptep_get(pte));
++
++		/*
++		 * Instead of doing 'try_get_page()' below in the same_page
++		 * loop, just check the count once here.
++		 */
++		if (unlikely(page_count(page) <= 0)) {
++			if (pages) {
++				spin_unlock(ptl);
++				remainder = 0;
++				err = -ENOMEM;
++				break;
++			}
++		}
+ same_page:
+ 		if (pages) {
+ 			pages[i] = mem_map_offset(page, pfn_offset);
diff --git a/queue-4.14/series b/queue-4.14/series
index 8495c3ed8e5..9d233b6d9ee 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -1 +1,7 @@
 selinux-use-kernel-linux-socket.h-for-genheaders-and-mdp.patch
+usbnet-ipheth-prevent-tx-queue-timeouts-when-device-not-ready.patch
+usbnet-ipheth-fix-potential-null-pointer-dereference-in-ipheth_carrier_set.patch
+mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
+mm-add-try_get_page-helper-function.patch
+mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
+fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
diff --git a/queue-4.14/usbnet-ipheth-fix-potential-null-pointer-dereference-in-ipheth_carrier_set.patch b/queue-4.14/usbnet-ipheth-fix-potential-null-pointer-dereference-in-ipheth_carrier_set.patch
new file mode 100644
index 00000000000..01a28013ab5
--- /dev/null
+++ b/queue-4.14/usbnet-ipheth-fix-potential-null-pointer-dereference-in-ipheth_carrier_set.patch
@@ -0,0 +1,45 @@
+From 61c59355e0154a938b28710dfa6c1d8be2ddcefa Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva"
+Date: Fri, 17 Nov 2017 14:02:09 -0600
+Subject: usbnet: ipheth: fix potential null pointer dereference in ipheth_carrier_set
+
+From: Gustavo A. R. Silva
+
+commit 61c59355e0154a938b28710dfa6c1d8be2ddcefa upstream.
+
+_dev_ is being dereferenced before it is null checked, hence there
+is a potential null pointer dereference.
+
+Fix this by moving the pointer dereference after _dev_ has been null
+checked.
+
+Addresses-Coverity-ID: 1462020
+Fixes: bb1b40c7cb86 ("usbnet: ipheth: prevent TX queue timeouts when device not ready")
+Signed-off-by: Gustavo A. R. Silva
+Signed-off-by: David S. Miller
+Signed-off-by: Guenter Roeck
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/usb/ipheth.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -290,12 +290,15 @@ static void ipheth_sndbulk_callback(stru
+ 
+ static int ipheth_carrier_set(struct ipheth_device *dev)
+ {
+-	struct usb_device *udev = dev->udev;
++	struct usb_device *udev;
+ 	int retval;
++
+ 	if (!dev)
+ 		return 0;
+ 	if (!dev->confirmed_pairing)
+ 		return 0;
++
++	udev = dev->udev;
+ 	retval = usb_control_msg(udev,
+ 			usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ 			IPHETH_CMD_CARRIER_CHECK, /* request */
diff --git a/queue-4.14/usbnet-ipheth-prevent-tx-queue-timeouts-when-device-not-ready.patch b/queue-4.14/usbnet-ipheth-prevent-tx-queue-timeouts-when-device-not-ready.patch
new file mode 100644
index 00000000000..e3a81841f9a
--- /dev/null
+++ b/queue-4.14/usbnet-ipheth-prevent-tx-queue-timeouts-when-device-not-ready.patch
@@ -0,0 +1,187 @@
+From bb1b40c7cb863f0800a6410c7dcb86cf3f28d3b1 Mon Sep 17 00:00:00 2001
+From: Alexander Kappner
+Date: Mon, 13 Nov 2017 17:44:20 -0800
+Subject: usbnet: ipheth: prevent TX queue timeouts when device not ready
+
+From: Alexander Kappner
+
+commit bb1b40c7cb863f0800a6410c7dcb86cf3f28d3b1 upstream.
+
+iOS devices require the host to be "trusted" before servicing network
+packets. Establishing trust requires the user to confirm a dialog on the
+iOS device. Until trust is established, the iOS device will silently discard
+network packets from the host. Currently, the ipheth driver does not detect
+whether an iOS device has established trust with the host, and immediately
+sets up the transmit queues.
+
+This causes the following problems:
+
+- Kernel taint due to WARN() in netdev watchdog.
+- Dmesg spam ("TX timeout").
+- Disruption of user space networking activity (dhcpd, etc...) when new
+interface comes up but cannot be used.
+- Unnecessary host and device wakeups and USB traffic
+
+Example dmesg output:
+
+[ 1101.319778] NETDEV WATCHDOG: eth1 (ipheth): transmit queue 0 timed out
+[ 1101.319817] ------------[ cut here ]------------
+[ 1101.319828] WARNING: CPU: 0 PID: 0 at net/sched/sch_generic.c:316 dev_watchdog+0x20f/0x220
+[ 1101.319831] Modules linked in: ipheth usbmon nvidia_drm(PO) nvidia_modeset(PO) nvidia(PO) iwlmvm mac80211 iwlwifi btusb btrtl btbcm btintel qmi_wwan bluetooth cfg80211 ecdh_generic thinkpad_acpi rfkill [last unloaded: ipheth]
+[ 1101.319861] CPU: 0 PID: 0 Comm: swapper/0 Tainted: P O 4.13.12.1 #1
+[ 1101.319864] Hardware name: LENOVO 20ENCTO1WW/20ENCTO1WW, BIOS N1EET62W (1.35 ) 11/10/2016
+[ 1101.319867] task: ffffffff81e11500 task.stack: ffffffff81e00000
+[ 1101.319873] RIP: 0010:dev_watchdog+0x20f/0x220
+[ 1101.319876] RSP: 0018:ffff8810a3c03e98 EFLAGS: 00010292
+[ 1101.319880] RAX: 000000000000003a RBX: 0000000000000000 RCX: 0000000000000000
+[ 1101.319883] RDX: ffff8810a3c15c48 RSI: ffffffff81ccbfc2 RDI: 00000000ffffffff
+[ 1101.319886] RBP: ffff880c04ebc41c R08: 0000000000000000 R09: 0000000000000379
+[ 1101.319889] R10: 00000100696589d0 R11: 0000000000000378 R12: ffff880c04ebc000
+[ 1101.319892] R13: 0000000000000000 R14: 0000000000000001 R15: ffff880c2865fc80
+[ 1101.319896] FS: 0000000000000000(0000) GS:ffff8810a3c00000(0000) knlGS:0000000000000000
+[ 1101.319899] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 1101.319902] CR2: 00007f3ff24ac000 CR3: 0000000001e0a000 CR4: 00000000003406f0
+[ 1101.319905] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 1101.319908] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 1101.319910] Call Trace:
+[ 1101.319914]  <IRQ>
+[ 1101.319921]  ? dev_graft_qdisc+0x70/0x70
+[ 1101.319928]  ? dev_graft_qdisc+0x70/0x70
+[ 1101.319934]  ? call_timer_fn+0x2e/0x170
+[ 1101.319939]  ? dev_graft_qdisc+0x70/0x70
+[ 1101.319944]  ? run_timer_softirq+0x1ea/0x440
+[ 1101.319951]  ? timerqueue_add+0x54/0x80
+[ 1101.319956]  ? enqueue_hrtimer+0x38/0xa0
+[ 1101.319963]  ? __do_softirq+0xed/0x2e7
+[ 1101.319970]  ? irq_exit+0xb4/0xc0
+[ 1101.319976]  ? smp_apic_timer_interrupt+0x39/0x50
+[ 1101.319981]  ? apic_timer_interrupt+0x8c/0xa0
+[ 1101.319983]  </IRQ>
+[ 1101.319992]  ? cpuidle_enter_state+0xfa/0x2a0
+[ 1101.319999]  ? do_idle+0x1a3/0x1f0
+[ 1101.320004]  ? cpu_startup_entry+0x5f/0x70
+[ 1101.320011]  ? start_kernel+0x444/0x44c
+[ 1101.320017]  ? early_idt_handler_array+0x120/0x120
+[ 1101.320023]  ? x86_64_start_kernel+0x145/0x154
+[ 1101.320028]  ? secondary_startup_64+0x9f/0x9f
+[ 1101.320033] Code: 20 04 00 00 eb 9f 4c 89 e7 c6 05 59 44 71 00 01 e8 a7 df fd ff 89 d9 4c 89 e6 48 c7 c7 70 b7 cd 81 48 89 c2 31 c0 e8 97 64 90 ff <0f> ff eb bf 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00
+[ 1101.320103] ---[ end trace 0cc4d251e2b57080 ]---
+[ 1101.320110] ipheth 1-5:4.2: ipheth_tx_timeout: TX timeout
+
+The last message "TX timeout" is repeated every 5 seconds until trust is
+established or the device is disconnected, filling up dmesg.
+
+The proposed patch eliminates the problem by, upon connection, keeping the
+TX queue and carrier disabled until a packet is first received from the iOS
+device. This is reflected by the confirmed_pairing variable in the device
+structure. Only after at least one packet has been received from the iOS
+device, the transmit queue and carrier are brought up during the periodic
+device poll in ipheth_carrier_set. Because the iOS device will always send
+a packet immediately upon trust being established, this should not delay
+the interface becoming usable. To prevent failed URBs in
+ipheth_rcvbulk_callback from perpetually re-enabling the queue if it was
+disabled, a new check is added so only successful transfers re-enable the
+queue, whereas failed transfers only trigger an immediate poll.
+
+This has the added benefit of removing the periodic control requests to the
+iOS device until trust has been established and thus should reduce wakeup
+events on both the host and the iOS device.
+
+Signed-off-by: Alexander Kappner
+Signed-off-by: David S. Miller
+[groeck: Fixed context conflict seen because 45611c61dd50 was applied first]
+Signed-off-by: Guenter Roeck
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/usb/ipheth.c |   30 +++++++++++++++++++++---------
+ 1 file changed, 21 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -148,6 +148,7 @@ struct ipheth_device {
+ 	u8 bulk_in;
+ 	u8 bulk_out;
+ 	struct delayed_work carrier_work;
++	bool confirmed_pairing;
+ };
+ 
+ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
+@@ -259,7 +260,7 @@ static void ipheth_rcvbulk_callback(stru
+ 
+ 	dev->net->stats.rx_packets++;
+ 	dev->net->stats.rx_bytes += len;
+-
++	dev->confirmed_pairing = true;
+ 	netif_rx(skb);
+ 	ipheth_rx_submit(dev, GFP_ATOMIC);
+ }
+@@ -280,14 +281,21 @@ static void ipheth_sndbulk_callback(stru
+ 		dev_err(&dev->intf->dev, "%s: urb status: %d\n",
+ 			__func__, status);
+ 
+-	netif_wake_queue(dev->net);
++	if (status == 0)
++		netif_wake_queue(dev->net);
++	else
++		// on URB error, trigger immediate poll
++		schedule_delayed_work(&dev->carrier_work, 0);
+ }
+ 
+ static int ipheth_carrier_set(struct ipheth_device *dev)
+ {
+ 	struct usb_device *udev = dev->udev;
+ 	int retval;
+-
++	if (!dev)
++		return 0;
++	if (!dev->confirmed_pairing)
++		return 0;
+ 	retval = usb_control_msg(udev,
+ 			usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+ 			IPHETH_CMD_CARRIER_CHECK, /* request */
+@@ -302,11 +310,14 @@ static int ipheth_carrier_set(struct iph
+ 		return retval;
+ 	}
+ 
+-	if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
++	if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
+ 		netif_carrier_on(dev->net);
+-	else
++		if (dev->tx_urb->status != -EINPROGRESS)
++			netif_wake_queue(dev->net);
++	} else {
+ 		netif_carrier_off(dev->net);
+-
++		netif_stop_queue(dev->net);
++	}
+ 	return 0;
+ }
+ 
+@@ -386,7 +397,6 @@ static int ipheth_open(struct net_device
+ 		return retval;
+ 
+ 	schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+-	netif_start_queue(net);
+ 	return retval;
+ }
+ 
+@@ -489,7 +499,7 @@ static int ipheth_probe(struct usb_inter
+ 	dev->udev = udev;
+ 	dev->net = netdev;
+ 	dev->intf = intf;
+-
++	dev->confirmed_pairing = false;
+ 	/* Set up endpoints */
+ 	hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
+ 	if (hintf == NULL) {
+@@ -540,7 +550,9 @@ static int ipheth_probe(struct usb_inter
+ 		retval = -EIO;
+ 		goto err_register_netdev;
+ 	}
+-
++	// carrier down and transmit queues stopped until packet from device
++	netif_carrier_off(netdev);
++	netif_tx_stop_all_queues(netdev);
+ 	dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
+ 	return 0;
+