From ecbd86f3b6230581b6811692782f0495e99b548d Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 1 Mar 2016 14:00:29 -0800
Subject: [PATCH] 3.10-stable patches

added patches:
      cdc-acm-exclude-samsung-phone-04e8-685d.patch
      libata-fix-sff-host-state-machine-locking-while-polling.patch
      revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch
      rfkill-fix-rfkill_fop_read-wait_event-usage.patch
---
 ...-acm-exclude-samsung-phone-04e8-685d.patch | 33 ++++
 ...-state-machine-locking-while-polling.patch | 183 ++++++++++++++++++
 ...e-sure-delayed-work-run-in-local-cpu.patch | 75 +++++++
 ...fix-rfkill_fop_read-wait_event-usage.patch | 63 ++++++
 queue-3.10/series | 4 +
 5 files changed, 358 insertions(+)
 create mode 100644 queue-3.10/cdc-acm-exclude-samsung-phone-04e8-685d.patch
 create mode 100644 queue-3.10/libata-fix-sff-host-state-machine-locking-while-polling.patch
 create mode 100644 queue-3.10/revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch
 create mode 100644 queue-3.10/rfkill-fix-rfkill_fop_read-wait_event-usage.patch

diff --git a/queue-3.10/cdc-acm-exclude-samsung-phone-04e8-685d.patch b/queue-3.10/cdc-acm-exclude-samsung-phone-04e8-685d.patch
new file mode 100644
index 00000000000..da264857170
--- /dev/null
+++ b/queue-3.10/cdc-acm-exclude-samsung-phone-04e8-685d.patch
@@ -0,0 +1,33 @@
+From e912e685f372ab62a2405a1acd923597f524e94a Mon Sep 17 00:00:00 2001
+From: Oliver Neukum
+Date: Mon, 18 Jan 2016 15:45:18 +0100
+Subject: cdc-acm:exclude Samsung phone 04e8:685d
+
+From: Oliver Neukum
+
+commit e912e685f372ab62a2405a1acd923597f524e94a upstream.
+
+This phone needs to be handled by a specialised firmware tool
+and is reported to crash irrevocably if cdc-acm takes it.
+
+Signed-off-by: Oliver Neukum
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/usb/class/cdc-acm.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1726,6 +1726,11 @@ static const struct usb_device_id acm_id
+ },
+ #endif
+
++ /*Samsung phone in firmware update mode */
++ { USB_DEVICE(0x04e8, 0x685d),
++ .driver_info = IGNORE_DEVICE,
++ },
++
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
diff --git a/queue-3.10/libata-fix-sff-host-state-machine-locking-while-polling.patch b/queue-3.10/libata-fix-sff-host-state-machine-locking-while-polling.patch
new file mode 100644
index 00000000000..523a9c31165
--- /dev/null
+++ b/queue-3.10/libata-fix-sff-host-state-machine-locking-while-polling.patch
@@ -0,0 +1,183 @@
+From 8eee1d3ed5b6fc8e14389567c9a6f53f82bb7224 Mon Sep 17 00:00:00 2001
+From: Tejun Heo
+Date: Mon, 1 Feb 2016 11:33:21 -0500
+Subject: libata: fix sff host state machine locking while polling
+
+From: Tejun Heo
+
+commit 8eee1d3ed5b6fc8e14389567c9a6f53f82bb7224 upstream.
+
+The bulk of ATA host state machine is implemented by
+ata_sff_hsm_move(). The function is called from either the interrupt
+handler or, if polling, a work item. Unlike from the interrupt path,
+the polling path calls the function without holding the host lock and
+ata_sff_hsm_move() selectively grabs the lock.
+
+This is completely broken. If an IRQ triggers while polling is in
+progress, the two can easily race and end up accessing the hardware
+and updating state machine state at the same time. This can put the
+state machine in an illegal state and lead to a crash like the
+following.
+
+ kernel BUG at drivers/ata/libata-sff.c:1302!
+ invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN
+ Modules linked in:
+ CPU: 1 PID: 10679 Comm: syz-executor Not tainted 4.5.0-rc1+ #300
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ task: ffff88002bd00000 ti: ffff88002e048000 task.ti: ffff88002e048000
+ RIP: 0010:[] [] ata_sff_hsm_move+0x619/0x1c60
+ ...
+ Call Trace:
+
+ [] __ata_sff_port_intr+0x1e1/0x3a0 drivers/ata/libata-sff.c:1584
+ [] ata_bmdma_port_intr+0x71/0x400 drivers/ata/libata-sff.c:2877
+ [< inline >] __ata_sff_interrupt drivers/ata/libata-sff.c:1629
+ [] ata_bmdma_interrupt+0x253/0x580 drivers/ata/libata-sff.c:2902
+ [] handle_irq_event_percpu+0x108/0x7e0 kernel/irq/handle.c:157
+ [] handle_irq_event+0xa7/0x140 kernel/irq/handle.c:205
+ [] handle_edge_irq+0x1e3/0x8d0 kernel/irq/chip.c:623
+ [< inline >] generic_handle_irq_desc include/linux/irqdesc.h:146
+ [] handle_irq+0x10c/0x2a0 arch/x86/kernel/irq_64.c:78
+ [] do_IRQ+0x7d/0x1a0 arch/x86/kernel/irq.c:240
+ [] common_interrupt+0x8c/0x8c arch/x86/entry/entry_64.S:520
+
+ [< inline >] rcu_lock_acquire include/linux/rcupdate.h:490
+ [< inline >] rcu_read_lock include/linux/rcupdate.h:874
+ [] filemap_map_pages+0x131/0xba0 mm/filemap.c:2145
+ [< inline >] do_fault_around mm/memory.c:2943
+ [< inline >] do_read_fault mm/memory.c:2962
+ [< inline >] do_fault mm/memory.c:3133
+ [< inline >] handle_pte_fault mm/memory.c:3308
+ [< inline >] __handle_mm_fault mm/memory.c:3418
+ [] handle_mm_fault+0x2516/0x49a0 mm/memory.c:3447
+ [] __do_page_fault+0x376/0x960 arch/x86/mm/fault.c:1238
+ [] trace_do_page_fault+0xe8/0x420 arch/x86/mm/fault.c:1331
+ [] do_async_page_fault+0x14/0xd0 arch/x86/kernel/kvm.c:264
+ [] async_page_fault+0x28/0x30 arch/x86/entry/entry_64.S:986
+
+Fix it by ensuring that the polling path is holding the host lock
+before entering ata_sff_hsm_move() so that all hardware accesses and
+state updates are performed under the host lock.
+
+Signed-off-by: Tejun Heo
+Reported-and-tested-by: Dmitry Vyukov
+Link: http://lkml.kernel.org/g/CACT4Y+b_JsOxJu2EZyEf+mOXORc_zid5V1-pLZSroJVxyWdSpw@mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/ata/libata-sff.c | 32 +++++++++++---------------------
+ 1 file changed, 11 insertions(+), 21 deletions(-)
+
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struc
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ struct ata_port *ap = qc->ap;
+- unsigned long flags;
+
+ if (ap->ops->error_handler) {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+-
+ /* EH might have kicked in while host lock is
+ * released.
+ */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct a
+ } else
+ ata_port_freeze(ap);
+ }
+-
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct a
+ }
+ } else {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+ ata_sff_irq_on(ap);
+ ata_qc_complete(qc);
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else
+ ata_qc_complete(qc);
+ }
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap
+ {
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
+- unsigned long flags = 0;
+ int poll_next;
+
++ lockdep_assert_held(ap->lock);
++
+ WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+ /* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ }
+ }
+
+- /* Send the CDB (atapi) or the first data block (ata pio out).
+- * During the state transition, interrupt handler shouldn't
+- * be invoked before the data transfer is complete and
+- * hsm_task_state is changed. Hence, the following locking.
+- */
+- if (in_wq)
+- spin_lock_irqsave(ap->lock, flags);
+-
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+- if (in_wq)
+- spin_unlock_irqrestore(ap->lock, flags);
+-
+ /* if polling, ata_sff_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work
+ u8 status;
+ int poll_next;
+
++ spin_lock_irq(ap->lock);
++
+ BUG_ON(ap->sff_pio_task_link == NULL);
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
+- return;
++ goto out_unlock;
+ }
+
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ */
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
++ spin_unlock_irq(ap->lock);
+ ata_msleep(ap, 2);
++ spin_lock_irq(ap->lock);
++
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+- return;
++ goto out_unlock;
+ }
+ }
+
+@@ -1402,6 +1390,8 @@ fsm_start:
+ */
+ if (poll_next)
+ goto fsm_start;
++out_unlock:
++ spin_unlock_irq(ap->lock);
+ }
+
+ /**
diff --git a/queue-3.10/revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch b/queue-3.10/revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch
new file mode 100644
index 00000000000..4dab2a6491f
--- /dev/null
+++ b/queue-3.10/revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch
@@ -0,0 +1,75 @@
+From 041bd12e272c53a35c54c13875839bcb98c999ce Mon Sep 17 00:00:00 2001
+From: Tejun Heo
+Date: Tue, 9 Feb 2016 16:11:26 -0500
+Subject: Revert "workqueue: make sure delayed work run in local cpu"
+
+From: Tejun Heo
+
+commit 041bd12e272c53a35c54c13875839bcb98c999ce upstream.
+
+This reverts commit 874bbfe600a660cba9c776b3957b1ce393151b76.
+
+Workqueue used to implicitly guarantee that work items queued without
+explicit CPU specified are put on the local CPU. Recent changes in
+timer broke the guarantee and led to vmstat breakage which was fixed
+by 176bed1de5bf ("vmstat: explicitly schedule per-cpu work on the CPU
+we need it to run on").
+
+vmstat is the most likely to expose the issue and it's quite possible
+that there are other similar problems which are a lot more difficult
+to trigger. As a preventive measure, 874bbfe600a6 ("workqueue: make
+sure delayed work run in local cpu") was applied to restore the local
+CPU guarantee. Unfortunately, the change exposed a bug in timer code
+which got fixed by 22b886dd1018 ("timers: Use proper base migration in
+add_timer_on()"). Due to code restructuring, the commit couldn't be
+backported beyond certain point and stable kernels which only had
+874bbfe600a6 started crashing.
+
+The local CPU guarantee was accidental more than anything else and we
+want to get rid of it anyway. As, with the vmstat case fixed,
+874bbfe600a6 is causing more problems than it's fixing, it has been
+decided to take the chance and officially break the guarantee by
+reverting the commit. A debug feature will be added to force foreign
+CPU assignment to expose cases relying on the guarantee and fixes for
+the individual cases will be backported to stable as necessary.
+
+Signed-off-by: Tejun Heo
+Fixes: 874bbfe600a6 ("workqueue: make sure delayed work run in local cpu")
+Link: http://lkml.kernel.org/g/20160120211926.GJ10810@quack.suse.cz
+Cc: Mike Galbraith
+Cc: Henrique de Moraes Holschuh
+Cc: Daniel Bilik
+Cc: Jan Kara
+Cc: Shaohua Li
+Cc: Sasha Levin
+Cc: Ben Hutchings
+Cc: Thomas Gleixner
+Cc: Daniel Bilik
+Cc: Jiri Slaby
+Cc: Michal Hocko
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/workqueue.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1450,13 +1450,13 @@ static void __queue_delayed_work(int cpu
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+ dwork->wq = wq;
+- /* timer isn't guaranteed to run in this cpu, record earlier */
+- if (cpu == WORK_CPU_UNBOUND)
+- cpu = raw_smp_processor_id();
+ dwork->cpu = cpu;
+ timer->expires = jiffies + delay;
+
+- add_timer_on(timer, cpu);
++ if (unlikely(cpu != WORK_CPU_UNBOUND))
++ add_timer_on(timer, cpu);
++ else
++ add_timer(timer);
+ }
+
+ /**
diff --git a/queue-3.10/rfkill-fix-rfkill_fop_read-wait_event-usage.patch b/queue-3.10/rfkill-fix-rfkill_fop_read-wait_event-usage.patch
new file mode 100644
index 00000000000..5b1a1e753f8
--- /dev/null
+++ b/queue-3.10/rfkill-fix-rfkill_fop_read-wait_event-usage.patch
@@ -0,0 +1,63 @@
+From 6736fde9672ff6717ac576e9bba2fd5f3dfec822 Mon Sep 17 00:00:00 2001
+From: Johannes Berg
+Date: Tue, 26 Jan 2016 11:29:03 +0100
+Subject: rfkill: fix rfkill_fop_read wait_event usage
+
+From: Johannes Berg
+
+commit 6736fde9672ff6717ac576e9bba2fd5f3dfec822 upstream.
+
+The code within wait_event_interruptible() is called with
+!TASK_RUNNING, so mustn't call any functions that can sleep,
+like mutex_lock().
+
+Since we re-check the list_empty() in a loop after the wait,
+it's safe to simply use list_empty() without locking.
+
+This bug has existed forever, but was only discovered now
+because all userspace implementations, including the default
+'rfkill' tool, use poll() or select() to get a readable fd
+before attempting to read.
+ +Fixes: c64fb01627e24 ("rfkill: create useful userspace interface") +Reported-by: Dmitry Vyukov +Signed-off-by: Johannes Berg +Signed-off-by: Greg Kroah-Hartman + +--- + net/rfkill/core.c | 16 ++++------------ + 1 file changed, 4 insertions(+), 12 deletions(-) + +--- a/net/rfkill/core.c ++++ b/net/rfkill/core.c +@@ -1088,17 +1088,6 @@ static unsigned int rfkill_fop_poll(stru + return res; + } + +-static bool rfkill_readable(struct rfkill_data *data) +-{ +- bool r; +- +- mutex_lock(&data->mtx); +- r = !list_empty(&data->events); +- mutex_unlock(&data->mtx); +- +- return r; +-} +- + static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) + { +@@ -1115,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct fi + goto out; + } + mutex_unlock(&data->mtx); ++ /* since we re-check and it just compares pointers, ++ * using !list_empty() without locking isn't a problem ++ */ + ret = wait_event_interruptible(data->read_wait, +- rfkill_readable(data)); ++ !list_empty(&data->events)); + mutex_lock(&data->mtx); + + if (ret) diff --git a/queue-3.10/series b/queue-3.10/series index f86805dd7e7..4684ef4bb1e 100644 --- a/queue-3.10/series +++ b/queue-3.10/series @@ -66,3 +66,7 @@ drm-radeon-hold-reference-to-fences-in-radeon_sa_bo_new.patch drm-radeon-use-post-decrement-in-error-handling.patch ib-qib-fix-mcast-detach-when-qp-not-attached.patch libceph-don-t-bail-early-from-try_read-when-skipping-a-message.patch +cdc-acm-exclude-samsung-phone-04e8-685d.patch +rfkill-fix-rfkill_fop_read-wait_event-usage.patch +revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch +libata-fix-sff-host-state-machine-locking-while-polling.patch -- 2.47.3