--- /dev/null
+From e912e685f372ab62a2405a1acd923597f524e94a Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.com>
+Date: Mon, 18 Jan 2016 15:45:18 +0100
+Subject: cdc-acm:exclude Samsung phone 04e8:685d
+
+From: Oliver Neukum <oneukum@suse.com>
+
+commit e912e685f372ab62a2405a1acd923597f524e94a upstream.
+
+This phone needs to be handled by a specialised firmware tool
+and is reported to crash irrevocably if cdc-acm takes it.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/class/cdc-acm.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1810,6 +1810,11 @@ static const struct usb_device_id acm_id
+ },
+ #endif
+
++ /*Samsung phone in firmware update mode */
++ { USB_DEVICE(0x04e8, 0x685d),
++ .driver_info = IGNORE_DEVICE,
++ },
++
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
--- /dev/null
+From 8eee1d3ed5b6fc8e14389567c9a6f53f82bb7224 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 1 Feb 2016 11:33:21 -0500
+Subject: libata: fix sff host state machine locking while polling
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 8eee1d3ed5b6fc8e14389567c9a6f53f82bb7224 upstream.
+
+The bulk of ATA host state machine is implemented by
+ata_sff_hsm_move(). The function is called from either the interrupt
+handler or, if polling, a work item. Unlike from the interrupt path,
+the polling path calls the function without holding the host lock and
+ata_sff_hsm_move() selectively grabs the lock.
+
+This is completely broken. If an IRQ triggers while polling is in
+progress, the two can easily race and end up accessing the hardware
+and updating state machine state at the same time. This can put the
+state machine in an illegal state and lead to a crash like the
+following.
+
+ kernel BUG at drivers/ata/libata-sff.c:1302!
+ invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN
+ Modules linked in:
+ CPU: 1 PID: 10679 Comm: syz-executor Not tainted 4.5.0-rc1+ #300
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ task: ffff88002bd00000 ti: ffff88002e048000 task.ti: ffff88002e048000
+ RIP: 0010:[<ffffffff83a83409>] [<ffffffff83a83409>] ata_sff_hsm_move+0x619/0x1c60
+ ...
+ Call Trace:
+ <IRQ>
+ [<ffffffff83a84c31>] __ata_sff_port_intr+0x1e1/0x3a0 drivers/ata/libata-sff.c:1584
+ [<ffffffff83a85611>] ata_bmdma_port_intr+0x71/0x400 drivers/ata/libata-sff.c:2877
+ [< inline >] __ata_sff_interrupt drivers/ata/libata-sff.c:1629
+ [<ffffffff83a85bf3>] ata_bmdma_interrupt+0x253/0x580 drivers/ata/libata-sff.c:2902
+ [<ffffffff81479f98>] handle_irq_event_percpu+0x108/0x7e0 kernel/irq/handle.c:157
+ [<ffffffff8147a717>] handle_irq_event+0xa7/0x140 kernel/irq/handle.c:205
+ [<ffffffff81484573>] handle_edge_irq+0x1e3/0x8d0 kernel/irq/chip.c:623
+ [< inline >] generic_handle_irq_desc include/linux/irqdesc.h:146
+ [<ffffffff811a92bc>] handle_irq+0x10c/0x2a0 arch/x86/kernel/irq_64.c:78
+ [<ffffffff811a7e4d>] do_IRQ+0x7d/0x1a0 arch/x86/kernel/irq.c:240
+ [<ffffffff86653d4c>] common_interrupt+0x8c/0x8c arch/x86/entry/entry_64.S:520
+ <EOI>
+ [< inline >] rcu_lock_acquire include/linux/rcupdate.h:490
+ [< inline >] rcu_read_lock include/linux/rcupdate.h:874
+ [<ffffffff8164b4a1>] filemap_map_pages+0x131/0xba0 mm/filemap.c:2145
+ [< inline >] do_fault_around mm/memory.c:2943
+ [< inline >] do_read_fault mm/memory.c:2962
+ [< inline >] do_fault mm/memory.c:3133
+ [< inline >] handle_pte_fault mm/memory.c:3308
+ [< inline >] __handle_mm_fault mm/memory.c:3418
+ [<ffffffff816efb16>] handle_mm_fault+0x2516/0x49a0 mm/memory.c:3447
+ [<ffffffff8127dc16>] __do_page_fault+0x376/0x960 arch/x86/mm/fault.c:1238
+ [<ffffffff8127e358>] trace_do_page_fault+0xe8/0x420 arch/x86/mm/fault.c:1331
+ [<ffffffff8126f514>] do_async_page_fault+0x14/0xd0 arch/x86/kernel/kvm.c:264
+ [<ffffffff86655578>] async_page_fault+0x28/0x30 arch/x86/entry/entry_64.S:986
+
+Fix it by ensuring that the polling path is holding the host lock
+before entering ata_sff_hsm_move() so that all hardware accesses and
+state updates are performed under the host lock.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Dmitry Vyukov <dvyukov@google.com>
+Link: http://lkml.kernel.org/g/CACT4Y+b_JsOxJu2EZyEf+mOXORc_zid5V1-pLZSroJVxyWdSpw@mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-sff.c | 32 +++++++++++---------------------
+ 1 file changed, 11 insertions(+), 21 deletions(-)
+
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struc
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ struct ata_port *ap = qc->ap;
+- unsigned long flags;
+
+ if (ap->ops->error_handler) {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+-
+ /* EH might have kicked in while host lock is
+ * released.
+ */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct a
+ } else
+ ata_port_freeze(ap);
+ }
+-
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct a
+ }
+ } else {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+ ata_sff_irq_on(ap);
+ ata_qc_complete(qc);
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else
+ ata_qc_complete(qc);
+ }
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap
+ {
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
+- unsigned long flags = 0;
+ int poll_next;
+
++ lockdep_assert_held(ap->lock);
++
+ WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+ /* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ }
+ }
+
+- /* Send the CDB (atapi) or the first data block (ata pio out).
+- * During the state transition, interrupt handler shouldn't
+- * be invoked before the data transfer is complete and
+- * hsm_task_state is changed. Hence, the following locking.
+- */
+- if (in_wq)
+- spin_lock_irqsave(ap->lock, flags);
+-
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+- if (in_wq)
+- spin_unlock_irqrestore(ap->lock, flags);
+-
+ /* if polling, ata_sff_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work
+ u8 status;
+ int poll_next;
+
++ spin_lock_irq(ap->lock);
++
+ BUG_ON(ap->sff_pio_task_link == NULL);
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
+- return;
++ goto out_unlock;
+ }
+
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ */
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
++ spin_unlock_irq(ap->lock);
+ ata_msleep(ap, 2);
++ spin_lock_irq(ap->lock);
++
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+- return;
++ goto out_unlock;
+ }
+ }
+
+@@ -1402,6 +1390,8 @@ fsm_start:
+ */
+ if (poll_next)
+ goto fsm_start;
++out_unlock:
++ spin_unlock_irq(ap->lock);
+ }
+
+ /**
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
-@@ -2304,7 +2304,7 @@ static int read_partial_message(struct c
+@@ -2279,7 +2279,7 @@ static int read_partial_message(struct c
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
-@@ -2337,7 +2337,7 @@ static int read_partial_message(struct c
+@@ -2312,7 +2312,7 @@ static int read_partial_message(struct c
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
+++ /dev/null
-From 67645d7619738e51c668ca69f097cb90b5470422 Mon Sep 17 00:00:00 2001
-From: Ilya Dryomov <idryomov@gmail.com>
-Date: Mon, 28 Dec 2015 13:18:34 +0300
-Subject: libceph: fix ceph_msg_revoke()
-
-From: Ilya Dryomov <idryomov@gmail.com>
-
-commit 67645d7619738e51c668ca69f097cb90b5470422 upstream.
-
-There are a number of problems with revoking a "was sending" message:
-
-(1) We never make any attempt to revoke data - only kvecs contibute to
-con->out_skip. However, once the header (envelope) is written to the
-socket, our peer learns data_len and sets itself to expect at least
-data_len bytes to follow front or front+middle. If ceph_msg_revoke()
-is called while the messenger is sending message's data portion,
-anything we send after that call is counted by the OSD towards the now
-revoked message's data portion. The effects vary, the most common one
-is the eventual hang - higher layers get stuck waiting for the reply to
-the message that was sent out after ceph_msg_revoke() returned and
-treated by the OSD as a bunch of data bytes. This is what Matt ran
-into.
-
-(2) Flat out zeroing con->out_kvec_bytes worth of bytes to handle kvecs
-is wrong. If ceph_msg_revoke() is called before the tag is sent out or
-while the messenger is sending the header, we will get a connection
-reset, either due to a bad tag (0 is not a valid tag) or a bad header
-CRC, which kind of defeats the purpose of revoke. Currently the kernel
-client refuses to work with header CRCs disabled, but that will likely
-change in the future, making this even worse.
-
-(3) con->out_skip is not reset on connection reset, leading to one or
-more spurious connection resets if we happen to get a real one between
-con->out_skip is set in ceph_msg_revoke() and before it's cleared in
-write_partial_skip().
-
-Fixing (1) and (3) is trivial. The idea behind fixing (2) is to never
-zero the tag or the header, i.e. send out tag+header regardless of when
-ceph_msg_revoke() is called. That way the header is always correct, no
-unnecessary resets are induced and revoke stands ready for disabled
-CRCs. Since ceph_msg_revoke() rips out con->out_msg, introduce a new
-"message out temp" and copy the header into it before sending.
-
-Reported-by: Matt Conner <matt.conner@keepertech.com>
-Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
-Tested-by: Matt Conner <matt.conner@keepertech.com>
-Reviewed-by: Sage Weil <sage@redhat.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- include/linux/ceph/messenger.h | 2 -
- net/ceph/messenger.c | 78 +++++++++++++++++++++++++++++++----------
- 2 files changed, 60 insertions(+), 20 deletions(-)
-
---- a/include/linux/ceph/messenger.h
-+++ b/include/linux/ceph/messenger.h
-@@ -216,6 +216,7 @@ struct ceph_connection {
- struct ceph_entity_addr actual_peer_addr;
-
- /* message out temps */
-+ struct ceph_msg_header out_hdr;
- struct ceph_msg *out_msg; /* sending message (== tail of
- out_sent) */
- bool out_msg_done;
-@@ -225,7 +226,6 @@ struct ceph_connection {
- int out_kvec_left; /* kvec's left in out_kvec */
- int out_skip; /* skip this many bytes */
- int out_kvec_bytes; /* total bytes left */
-- bool out_kvec_is_msg; /* kvec refers to out_msg */
- int out_more; /* there is more data after the kvecs */
- __le64 out_temp_ack; /* for writing an ack */
-
---- a/net/ceph/messenger.c
-+++ b/net/ceph/messenger.c
-@@ -665,6 +665,8 @@ static void reset_connection(struct ceph
- }
- con->in_seq = 0;
- con->in_seq_acked = 0;
-+
-+ con->out_skip = 0;
- }
-
- /*
-@@ -764,6 +766,8 @@ static u32 get_global_seq(struct ceph_me
-
- static void con_out_kvec_reset(struct ceph_connection *con)
- {
-+ BUG_ON(con->out_skip);
-+
- con->out_kvec_left = 0;
- con->out_kvec_bytes = 0;
- con->out_kvec_cur = &con->out_kvec[0];
-@@ -772,9 +776,9 @@ static void con_out_kvec_reset(struct ce
- static void con_out_kvec_add(struct ceph_connection *con,
- size_t size, void *data)
- {
-- int index;
-+ int index = con->out_kvec_left;
-
-- index = con->out_kvec_left;
-+ BUG_ON(con->out_skip);
- BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
-
- con->out_kvec[index].iov_len = size;
-@@ -783,6 +787,27 @@ static void con_out_kvec_add(struct ceph
- con->out_kvec_bytes += size;
- }
-
-+/*
-+ * Chop off a kvec from the end. Return residual number of bytes for
-+ * that kvec, i.e. how many bytes would have been written if the kvec
-+ * hadn't been nuked.
-+ */
-+static int con_out_kvec_skip(struct ceph_connection *con)
-+{
-+ int off = con->out_kvec_cur - con->out_kvec;
-+ int skip = 0;
-+
-+ if (con->out_kvec_bytes > 0) {
-+ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
-+ BUG_ON(con->out_kvec_bytes < skip);
-+ BUG_ON(!con->out_kvec_left);
-+ con->out_kvec_bytes -= skip;
-+ con->out_kvec_left--;
-+ }
-+
-+ return skip;
-+}
-+
- #ifdef CONFIG_BLOCK
-
- /*
-@@ -1184,7 +1209,6 @@ static void prepare_write_message_footer
- m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
-
- dout("prepare_write_message_footer %p\n", con);
-- con->out_kvec_is_msg = true;
- con->out_kvec[v].iov_base = &m->footer;
- con->out_kvec[v].iov_len = sizeof(m->footer);
- con->out_kvec_bytes += sizeof(m->footer);
-@@ -1202,7 +1226,6 @@ static void prepare_write_message(struct
- u32 crc;
-
- con_out_kvec_reset(con);
-- con->out_kvec_is_msg = true;
- con->out_msg_done = false;
-
- /* Sneak an ack in there first? If we can get it into the same
-@@ -1242,18 +1265,19 @@ static void prepare_write_message(struct
-
- /* tag + hdr + front + middle */
- con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
-- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
-+ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
- con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
-
- if (m->middle)
- con_out_kvec_add(con, m->middle->vec.iov_len,
- m->middle->vec.iov_base);
-
-- /* fill in crc (except data pages), footer */
-+ /* fill in hdr crc and finalize hdr */
- crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
- con->out_msg->hdr.crc = cpu_to_le32(crc);
-- con->out_msg->footer.flags = 0;
-+ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
-
-+ /* fill in front and middle crc, footer */
- crc = crc32c(0, m->front.iov_base, m->front.iov_len);
- con->out_msg->footer.front_crc = cpu_to_le32(crc);
- if (m->middle) {
-@@ -1265,6 +1289,7 @@ static void prepare_write_message(struct
- dout("%s front_crc %u middle_crc %u\n", __func__,
- le32_to_cpu(con->out_msg->footer.front_crc),
- le32_to_cpu(con->out_msg->footer.middle_crc));
-+ con->out_msg->footer.flags = 0;
-
- /* is there a data payload? */
- con->out_msg->footer.data_crc = 0;
-@@ -1459,7 +1484,6 @@ static int write_partial_kvec(struct cep
- }
- }
- con->out_kvec_left = 0;
-- con->out_kvec_is_msg = false;
- ret = 1;
- out:
- dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
-@@ -1551,6 +1575,7 @@ static int write_partial_skip(struct cep
- {
- int ret;
-
-+ dout("%s %p %d left\n", __func__, con, con->out_skip);
- while (con->out_skip > 0) {
- size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
-
-@@ -2454,13 +2479,13 @@ more:
-
- more_kvec:
- /* kvec data queued? */
-- if (con->out_skip) {
-- ret = write_partial_skip(con);
-+ if (con->out_kvec_left) {
-+ ret = write_partial_kvec(con);
- if (ret <= 0)
- goto out;
- }
-- if (con->out_kvec_left) {
-- ret = write_partial_kvec(con);
-+ if (con->out_skip) {
-+ ret = write_partial_skip(con);
- if (ret <= 0)
- goto out;
- }
-@@ -2974,16 +2999,31 @@ void ceph_msg_revoke(struct ceph_msg *ms
- ceph_msg_put(msg);
- }
- if (con->out_msg == msg) {
-- dout("%s %p msg %p - was sending\n", __func__, con, msg);
-- con->out_msg = NULL;
-- if (con->out_kvec_is_msg) {
-- con->out_skip = con->out_kvec_bytes;
-- con->out_kvec_is_msg = false;
-- }
-- msg->hdr.seq = 0;
-+ BUG_ON(con->out_skip);
-+ /* footer */
-+ if (con->out_msg_done) {
-+ con->out_skip += con_out_kvec_skip(con);
-+ } else {
-+ BUG_ON(!msg->data_length);
-+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
-+ con->out_skip += sizeof(msg->footer);
-+ else
-+ con->out_skip += sizeof(msg->old_footer);
-+ }
-+ /* data, middle, front */
-+ if (msg->data_length)
-+ con->out_skip += msg->cursor.total_resid;
-+ if (msg->middle)
-+ con->out_skip += con_out_kvec_skip(con);
-+ con->out_skip += con_out_kvec_skip(con);
-
-+ dout("%s %p msg %p - was sending, will write %d skip %d\n",
-+ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
-+ msg->hdr.seq = 0;
-+ con->out_msg = NULL;
- ceph_msg_put(msg);
- }
-+
- mutex_unlock(&con->mutex);
- }
-
--- /dev/null
+From 041bd12e272c53a35c54c13875839bcb98c999ce Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 9 Feb 2016 16:11:26 -0500
+Subject: Revert "workqueue: make sure delayed work run in local cpu"
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 041bd12e272c53a35c54c13875839bcb98c999ce upstream.
+
+This reverts commit 874bbfe600a660cba9c776b3957b1ce393151b76.
+
+Workqueue used to implicitly guarantee that work items queued without
+explicit CPU specified are put on the local CPU. Recent changes in
+timer broke the guarantee and led to vmstat breakage which was fixed
+by 176bed1de5bf ("vmstat: explicitly schedule per-cpu work on the CPU
+we need it to run on").
+
+vmstat is the most likely to expose the issue and it's quite possible
+that there are other similar problems which are a lot more difficult
+to trigger. As a preventive measure, 874bbfe600a6 ("workqueue: make
+sure delayed work run in local cpu") was applied to restore the local
+CPU guarantee. Unfortunately, the change exposed a bug in timer code
+which got fixed by 22b886dd1018 ("timers: Use proper base migration in
+add_timer_on()"). Due to code restructuring, the commit couldn't be
+backported beyond certain point and stable kernels which only had
+874bbfe600a6 started crashing.
+
+The local CPU guarantee was accidental more than anything else and we
+want to get rid of it anyway. As, with the vmstat case fixed,
+874bbfe600a6 is causing more problems than it's fixing, it has been
+decided to take the chance and officially break the guarantee by
+reverting the commit. A debug feature will be added to force foreign
+CPU assignment to expose cases relying on the guarantee and fixes for
+the individual cases will be backported to stable as necessary.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: 874bbfe600a6 ("workqueue: make sure delayed work run in local cpu")
+Link: http://lkml.kernel.org/g/20160120211926.GJ10810@quack.suse.cz
+Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
+Cc: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+Cc: Daniel Bilik <daniel.bilik@neosystem.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Daniel Bilik <daniel.bilik@neosystem.cz>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Cc: Michal Hocko <mhocko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1475,13 +1475,13 @@ static void __queue_delayed_work(int cpu
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+ dwork->wq = wq;
+- /* timer isn't guaranteed to run in this cpu, record earlier */
+- if (cpu == WORK_CPU_UNBOUND)
+- cpu = raw_smp_processor_id();
+ dwork->cpu = cpu;
+ timer->expires = jiffies + delay;
+
+- add_timer_on(timer, cpu);
++ if (unlikely(cpu != WORK_CPU_UNBOUND))
++ add_timer_on(timer, cpu);
++ else
++ add_timer(timer);
+ }
+
+ /**
--- /dev/null
+From 6736fde9672ff6717ac576e9bba2fd5f3dfec822 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 26 Jan 2016 11:29:03 +0100
+Subject: rfkill: fix rfkill_fop_read wait_event usage
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 6736fde9672ff6717ac576e9bba2fd5f3dfec822 upstream.
+
+The code within wait_event_interruptible() is called with
+!TASK_RUNNING, so mustn't call any functions that can sleep,
+like mutex_lock().
+
+Since we re-check the list_empty() in a loop after the wait,
+it's safe to simply use list_empty() without locking.
+
+This bug has existed forever, but was only discovered now
+because all userspace implementations, including the default
+'rfkill' tool, use poll() or select() to get a readable fd
+before attempting to read.
+
+Fixes: c64fb01627e24 ("rfkill: create useful userspace interface")
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rfkill/core.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1078,17 +1078,6 @@ static unsigned int rfkill_fop_poll(stru
+ return res;
+ }
+
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+- bool r;
+-
+- mutex_lock(&data->mtx);
+- r = !list_empty(&data->events);
+- mutex_unlock(&data->mtx);
+-
+- return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+ {
+@@ -1105,8 +1094,11 @@ static ssize_t rfkill_fop_read(struct fi
+ goto out;
+ }
+ mutex_unlock(&data->mtx);
++ /* since we re-check and it just compares pointers,
++ * using !list_empty() without locking isn't a problem
++ */
+ ret = wait_event_interruptible(data->read_wait,
+- rfkill_readable(data));
++ !list_empty(&data->events));
+ mutex_lock(&data->mtx);
+
+ if (ret)
acpi-pci-hotplug-unlock-in-error-path-in-acpiphp_enable_slot.patch
ib-qib-fix-mcast-detach-when-qp-not-attached.patch
hwmon-ads1015-handle-negative-conversion-values-correctly.patch
-libceph-fix-ceph_msg_revoke.patch
libceph-don-t-bail-early-from-try_read-when-skipping-a-message.patch
+cdc-acm-exclude-samsung-phone-04e8-685d.patch
+rfkill-fix-rfkill_fop_read-wait_event-usage.patch
+revert-workqueue-make-sure-delayed-work-run-in-local-cpu.patch
+libata-fix-sff-host-state-machine-locking-while-polling.patch