--- /dev/null
+From d908f8478a8d18e66c80a12adb27764920c1f1ca Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.de>
+Date: Thu, 20 Nov 2014 14:54:35 +0100
+Subject: cdc-acm: memory leak in error case
+
+From: Oliver Neukum <oneukum@suse.de>
+
+commit d908f8478a8d18e66c80a12adb27764920c1f1ca upstream.
+
+If probe() fails, not only do the attributes need to be removed
+but the memory must also be freed.
+
+Reported-by: Ahmed Tamrawi <ahmedtamrawi@gmail.com>
+Signed-off-by: Oliver Neukum <oneukum@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
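+
+[Editor's sketch, not part of the upstream commit: the shape of the fixed
+unwind path, condensed from the hunk below.  The country-code buffer
+allocated earlier in probe() must be freed alongside the removal of the
+sysfs attributes it backs, otherwise it leaks on every failed probe.]
+
+	if (acm->country_codes) {
+		device_remove_file(&acm->control->dev,
+				&dev_attr_wCountryCodes);
+		device_remove_file(&acm->control->dev,
+				&dev_attr_iCountryCodeRelDate);
+		kfree(acm->country_codes);	/* the previously missing free */
+	}
+	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+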
+ drivers/usb/class/cdc-acm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1449,6 +1449,7 @@ alloc_fail8:
+ &dev_attr_wCountryCodes);
+ device_remove_file(&acm->control->dev,
+ &dev_attr_iCountryCodeRelDate);
++ kfree(acm->country_codes);
+ }
+ device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+ alloc_fail7:
--- /dev/null
+From 04a258c162a85c0f4ae56be67634dc43c9a4fa9b Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Tue, 4 Nov 2014 13:40:11 +0100
+Subject: Drivers: hv: vmbus: Fix a race condition when unregistering a device
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 04a258c162a85c0f4ae56be67634dc43c9a4fa9b upstream.
+
+When built with debugging enabled, the following crash is sometimes observed:
+Call Trace:
+ [<ffffffff812b9600>] string+0x40/0x100
+ [<ffffffff812bb038>] vsnprintf+0x218/0x5e0
+ [<ffffffff810baf7d>] ? trace_hardirqs_off+0xd/0x10
+ [<ffffffff812bb4c1>] vscnprintf+0x11/0x30
+ [<ffffffff8107a2f0>] vprintk+0xd0/0x5c0
+ [<ffffffffa0051ea0>] ? vmbus_process_rescind_offer+0x0/0x110 [hv_vmbus]
+ [<ffffffff8155c71c>] printk+0x41/0x45
+ [<ffffffffa004ebac>] vmbus_device_unregister+0x2c/0x40 [hv_vmbus]
+ [<ffffffffa0051ecb>] vmbus_process_rescind_offer+0x2b/0x110 [hv_vmbus]
+...
+
+This happens due to the following race: the device can disappear between the
+'if (channel->device_obj)' check in vmbus_process_rescind_offer() and the
+pr_debug() in vmbus_device_unregister(). Fix the issue by taking an additional
+reference to the device before proceeding to vmbus_device_unregister().
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
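+
+[Editor's sketch, not part of the upstream commit: the reference-counting
+pattern the fix introduces, condensed from the hunk below.  get_device()
+pins the struct device across vmbus_device_unregister(), which still
+dereferences it (e.g. for pr_debug()); put_device() then drops the
+temporary reference.]
+
+	struct device *dev;
+
+	if (channel->device_obj) {
+		/* Pin the device so it cannot be freed between this check
+		 * and the code inside vmbus_device_unregister(). */
+		dev = get_device(&channel->device_obj->device);
+		if (dev) {
+			vmbus_device_unregister(channel->device_obj);
+			put_device(dev);	/* drop the temporary reference */
+		}
+	}
+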
+ drivers/hv/channel_mgmt.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -202,9 +202,16 @@ static void vmbus_process_rescind_offer(
+ unsigned long flags;
+ struct vmbus_channel *primary_channel;
+ struct vmbus_channel_relid_released msg;
++ struct device *dev;
++
++ if (channel->device_obj) {
++ dev = get_device(&channel->device_obj->device);
++ if (dev) {
++ vmbus_device_unregister(channel->device_obj);
++ put_device(dev);
++ }
++ }
+
+- if (channel->device_obj)
+- vmbus_device_unregister(channel->device_obj);
+ memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
--- /dev/null
+From 5fabcb4c33fe11c7e3afdf805fde26c1a54d0953 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@fb.com>
+Date: Wed, 19 Nov 2014 13:06:22 -0700
+Subject: genhd: check for int overflow in disk_expand_part_tbl()
+
+From: Jens Axboe <axboe@fb.com>
+
+commit 5fabcb4c33fe11c7e3afdf805fde26c1a54d0953 upstream.
+
+We can get here from blkdev_ioctl() -> blkpg_ioctl() -> add_partition()
+with a user-supplied partno value. If we pass in 0x7fffffff, the
+new target in disk_expand_part_tbl() overflows the 'int' and we
+access beyond the end of ptbl->part[] and even write to it when we
+do the rcu_assign_pointer() to assign the new partition.
+
+Reported-by: David Ramos <daramos@stanford.edu>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
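+
+[Editor's sketch, not part of the upstream commit: the added overflow
+check, condensed from the hunk below.  'partno' comes from userspace via
+blkpg_ioctl(), so partno == 0x7fffffff makes 'target' wrap negative and
+would later index past the end of ptbl->part[].]
+
+	int i, target;
+	size_t size;
+
+	target = partno + 1;
+	if (target < 0)		/* partno + 1 overflowed */
+		return -EINVAL;
+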
+ block/genhd.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ struct disk_part_tbl *new_ptbl;
+ int len = old_ptbl ? old_ptbl->len : 0;
+- int target = partno + 1;
++ int i, target;
+ size_t size;
+- int i;
++
++ /*
++ * check for int overflow, since we can get here from blkpg_ioctl()
++ * with a user passed 'partno'.
++ */
++ target = partno + 1;
++ if (target < 0)
++ return -EINVAL;
+
+ /* disk_max_parts() is zero during initialization, ignore if so */
+ if (disk_max_parts(disk) && target > disk_max_parts(disk))
--- /dev/null
+From 8bfbe2de769afda051c56aba5450391670e769fc Mon Sep 17 00:00:00 2001
+From: Christian Riesch <christian.riesch@omicron.at>
+Date: Thu, 13 Nov 2014 05:53:26 +0100
+Subject: n_tty: Fix read_buf race condition, increment read_head after pushing data
+
+From: Christian Riesch <christian.riesch@omicron.at>
+
+commit 8bfbe2de769afda051c56aba5450391670e769fc upstream.
+
+Commit 19e2ad6a09f0c06dbca19c98e5f4584269d913dd ("n_tty: Remove overflow
+tests from receive_buf() path") moved the increment of read_head into
+the arguments list of read_buf_addr(). Function calls represent a
+sequence point in C. Therefore read_head is incremented before the
+character c is placed in the buffer. Because the circular read buffer
+has been a lockless design since commit 6d76bd2618535c581f1673047b8341fd291abc67
+("n_tty: Make N_TTY ldisc receive path lockless"), this creates a race
+condition that leads to communication errors.
+
+This patch modifies the code to increment read_head _after_ the data
+is placed in the buffer and thus fixes the race for non-SMP machines.
+To fix the problem for SMP machines, memory barriers must be added in
+a separate patch.
+
+Signed-off-by: Christian Riesch <christian.riesch@omicron.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
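+
+[Editor's sketch, not part of the upstream commit: the resulting helper
+with the ordering spelled out.  In this lockless single-producer ring
+buffer the data must be stored before the head index is advanced; a
+reader that observes the new head must also observe the character.  As
+the commit message notes, SMP additionally needs explicit memory
+barriers, added in a separate patch.]
+
+	static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+	{
+		*read_buf_addr(ldata, ldata->read_head) = c;	/* store data   */
+		ldata->read_head++;				/* then publish */
+	}
+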
+ drivers/tty/n_tty.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -321,7 +321,8 @@ static void n_tty_check_unthrottle(struc
+
+ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+ {
+- *read_buf_addr(ldata, ldata->read_head++) = c;
++ *read_buf_addr(ldata, ldata->read_head) = c;
++ ldata->read_head++;
+ }
+
+ /**
--- /dev/null
+From 1ff383a4c3eda8893ec61b02831826e1b1f46b41 Mon Sep 17 00:00:00 2001
+From: Robert Baldyga <r.baldyga@samsung.com>
+Date: Mon, 24 Nov 2014 07:56:21 +0100
+Subject: serial: samsung: wait for transfer completion before clock disable
+
+From: Robert Baldyga <r.baldyga@samsung.com>
+
+commit 1ff383a4c3eda8893ec61b02831826e1b1f46b41 upstream.
+
+This patch adds a wait for the transmit buffer and shifter to become
+empty before the clock is disabled.
+
+Without this fix it is possible for the clock to be disabled while data
+has not yet been transmitted, which leaves the TX line in an improper
+state and causes problems in subsequent data transfers.
+
+Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
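+
+[Editor's sketch, not part of the upstream commit: the wait added to the
+power-off path of s3c24xx_serial_pm(), condensed from the hunk below.
+'timeout' is initialised to 10000 at the top of the function, so the
+bounded poll gives up after roughly one second (10000 * 100us) instead
+of hanging the PM transition on a wedged port.]
+
+	while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+		udelay(100);
+
+	if (!IS_ERR(ourport->baudclk))
+		clk_disable_unprepare(ourport->baudclk);
+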
+ drivers/tty/serial/samsung.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -544,11 +544,15 @@ static void s3c24xx_serial_pm(struct uar
+ unsigned int old)
+ {
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
++ int timeout = 10000;
+
+ ourport->pm_level = level;
+
+ switch (level) {
+ case 3:
++ while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
++ udelay(100);
++
+ if (!IS_ERR(ourport->baudclk))
+ clk_disable_unprepare(ourport->baudclk);
+
alsa-hda-fix-wrong-gpio_dir-gpio_mask-hint-setups-for-idt-stac-codecs.patch
usb-cdc-acm-check-for-valid-interfaces.patch
add-usb_ehci_exynos-to-multi_v7_defconfig.patch
+genhd-check-for-int-overflow-in-disk_expand_part_tbl.patch
+cdc-acm-memory-leak-in-error-case.patch
+writeback-fix-a-subtle-race-condition-in-i_dirty-clearing.patch
+tracing-sched-check-preempt_count-for-current-when-reading-task-state.patch
+serial-samsung-wait-for-transfer-completion-before-clock-disable.patch
+n_tty-fix-read_buf-race-condition-increment-read_head-after-pushing-data.patch
+drivers-hv-vmbus-fix-a-race-condition-when-unregistering-a-device.patch
--- /dev/null
+From aee4e5f3d3abb7a2239dd02f6d8fb173413fd02f Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 10 Dec 2014 17:31:07 -0500
+Subject: tracing/sched: Check preempt_count() for current when reading task->state
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit aee4e5f3d3abb7a2239dd02f6d8fb173413fd02f upstream.
+
+When recording the state of a task for the sched_switch tracepoint a check of
+task_preempt_count() is performed to see if PREEMPT_ACTIVE is set. This is
+because, technically, a task being preempted is really in the TASK_RUNNING
+state, and that is what should be recorded when tracing a sched_switch,
+even if the task put itself into another state (it hasn't scheduled out
+in that state yet).
+
+But with the change to use per_cpu preempt counts, the
+task_thread_info(p)->preempt_count is no longer used, and instead
+task_preempt_count(p) is used.
+
+The problem is that this does not use the current preempt count but a stale
+one from a previous sched_switch. The task_preempt_count(p) uses
+saved_preempt_count and not preempt_count(). But for tracing sched_switch,
+if p is current, we really want preempt_count().
+
+I hit this bug when I was tracing sleep and the call from do_nanosleep()
+scheduled out in the "RUNNING" state.
+
+ sleep-4290 [000] 537272.259992: sched_switch: sleep:4290 [120] R ==> swapper/0:0 [120]
+ sleep-4290 [000] 537272.260015: kernel_stack: <stack trace>
+=> __schedule (ffffffff8150864a)
+=> schedule (ffffffff815089f8)
+=> do_nanosleep (ffffffff8150b76c)
+=> hrtimer_nanosleep (ffffffff8108d66b)
+=> SyS_nanosleep (ffffffff8108d750)
+=> return_to_handler (ffffffff8150e8e5)
+=> tracesys_phase2 (ffffffff8150c844)
+
+After a bit of hair pulling, I found that the state was really
+TASK_INTERRUPTIBLE, but the saved_preempt_count had an old PREEMPT_ACTIVE
+set and caused the sched_switch tracepoint to show it as RUNNING.
+
+Link: http://lkml.kernel.org/r/20141210174428.3cb7542a@gandalf.local.home
+
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Fixes: 01028747559a "sched: Create more preempt_count accessors"
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
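+
+[Editor's sketch, not part of the upstream commit: the patched test with
+the reasoning inlined.  At this point p is current, so the live per-CPU
+preempt_count() is the right value to test; task_preempt_count(p) reads
+saved_preempt_count, which may still carry a stale PREEMPT_ACTIVE bit
+from an earlier context switch and misreport the task as running.]
+
+	/* For all intents and purposes a preempted task is a running task. */
+	if (preempt_count() & PREEMPT_ACTIVE)
+		state = TASK_RUNNING | TASK_STATE_MAX;
+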
+ include/trace/events/sched.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_
+ /*
+ * For all intents and purposes a preempted task is a running task.
+ */
+- if (task_preempt_count(p) & PREEMPT_ACTIVE)
++ if (preempt_count() & PREEMPT_ACTIVE)
+ state = TASK_RUNNING | TASK_STATE_MAX;
+ #endif
+
--- /dev/null
+From 9c6ac78eb3521c5937b2dd8a7d1b300f41092f45 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 24 Oct 2014 15:38:21 -0400
+Subject: writeback: fix a subtle race condition in I_DIRTY clearing
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 9c6ac78eb3521c5937b2dd8a7d1b300f41092f45 upstream.
+
+After invoking ->dirty_inode(), __mark_inode_dirty() does smp_mb() and
+tests inode->i_state locklessly to see whether it already has all the
+necessary I_DIRTY bits set. The comment above the barrier doesn't
+contain any useful information - memory barriers can't ensure "changes
+are seen by all cpus" by themselves.
+
+And sure enough, it was broken. Consider the following
+scenario.
+
+ CPU 0 CPU 1
+ -------------------------------------------------------------------------------
+
+ enters __writeback_single_inode()
+ grabs inode->i_lock
+ tests PAGECACHE_TAG_DIRTY which is clear
+ enters __set_page_dirty()
+ grabs mapping->tree_lock
+ sets PAGECACHE_TAG_DIRTY
+ releases mapping->tree_lock
+ leaves __set_page_dirty()
+
+ enters __mark_inode_dirty()
+ smp_mb()
+ sees I_DIRTY_PAGES set
+ leaves __mark_inode_dirty()
+ clears I_DIRTY_PAGES
+ releases inode->i_lock
+
+Now @inode has dirty pages w/ I_DIRTY_PAGES clear. This doesn't seem
+to lead to an immediately critical problem because requeue_inode()
+later checks PAGECACHE_TAG_DIRTY instead of I_DIRTY_PAGES when
+deciding whether the inode needs to be requeued for IO and there are
+enough unintentional memory barriers in between, so while the inode
+ends up with an inconsistent I_DIRTY_PAGES flag, it doesn't fall off the
+IO list.
+
+The lack of explicit barrier may also theoretically affect the other
+I_DIRTY bits which deal with metadata dirtiness. There is no
+guarantee that a strong enough barrier exists between
+I_DIRTY_[DATA]SYNC clearing and write_inode() writing out the dirtied
+inode. The filesystem inode writeout path likely has enough operations
+that behave as a full barrier, but it's theoretically possible that the
+writeout may not see all the updates from ->dirty_inode().
+
+Fix it by adding an explicit smp_mb() after I_DIRTY clearing. Note
+that I_DIRTY_PAGES needs a special treatment as it always needs to be
+cleared to be interlocked with the lockless test on
+__mark_inode_dirty() side. It's cleared unconditionally and
+reinstated after smp_mb() if the mapping still has dirty pages.
+
+Also add comments explaining how and why the barriers are paired.
+
+Lightly tested.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Mikulas Patocka <mpatocka@redhat.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
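+
+[Editor's sketch, not part of the upstream commit: the two halves of the
+barrier pairing, condensed from the hunks below.  Either
+__mark_inode_dirty() sees the I_DIRTY bits already cleared and re-dirties
+the inode, or __writeback_single_inode() sees PAGECACHE_TAG_DIRTY and
+reinstates I_DIRTY_PAGES; without the new smp_mb() on the writeback side
+neither outcome is guaranteed.]
+
+	/* Writeback side, __writeback_single_inode(), condensed: */
+	spin_lock(&inode->i_lock);
+	dirty = inode->i_state & I_DIRTY;
+	inode->i_state &= ~I_DIRTY;		/* clear all I_DIRTY bits */
+	smp_mb();				/* pairs with the dirtier below */
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		inode->i_state |= I_DIRTY_PAGES; /* pages dirtied meanwhile */
+	spin_unlock(&inode->i_lock);
+
+	/* Dirtying side, __mark_inode_dirty(), condensed; the dirtying
+	 * itself is already visible when we get here. */
+	smp_mb();				/* pairs with the smp_mb() above */
+	if ((inode->i_state & flags) == flags)
+		return;				/* lockless fast path */
+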
+ fs/fs-writeback.c | 29 ++++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -476,12 +476,28 @@ __writeback_single_inode(struct inode *i
+ * write_inode()
+ */
+ spin_lock(&inode->i_lock);
+- /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+- if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+- inode->i_state &= ~I_DIRTY_PAGES;
++
+ dirty = inode->i_state & I_DIRTY;
+- inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
++ inode->i_state &= ~I_DIRTY;
++
++ /*
++ * Paired with smp_mb() in __mark_inode_dirty(). This allows
++ * __mark_inode_dirty() to test i_state without grabbing i_lock -
++ * either they see the I_DIRTY bits cleared or we see the dirtied
++ * inode.
++ *
++ * I_DIRTY_PAGES is always cleared together above even if @mapping
++ * still has dirty pages. The flag is reinstated after smp_mb() if
++ * necessary. This guarantees that either __mark_inode_dirty()
++ * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
++ */
++ smp_mb();
++
++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
++ inode->i_state |= I_DIRTY_PAGES;
++
+ spin_unlock(&inode->i_lock);
++
+ /* Don't write the inode if only I_DIRTY_PAGES was set */
+ if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+ int err = write_inode(inode, wbc);
+@@ -1145,12 +1161,11 @@ void __mark_inode_dirty(struct inode *in
+ }
+
+ /*
+- * make sure that changes are seen by all cpus before we test i_state
+- * -- mikulas
++ * Paired with smp_mb() in __writeback_single_inode() for the
++ * following lockless i_state test. See there for details.
+ */
+ smp_mb();
+
+- /* avoid the locking if we can */
+ if ((inode->i_state & flags) == flags)
+ return;
+