--- /dev/null
+From b6328a07bd6b3d31b64f85864fe74f3b08c010ca Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 30 Jul 2014 00:23:09 +0200
+Subject: ACPI / PNP: Fix acpi_pnp_match()
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit b6328a07bd6b3d31b64f85864fe74f3b08c010ca upstream.
+
+The acpi_pnp_match() function is used for finding the ACPI device
+object that should be associated with the given PNP device.
+Unfortunately, the check used by that function is not strict enough
+and may cause success to be returned for a wrong ACPI device object.
+
+To fix that, use the observation that the pointer to the ACPI
+device object in question is already stored in the data field
+in struct pnp_dev, so acpi_pnp_match() can simply use that
+field to do its job.
+
+This problem was uncovered in 3.14 by commit 202317a573b2 (ACPI / scan:
+Add acpi_device objects for all device nodes in the namespace).
+
+Fixes: 202317a573b2 (ACPI / scan: Add acpi_device objects for all device nodes in the namespace)
+Reported-and-tested-by: Vinson Lee <vlee@twopensource.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pnp/pnpacpi/core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/pnp/pnpacpi/core.c
++++ b/drivers/pnp/pnpacpi/core.c
+@@ -339,8 +339,7 @@ static int __init acpi_pnp_match(struct
+ struct pnp_dev *pnp = _pnp;
+
+ /* true means it matched */
+- return !acpi->physical_node_count
+- && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
++ return pnp->data == acpi;
+ }
+
+ static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
--- /dev/null
+From d8c712ea471ce7a4fd1734ad2211adf8469ddddc Mon Sep 17 00:00:00 2001
+From: Greg Thelen <gthelen@google.com>
+Date: Thu, 31 Jul 2014 09:07:19 -0700
+Subject: dm bufio: fully initialize shrinker
+
+From: Greg Thelen <gthelen@google.com>
+
+commit d8c712ea471ce7a4fd1734ad2211adf8469ddddc upstream.
+
+1d3d4437eae1 ("vmscan: per-node deferred work") added a flags field to
+struct shrinker assuming that all shrinkers were zero filled. The dm
+bufio shrinker is not zero filled, which leaves arbitrary kmalloc() data
+in flags. So far the only defined flags bit is SHRINKER_NUMA_AWARE.
+But there are proposed patches which add other bits to shrinker.flags
+(e.g. memcg awareness).
+
+Rather than simply initializing the shrinker, this patch uses kzalloc()
+when allocating the dm_bufio_client to ensure that the embedded shrinker
+and any other similar structures are zeroed.
+
+This fixes theoretical over aggressive shrinking of dm bufio objects.
+If the uninitialized dm_bufio_client.shrinker.flags contains
+SHRINKER_NUMA_AWARE then shrink_slab() would call the dm shrinker for
+each numa node rather than just once. This has been broken since 3.12.
+
+Signed-off-by: Greg Thelen <gthelen@google.com>
+Acked-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-bufio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_
+ BUG_ON(block_size < 1 << SECTOR_SHIFT ||
+ (block_size & (block_size - 1)));
+
+- c = kmalloc(sizeof(*c), GFP_KERNEL);
++ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ r = -ENOMEM;
+ goto bad_client;
--- /dev/null
+From 44fa816bb778edbab6b6ddaaf24908dd6295937e Mon Sep 17 00:00:00 2001
+From: Anssi Hannula <anssi.hannula@iki.fi>
+Date: Fri, 1 Aug 2014 11:55:47 -0400
+Subject: dm cache: fix race affecting dirty block count
+
+From: Anssi Hannula <anssi.hannula@iki.fi>
+
+commit 44fa816bb778edbab6b6ddaaf24908dd6295937e upstream.
+
+nr_dirty is updated without locking, causing it to drift so that it is
+non-zero (either a small positive integer, or a very large one when an
+underflow occurs) even when there are no actual dirty blocks. This was
+due to a race between the workqueue and map function accessing nr_dirty
+in parallel without proper protection.
+
+People were seeing under runs due to a race on increment/decrement of
+nr_dirty, see: https://lkml.org/lkml/2014/6/3/648
+
+Fix this by using an atomic_t for nr_dirty.
+
+Reported-by: roma1390@gmail.com
+Signed-off-by: Anssi Hannula <anssi.hannula@iki.fi>
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -231,7 +231,7 @@ struct cache {
+ /*
+ * cache_size entries, dirty if set
+ */
+- dm_cblock_t nr_dirty;
++ atomic_t nr_dirty;
+ unsigned long *dirty_bitset;
+
+ /*
+@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache
+ static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+ {
+ if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
++ atomic_inc(&cache->nr_dirty);
+ policy_set_dirty(cache->policy, oblock);
+ }
+ }
+@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *ca
+ {
+ if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ policy_clear_dirty(cache->policy, oblock);
+- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+- if (!from_cblock(cache->nr_dirty))
++ if (atomic_dec_return(&cache->nr_dirty) == 0)
+ dm_table_event(cache->ti->table);
+ }
+ }
+@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_arg
+ atomic_set(&cache->quiescing_ack, 0);
+
+ r = -ENOMEM;
+- cache->nr_dirty = 0;
++ atomic_set(&cache->nr_dirty, 0);
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->dirty_bitset) {
+ *error = "could not allocate dirty bitset";
+@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_targe
+
+ residency = policy_residency(cache->policy);
+
+- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
++ DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
+ (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_targe
+ (unsigned) atomic_read(&cache->stats.write_miss),
+ (unsigned) atomic_read(&cache->stats.demotion),
+ (unsigned) atomic_read(&cache->stats.promotion),
+- (unsigned long long) from_cblock(cache->nr_dirty));
++ (unsigned long) atomic_read(&cache->nr_dirty));
+
+ if (writethrough_mode(&cache->features))
+ DMEMIT("1 writethrough ");
--- /dev/null
+From 381676d5e86596b11e22a62f196e192df6091373 Mon Sep 17 00:00:00 2001
+From: Peter Meerwald <pmeerw@pmeerw.net>
+Date: Wed, 16 Jul 2014 19:32:00 +0100
+Subject: iio:bma180: Fix scale factors to report correct acceleration units
+
+From: Peter Meerwald <pmeerw@pmeerw.net>
+
+commit 381676d5e86596b11e22a62f196e192df6091373 upstream.
+
+The userspace interface for acceleration sensors is documented as using
+m/s^2 units [Documentation/ABI/testing/sysfs-bus-iio]
+
+The fullscale raw values for the BMA80 corresponds to -/+ 1, 1.5, 2, etc G
+depending on the selected mode.
+
+The scale table was converting to G rather than m/s^2.
+Change the scaling table to match the documented interface.
+
+See commit 71702e6e, iio: mma8452: Use correct acceleration units,
+for a related fix.
+
+Signed-off-by: Peter Meerwald <pmeerw@pmeerw.net>
+Cc: Oleksandr Kravchenko <o.v.kravchenko@globallogic.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/bma180.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -68,13 +68,13 @@
+ /* Defaults values */
+ #define BMA180_DEF_PMODE 0
+ #define BMA180_DEF_BW 20
+-#define BMA180_DEF_SCALE 250
++#define BMA180_DEF_SCALE 2452
+
+ /* Available values for sysfs */
+ #define BMA180_FLP_FREQ_AVAILABLE \
+ "10 20 40 75 150 300"
+ #define BMA180_SCALE_AVAILABLE \
+- "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
++ "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
+
+ struct bma180_data {
+ struct i2c_client *client;
+@@ -94,7 +94,7 @@ enum bma180_axis {
+ };
+
+ static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
+-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
++static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
+
+ static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
+ {
--- /dev/null
+From 9b2a4d35a6ceaf217be61ed8eb3c16986244f640 Mon Sep 17 00:00:00 2001
+From: Peter Meerwald <pmeerw@pmeerw.net>
+Date: Wed, 16 Jul 2014 19:32:00 +0100
+Subject: iio:bma180: Missing check for frequency fractional part
+
+From: Peter Meerwald <pmeerw@pmeerw.net>
+
+commit 9b2a4d35a6ceaf217be61ed8eb3c16986244f640 upstream.
+
+val2 should be zero
+
+This will make no difference for correct inputs but will reject
+incorrect ones with a decimal part in the value written to the sysfs
+interface.
+
+Signed-off-by: Peter Meerwald <pmeerw@pmeerw.net>
+Cc: Oleksandr Kravchenko <o.v.kravchenko@globallogic.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/bma180.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_d
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
++ if (val2)
++ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = bma180_set_bw(data, val);
+ mutex_unlock(&data->mutex);
--- /dev/null
+From 61bd55ce1667809f022be88da77db17add90ea4e Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Thu, 17 Jul 2014 16:59:00 +0100
+Subject: iio: buffer: Fix demux table creation
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit 61bd55ce1667809f022be88da77db17add90ea4e upstream.
+
+When creating the demux table we need to iterate over the selected scan mask for
+the buffer to get the samples which should be copied to destination buffer.
+Right now the code uses the mask which contains all active channels, which means
+the demux table contains entries which causes it to copy all the samples from
+source to destination buffer one by one without doing any demuxing.
+
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/industrialio-buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -949,7 +949,7 @@ static int iio_buffer_update_demux(struc
+
+ /* Now we have the two masks, work from least sig and build up sizes */
+ for_each_set_bit(out_ind,
+- indio_dev->active_scan_mask,
++ buffer->scan_mask,
+ indio_dev->masklength) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
--- /dev/null
+From 2bcf2e92c3918ce62ab4e934256e47e9a16d19c3 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.cz>
+Date: Wed, 30 Jul 2014 16:08:33 -0700
+Subject: memcg: oom_notify use-after-free fix
+
+From: Michal Hocko <mhocko@suse.cz>
+
+commit 2bcf2e92c3918ce62ab4e934256e47e9a16d19c3 upstream.
+
+Paul Furtado has reported the following GPF:
+
+ general protection fault: 0000 [#1] SMP
+ Modules linked in: ipv6 dm_mod xen_netfront coretemp hwmon x86_pkg_temp_thermal crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ablk_helper cryptd lrw gf128mul glue_helper aes_x86_64 microcode pcspkr ext4 jbd2 mbcache raid0 xen_blkfront
+ CPU: 3 PID: 3062 Comm: java Not tainted 3.16.0-rc5 #1
+ task: ffff8801cfe8f170 ti: ffff8801d2ec4000 task.ti: ffff8801d2ec4000
+ RIP: e030:mem_cgroup_oom_synchronize+0x140/0x240
+ RSP: e02b:ffff8801d2ec7d48 EFLAGS: 00010283
+ RAX: 0000000000000001 RBX: ffff88009d633800 RCX: 000000000000000e
+ RDX: fffffffffffffffe RSI: ffff88009d630200 RDI: ffff88009d630200
+ RBP: ffff8801d2ec7da8 R08: 0000000000000012 R09: 00000000fffffffe
+ R10: 0000000000000000 R11: 0000000000000000 R12: ffff88009d633800
+ R13: ffff8801d2ec7d48 R14: dead000000100100 R15: ffff88009d633a30
+ FS: 00007f1748bb4700(0000) GS:ffff8801def80000(0000) knlGS:0000000000000000
+ CS: e033 DS: 0000 ES: 0000 CR0: 000000008005003b
+ CR2: 00007f4110300308 CR3: 00000000c05f7000 CR4: 0000000000002660
+ Call Trace:
+ pagefault_out_of_memory+0x18/0x90
+ mm_fault_error+0xa9/0x1a0
+ __do_page_fault+0x478/0x4c0
+ do_page_fault+0x2c/0x40
+ page_fault+0x28/0x30
+ Code: 44 00 00 48 89 df e8 40 ca ff ff 48 85 c0 49 89 c4 74 35 4c 8b b0 30 02 00 00 4c 8d b8 30 02 00 00 4d 39 fe 74 1b 0f 1f 44 00 00 <49> 8b 7e 10 be 01 00 00 00 e8 42 d2 04 00 4d 8b 36 4d 39 fe 75
+ RIP mem_cgroup_oom_synchronize+0x140/0x240
+
+Commit fb2a6fc56be6 ("mm: memcg: rework and document OOM waiting and
+wakeup") has moved mem_cgroup_oom_notify outside of memcg_oom_lock
+assuming it is protected by the hierarchical OOM-lock.
+
+Although this is true for the notification part the protection doesn't
+cover unregistration of event which can happen in parallel now so
+mem_cgroup_oom_notify can see already unlinked and/or freed
+mem_cgroup_eventfd_list.
+
+Fix this by using memcg_oom_lock also in mem_cgroup_oom_notify.
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=80881
+
+Fixes: fb2a6fc56be6 (mm: memcg: rework and document OOM waiting and wakeup)
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Reported-by: Paul Furtado <paulfurtado91@gmail.com>
+Tested-by: Paul Furtado <paulfurtado91@gmail.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5544,8 +5544,12 @@ static int mem_cgroup_oom_notify_cb(stru
+ {
+ struct mem_cgroup_eventfd_list *ev;
+
++ spin_lock(&memcg_oom_lock);
++
+ list_for_each_entry(ev, &memcg->oom_notify, list)
+ eventfd_signal(ev->eventfd, 1);
++
++ spin_unlock(&memcg_oom_lock);
+ return 0;
+ }
+
--- /dev/null
+From f6789593d5cea42a4ecb1cbeab6a23ade5ebbba7 Mon Sep 17 00:00:00 2001
+From: Maxim Patlasov <MPatlasov@parallels.com>
+Date: Wed, 30 Jul 2014 16:08:21 -0700
+Subject: mm/page-writeback.c: fix divide by zero in bdi_dirty_limits()
+
+From: Maxim Patlasov <MPatlasov@parallels.com>
+
+commit f6789593d5cea42a4ecb1cbeab6a23ade5ebbba7 upstream.
+
+Under memory pressure, it is possible for dirty_thresh, calculated by
+global_dirty_limits() in balance_dirty_pages(), to equal zero. Then, if
+strictlimit is true, bdi_dirty_limits() tries to resolve the proportion:
+
+ bdi_bg_thresh : bdi_thresh = background_thresh : dirty_thresh
+
+by dividing by zero.
+
+Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page-writeback.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1324,9 +1324,9 @@ static inline void bdi_dirty_limits(stru
+ *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+
+ if (bdi_bg_thresh)
+- *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
+- background_thresh,
+- dirty_thresh);
++ *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
++ background_thresh,
++ dirty_thresh) : 0;
+
+ /*
+ * In order to avoid the stacked BDI deadlock we need
--- /dev/null
+From b104a35d32025ca740539db2808aa3385d0f30eb Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Wed, 30 Jul 2014 16:08:24 -0700
+Subject: mm, thp: do not allow thp faults to avoid cpuset restrictions
+
+From: David Rientjes <rientjes@google.com>
+
+commit b104a35d32025ca740539db2808aa3385d0f30eb upstream.
+
+The page allocator relies on __GFP_WAIT to determine if ALLOC_CPUSET
+should be set in allocflags. ALLOC_CPUSET controls if a page allocation
+should be restricted only to the set of allowed cpuset mems.
+
+Transparent hugepages clears __GFP_WAIT when defrag is disabled to prevent
+the fault path from using memory compaction or direct reclaim. Thus, it
+is unfairly able to allocate outside of its cpuset mems restriction as a
+side-effect.
+
+This patch ensures that ALLOC_CPUSET is only cleared when the gfp mask is
+truly GFP_ATOMIC by verifying it is also not a thp allocation.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Reported-by: Alex Thorlton <athorlton@sgi.com>
+Tested-by: Alex Thorlton <athorlton@sgi.com>
+Cc: Bob Liu <lliubbo@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Hedi Berriche <hedi@sgi.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2435,7 +2435,7 @@ static inline int
+ gfp_to_alloc_flags(gfp_t gfp_mask)
+ {
+ int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+- const gfp_t wait = gfp_mask & __GFP_WAIT;
++ const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
+
+ /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+ BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
+@@ -2444,20 +2444,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
+ * The caller may dip into page reserves a bit more if the caller
+ * cannot run direct reclaim, or if the caller has realtime scheduling
+ * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+- * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
++ * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
+ */
+ alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
+
+- if (!wait) {
++ if (atomic) {
+ /*
+- * Not worth trying to allocate harder for
+- * __GFP_NOMEMALLOC even if it can't schedule.
++ * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
++ * if it can't schedule.
+ */
+- if (!(gfp_mask & __GFP_NOMEMALLOC))
++ if (!(gfp_mask & __GFP_NOMEMALLOC))
+ alloc_flags |= ALLOC_HARDER;
+ /*
+- * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
++ * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
++ * comment for __cpuset_node_allowed_softwall().
+ */
+ alloc_flags &= ~ALLOC_CPUSET;
+ } else if (unlikely(rt_task(current)) && !in_interrupt())
--- /dev/null
+From aac74dc495456412c4130a1167ce4beb6c1f0b38 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Wed, 4 Jun 2014 16:11:40 -0700
+Subject: printk: rename printk_sched to printk_deferred
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit aac74dc495456412c4130a1167ce4beb6c1f0b38 upstream.
+
+After learning we'll need some sort of deferred printk functionality in
+the timekeeping core, Peter suggested we rename the printk_sched function
+so it can be reused by needed subsystems.
+
+This only changes the function name. No logic changes.
+
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Jiri Bohac <jbohac@suse.cz>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/printk.h | 6 +++---
+ kernel/printk/printk.c | 2 +-
+ kernel/sched/core.c | 2 +-
+ kernel/sched/deadline.c | 2 +-
+ kernel/sched/rt.c | 2 +-
+ 5 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -128,9 +128,9 @@ asmlinkage __printf(1, 2) __cold
+ int printk(const char *fmt, ...);
+
+ /*
+- * Special printk facility for scheduler use only, _DO_NOT_USE_ !
++ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
+ */
+-__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
++__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+
+ /*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+@@ -165,7 +165,7 @@ int printk(const char *s, ...)
+ return 0;
+ }
+ static inline __printf(1, 2) __cold
+-int printk_sched(const char *s, ...)
++int printk_deferred(const char *s, ...)
+ {
+ return 0;
+ }
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2474,7 +2474,7 @@ void wake_up_klogd(void)
+ preempt_enable();
+ }
+
+-int printk_sched(const char *fmt, ...)
++int printk_deferred(const char *fmt, ...)
+ {
+ unsigned long flags;
+ va_list args;
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1320,7 +1320,7 @@ out:
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+- printk_sched("process %d (%s) no longer affine to cpu%d\n",
++ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -352,7 +352,7 @@ static void replenish_dl_entity(struct s
+
+ if (!lag_once) {
+ lag_once = true;
+- printk_sched("sched: DL replenish lagged to much\n");
++ printk_deferred("sched: DL replenish lagged to much\n");
+ }
+ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
+ dl_se->runtime = pi_se->dl_runtime;
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -857,7 +857,7 @@ static int sched_rt_runtime_exceeded(str
+
+ if (!once) {
+ once = true;
+- printk_sched("sched: RT throttling activated\n");
++ printk_deferred("sched: RT throttling activated\n");
+ }
+ } else {
+ /*
--- /dev/null
+From f723aa1817dd8f4fe005aab52ba70c8ab0ef9457 Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Wed, 23 Jul 2014 21:03:50 -0700
+Subject: sched_clock: Avoid corrupting hrtimer tree during suspend
+
+From: Stephen Boyd <sboyd@codeaurora.org>
+
+commit f723aa1817dd8f4fe005aab52ba70c8ab0ef9457 upstream.
+
+During suspend we call sched_clock_poll() to update the epoch and
+accumulated time and reprogram the sched_clock_timer to fire
+before the next wrap-around time. Unfortunately,
+sched_clock_poll() doesn't restart the timer, instead it relies
+on the hrtimer layer to do that and during suspend we aren't
+calling that function from the hrtimer layer. Instead, we're
+reprogramming the expires time while the hrtimer is enqueued,
+which can cause the hrtimer tree to be corrupted. Furthermore, we
+restart the timer during suspend but we update the epoch during
+resume which seems counter-intuitive.
+
+Let's fix this by saving the accumulated state and canceling the
+timer during suspend. On resume we can update the epoch and
+restart the timer similar to what we would do if we were starting
+the clock for the first time.
+
+Fixes: a08ca5d1089d "sched_clock: Use an hrtimer instead of timer"
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Link: http://lkml.kernel.org/r/1406174630-23458-1-git-send-email-john.stultz@linaro.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/sched_clock.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/sched_clock.c
++++ b/kernel/time/sched_clock.c
+@@ -204,7 +204,8 @@ void __init sched_clock_postinit(void)
+
+ static int sched_clock_suspend(void)
+ {
+- sched_clock_poll(&sched_clock_timer);
++ update_sched_clock();
++ hrtimer_cancel(&sched_clock_timer);
+ cd.suspended = true;
+ return 0;
+ }
+@@ -212,6 +213,7 @@ static int sched_clock_suspend(void)
+ static void sched_clock_resume(void)
+ {
+ cd.epoch_cyc = read_sched_clock();
++ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+ cd.suspended = false;
+ }
+
revert-mac80211-move-bufferable-mmpdu-check-to-fix-ap-mode-scan.patch
rapidio-tsi721_dma-fix-failure-to-obtain-transaction-descriptor.patch
scsi-handle-flush-errors-properly.patch
+mm-page-writeback.c-fix-divide-by-zero-in-bdi_dirty_limits.patch
+mm-thp-do-not-allow-thp-faults-to-avoid-cpuset-restrictions.patch
+memcg-oom_notify-use-after-free-fix.patch
+staging-vt6655-fix-disassociated-messages-every-10-seconds.patch
+acpi-pnp-fix-acpi_pnp_match.patch
+iio-bma180-fix-scale-factors-to-report-correct-acceleration-units.patch
+iio-bma180-missing-check-for-frequency-fractional-part.patch
+iio-buffer-fix-demux-table-creation.patch
+dm-bufio-fully-initialize-shrinker.patch
+dm-cache-fix-race-affecting-dirty-block-count.patch
+printk-rename-printk_sched-to-printk_deferred.patch
+sched_clock-avoid-corrupting-hrtimer-tree-during-suspend.patch
+timer-fix-lock-inversion-between-hrtimer_bases.lock-and-scheduler-locks.patch
--- /dev/null
+From 4aa0abed3a2a11b7d71ad560c1a3e7631c5a31cd Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Wed, 23 Jul 2014 21:35:12 +0100
+Subject: staging: vt6655: Fix disassociated messages every 10 seconds
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 4aa0abed3a2a11b7d71ad560c1a3e7631c5a31cd upstream.
+
+byReAssocCount is incremented every second resulting in
+disassociated message being send every 10 seconds whether
+connection or not.
+
+byReAssocCount should only advance while eCommandState
+is in WLAN_ASSOCIATE_WAIT
+
+Change existing scope to if condition.
+
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/vt6655/bssdb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/vt6655/bssdb.c
++++ b/drivers/staging/vt6655/bssdb.c
+@@ -983,7 +983,7 @@ start:
+ pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
+ }
+
+- {
++ if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
+ pDevice->byReAssocCount++;
+ /* 10 sec timeout */
+ if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
--- /dev/null
+From 504d58745c9ca28d33572e2d8a9990b43e06075d Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Aug 2014 12:20:02 +0200
+Subject: timer: Fix lock inversion between hrtimer_bases.lock and scheduler locks
+
+From: Jan Kara <jack@suse.cz>
+
+commit 504d58745c9ca28d33572e2d8a9990b43e06075d upstream.
+
+clockevents_increase_min_delta() calls printk() from under
+hrtimer_bases.lock. That causes lock inversion on scheduler locks because
+printk() can call into the scheduler. Lockdep puts it as:
+
+======================================================
+[ INFO: possible circular locking dependency detected ]
+3.15.0-rc8-06195-g939f04b #2 Not tainted
+-------------------------------------------------------
+trinity-main/74 is trying to acquire lock:
+ (&port_lock_key){-.....}, at: [<811c60be>] serial8250_console_write+0x8c/0x10c
+
+but task is already holding lock:
+ (hrtimer_bases.lock){-.-...}, at: [<8103caeb>] hrtimer_try_to_cancel+0x13/0x66
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #5 (hrtimer_bases.lock){-.-...}:
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f11d>] _raw_spin_lock_irqsave+0x2e/0x3e
+ [<8103c918>] __hrtimer_start_range_ns+0x1c/0x197
+ [<8107ec20>] perf_swevent_start_hrtimer.part.41+0x7a/0x85
+ [<81080792>] task_clock_event_start+0x3a/0x3f
+ [<810807a4>] task_clock_event_add+0xd/0x14
+ [<8108259a>] event_sched_in+0xb6/0x17a
+ [<810826a2>] group_sched_in+0x44/0x122
+ [<81082885>] ctx_sched_in.isra.67+0x105/0x11f
+ [<810828e6>] perf_event_sched_in.isra.70+0x47/0x4b
+ [<81082bf6>] __perf_install_in_context+0x8b/0xa3
+ [<8107eb8e>] remote_function+0x12/0x2a
+ [<8105f5af>] smp_call_function_single+0x2d/0x53
+ [<8107e17d>] task_function_call+0x30/0x36
+ [<8107fb82>] perf_install_in_context+0x87/0xbb
+ [<810852c9>] SYSC_perf_event_open+0x5c6/0x701
+ [<810856f9>] SyS_perf_event_open+0x17/0x19
+ [<8142f8ee>] syscall_call+0x7/0xb
+
+-> #4 (&ctx->lock){......}:
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f04c>] _raw_spin_lock+0x21/0x30
+ [<81081df3>] __perf_event_task_sched_out+0x1dc/0x34f
+ [<8142cacc>] __schedule+0x4c6/0x4cb
+ [<8142cae0>] schedule+0xf/0x11
+ [<8142f9a6>] work_resched+0x5/0x30
+
+-> #3 (&rq->lock){-.-.-.}:
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f04c>] _raw_spin_lock+0x21/0x30
+ [<81040873>] __task_rq_lock+0x33/0x3a
+ [<8104184c>] wake_up_new_task+0x25/0xc2
+ [<8102474b>] do_fork+0x15c/0x2a0
+ [<810248a9>] kernel_thread+0x1a/0x1f
+ [<814232a2>] rest_init+0x1a/0x10e
+ [<817af949>] start_kernel+0x303/0x308
+ [<817af2ab>] i386_start_kernel+0x79/0x7d
+
+-> #2 (&p->pi_lock){-.-...}:
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f11d>] _raw_spin_lock_irqsave+0x2e/0x3e
+ [<810413dd>] try_to_wake_up+0x1d/0xd6
+ [<810414cd>] default_wake_function+0xb/0xd
+ [<810461f3>] __wake_up_common+0x39/0x59
+ [<81046346>] __wake_up+0x29/0x3b
+ [<811b8733>] tty_wakeup+0x49/0x51
+ [<811c3568>] uart_write_wakeup+0x17/0x19
+ [<811c5dc1>] serial8250_tx_chars+0xbc/0xfb
+ [<811c5f28>] serial8250_handle_irq+0x54/0x6a
+ [<811c5f57>] serial8250_default_handle_irq+0x19/0x1c
+ [<811c56d8>] serial8250_interrupt+0x38/0x9e
+ [<810510e7>] handle_irq_event_percpu+0x5f/0x1e2
+ [<81051296>] handle_irq_event+0x2c/0x43
+ [<81052cee>] handle_level_irq+0x57/0x80
+ [<81002a72>] handle_irq+0x46/0x5c
+ [<810027df>] do_IRQ+0x32/0x89
+ [<8143036e>] common_interrupt+0x2e/0x33
+ [<8142f23c>] _raw_spin_unlock_irqrestore+0x3f/0x49
+ [<811c25a4>] uart_start+0x2d/0x32
+ [<811c2c04>] uart_write+0xc7/0xd6
+ [<811bc6f6>] n_tty_write+0xb8/0x35e
+ [<811b9beb>] tty_write+0x163/0x1e4
+ [<811b9cd9>] redirected_tty_write+0x6d/0x75
+ [<810b6ed6>] vfs_write+0x75/0xb0
+ [<810b7265>] SyS_write+0x44/0x77
+ [<8142f8ee>] syscall_call+0x7/0xb
+
+-> #1 (&tty->write_wait){-.....}:
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f11d>] _raw_spin_lock_irqsave+0x2e/0x3e
+ [<81046332>] __wake_up+0x15/0x3b
+ [<811b8733>] tty_wakeup+0x49/0x51
+ [<811c3568>] uart_write_wakeup+0x17/0x19
+ [<811c5dc1>] serial8250_tx_chars+0xbc/0xfb
+ [<811c5f28>] serial8250_handle_irq+0x54/0x6a
+ [<811c5f57>] serial8250_default_handle_irq+0x19/0x1c
+ [<811c56d8>] serial8250_interrupt+0x38/0x9e
+ [<810510e7>] handle_irq_event_percpu+0x5f/0x1e2
+ [<81051296>] handle_irq_event+0x2c/0x43
+ [<81052cee>] handle_level_irq+0x57/0x80
+ [<81002a72>] handle_irq+0x46/0x5c
+ [<810027df>] do_IRQ+0x32/0x89
+ [<8143036e>] common_interrupt+0x2e/0x33
+ [<8142f23c>] _raw_spin_unlock_irqrestore+0x3f/0x49
+ [<811c25a4>] uart_start+0x2d/0x32
+ [<811c2c04>] uart_write+0xc7/0xd6
+ [<811bc6f6>] n_tty_write+0xb8/0x35e
+ [<811b9beb>] tty_write+0x163/0x1e4
+ [<811b9cd9>] redirected_tty_write+0x6d/0x75
+ [<810b6ed6>] vfs_write+0x75/0xb0
+ [<810b7265>] SyS_write+0x44/0x77
+ [<8142f8ee>] syscall_call+0x7/0xb
+
+-> #0 (&port_lock_key){-.....}:
+ [<8104a62d>] __lock_acquire+0x9ea/0xc6d
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<8142f11d>] _raw_spin_lock_irqsave+0x2e/0x3e
+ [<811c60be>] serial8250_console_write+0x8c/0x10c
+ [<8104e402>] call_console_drivers.constprop.31+0x87/0x118
+ [<8104f5d5>] console_unlock+0x1d7/0x398
+ [<8104fb70>] vprintk_emit+0x3da/0x3e4
+ [<81425f76>] printk+0x17/0x19
+ [<8105bfa0>] clockevents_program_min_delta+0x104/0x116
+ [<8105c548>] clockevents_program_event+0xe7/0xf3
+ [<8105cc1c>] tick_program_event+0x1e/0x23
+ [<8103c43c>] hrtimer_force_reprogram+0x88/0x8f
+ [<8103c49e>] __remove_hrtimer+0x5b/0x79
+ [<8103cb21>] hrtimer_try_to_cancel+0x49/0x66
+ [<8103cb4b>] hrtimer_cancel+0xd/0x18
+ [<8107f102>] perf_swevent_cancel_hrtimer.part.60+0x2b/0x30
+ [<81080705>] task_clock_event_stop+0x20/0x64
+ [<81080756>] task_clock_event_del+0xd/0xf
+ [<81081350>] event_sched_out+0xab/0x11e
+ [<810813e0>] group_sched_out+0x1d/0x66
+ [<81081682>] ctx_sched_out+0xaf/0xbf
+ [<81081e04>] __perf_event_task_sched_out+0x1ed/0x34f
+ [<8142cacc>] __schedule+0x4c6/0x4cb
+ [<8142cae0>] schedule+0xf/0x11
+ [<8142f9a6>] work_resched+0x5/0x30
+
+other info that might help us debug this:
+
+Chain exists of:
+ &port_lock_key --> &ctx->lock --> hrtimer_bases.lock
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(hrtimer_bases.lock);
+ lock(&ctx->lock);
+ lock(hrtimer_bases.lock);
+ lock(&port_lock_key);
+
+ *** DEADLOCK ***
+
+4 locks held by trinity-main/74:
+ #0: (&rq->lock){-.-.-.}, at: [<8142c6f3>] __schedule+0xed/0x4cb
+ #1: (&ctx->lock){......}, at: [<81081df3>] __perf_event_task_sched_out+0x1dc/0x34f
+ #2: (hrtimer_bases.lock){-.-...}, at: [<8103caeb>] hrtimer_try_to_cancel+0x13/0x66
+ #3: (console_lock){+.+...}, at: [<8104fb5d>] vprintk_emit+0x3c7/0x3e4
+
+stack backtrace:
+CPU: 0 PID: 74 Comm: trinity-main Not tainted 3.15.0-rc8-06195-g939f04b #2
+ 00000000 81c3a310 8b995c14 81426f69 8b995c44 81425a99 8161f671 8161f570
+ 8161f538 8161f559 8161f538 8b995c78 8b142bb0 00000004 8b142fdc 8b142bb0
+ 8b995ca8 8104a62d 8b142fac 000016f2 81c3a310 00000001 00000001 00000003
+Call Trace:
+ [<81426f69>] dump_stack+0x16/0x18
+ [<81425a99>] print_circular_bug+0x18f/0x19c
+ [<8104a62d>] __lock_acquire+0x9ea/0xc6d
+ [<8104a942>] lock_acquire+0x92/0x101
+ [<811c60be>] ? serial8250_console_write+0x8c/0x10c
+ [<811c6032>] ? wait_for_xmitr+0x76/0x76
+ [<8142f11d>] _raw_spin_lock_irqsave+0x2e/0x3e
+ [<811c60be>] ? serial8250_console_write+0x8c/0x10c
+ [<811c60be>] serial8250_console_write+0x8c/0x10c
+ [<8104af87>] ? lock_release+0x191/0x223
+ [<811c6032>] ? wait_for_xmitr+0x76/0x76
+ [<8104e402>] call_console_drivers.constprop.31+0x87/0x118
+ [<8104f5d5>] console_unlock+0x1d7/0x398
+ [<8104fb70>] vprintk_emit+0x3da/0x3e4
+ [<81425f76>] printk+0x17/0x19
+ [<8105bfa0>] clockevents_program_min_delta+0x104/0x116
+ [<8105cc1c>] tick_program_event+0x1e/0x23
+ [<8103c43c>] hrtimer_force_reprogram+0x88/0x8f
+ [<8103c49e>] __remove_hrtimer+0x5b/0x79
+ [<8103cb21>] hrtimer_try_to_cancel+0x49/0x66
+ [<8103cb4b>] hrtimer_cancel+0xd/0x18
+ [<8107f102>] perf_swevent_cancel_hrtimer.part.60+0x2b/0x30
+ [<81080705>] task_clock_event_stop+0x20/0x64
+ [<81080756>] task_clock_event_del+0xd/0xf
+ [<81081350>] event_sched_out+0xab/0x11e
+ [<810813e0>] group_sched_out+0x1d/0x66
+ [<81081682>] ctx_sched_out+0xaf/0xbf
+ [<81081e04>] __perf_event_task_sched_out+0x1ed/0x34f
+ [<8104416d>] ? __dequeue_entity+0x23/0x27
+ [<81044505>] ? pick_next_task_fair+0xb1/0x120
+ [<8142cacc>] __schedule+0x4c6/0x4cb
+ [<81047574>] ? trace_hardirqs_off_caller+0xd7/0x108
+ [<810475b0>] ? trace_hardirqs_off+0xb/0xd
+ [<81056346>] ? rcu_irq_exit+0x64/0x77
+
+Fix the problem by using printk_deferred(), which does not call into the
+scheduler.
+
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/clockevents.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -146,7 +146,8 @@ static int clockevents_increase_min_delt
+ {
+ /* Nothing to do if we already reached the limit */
+ if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
+- printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
++ printk_deferred(KERN_WARNING
++ "CE: Reprogramming failure. Giving up\n");
+ dev->next_event.tv64 = KTIME_MAX;
+ return -ETIME;
+ }
+@@ -159,9 +160,10 @@ static int clockevents_increase_min_delt
+ if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+ dev->min_delta_ns = MIN_DELTA_LIMIT;
+
+- printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+- dev->name ? dev->name : "?",
+- (unsigned long long) dev->min_delta_ns);
++ printk_deferred(KERN_WARNING
++ "CE: %s increased min_delta_ns to %llu nsec\n",
++ dev->name ? dev->name : "?",
++ (unsigned long long) dev->min_delta_ns);
+ return 0;
+ }
+