--- /dev/null
+From 6cad1376954e591c3c41500c4e586e183e7ffe6d Mon Sep 17 00:00:00 2001
+From: James Ralston <james.d.ralston@intel.com>
+Date: Wed, 27 Aug 2014 14:31:58 -0700
+Subject: ata_piix: Add Device IDs for Intel 9 Series PCH
+
+From: James Ralston <james.d.ralston@intel.com>
+
+commit 6cad1376954e591c3c41500c4e586e183e7ffe6d upstream.
+
+This patch adds the IDE mode SATA Device IDs for the Intel 9 Series PCH.
+
+Signed-off-by: James Ralston <james.d.ralston@intel.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/ata_piix.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -340,6 +340,14 @@ static const struct pci_device_id piix_p
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ /* SATA Controller IDE (Coleto Creek) */
+ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (9 Series) */
++ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
++ /* SATA Controller IDE (9 Series) */
++ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
++ /* SATA Controller IDE (9 Series) */
++ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (9 Series) */
++ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+
+ { } /* terminate list */
+ };
--- /dev/null
+From d21ccfd0a60ea3dece3e1d142f52694abf87a0b1 Mon Sep 17 00:00:00 2001
+From: Johannes Stezenbach <js@sig21.net>
+Date: Fri, 12 Sep 2014 22:36:51 +0200
+Subject: ath9k_htc: fix random decryption failure
+
+From: Johannes Stezenbach <js@sig21.net>
+
+commit d21ccfd0a60ea3dece3e1d142f52694abf87a0b1 upstream.
+
+In v3.15 the driver stopped accepting network packets after successful
+authentication, which could be worked around by passing the
+nohwcrypt=1 module parameter. This was not reproducible by
+everyone, and showed random behaviour in some tests.
+It was caused by an uninitialized variable introduced
+in 4ed1a8d4a257 ("ath9k_htc: use ath9k_cmn_rx_accept") and
+used in 341b29b9cd2f ("ath9k_htc: use ath9k_cmn_rx_skb_postprocess").
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=78581
+Fixes: 341b29b9cd2f ("ath9k_htc: use ath9k_cmn_rx_skb_postprocess")
+Signed-off-by: Johannes Stezenbach <js@sig21.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9
+ struct ath_hw *ah = common->ah;
+ struct ath_htc_rx_status *rxstatus;
+ struct ath_rx_status rx_stats;
+- bool decrypt_error;
++ bool decrypt_error = false;
+
+ if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
+ ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
--- /dev/null
+From 2da78092dda13f1efd26edbbf99a567776913750 Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Tue, 26 Aug 2014 09:05:36 -0600
+Subject: block: Fix dev_t minor allocation lifetime
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit 2da78092dda13f1efd26edbbf99a567776913750 upstream.
+
+Release the dev_t minor when all references have been dropped, to
+prevent another device from acquiring the same major/minor.
+
+Since the partition's release may be invoked from call_rcu's soft-irq
+context, the ext_devt_idr's mutex had to be replaced with a spinlock so
+as not to sleep.
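+
+As a rough sketch of the allocation pattern adopted in the
+blk_alloc_devt() hunk below: idr_preload() reserves memory with
+GFP_KERNEL while sleeping is still allowed, so the allocation inside
+the spinlock can then use GFP_NOWAIT:
+
+  idr_preload(GFP_KERNEL);          /* may sleep, fills per-cpu cache */
+
+  spin_lock(&ext_devt_lock);
+  idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+  spin_unlock(&ext_devt_lock);
+
+  idr_preload_end();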
+
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/genhd.c | 24 ++++++++++++++----------
+ block/partition-generic.c | 2 +-
+ 2 files changed, 15 insertions(+), 11 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -28,10 +28,10 @@ struct kobject *block_depr;
+ /* for extended dynamic devt allocation, currently only one major is used */
+ #define NR_EXT_DEVT (1 << MINORBITS)
+
+-/* For extended devt allocation. ext_devt_mutex prevents look up
++/* For extended devt allocation. ext_devt_lock prevents look up
+ * results from going away underneath its user.
+ */
+-static DEFINE_MUTEX(ext_devt_mutex);
++static DEFINE_SPINLOCK(ext_devt_lock);
+ static DEFINE_IDR(ext_devt_idr);
+
+ static struct device_type disk_type;
+@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *par
+ }
+
+ /* allocate ext devt */
+- mutex_lock(&ext_devt_mutex);
+- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+- mutex_unlock(&ext_devt_mutex);
++ idr_preload(GFP_KERNEL);
++
++ spin_lock(&ext_devt_lock);
++ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
++ spin_unlock(&ext_devt_lock);
++
++ idr_preload_end();
+ if (idx < 0)
+ return idx == -ENOSPC ? -EBUSY : idx;
+
+@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
+ return;
+
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+- mutex_lock(&ext_devt_mutex);
++ spin_lock(&ext_devt_lock);
+ idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+- mutex_unlock(&ext_devt_mutex);
++ spin_unlock(&ext_devt_lock);
+ }
+ }
+
+@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
+ device_del(disk_to_dev(disk));
+- blk_free_devt(disk_to_dev(disk)->devt);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+
+@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt,
+ } else {
+ struct hd_struct *part;
+
+- mutex_lock(&ext_devt_mutex);
++ spin_lock(&ext_devt_lock);
+ part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ if (part && get_disk(part_to_disk(part))) {
+ *partno = part->partno;
+ disk = part_to_disk(part);
+ }
+- mutex_unlock(&ext_devt_mutex);
++ spin_unlock(&ext_devt_lock);
+ }
+
+ return disk;
+@@ -1098,6 +1101,7 @@ static void disk_release(struct device *
+ {
+ struct gendisk *disk = dev_to_disk(dev);
+
++ blk_free_devt(dev->devt);
+ disk_release_events(disk);
+ kfree(disk->random);
+ disk_replace_part_tbl(disk, NULL);
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -211,6 +211,7 @@ static const struct attribute_group *par
+ static void part_release(struct device *dev)
+ {
+ struct hd_struct *p = dev_to_part(dev);
++ blk_free_devt(dev->devt);
+ free_part_stats(p);
+ free_part_info(p);
+ kfree(p);
+@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *di
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+- blk_free_devt(part_devt(part));
+
+ hd_struct_put(part);
+ }
--- /dev/null
+From 87c4790330810fe5caf0172d9320cf24ef19cebe Mon Sep 17 00:00:00 2001
+From: Arend van Spriel <arend@broadcom.com>
+Date: Fri, 12 Sep 2014 16:19:30 +0200
+Subject: brcmfmac: handle IF event for P2P_DEVICE interface
+
+From: Arend van Spriel <arend@broadcom.com>
+
+commit 87c4790330810fe5caf0172d9320cf24ef19cebe upstream.
+
+The firmware notifies about interface changes through the IF event
+which has a NO_IF flag that means the host can ignore the event. This
+behaviour was introduced in the driver by:
+
+ commit 2ee8382fc6c763c76396a6aaff77a27089eed3aa
+ Author: Arend van Spriel <arend@broadcom.com>
+ Date: Sat Aug 10 12:27:24 2013 +0200
+
+ brcmfmac: ignore IF event if firmware indicates it
+
+It turns out that the IF event for the P2P_DEVICE also has this
+flag set, but the event should not be ignored in this scenario.
+The mentioned commit caused a regression in the 3.12 kernel in the
+creation of the P2P_DEVICE interface.
+
+Reviewed-by: Hante Meuleman <meuleman@broadcom.com>
+Reviewed-by: Franky (Zhenhui) Lin <frankyl@broadcom.com>
+Reviewed-by: Daniel (Deognyoun) Kim <dekim@broadcom.com>
+Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
+Signed-off-by: Arend van Spriel <arend@broadcom.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/brcm80211/brcmfmac/fweh.c | 12 +++++++++---
+ drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 2 ++
+ 2 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(s
+ ifevent->action, ifevent->ifidx, ifevent->bssidx,
+ ifevent->flags, ifevent->role);
+
+- if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
++ /* The P2P Device interface event must not be ignored
++ * contrary to what firmware tells us. The only way to
++ * distinguish the P2P Device is by looking at the ifidx
++ * and bssidx received.
++ */
++ if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
++ (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
+ brcmf_dbg(EVENT, "event can be ignored\n");
+ return;
+ }
+@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(s
+ return;
+ }
+
+- if (ifevent->action == BRCMF_E_IF_CHANGE)
++ if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
+ brcmf_fws_reset_interface(ifp);
+
+ err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
+
+- if (ifevent->action == BRCMF_E_IF_DEL) {
++ if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
+ brcmf_fws_del_interface(ifp);
+ brcmf_del_if(drvr, ifevent->bssidx);
+ }
+--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
++++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+@@ -167,6 +167,8 @@ enum brcmf_fweh_event_code {
+ #define BRCMF_E_IF_ROLE_STA 0
+ #define BRCMF_E_IF_ROLE_AP 1
+ #define BRCMF_E_IF_ROLE_WDS 2
++#define BRCMF_E_IF_ROLE_P2P_GO 3
++#define BRCMF_E_IF_ROLE_P2P_CLIENT 4
+
+ /**
+ * definitions for event packet validation.
--- /dev/null
+From 40aa978eccec61347cd47b97c598df49acde8be5 Mon Sep 17 00:00:00 2001
+From: Anssi Hannula <anssi.hannula@iki.fi>
+Date: Fri, 5 Sep 2014 03:11:28 +0300
+Subject: dm cache: fix race causing dirty blocks to be marked as clean
+
+From: Anssi Hannula <anssi.hannula@iki.fi>
+
+commit 40aa978eccec61347cd47b97c598df49acde8be5 upstream.
+
+When a writeback or a promotion of a block is completed, the cell of
+that block is removed from the prison, the block is marked as clean, and
+the clear_dirty() callback of the cache policy is called.
+
+Unfortunately, performing those actions in this order allows a new
+write bio for that block to come in before the dirty status has been
+cleared, possibly causing one of these two scenarios:
+
+Scenario A:
+
+Thread 1 Thread 2
+cell_defer() .
+- cell removed from prison .
+- detained bios queued .
+. incoming write bio
+. remapped to cache
+. set_dirty() called,
+. but block already dirty
+. => it does nothing
+clear_dirty() .
+- block marked clean .
+- policy clear_dirty() called .
+
+Result: Block is marked clean even though it is actually dirty. No
+writeback will occur.
+
+Scenario B:
+
+Thread 1 Thread 2
+cell_defer() .
+- cell removed from prison .
+- detained bios queued .
+clear_dirty() .
+- block marked clean .
+. incoming write bio
+. remapped to cache
+. set_dirty() called
+. - block marked dirty
+. - policy set_dirty() called
+- policy clear_dirty() called .
+
+Result: Block is properly marked as dirty, but the policy thinks it is
+clean and therefore never asks us to write it back.
+This case is visible in "dmsetup status" dirty block count (which
+normally decreases to 0 on a quiet device).
+
+Fix these issues by calling clear_dirty() before calling cell_defer().
+Incoming bios for that block will then be detained in the cell and
+released only after clear_dirty() has completed, so the race will not
+occur.
+
+Found by inspecting the code after noticing spurious dirty counts
+(scenario B).
+
+Signed-off-by: Anssi Hannula <anssi.hannula@iki.fi>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -873,8 +873,8 @@ static void migration_success_pre_commit
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+- cell_defer(cache, mg->old_ocell, false);
+ clear_dirty(cache, mg->old_oblock, mg->cblock);
++ cell_defer(cache, mg->old_ocell, false);
+ cleanup_migration(mg);
+ return;
+
+@@ -929,13 +929,13 @@ static void migration_success_post_commi
+ }
+
+ } else {
++ clear_dirty(cache, mg->new_oblock, mg->cblock);
+ if (mg->requeue_holder)
+ cell_defer(cache, mg->new_ocell, true);
+ else {
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
+- clear_dirty(cache, mg->new_oblock, mg->cblock);
+ cleanup_migration(mg);
+ }
+ }
--- /dev/null
+From d49ec52ff6ddcda178fc2476a109cf1bd1fa19ed Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 28 Aug 2014 11:09:31 -0400
+Subject: dm crypt: fix access beyond the end of allocated space
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit d49ec52ff6ddcda178fc2476a109cf1bd1fa19ed upstream.
+
+The DM crypt target accesses memory beyond allocated space resulting in
+a crash on 32 bit x86 systems.
+
+This bug is very old (it dates back to 2.6.25 commit 3a7f6c990ad04 "dm
+crypt: use async crypto"). However, this bug was masked by the fact
+that kmalloc rounds the size up to the next power of two. This bug
+wasn't exposed until 3.17-rc1 commit 298a9fa08a ("dm crypt: use per-bio
+data"). After switching to per-bio data there was no longer any
+padding beyond the end of a dm-crypt allocated memory block.
+
+To minimize allocation overhead dm-crypt puts several structures into one
+block allocated with kmalloc. The block holds struct ablkcipher_request,
+cipher-specific scratch pad (crypto_ablkcipher_reqsize(any_tfm(cc))),
+struct dm_crypt_request and an initialization vector.
+
+The variable dmreq_start is set to offset of struct dm_crypt_request
+within this memory block. dm-crypt allocates the block with this size:
+cc->dmreq_start + sizeof(struct dm_crypt_request) + cc->iv_size.
+
+When accessing the initialization vector, dm-crypt uses the function
+iv_of_dmreq, which performs this calculation: ALIGN((unsigned long)(dmreq
++ 1), crypto_ablkcipher_alignmask(any_tfm(cc)) + 1).
+
+dm-crypt allocates "cc->iv_size" bytes beyond the end of the
+dm_crypt_request structure. However, when dm-crypt accesses the
+initialization vector, it
+takes a pointer to the end of dm_crypt_request, aligns it, and then uses
+it as the initialization vector. If the end of dm_crypt_request is not
+aligned on a crypto_ablkcipher_alignmask(any_tfm(cc)) boundary the
+alignment causes the initialization vector to point beyond the allocated
+space.
+
+Fix this bug by calculating the variable iv_size_padding and adding it
+to the allocated size.
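+
+As a worked example with illustrative numbers (not taken from any
+particular cipher): suppose cc->dmreq_start + sizeof(struct
+dm_crypt_request) = 100 and the cipher's alignmask is 15 (16-byte IV
+alignment).  Then:
+
+  IV offset       = ALIGN(100, 16)  = 112
+  old allocation  = 100 + iv_size   -> the IV overruns it by 12 bytes
+  iv_size_padding = -100 & 15       = 12 -> the new size covers the IV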
+
+Also correct the alignment of dm_crypt_request. struct dm_crypt_request
+is specific to dm-crypt (it isn't used by the crypto subsystem at all),
+so it is aligned on __alignof__(struct dm_crypt_request).
+
+Also align per_bio_data_size on ARCH_KMALLOC_MINALIGN, so that it is
+aligned as if the block was allocated with kmalloc.
+
+Reported-by: Krzysztof Kolasa <kkolasa@winsoft.pl>
+Tested-by: Milan Broz <gmazyland@gmail.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1681,6 +1681,7 @@ static int crypt_ctr(struct dm_target *t
+ unsigned int key_size, opt_params;
+ unsigned long long tmpll;
+ int ret;
++ size_t iv_size_padding;
+ struct dm_arg_set as;
+ const char *opt_string;
+ char dummy;
+@@ -1717,12 +1718,23 @@ static int crypt_ctr(struct dm_target *t
+
+ cc->dmreq_start = sizeof(struct ablkcipher_request);
+ cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+- cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
+- cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
+- ~(crypto_tfm_ctx_alignment() - 1);
++ cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
++
++ if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
++ /* Allocate the padding exactly */
++ iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
++ & crypto_ablkcipher_alignmask(any_tfm(cc));
++ } else {
++ /*
++ * If the cipher requires greater alignment than kmalloc
++ * alignment, we don't know the exact position of the
++ * initialization vector. We must assume worst case.
++ */
++ iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
++ }
+
+ cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
+- sizeof(struct dm_crypt_request) + cc->iv_size);
++ sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
+ if (!cc->req_pool) {
+ ti->error = "Cannot allocate crypt request mempool";
+ goto bad;
--- /dev/null
+From c680e41b3a2e944185c74bf60531e3d316d3ecc4 Mon Sep 17 00:00:00 2001
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Date: Tue, 9 Sep 2014 14:50:51 -0700
+Subject: eventpoll: fix uninitialized variable in epoll_ctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+
+commit c680e41b3a2e944185c74bf60531e3d316d3ecc4 upstream.
+
+When calling epoll_ctl with operation EPOLL_CTL_DEL, the epds structure
+is not initialized but ep_take_care_of_epollwakeup reads its event field.
+When this uninitialized field has the EPOLLWAKEUP bit set, a capability
+check is done for CAP_BLOCK_SUSPEND in ep_take_care_of_epollwakeup. This
+produces unexpected messages in the audit log, such as (on a system
+running SELinux):
+
+ type=AVC msg=audit(1408212798.866:410): avc: denied
+ { block_suspend } for pid=7754 comm="dbus-daemon" capability=36
+ scontext=unconfined_u:unconfined_r:unconfined_t
+ tcontext=unconfined_u:unconfined_r:unconfined_t
+ tclass=capability2 permissive=1
+
+ type=SYSCALL msg=audit(1408212798.866:410): arch=c000003e syscall=233
+ success=yes exit=0 a0=3 a1=2 a2=9 a3=7fffd4d66ec0 items=0 ppid=1
+ pid=7754 auid=1000 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0
+ fsgid=0 tty=(none) ses=3 comm="dbus-daemon"
+ exe="/usr/bin/dbus-daemon"
+ subj=unconfined_u:unconfined_r:unconfined_t key=(null)
+
+("arch=c000003e syscall=233 a1=2" means "epoll_ctl(op=EPOLL_CTL_DEL)")
+
+Remove use of epds in epoll_ctl when op == EPOLL_CTL_DEL.
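+
+For context, a rough sketch of the surrounding logic in the epoll_ctl
+syscall (paraphrased, not the exact upstream code): epds is only copied
+from user space when the operation carries an event, so for
+EPOLL_CTL_DEL it remains uninitialized stack data:
+
+  struct epoll_event epds;
+
+  if (ep_op_has_event(op) &&
+      copy_from_user(&epds, event, sizeof(struct epoll_event)))
+      return -EFAULT;
+
+  /* later, before this fix, for every op including EPOLL_CTL_DEL: */
+  ep_take_care_of_epollwakeup(&epds);  /* reads epds.events */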
+
+Fixes: 4d7e30d98939 ("epoll: Add a flag, EPOLLWAKEUP, to prevent suspend while epoll events are ready")
+Signed-off-by: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Arve Hjønnevåg <arve@android.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/eventpoll.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
+ goto error_tgt_fput;
+
+ /* Check if EPOLLWAKEUP is allowed */
+- ep_take_care_of_epollwakeup(&epds);
++ if (ep_op_has_event(op))
++ ep_take_care_of_epollwakeup(&epds);
+
+ /*
+ * We have to check that the file structure underneath the file descriptor
--- /dev/null
+From 6ff66ac77aeaa9c13db28784e1c50c027a1f487b Mon Sep 17 00:00:00 2001
+From: Fabian Frederick <fabf@skynet.be>
+Date: Thu, 25 Sep 2014 16:05:27 -0700
+Subject: fs/cachefiles: add missing \n to kerror conversions
+
+From: Fabian Frederick <fabf@skynet.be>
+
+commit 6ff66ac77aeaa9c13db28784e1c50c027a1f487b upstream.
+
+Commit 0227d6abb378 ("fs/cachefiles: replace kerror by pr_err") didn't
+include the trailing newline that was part of the original kerror
+definition.
+
+Signed-off-by: Fabian Frederick <fabf@skynet.be>
+Reported-by: David Howells <dhowells@redhat.com>
+Acked-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cachefiles/bind.c | 8 ++++----
+ fs/cachefiles/daemon.c | 30 +++++++++++++++---------------
+ fs/cachefiles/internal.h | 2 +-
+ fs/cachefiles/main.c | 2 +-
+ fs/cachefiles/namei.c | 14 +++++++-------
+ fs/cachefiles/xattr.c | 10 +++++-----
+ 6 files changed, 33 insertions(+), 33 deletions(-)
+
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -50,18 +50,18 @@ int cachefiles_daemon_bind(struct cachef
+ cache->brun_percent < 100);
+
+ if (*args) {
+- pr_err("'bind' command doesn't take an argument");
++ pr_err("'bind' command doesn't take an argument\n");
+ return -EINVAL;
+ }
+
+ if (!cache->rootdirname) {
+- pr_err("No cache directory specified");
++ pr_err("No cache directory specified\n");
+ return -EINVAL;
+ }
+
+ /* don't permit already bound caches to be re-bound */
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("Cache already bound");
++ pr_err("Cache already bound\n");
+ return -EBUSY;
+ }
+
+@@ -248,7 +248,7 @@ error_open_root:
+ kmem_cache_free(cachefiles_object_jar, fsdef);
+ error_root_object:
+ cachefiles_end_secure(cache, saved_cred);
+- pr_err("Failed to register: %d", ret);
++ pr_err("Failed to register: %d\n", ret);
+ return ret;
+ }
+
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_po
+ static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
+ char *args)
+ {
+- pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
++ pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
+
+ return -EINVAL;
+ }
+@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty directory specified");
++ pr_err("Empty directory specified\n");
+ return -EINVAL;
+ }
+
+ if (cache->rootdirname) {
+- pr_err("Second cache directory specified");
++ pr_err("Second cache directory specified\n");
+ return -EEXIST;
+ }
+
+@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(stru
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty security context specified");
++ pr_err("Empty security context specified\n");
+ return -EINVAL;
+ }
+
+ if (cache->secctx) {
+- pr_err("Second security context specified");
++ pr_err("Second security context specified\n");
+ return -EINVAL;
+ }
+
+@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty tag specified");
++ pr_err("Empty tag specified\n");
+ return -EINVAL;
+ }
+
+@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("cull applied to unready cache");
++ pr_err("cull applied to unready cache\n");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+- pr_err("cull applied to dead cache");
++ pr_err("cull applied to dead cache\n");
+ return -EIO;
+ }
+
+@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct
+
+ notdir:
+ path_put(&path);
+- pr_err("cull command requires dirfd to be a directory");
++ pr_err("cull command requires dirfd to be a directory\n");
+ return -ENOTDIR;
+
+ inval:
+- pr_err("cull command requires dirfd and filename");
++ pr_err("cull command requires dirfd and filename\n");
+ return -EINVAL;
+ }
+
+@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struc
+ return 0;
+
+ inval:
+- pr_err("debug command requires mask");
++ pr_err("debug command requires mask\n");
+ return -EINVAL;
+ }
+
+@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struc
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("inuse applied to unready cache");
++ pr_err("inuse applied to unready cache\n");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+- pr_err("inuse applied to dead cache");
++ pr_err("inuse applied to dead cache\n");
+ return -EIO;
+ }
+
+@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struc
+
+ notdir:
+ path_put(&path);
+- pr_err("inuse command requires dirfd to be a directory");
++ pr_err("inuse command requires dirfd to be a directory\n");
+ return -ENOTDIR;
+
+ inval:
+- pr_err("inuse command requires dirfd and filename");
++ pr_err("inuse command requires dirfd and filename\n");
+ return -EINVAL;
+ }
+
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xatt
+
+ #define cachefiles_io_error(___cache, FMT, ...) \
+ do { \
+- pr_err("I/O Error: " FMT, ##__VA_ARGS__); \
++ pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
+ fscache_io_error(&(___cache)->cache); \
+ set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ } while (0)
+--- a/fs/cachefiles/main.c
++++ b/fs/cachefiles/main.c
+@@ -84,7 +84,7 @@ error_proc:
+ error_object_jar:
+ misc_deregister(&cachefiles_dev);
+ error_dev:
+- pr_err("failed to register: %d", ret);
++ pr_err("failed to register: %d\n", ret);
+ return ret;
+ }
+
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -543,7 +543,7 @@ lookup_again:
+ next, next->d_inode, next->d_inode->i_ino);
+
+ } else if (!S_ISDIR(next->d_inode->i_mode)) {
+- pr_err("inode %lu is not a directory",
++ pr_err("inode %lu is not a directory\n",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+@@ -574,7 +574,7 @@ lookup_again:
+ } else if (!S_ISDIR(next->d_inode->i_mode) &&
+ !S_ISREG(next->d_inode->i_mode)
+ ) {
+- pr_err("inode %lu is not a file or directory",
++ pr_err("inode %lu is not a file or directory\n",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(
+ ASSERT(subdir->d_inode);
+
+ if (!S_ISDIR(subdir->d_inode->i_mode)) {
+- pr_err("%s is not a directory", dirname);
++ pr_err("%s is not a directory\n", dirname);
+ ret = -EIO;
+ goto check_error;
+ }
+@@ -795,13 +795,13 @@ check_error:
+ mkdir_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(subdir);
+- pr_err("mkdir %s failed with error %d", dirname, ret);
++ pr_err("mkdir %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+ lookup_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = PTR_ERR(subdir);
+- pr_err("Lookup %s failed with error %d", dirname, ret);
++ pr_err("Lookup %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+ nomem_d_alloc:
+@@ -891,7 +891,7 @@ lookup_error:
+ if (ret == -EIO) {
+ cachefiles_io_error(cache, "Lookup failed");
+ } else if (ret != -ENOMEM) {
+- pr_err("Internal error: %d", ret);
++ pr_err("Internal error: %d\n", ret);
+ ret = -EIO;
+ }
+
+@@ -950,7 +950,7 @@ error:
+ }
+
+ if (ret != -ENOMEM) {
+- pr_err("Internal error: %d", ret);
++ pr_err("Internal error: %d\n", ret);
+ ret = -EIO;
+ }
+
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct
+ }
+
+ if (ret != -EEXIST) {
+- pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
++ pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct
+ if (ret == -ERANGE)
+ goto bad_type_length;
+
+- pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
++ pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+@@ -85,14 +85,14 @@ error:
+ return ret;
+
+ bad_type_length:
+- pr_err("Cache object %lu type xattr length incorrect",
++ pr_err("Cache object %lu type xattr length incorrect\n",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
+
+ bad_type:
+ xtype[2] = 0;
+- pr_err("Cache object %*.*s [%lu] type %s not %s",
++ pr_err("Cache object %*.*s [%lu] type %s not %s\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ xtype, type);
+@@ -293,7 +293,7 @@ error:
+ return ret;
+
+ bad_type_length:
+- pr_err("Cache object %lu xattr length incorrect",
++ pr_err("Cache object %lu xattr length incorrect\n",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
--- /dev/null
+From 7e8824816bda16bb11ff5ff1e1212d642e57b0b3 Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Tue, 9 Sep 2014 14:51:06 -0700
+Subject: fs/notify: don't show f_handle if exportfs_encode_inode_fh failed
+
+From: Andrey Vagin <avagin@openvz.org>
+
+commit 7e8824816bda16bb11ff5ff1e1212d642e57b0b3 upstream.
+
+Currently we handle only ENOSPC. In case of other errors the file_handle
+variable isn't filled properly and we end up showing part of the stack.
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fdinfo.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/notify/fdinfo.c
++++ b/fs/notify/fdinfo.c
+@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_
+ size = f.handle.handle_bytes >> 2;
+
+ ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+- if ((ret == FILEID_INVALID) || (ret == -ENOSPC)) {
++ if ((ret == FILEID_INVALID) || (ret < 0)) {
+ WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ return 0;
+ }
--- /dev/null
+From 1fc98d11cac6dd66342e5580cb2687e5b1e9a613 Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Tue, 9 Sep 2014 14:51:04 -0700
+Subject: fsnotify/fdinfo: use named constants instead of hardcoded values
+
+From: Andrey Vagin <avagin@openvz.org>
+
+commit 1fc98d11cac6dd66342e5580cb2687e5b1e9a613 upstream.
+
+MAX_HANDLE_SZ is equal to 128, but currently the size of pad is only 64
+bytes, so exportfs_encode_inode_fh can return an error.
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fdinfo.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/notify/fdinfo.c
++++ b/fs/notify/fdinfo.c
+@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_
+ {
+ struct {
+ struct file_handle handle;
+- u8 pad[64];
++ u8 pad[MAX_HANDLE_SZ];
+ } f;
+ int size, ret, i;
+
+@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_
+ size = f.handle.handle_bytes >> 2;
+
+ ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+- if ((ret == 255) || (ret == -ENOSPC)) {
++ if ((ret == FILEID_INVALID) || (ret == -ENOSPC)) {
+ WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ return 0;
+ }
--- /dev/null
+From 33b7f99cf003ca6c1d31c42b50e1100ad71aaec0 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Fri, 15 Aug 2014 17:23:02 -0400
+Subject: ftrace: Allow ftrace_ops to use the hashes from other ops
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 33b7f99cf003ca6c1d31c42b50e1100ad71aaec0 upstream.
+
+Currently the top-level debugfs function tracer shares its
+ftrace_ops with the function graph tracer. This was thought to be fine
+because the tracers are not used together, as one can only enable the
+function or the function_graph tracer in the current_tracer file.
+
+But that assumption proved to be incorrect. The function profiler
+can use the function graph tracer when function tracing is enabled.
+Since all function graph users use the function tracing ftrace_ops,
+this causes a conflict, and when a user enables both function profiling
+and the function tracer it will crash ftrace and disable it.
+
+The quick solution so far is to move them back to separate ftrace_ops
+as before. The problem, though, is synchronizing the functions that are
+traced, because both the function and function_graph tracers are
+limited by the selections made in the set_ftrace_filter and
+set_ftrace_notrace files.
+
+To handle this, a new structure is made called ftrace_ops_hash. This
+structure will now hold the filter_hash and notrace_hash, and the
+ftrace_ops will point to this structure. That will allow two ftrace_ops
+to share the same hashes.
+
+Since most ftrace_ops do not share the hashes, and to keep allocation
+simple, the ftrace_ops structure will include both a pointer to the
+ftrace_ops_hash, called func_hash, and the structure itself,
+called local_hash. When the ops are registered, the func_hash pointer
+will be initialized to point to the local_hash within the ftrace_ops
+structure. Some of the ftrace internal ftrace_ops will be initialized
+statically. This will allow for the function and function_graph tracer
+to have separate ops but still share the same hash tables that determine
+what functions they trace.
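+
+A rough sketch of the resulting arrangement (simplified from the hunks
+below; ops_a and ops_b are illustrative names, not real ops):
+
+  struct ftrace_ops_hash {
+          struct ftrace_hash *notrace_hash;
+          struct ftrace_hash *filter_hash;
+          struct mutex        regex_lock;
+  };
+
+  /* by default each ops points at its own embedded hash: */
+  ops_a.func_hash = &ops_a.local_hash;
+
+  /* two ops that must trace the same functions share one hash: */
+  ops_b.func_hash = &ops_a.local_hash;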
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ include/linux/ftrace.h | 13 +++++-
+ kernel/trace/ftrace.c | 98 +++++++++++++++++++++++++------------------------
+ 2 files changed, 62 insertions(+), 49 deletions(-)
+
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -103,6 +103,15 @@ enum {
+ FTRACE_OPS_FL_DELETED = 1 << 8,
+ };
+
++#ifdef CONFIG_DYNAMIC_FTRACE
++/* The hash used to know what functions callbacks trace */
++struct ftrace_ops_hash {
++ struct ftrace_hash *notrace_hash;
++ struct ftrace_hash *filter_hash;
++ struct mutex regex_lock;
++};
++#endif
++
+ /*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+@@ -121,8 +130,8 @@ struct ftrace_ops {
+ int __percpu *disabled;
+ void *private;
+ #ifdef CONFIG_DYNAMIC_FTRACE
+- struct ftrace_hash *notrace_hash;
+- struct ftrace_hash *filter_hash;
++ struct ftrace_ops_hash local_hash;
++ struct ftrace_ops_hash *func_hash;
+ struct mutex regex_lock;
+ #endif
+ };
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -65,15 +65,17 @@
+ #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+-#define INIT_REGEX_LOCK(opsname) \
+- .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
++#define INIT_OPS_HASH(opsname) \
++ .func_hash = &opsname.local_hash, \
++ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+ #else
+-#define INIT_REGEX_LOCK(opsname)
++#define INIT_OPS_HASH(opsname)
+ #endif
+
+ static struct ftrace_ops ftrace_list_end __read_mostly = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
++ INIT_OPS_HASH(ftrace_list_end)
+ };
+
+ /* ftrace_enabled is a method to turn ftrace on or off */
+@@ -143,7 +145,8 @@ static inline void ftrace_ops_init(struc
+ {
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+- mutex_init(&ops->regex_lock);
++ mutex_init(&ops->local_hash.regex_lock);
++ ops->func_hash = &ops->local_hash;
+ ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+ }
+ #endif
+@@ -902,7 +905,7 @@ static void unregister_ftrace_profiler(v
+ static struct ftrace_ops ftrace_profile_ops __read_mostly = {
+ .func = function_profile_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+- INIT_REGEX_LOCK(ftrace_profile_ops)
++ INIT_OPS_HASH(ftrace_profile_ops)
+ };
+
+ static int register_ftrace_profiler(void)
+@@ -1082,11 +1085,12 @@ static const struct ftrace_hash empty_ha
+ #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
+
+ static struct ftrace_ops global_ops = {
+- .func = ftrace_stub,
+- .notrace_hash = EMPTY_HASH,
+- .filter_hash = EMPTY_HASH,
+- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+- INIT_REGEX_LOCK(global_ops)
++ .func = ftrace_stub,
++ .local_hash.notrace_hash = EMPTY_HASH,
++ .local_hash.filter_hash = EMPTY_HASH,
++ INIT_OPS_HASH(global_ops)
++ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
++ FTRACE_OPS_FL_INITIALIZED,
+ };
+
+ struct ftrace_page {
+@@ -1227,8 +1231,8 @@ static void free_ftrace_hash_rcu(struct
+ void ftrace_free_filter(struct ftrace_ops *ops)
+ {
+ ftrace_ops_init(ops);
+- free_ftrace_hash(ops->filter_hash);
+- free_ftrace_hash(ops->notrace_hash);
++ free_ftrace_hash(ops->func_hash->filter_hash);
++ free_ftrace_hash(ops->func_hash->notrace_hash);
+ }
+
+ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+@@ -1394,8 +1398,8 @@ ftrace_ops_test(struct ftrace_ops *ops,
+ return 0;
+ #endif
+
+- filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+- notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
++ filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
++ notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+
+ if ((ftrace_hash_empty(filter_hash) ||
+ ftrace_lookup_ip(filter_hash, ip)) &&
+@@ -1519,14 +1523,14 @@ static void __ftrace_hash_rec_update(str
+ * gets inversed.
+ */
+ if (filter_hash) {
+- hash = ops->filter_hash;
+- other_hash = ops->notrace_hash;
++ hash = ops->func_hash->filter_hash;
++ other_hash = ops->func_hash->notrace_hash;
+ if (ftrace_hash_empty(hash))
+ all = 1;
+ } else {
+ inc = !inc;
+- hash = ops->notrace_hash;
+- other_hash = ops->filter_hash;
++ hash = ops->func_hash->notrace_hash;
++ other_hash = ops->func_hash->filter_hash;
+ /*
+ * If the notrace hash has no items,
+ * then there's nothing to do.
+@@ -2196,8 +2200,8 @@ static inline int ops_traces_mod(struct
+ * Filter_hash being empty will default to trace module.
+ * But notrace hash requires a test of individual module functions.
+ */
+- return ftrace_hash_empty(ops->filter_hash) &&
+- ftrace_hash_empty(ops->notrace_hash);
++ return ftrace_hash_empty(ops->func_hash->filter_hash) &&
++ ftrace_hash_empty(ops->func_hash->notrace_hash);
+ }
+
+ /*
+@@ -2219,12 +2223,12 @@ ops_references_rec(struct ftrace_ops *op
+ return 0;
+
+ /* The function must be in the filter */
+- if (!ftrace_hash_empty(ops->filter_hash) &&
+- !ftrace_lookup_ip(ops->filter_hash, rec->ip))
++ if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
++ !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+- if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
++ if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+ return 0;
+
+ return 1;
+@@ -2544,10 +2548,10 @@ t_next(struct seq_file *m, void *v, loff
+ } else {
+ rec = &iter->pg->records[iter->idx++];
+ if (((iter->flags & FTRACE_ITER_FILTER) &&
+- !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
++ !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
+
+ ((iter->flags & FTRACE_ITER_NOTRACE) &&
+- !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
++ !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
+
+ ((iter->flags & FTRACE_ITER_ENABLED) &&
+ !(rec->flags & FTRACE_FL_ENABLED))) {
+@@ -2596,7 +2600,7 @@ static void *t_start(struct seq_file *m,
+ * functions are enabled.
+ */
+ if (iter->flags & FTRACE_ITER_FILTER &&
+- ftrace_hash_empty(ops->filter_hash)) {
++ ftrace_hash_empty(ops->func_hash->filter_hash)) {
+ if (*pos > 0)
+ return t_hash_start(m, pos);
+ iter->flags |= FTRACE_ITER_PRINTALL;
+@@ -2750,12 +2754,12 @@ ftrace_regex_open(struct ftrace_ops *ops
+ iter->ops = ops;
+ iter->flags = flag;
+
+- mutex_lock(&ops->regex_lock);
++ mutex_lock(&ops->func_hash->regex_lock);
+
+ if (flag & FTRACE_ITER_NOTRACE)
+- hash = ops->notrace_hash;
++ hash = ops->func_hash->notrace_hash;
+ else
+- hash = ops->filter_hash;
++ hash = ops->func_hash->filter_hash;
+
+ if (file->f_mode & FMODE_WRITE) {
+ iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+@@ -2788,7 +2792,7 @@ ftrace_regex_open(struct ftrace_ops *ops
+ file->private_data = iter;
+
+ out_unlock:
+- mutex_unlock(&ops->regex_lock);
++ mutex_unlock(&ops->func_hash->regex_lock);
+
+ return ret;
+ }
+@@ -3026,7 +3030,7 @@ static struct ftrace_ops trace_probe_ops
+ {
+ .func = function_trace_probe_call,
+ .flags = FTRACE_OPS_FL_INITIALIZED,
+- INIT_REGEX_LOCK(trace_probe_ops)
++ INIT_OPS_HASH(trace_probe_ops)
+ };
+
+ static int ftrace_probe_registered;
+@@ -3089,7 +3093,7 @@ register_ftrace_function_probe(char *glo
+ void *data)
+ {
+ struct ftrace_func_probe *entry;
+- struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
++ struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+ struct ftrace_hash *hash;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+@@ -3106,7 +3110,7 @@ register_ftrace_function_probe(char *glo
+ if (WARN_ON(not))
+ return -EINVAL;
+
+- mutex_lock(&trace_probe_ops.regex_lock);
++ mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash) {
+@@ -3175,7 +3179,7 @@ register_ftrace_function_probe(char *glo
+ out_unlock:
+ mutex_unlock(&ftrace_lock);
+ out:
+- mutex_unlock(&trace_probe_ops.regex_lock);
++ mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
+ free_ftrace_hash(hash);
+
+ return count;
+@@ -3193,7 +3197,7 @@ __unregister_ftrace_function_probe(char
+ struct ftrace_func_entry *rec_entry;
+ struct ftrace_func_probe *entry;
+ struct ftrace_func_probe *p;
+- struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
++ struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+ struct list_head free_list;
+ struct ftrace_hash *hash;
+ struct hlist_node *tmp;
+@@ -3215,7 +3219,7 @@ __unregister_ftrace_function_probe(char
+ return;
+ }
+
+- mutex_lock(&trace_probe_ops.regex_lock);
++ mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash)
+@@ -3268,7 +3272,7 @@ __unregister_ftrace_function_probe(char
+ mutex_unlock(&ftrace_lock);
+
+ out_unlock:
+- mutex_unlock(&trace_probe_ops.regex_lock);
++ mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
+ free_ftrace_hash(hash);
+ }
+
+@@ -3464,12 +3468,12 @@ ftrace_set_hash(struct ftrace_ops *ops,
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
+- mutex_lock(&ops->regex_lock);
++ mutex_lock(&ops->func_hash->regex_lock);
+
+ if (enable)
+- orig_hash = &ops->filter_hash;
++ orig_hash = &ops->func_hash->filter_hash;
+ else
+- orig_hash = &ops->notrace_hash;
++ orig_hash = &ops->func_hash->notrace_hash;
+
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash) {
+@@ -3497,7 +3501,7 @@ ftrace_set_hash(struct ftrace_ops *ops,
+ mutex_unlock(&ftrace_lock);
+
+ out_regex_unlock:
+- mutex_unlock(&ops->regex_lock);
++ mutex_unlock(&ops->func_hash->regex_lock);
+
+ free_ftrace_hash(hash);
+ return ret;
+@@ -3704,15 +3708,15 @@ int ftrace_regex_release(struct inode *i
+
+ trace_parser_put(parser);
+
+- mutex_lock(&iter->ops->regex_lock);
++ mutex_lock(&iter->ops->func_hash->regex_lock);
+
+ if (file->f_mode & FMODE_WRITE) {
+ filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
+
+ if (filter_hash)
+- orig_hash = &iter->ops->filter_hash;
++ orig_hash = &iter->ops->func_hash->filter_hash;
+ else
+- orig_hash = &iter->ops->notrace_hash;
++ orig_hash = &iter->ops->func_hash->notrace_hash;
+
+ mutex_lock(&ftrace_lock);
+ ret = ftrace_hash_move(iter->ops, filter_hash,
+@@ -3723,7 +3727,7 @@ int ftrace_regex_release(struct inode *i
+ mutex_unlock(&ftrace_lock);
+ }
+
+- mutex_unlock(&iter->ops->regex_lock);
++ mutex_unlock(&iter->ops->func_hash->regex_lock);
+ free_ftrace_hash(iter->hash);
+ kfree(iter);
+
+@@ -4335,7 +4339,7 @@ void __init ftrace_init(void)
+ static struct ftrace_ops global_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+- INIT_REGEX_LOCK(global_ops)
++ INIT_OPS_HASH(global_ops)
+ };
+
+ static int __init ftrace_nodyn_init(void)
+@@ -4437,7 +4441,7 @@ ftrace_ops_control_func(unsigned long ip
+ static struct ftrace_ops control_ops = {
+ .func = ftrace_ops_control_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+- INIT_REGEX_LOCK(control_ops)
++ INIT_OPS_HASH(control_ops)
+ };
+
+ static inline void
--- /dev/null
+From 5f151b240192a1557119d5375af71efc26825bc8 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Fri, 15 Aug 2014 17:18:46 -0400
+Subject: ftrace: Fix function_profiler and function tracer together
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 5f151b240192a1557119d5375af71efc26825bc8 upstream.
+
+The latest rewrite of ftrace removed the separate ftrace_ops of
+the function tracer and the function graph tracer and had them
+share the same ftrace_ops. This simplified the accounting by removing
+the multiple layers of functions called, where the global_ops func
+would call a special list that would iterate over the other ops that
+were registered within it (like function and function graph), which
+itself was registered to the ftrace ops list of all functions
+currently active. If that sounds confusing, the code that implemented
+it was also confusing and its removal is a good thing.
+
+The problem with this change was that it assumed that the function
+and function graph tracer can never be used at the same time.
+This is mostly true, but there is an exception. That is when the
+function profiler uses the function graph tracer to profile.
+The function profiler can be activated the same time as the function
+tracer, and this breaks the assumption; the result is that ftrace
+will crash (it detects the error and shuts itself down, it does not
+cause a kernel oops).
+
+To solve this issue, a previous change allowed the hash tables
+for the functions traced by a ftrace_ops to be a pointer and let
+multiple ftrace_ops share the same hash. This allows the function
+and function_graph tracer to have separate ftrace_ops, but still
+share the hash, which is what is done.
+
+Now the function and function graph tracers have separate ftrace_ops
+again, and the function tracer can be run while the function profiler
+is active.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ kernel/trace/ftrace.c | 49 ++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 36 insertions(+), 13 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -68,8 +68,12 @@
+ #define INIT_OPS_HASH(opsname) \
+ .func_hash = &opsname.local_hash, \
+ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
++#define ASSIGN_OPS_HASH(opsname, val) \
++ .func_hash = val, \
++ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+ #else
+ #define INIT_OPS_HASH(opsname)
++#define ASSIGN_OPS_HASH(opsname, val)
+ #endif
+
+ static struct ftrace_ops ftrace_list_end __read_mostly = {
+@@ -110,6 +114,7 @@ static struct ftrace_ops *ftrace_ops_lis
+ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+ static struct ftrace_ops global_ops;
++static struct ftrace_ops graph_ops;
+ static struct ftrace_ops control_ops;
+
+ #if ARCH_SUPPORTS_FTRACE_OPS
+@@ -4339,7 +4344,6 @@ void __init ftrace_init(void)
+ static struct ftrace_ops global_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+- INIT_OPS_HASH(global_ops)
+ };
+
+ static int __init ftrace_nodyn_init(void)
+@@ -4877,6 +4881,14 @@ ftrace_enable_sysctl(struct ctl_table *t
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
++static struct ftrace_ops graph_ops = {
++ .func = ftrace_stub,
++ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
++ FTRACE_OPS_FL_INITIALIZED |
++ FTRACE_OPS_FL_STUB,
++ ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
++};
++
+ static int ftrace_graph_active;
+
+ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+@@ -5039,12 +5051,28 @@ static int ftrace_graph_entry_test(struc
+ */
+ static void update_function_graph_func(void)
+ {
+- if (ftrace_ops_list == &ftrace_list_end ||
+- (ftrace_ops_list == &global_ops &&
+- global_ops.next == &ftrace_list_end))
+- ftrace_graph_entry = __ftrace_graph_entry;
+- else
++ struct ftrace_ops *op;
++ bool do_test = false;
++
++ /*
++ * The graph and global ops share the same set of functions
++ * to test. If any other ops is on the list, then
++ * the graph tracing needs to test if its the function
++ * it should call.
++ */
++ do_for_each_ftrace_op(op, ftrace_ops_list) {
++ if (op != &global_ops && op != &graph_ops &&
++ op != &ftrace_list_end) {
++ do_test = true;
++ /* in double loop, break out with goto */
++ goto out;
++ }
++ } while_for_each_ftrace_op(op);
++ out:
++ if (do_test)
+ ftrace_graph_entry = ftrace_graph_entry_test;
++ else
++ ftrace_graph_entry = __ftrace_graph_entry;
+ }
+
+ static struct notifier_block ftrace_suspend_notifier = {
+@@ -5085,11 +5113,7 @@ int register_ftrace_graph(trace_func_gra
+ ftrace_graph_entry = ftrace_graph_entry_test;
+ update_function_graph_func();
+
+- /* Function graph doesn't use the .func field of global_ops */
+- global_ops.flags |= FTRACE_OPS_FL_STUB;
+-
+- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+-
++ ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
+ out:
+ mutex_unlock(&ftrace_lock);
+ return ret;
+@@ -5106,8 +5130,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
+ __ftrace_graph_entry = ftrace_graph_entry_stub;
+- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+- global_ops.flags &= ~FTRACE_OPS_FL_STUB;
++ ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+
--- /dev/null
+From 84261912ebee41269004e8a9f3614ba38ef6b206 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 18 Aug 2014 13:21:08 -0400
+Subject: ftrace: Update all ftrace_ops for a ftrace_hash_ops update
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 84261912ebee41269004e8a9f3614ba38ef6b206 upstream.
+
+When updating what an ftrace_ops traces, if it is registered (that is,
+actively tracing), and that ftrace_ops uses the shared global_ops
+local_hash, then we need to update all tracers that are active and
+also share the global_ops' ftrace_ops_hash.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ kernel/trace/ftrace.c | 43 +++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 39 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1298,9 +1298,9 @@ alloc_and_copy_ftrace_hash(int size_bits
+ }
+
+ static void
+-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
++ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
+ static void
+-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
++ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
+
+ static int
+ ftrace_hash_move(struct ftrace_ops *ops, int enable,
+@@ -1320,7 +1320,7 @@ ftrace_hash_move(struct ftrace_ops *ops,
+ * Remove the current set, update the hash and add
+ * them back.
+ */
+- ftrace_hash_rec_disable(ops, enable);
++ ftrace_hash_rec_disable_modify(ops, enable);
+
+ /*
+ * If the new source is empty, just free dst and assign it
+@@ -1369,7 +1369,7 @@ ftrace_hash_move(struct ftrace_ops *ops,
+ * On success, we enable the new hash.
+ * On failure, we re-enable the original hash.
+ */
+- ftrace_hash_rec_enable(ops, enable);
++ ftrace_hash_rec_enable_modify(ops, enable);
+
+ return ret;
+ }
+@@ -1613,6 +1613,41 @@ static void ftrace_hash_rec_enable(struc
+ __ftrace_hash_rec_update(ops, filter_hash, 1);
+ }
+
++static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
++ int filter_hash, int inc)
++{
++ struct ftrace_ops *op;
++
++ __ftrace_hash_rec_update(ops, filter_hash, inc);
++
++ if (ops->func_hash != &global_ops.local_hash)
++ return;
++
++ /*
++ * If the ops shares the global_ops hash, then we need to update
++ * all ops that are enabled and use this hash.
++ */
++ do_for_each_ftrace_op(op, ftrace_ops_list) {
++ /* Already done */
++ if (op == ops)
++ continue;
++ if (op->func_hash == &global_ops.local_hash)
++ __ftrace_hash_rec_update(op, filter_hash, inc);
++ } while_for_each_ftrace_op(op);
++}
++
++static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
++ int filter_hash)
++{
++ ftrace_hash_rec_update_modify(ops, filter_hash, 0);
++}
++
++static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
++ int filter_hash)
++{
++ ftrace_hash_rec_update_modify(ops, filter_hash, 1);
++}
++
+ static void print_ip_ins(const char *fmt, unsigned char *p)
+ {
+ int i;
--- /dev/null
+From 13c42c2f43b19aab3195f2d357db00d1e885eaa8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 11 Sep 2014 23:44:35 +0200
+Subject: futex: Unlock hb->lock in futex_wait_requeue_pi() error path
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 13c42c2f43b19aab3195f2d357db00d1e885eaa8 upstream.
+
+futex_wait_requeue_pi() calls futex_wait_setup(). If
+futex_wait_setup() succeeds it returns with hb->lock held and
+preemption disabled. Now the sanity check after this does:
+
+ if (match_futex(&q.key, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
+which releases the keys but does not release hb->lock.
+
+So we happily return to user space with hb->lock held and therefore
+preemption disabled.
+
+Unlock hb->lock before taking the exit route.
+
+Reported-by: Dave "Trinity" Jones <davej@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Darren Hart <dvhart@linux.intel.com>
+Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1409112318500.4178@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/futex.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2628,6 +2628,7 @@ static int futex_wait_requeue_pi(u32 __u
+ * shared futexes. We need to compare the keys:
+ */
+ if (match_futex(&q.key, &key2)) {
++ queue_unlock(hb);
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
--- /dev/null
+From 46f341ffcfb5d8530f7d1e60f3be06cce6661b62 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@fb.com>
+Date: Tue, 16 Sep 2014 13:38:51 -0600
+Subject: genhd: fix leftover might_sleep() in blk_free_devt()
+
+From: Jens Axboe <axboe@fb.com>
+
+commit 46f341ffcfb5d8530f7d1e60f3be06cce6661b62 upstream.
+
+Commit 2da78092 changed the locking from a mutex to a spinlock,
+so we no longer sleep in this context. But there was a leftover
+might_sleep() in there, which now triggers since we do the final
+free from an RCU callback. Get rid of it.
+
+Reported-by: Pontus Fuchs <pontus.fuchs@gmail.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/genhd.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *par
+ */
+ void blk_free_devt(dev_t devt)
+ {
+- might_sleep();
+-
+ if (devt == MKDEV(0, 0))
+ return;
+
--- /dev/null
+From 324b0398781e7afb846378dd2d8a4374faaf236b Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Date: Tue, 16 Sep 2014 16:23:15 +0300
+Subject: gpio: Fix potential NULL handler data in chained irqchip handler
+
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+
+commit 324b0398781e7afb846378dd2d8a4374faaf236b upstream.
+
+There is a possibility with misconfigured pins that an interrupt occurs
+instantly after setting irq_set_chained_handler() in
+gpiochip_set_chained_irqchip(). Now if the handler gets called before
+irq_set_handler_data(), the handler gets NULL handler data.
+
+Fix this by moving the irq_set_handler_data() call before
+irq_set_chained_handler() in gpiochip_set_chained_irqchip().
+
+Reviewed-by: Alexandre Courbot <acourbot@nvidia.com>
+Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1368,12 +1368,12 @@ void gpiochip_set_chained_irqchip(struct
+ return;
+ }
+
+- irq_set_chained_handler(parent_irq, parent_handler);
+ /*
+ * The parent irqchip is already using the chip_data for this
+ * irqchip, so our callbacks simply use the handler_data.
+ */
+ irq_set_handler_data(parent_irq, gpiochip);
++ irq_set_chained_handler(parent_irq, parent_handler);
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
+
--- /dev/null
+From 39c627a084475e8a690a4a9e7601410ca173ddd2 Mon Sep 17 00:00:00 2001
+From: Robert Coulson <rob.coulson@gmail.com>
+Date: Thu, 28 Aug 2014 10:45:43 -0700
+Subject: hwmon: (ds1621) Update zbits after conversion rate change
+
+From: Robert Coulson <rob.coulson@gmail.com>
+
+commit 39c627a084475e8a690a4a9e7601410ca173ddd2 upstream.
+
+After the conversion rate is changed, the zbits are not updated,
+but should be, since they are used later in the set_temp function.
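+
+Background sketch of why zbits matters (assumed from the zbits = 7 - resol
+relation visible in the diff; the register LSB is 1/256 degree C):
+
+#include <stdio.h>
+
+int main(void)
+{
+	for (int resol = 0; resol < 4; resol++) {
+		int zbits = 7 - resol;	/* low bits that always read zero */
+
+		/* set_temp-style rounding has to snap to this step size */
+		printf("resol %d: zbits %d, step %.4f degC\n",
+		       resol, zbits, (1 << zbits) / 256.0);
+	}
+	return 0;
+}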
+
+Fixes: a50d9a4d9ad3 ("hwmon: (ds1621) Fix temperature rounding operations")
+Reported-by: Murat Ilsever <murat.ilsever@gmail.com>
+Signed-off-by: Robert Coulson <rob.coulson@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/ds1621.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/hwmon/ds1621.c
++++ b/drivers/hwmon/ds1621.c
+@@ -309,6 +309,7 @@ static ssize_t set_convrate(struct devic
+ data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
+ i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
+ data->update_interval = ds1721_convrates[resol];
++ data->zbits = 7 - resol;
+ mutex_unlock(&data->update_lock);
+
+ return count;
--- /dev/null
+From c01206796139e2b1feb7539bc72174fef1c6dc6e Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Wed, 10 Sep 2014 13:50:37 -0700
+Subject: Input: atkbd - do not try 'deactivate' keyboard on any LG laptops
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit c01206796139e2b1feb7539bc72174fef1c6dc6e upstream.
+
+We are getting more and more reports about LG laptops not having a
+functioning keyboard if we try to deactivate the keyboard during probe.
+Given that having the keyboard deactivated is merely "nice to have"
+rather than a hard requirement for probing, let's disable it on all
+LG boxes instead of trying to hunt down particular models.
+
+This change is prompted by patches trying to add "LG Electronics"/"ROCKY"
+and "LG Electronics"/"LW60-F27B" to the DMI list.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=77051
+
+Reported-by: Jaime Velasco Juan <jsagarribay@gmail.com>
+Reported-by: Georgios Tsalikis <georgios@tsalikis.net>
+Tested-by: Jaime Velasco Juan <jsagarribay@gmail.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/keyboard/atkbd.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
+- },
+- .callback = atkbd_deactivate_fixup,
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
+ },
+ .callback = atkbd_deactivate_fixup,
+ },
--- /dev/null
+From 271329b3c798b2102120f5df829071c211ef00ed Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Mon, 8 Sep 2014 14:39:52 -0700
+Subject: Input: elantech - fix detection of touchpad on ASUS s301l
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 271329b3c798b2102120f5df829071c211ef00ed upstream.
+
+Adjust Elantech signature validation to account for newer models of
+touchpads.
+
+Reported-and-tested-by: Màrius Monton <marius.monton@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/elantech.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1253,6 +1253,13 @@ static bool elantech_is_signature_valid(
+ if (param[1] == 0)
+ return true;
+
++ /*
++ * Some models have a revision higher than 20. Meaning param[2] may
++ * be 10 or 20, skip the rates check for these.
++ */
++ if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
++ return true;
++
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (param[2] == rates[i])
+ return false;
--- /dev/null
+From cc18a69c92d0972bc2fc5a047ee3be1e8398171b Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 10 Sep 2014 13:53:37 -0700
+Subject: Input: i8042 - add Fujitsu U574 to no_timeout dmi table
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit cc18a69c92d0972bc2fc5a047ee3be1e8398171b upstream.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=69731
+
+Reported-by: Jason Robinson <mail@jasonrobinson.me>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -608,6 +608,14 @@ static const struct dmi_system_id __init
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
++ {
++ /* Fujitsu U574 laptop */
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
++ },
++ },
+ { }
+ };
+
--- /dev/null
+From d2682118f4bb3ceb835f91c1a694407a31bb7378 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 11 Sep 2014 10:10:26 -0700
+Subject: Input: i8042 - add nomux quirk for Avatar AVIU-145A6
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit d2682118f4bb3ceb835f91c1a694407a31bb7378 upstream.
+
+The sys_vendor / product_name are somewhat generic unfortunately, so this
+may lead to some false positives. But nomux usually does no harm, whereas
+not having it clearly is causing problems on the Avatar AVIU-145A6.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=77391
+
+Reported-by: Hugo P <saurosii@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -465,6 +465,13 @@ static const struct dmi_system_id __init
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
++ {
++ /* Avatar AVIU-145A6 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
++ },
++ },
+ { }
+ };
+
--- /dev/null
+From a80d8b02751060a178bb1f7a6b7a93645a7a308b Mon Sep 17 00:00:00 2001
+From: John Sung <penmount.touch@gmail.com>
+Date: Tue, 9 Sep 2014 10:06:51 -0700
+Subject: Input: serport - add compat handling for SPIOCSTYPE ioctl
+
+From: John Sung <penmount.touch@gmail.com>
+
+commit a80d8b02751060a178bb1f7a6b7a93645a7a308b upstream.
+
+When running a 32-bit inputattach utility on a 64-bit system, it fails with
+the error "inputattach: can't set device type". This is caused by the
+serport device driver not supporting compat_ioctl, so the SPIOCSTYPE ioctl
+fails.
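+
+A small user-space illustration of why the 32-bit command number never
+matches (assuming the native define SPIOCSTYPE = _IOW('q', 0x01, unsigned
+long), which the compat define added below mirrors with compat_ulong_t):
+
+#include <stdio.h>
+#include <sys/ioctl.h>
+
+int main(void)
+{
+	/* what a 32-bit inputattach encodes: unsigned long is 4 bytes there */
+	unsigned int cmd32 = _IOW('q', 0x01, unsigned int);
+	/* what the 64-bit serport ioctl handler compares against */
+	unsigned int cmd64 = _IOW('q', 0x01, unsigned long);
+
+	printf("32-bit SPIOCSTYPE: %#x\n", cmd32);
+	printf("64-bit SPIOCSTYPE: %#x\n", cmd64);
+	/* The size encoded in the ioctl number differs (4 vs 8 bytes), so
+	 * without a compat_ioctl handler serport returns -EINVAL to the
+	 * 32-bit caller. */
+	return 0;
+}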
+
+Signed-off-by: John Sung <penmount.touch@gmail.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/serport.c | 45 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 7 deletions(-)
+
+--- a/drivers/input/serio/serport.c
++++ b/drivers/input/serio/serport.c
+@@ -21,6 +21,7 @@
+ #include <linux/init.h>
+ #include <linux/serio.h>
+ #include <linux/tty.h>
++#include <linux/compat.h>
+
+ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
+ MODULE_DESCRIPTION("Input device TTY line discipline");
+@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct
+ return 0;
+ }
+
++static void serport_set_type(struct tty_struct *tty, unsigned long type)
++{
++ struct serport *serport = tty->disc_data;
++
++ serport->id.proto = type & 0x000000ff;
++ serport->id.id = (type & 0x0000ff00) >> 8;
++ serport->id.extra = (type & 0x00ff0000) >> 16;
++}
++
+ /*
+ * serport_ldisc_ioctl() allows to set the port protocol, and device ID
+ */
+
+-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
++static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
++ unsigned int cmd, unsigned long arg)
+ {
+- struct serport *serport = (struct serport*) tty->disc_data;
+- unsigned long type;
+-
+ if (cmd == SPIOCSTYPE) {
++ unsigned long type;
++
+ if (get_user(type, (unsigned long __user *) arg))
+ return -EFAULT;
+
+- serport->id.proto = type & 0x000000ff;
+- serport->id.id = (type & 0x0000ff00) >> 8;
+- serport->id.extra = (type & 0x00ff0000) >> 16;
++ serport_set_type(tty, type);
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++#ifdef CONFIG_COMPAT
++#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
++static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
++ struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ if (cmd == COMPAT_SPIOCSTYPE) {
++ void __user *uarg = compat_ptr(arg);
++ compat_ulong_t compat_type;
++
++ if (get_user(compat_type, (compat_ulong_t __user *)uarg))
++ return -EFAULT;
+
++ serport_set_type(tty, compat_type);
+ return 0;
+ }
+
+ return -EINVAL;
+ }
++#endif
+
+ static void serport_ldisc_write_wakeup(struct tty_struct * tty)
+ {
+@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldis
+ .close = serport_ldisc_close,
+ .read = serport_ldisc_read,
+ .ioctl = serport_ldisc_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = serport_ldisc_compat_ioctl,
++#endif
+ .receive_buf = serport_ldisc_receive,
+ .write_wakeup = serport_ldisc_write_wakeup
+ };
--- /dev/null
+From 5715fc764f7753d464dbe094b5ef9cffa6e479a4 Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Sat, 30 Aug 2014 13:51:06 -0700
+Subject: Input: synaptics - add support for ForcePads
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit 5715fc764f7753d464dbe094b5ef9cffa6e479a4 upstream.
+
+ForcePads are found on HP EliteBook 1040 laptops. They lack any kind of
+physical buttons; instead, they generate a primary button click when the
+user presses somewhat hard on the surface of the touchpad. Unfortunately
+they also report a primary button click whenever there are 2 or more
+contacts on the pad, messing up all multi-finger gestures (2-finger
+scrolling, multi-finger tapping, etc). To cope with this behavior we
+introduce a delay (currently 50 msecs) in reporting the primary press in
+case more contacts appear.
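+
+A simplified user-space model of the debounce described above (illustrative
+only; the real logic is in synaptics_parse_hw_state() in the diff below):
+
+#include <stdbool.h>
+#include <stdio.h>
+
+struct fp_state { unsigned long press_start; bool press, report_press; };
+
+static bool fp_packet(struct fp_state *s, unsigned long now_ms,
+		      int pressure, int contacts)
+{
+	if (pressure == 0) {			/* pad released: reset */
+		s->press = s->report_press = false;
+	} else if (contacts > 1) {		/* extra fingers: not a click */
+		s->press = false;
+	} else if (!s->press) {			/* hard press just started */
+		s->press = true;
+		s->press_start = now_ms;
+	} else if (now_ms - s->press_start >= 50) {
+		s->report_press = true;		/* held 50 ms: report click */
+	}
+	return s->report_press;
+}
+
+int main(void)
+{
+	struct fp_state s = { 0 };
+	int a, b, c;
+
+	a = fp_packet(&s,  0, 80, 1);	/* press starts       -> 0 */
+	b = fp_packet(&s, 30, 80, 1);	/* still within 50 ms -> 0 */
+	c = fp_packet(&s, 60, 80, 1);	/* held long enough   -> 1 */
+	printf("%d %d %d\n", a, b, c);
+	return 0;
+}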
+
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/synaptics.c | 68 ++++++++++++++++++++++++++++++----------
+ drivers/input/mouse/synaptics.h | 11 ++++++
+ 2 files changed, 63 insertions(+), 16 deletions(-)
+
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -626,10 +626,61 @@ static int synaptics_parse_hw_state(cons
+ ((buf[0] & 0x04) >> 1) |
+ ((buf[3] & 0x04) >> 2));
+
++ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
++ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
++ hw->w == 2) {
++ synaptics_parse_agm(buf, priv, hw);
++ return 1;
++ }
++
++ hw->x = (((buf[3] & 0x10) << 8) |
++ ((buf[1] & 0x0f) << 8) |
++ buf[4]);
++ hw->y = (((buf[3] & 0x20) << 7) |
++ ((buf[1] & 0xf0) << 4) |
++ buf[5]);
++ hw->z = buf[2];
++
+ hw->left = (buf[0] & 0x01) ? 1 : 0;
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+
+- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
++ if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
++ /*
++ * ForcePads, like Clickpads, use middle button
++ * bits to report primary button clicks.
++ * Unfortunately they report primary button not
++ * only when user presses on the pad above certain
++ * threshold, but also when there are more than one
++ * finger on the touchpad, which interferes with
++ * out multi-finger gestures.
++ */
++ if (hw->z == 0) {
++ /* No contacts */
++ priv->press = priv->report_press = false;
++ } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
++ /*
++ * Single-finger touch with pressure above
++ * the threshold. If pressure stays long
++ * enough, we'll start reporting primary
++ * button. We rely on the device continuing
++ * sending data even if finger does not
++ * move.
++ */
++ if (!priv->press) {
++ priv->press_start = jiffies;
++ priv->press = true;
++ } else if (time_after(jiffies,
++ priv->press_start +
++ msecs_to_jiffies(50))) {
++ priv->report_press = true;
++ }
++ } else {
++ priv->press = false;
++ }
++
++ hw->left = priv->report_press;
++
++ } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+ /*
+ * Clickpad's button is transmitted as middle button,
+ * however, since it is primary button, we will report
+@@ -648,21 +699,6 @@ static int synaptics_parse_hw_state(cons
+ hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
+ }
+
+- if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+- hw->w == 2) {
+- synaptics_parse_agm(buf, priv, hw);
+- return 1;
+- }
+-
+- hw->x = (((buf[3] & 0x10) << 8) |
+- ((buf[1] & 0x0f) << 8) |
+- buf[4]);
+- hw->y = (((buf[3] & 0x20) << 7) |
+- ((buf[1] & 0xf0) << 4) |
+- buf[5]);
+- hw->z = buf[2];
+-
+ if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
+ ((buf[0] ^ buf[3]) & 0x02)) {
+ switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -78,6 +78,11 @@
+ * 2 0x08 image sensor image sensor tracks 5 fingers, but only
+ * reports 2.
+ * 2 0x20 report min query 0x0f gives min coord reported
++ * 2 0x80 forcepad forcepad is a variant of clickpad that
++ * does not have physical buttons but rather
++ * uses pressure above certain threshold to
++ * report primary clicks. Forcepads also have
++ * clickpad bit set.
+ */
+ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
+@@ -86,6 +91,7 @@
+ #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
+ #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
+ #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
++#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
+
+ /* synaptics modes query bits */
+ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
+@@ -177,6 +183,11 @@ struct synaptics_data {
+ */
+ struct synaptics_hw_state agm;
+ bool agm_pending; /* new AGM packet received */
++
++ /* ForcePad handling */
++ unsigned long press_start;
++ bool press;
++ bool report_press;
+ };
+
+ void synaptics_module_init(void);
--- /dev/null
+From acbbe6fbb240a927ee1f5994f04d31267d422215 Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Date: Tue, 9 Sep 2014 14:51:01 -0700
+Subject: kcmp: fix standard comparison bug
+
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+
+commit acbbe6fbb240a927ee1f5994f04d31267d422215 upstream.
+
+The C operator <= defines a perfectly fine total ordering on the set of
+values representable in a long. However, unlike its namesake in the
+integers, it is not translation invariant, meaning that we do not have
+"b <= c" iff "a+b <= a+c" for all a,b,c.
+
+This means that it is always wrong to try to boil down the relationship
+between two longs to a question about the sign of their difference,
+because the resulting relation [a LEQ b iff a-b <= 0] is neither
+anti-symmetric nor transitive. The former is due to -LONG_MIN==LONG_MIN
+(take any two a,b with a-b = LONG_MIN; then a LEQ b and b LEQ a, but a !=
+b). The latter can be seen either by observing that x LEQ x+1 for all x,
+implying x LEQ x+1 LEQ x+2 ... LEQ x-1 LEQ x; or more directly with the
+simple example a=LONG_MIN, b=0, c=1, for which a-b < 0, b-c < 0, but a-c >
+0.
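+
+A self-contained demonstration of the non-transitivity (the subtraction is
+done in unsigned arithmetic to get the usual wrap-around without invoking
+signed-overflow undefined behaviour; the final conversion back to long is
+implementation-defined but wraps on Linux compilers):
+
+#include <limits.h>
+#include <stdio.h>
+
+static int leq(long a, long b)	/* the broken "a LEQ b iff a-b <= 0" */
+{
+	return (long)((unsigned long)a - (unsigned long)b) <= 0;
+}
+
+int main(void)
+{
+	long a = LONG_MIN, b = 0, c = 1;
+
+	printf("a LEQ b: %d\n", leq(a, b));	/* 1 */
+	printf("b LEQ c: %d\n", leq(b, c));	/* 1 */
+	printf("a LEQ c: %d\n", leq(a, c));	/* 0 -- not transitive */
+	return 0;
+}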
+
+Note that it makes absolutely no difference that a transmogrifying bijection
+has been applied before the comparison is done. In fact, had the
+obfuscation not been done, one could probably not observe the bug
+(assuming all values being compared always lie in one half of the address
+space, the mathematical value of a-b is always representable in a long).
+As it stands, one can easily obtain three file descriptors exhibiting the
+non-transitivity of kcmp().
+
+Side note 1: I can't see that ensuring the MSB of the multiplier is
+set serves any purpose other than obfuscating the obfuscating code.
+
+Side note 2:
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/syscall.h>
+
+enum kcmp_type {
+ KCMP_FILE,
+ KCMP_VM,
+ KCMP_FILES,
+ KCMP_FS,
+ KCMP_SIGHAND,
+ KCMP_IO,
+ KCMP_SYSVSEM,
+ KCMP_TYPES,
+};
+pid_t pid;
+
+int kcmp(pid_t pid1, pid_t pid2, int type,
+ unsigned long idx1, unsigned long idx2)
+{
+ return syscall(SYS_kcmp, pid1, pid2, type, idx1, idx2);
+}
+int cmp_fd(int fd1, int fd2)
+{
+ int c = kcmp(pid, pid, KCMP_FILE, fd1, fd2);
+ if (c < 0) {
+ perror("kcmp");
+ exit(1);
+ }
+ assert(0 <= c && c < 3);
+ return c;
+}
+int cmp_fdp(const void *a, const void *b)
+{
+ static const int normalize[] = {0, -1, 1};
+ return normalize[cmp_fd(*(int*)a, *(int*)b)];
+}
+#define MAX 100 /* This is plenty; I've seen it trigger for MAX==3 */
+int main(int argc, char *argv[])
+{
+ int r, s, count = 0;
+ int REL[3] = {0,0,0};
+ int fd[MAX];
+ pid = getpid();
+ while (count < MAX) {
+ r = open("/dev/null", O_RDONLY);
+ if (r < 0)
+ break;
+ fd[count++] = r;
+ }
+ printf("opened %d file descriptors\n", count);
+ for (r = 0; r < count; ++r) {
+ for (s = r+1; s < count; ++s) {
+ REL[cmp_fd(fd[r], fd[s])]++;
+ }
+ }
+ printf("== %d\t< %d\t> %d\n", REL[0], REL[1], REL[2]);
+ qsort(fd, count, sizeof(fd[0]), cmp_fdp);
+ memset(REL, 0, sizeof(REL));
+
+ for (r = 0; r < count; ++r) {
+ for (s = r+1; s < count; ++s) {
+ REL[cmp_fd(fd[r], fd[s])]++;
+ }
+ }
+ printf("== %d\t< %d\t> %d\n", REL[0], REL[1], REL[2]);
+ return (REL[0] + REL[2] != 0);
+}
+
+Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
+"Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kcmp.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int t
+ */
+ static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+ {
+- long ret;
++ long t1, t2;
+
+- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
++ t1 = kptr_obfuscate((long)v1, type);
++ t2 = kptr_obfuscate((long)v2, type);
+
+- return (ret < 0) | ((ret > 0) << 1);
++ return (t1 < t2) | ((t1 > t2) << 1);
+ }
+
+ /* The caller must have pinned the task */
--- /dev/null
+From 000a7d66ec30898f46869be01ab8205b056385d0 Mon Sep 17 00:00:00 2001
+From: Patrick Palka <patrick@parcs.ath.cx>
+Date: Tue, 9 Sep 2014 14:50:48 -0700
+Subject: kernel/printk/printk.c: fix faulty logic in the case of recursive printk
+
+From: Patrick Palka <patrick@parcs.ath.cx>
+
+commit 000a7d66ec30898f46869be01ab8205b056385d0 upstream.
+
+We shouldn't set text_len in the code path that detects printk recursion
+because text_len corresponds to the length of the string inside textbuf.
+A few lines down from the line
+
+ text_len = strlen(recursion_msg);
+
+is the line
+
+ text_len += vscnprintf(text + text_len, ...);
+
+So if printk detects recursion, it sets text_len to 29 (the length of
+recursion_msg) and logs an error. Then the message supplied by the
+caller of printk is stored inside textbuf but offset by 29 bytes. This
+means that the output of the recursive call to printk will contain 29
+bytes of garbage in front of it.
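+
+A user-space analogue of the faulty sequence (snprintf stands in for
+vscnprintf and the log_store() call; names are illustrative):
+
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+	static const char recursion_msg[] = "BUG: recent printk recursion!";
+	char text[256];
+	size_t text_len;
+
+	/* recursion_msg is logged separately and never copied into text[],
+	 * yet text_len is set to its length ... */
+	text_len = strlen(recursion_msg);			/* 29 */
+	/* ... so the caller's message lands at offset 29 */
+	text_len += snprintf(text + text_len, sizeof(text) - text_len,
+			     "message from the caller of printk");
+
+	/* the record would then be stored as (text, text_len): its first
+	 * 29 bytes were never written and hold garbage */
+	printf("record length %zu, caller text starts at offset %zu\n",
+	       text_len, strlen(recursion_msg));
+	return 0;
+}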
+
+This defect is caused by commit 458df9fd4815 ("printk: remove separate
+printk_sched buffers and use printk buf instead") which turned the line
+
+ text_len = vscnprintf(text, ...);
+
+into
+
+ text_len += vscnprintf(text + text_len, ...);
+
+To fix this, this patch avoids setting text_len when logging the printk
+recursion error. This patch also marks unlikely() the branch leading up
+to this code.
+
+Fixes: 458df9fd4815b478 ("printk: remove separate printk_sched buffers and use printk buf instead")
+Signed-off-by: Patrick Palka <patrick@parcs.ath.cx>
+Reviewed-by: Petr Mladek <pmladek@suse.cz>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/printk/printk.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1617,15 +1617,15 @@ asmlinkage int vprintk_emit(int facility
+ raw_spin_lock(&logbuf_lock);
+ logbuf_cpu = this_cpu;
+
+- if (recursion_bug) {
++ if (unlikely(recursion_bug)) {
+ static const char recursion_msg[] =
+ "BUG: recent printk recursion!";
+
+ recursion_bug = 0;
+- text_len = strlen(recursion_msg);
+ /* emit KERN_CRIT message */
+ printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+- NULL, 0, recursion_msg, text_len);
++ NULL, 0, recursion_msg,
++ strlen(recursion_msg));
+ }
+
+ /*
--- /dev/null
+From 7c17705e77b12b20fb8afb7c1b15dcdb126c0c12 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Fri, 29 Aug 2014 16:25:50 -0400
+Subject: lockd: fix rpcbind crash on lockd startup failure
+
+From: "J. Bruce Fields" <bfields@redhat.com>
+
+commit 7c17705e77b12b20fb8afb7c1b15dcdb126c0c12 upstream.
+
+Nikita Yushchenko reported that booting a kernel with init=/bin/sh and
+then nfs mounting without portmap or rpcbind running using a busybox
+mount resulted in:
+
+ # mount -t nfs 10.30.130.21:/opt /mnt
+ svc: failed to register lockdv1 RPC service (errno 111).
+ lockd_up: makesock failed, error=-111
+ Unable to handle kernel paging request for data at address 0x00000030
+ Faulting instruction address: 0xc055e65c
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ MPC85xx CDS
+ Modules linked in:
+ CPU: 0 PID: 1338 Comm: mount Not tainted 3.10.44.cge #117
+ task: cf29cea0 ti: cf35c000 task.ti: cf35c000
+ NIP: c055e65c LR: c0566490 CTR: c055e648
+ REGS: cf35dad0 TRAP: 0300 Not tainted (3.10.44.cge)
+ MSR: 00029000 <CE,EE,ME> CR: 22442488 XER: 20000000
+ DEAR: 00000030, ESR: 00000000
+
+ GPR00: c05606f4 cf35db80 cf29cea0 cf0ded80 cf0dedb8 00000001 1dec3086
+ 00000000
+ GPR08: 00000000 c07b1640 00000007 1dec3086 22442482 100b9758 00000000
+ 10090ae8
+ GPR16: 00000000 000186a5 00000000 00000000 100c3018 bfa46edc 100b0000
+ bfa46ef0
+ GPR24: cf386ae0 c07834f0 00000000 c0565f88 00000001 cf0dedb8 00000000
+ cf0ded80
+ NIP [c055e65c] call_start+0x14/0x34
+ LR [c0566490] __rpc_execute+0x70/0x250
+ Call Trace:
+ [cf35db80] [00000080] 0x80 (unreliable)
+ [cf35dbb0] [c05606f4] rpc_run_task+0x9c/0xc4
+ [cf35dbc0] [c0560840] rpc_call_sync+0x50/0xb8
+ [cf35dbf0] [c056ee90] rpcb_register_call+0x54/0x84
+ [cf35dc10] [c056f24c] rpcb_register+0xf8/0x10c
+ [cf35dc70] [c0569e18] svc_unregister.isra.23+0x100/0x108
+ [cf35dc90] [c0569e38] svc_rpcb_cleanup+0x18/0x30
+ [cf35dca0] [c0198c5c] lockd_up+0x1dc/0x2e0
+ [cf35dcd0] [c0195348] nlmclnt_init+0x2c/0xc8
+ [cf35dcf0] [c015bb5c] nfs_start_lockd+0x98/0xec
+ [cf35dd20] [c015ce6c] nfs_create_server+0x1e8/0x3f4
+ [cf35dd90] [c0171590] nfs3_create_server+0x10/0x44
+ [cf35dda0] [c016528c] nfs_try_mount+0x158/0x1e4
+ [cf35de20] [c01670d0] nfs_fs_mount+0x434/0x8c8
+ [cf35de70] [c00cd3bc] mount_fs+0x20/0xbc
+ [cf35de90] [c00e4f88] vfs_kern_mount+0x50/0x104
+ [cf35dec0] [c00e6e0c] do_mount+0x1d0/0x8e0
+ [cf35df10] [c00e75ac] SyS_mount+0x90/0xd0
+ [cf35df40] [c000ccf4] ret_from_syscall+0x0/0x3c
+
+The addition of svc_shutdown_net() resulted in two calls to
+svc_rpcb_cleanup(); the second is no longer necessary and crashes when
+it calls rpcb_register_call with clnt=NULL.
+
+Reported-by: Nikita Yushchenko <nyushchenko@dev.rtsoft.ru>
+Fixes: 679b033df484 "lockd: ensure we tear down any live sockets when socket creation fails during lockd_up"
+Acked-by: Jeff Layton <jlayton@primarydata.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/svc.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv
+
+ error = make_socks(serv, net);
+ if (error < 0)
+- goto err_socks;
++ goto err_bind;
+ set_grace_period(net);
+ dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+ return 0;
+
+-err_socks:
+- svc_rpcb_cleanup(serv, net);
+ err_bind:
+ ln->nlmsvc_users--;
+ return error;
--- /dev/null
+From 22fdcf02f6e80d64a927f702dd9d631a927d87d4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Thu, 5 Jun 2014 11:31:01 -0400
+Subject: lockdep: Revert lockdep check in raw_seqcount_begin()
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 22fdcf02f6e80d64a927f702dd9d631a927d87d4 upstream.
+
+This commit reverts the addition of lockdep checking to raw_seqcount_begin
+for the following reasons:
+
+ 1) It violates the naming convention that raw_* functions should not
+ do lockdep checks (a convention that is also followed by the other
+ raw_*_seqcount_begin functions).
+
+ 2) raw_seqcount_begin does not spin, so it can only be part of an ABBA
+ deadlock in very special circumstances (for instance if a lock
+ is held across the entire raw_seqcount_begin()+read_seqcount_retry()
+ loop while also being taken inside the write_seqcount protected area).
+
+ 3) It is causing false positives with some existing callers, and there
+ is no non-lockdep alternative for those callers to use.
+
+None of the three existing callers (__d_lookup_rcu, netdev_get_name, and
+the NFS state code) appear to use the function in a manner that is ABBA
+deadlock prone.
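+
+For reference, the raw_seqcount_begin()/read_seqcount_retry() loop referred
+to in point 2 has this shape in its callers (sketch only):
+
+	unsigned seq;
+
+	do {
+		seq = raw_seqcount_begin(&obj->seqcount);
+		/* read the fields protected by the seqcount */
+	} while (read_seqcount_retry(&obj->seqcount, seq));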
+
+Fixes: 1ca7d67cf5d5: seqcount: Add lockdep functionality to seqcount/seqlock
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Waiman Long <Waiman.Long@hp.com>
+Cc: Stephen Boyd <sboyd@codeaurora.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/CAHQdGtRR6SvEhXiqWo24hoUh9AU9cL82Z8Z-d8-7u951F_d+5g@mail.gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/seqlock.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_beg
+ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+ {
+ unsigned ret = ACCESS_ONCE(s->sequence);
+-
+- seqcount_lockdep_reader_access(s);
+ smp_rmb();
+ return ret & ~1;
+ }
--- /dev/null
+From d4a5fca592b9ab52b90bb261a90af3c8f53be011 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Thu, 25 Sep 2014 16:05:20 -0700
+Subject: mm, slab: initialize object alignment on cache creation
+
+From: David Rientjes <rientjes@google.com>
+
+commit d4a5fca592b9ab52b90bb261a90af3c8f53be011 upstream.
+
+Since commit 4590685546a3 ("mm/sl[aou]b: Common alignment code"), the
+"ralign" automatic variable in __kmem_cache_create() may be used as
+uninitialized.
+
+The proper alignment defaults to BYTES_PER_WORD and can be overridden by
+SLAB_RED_ZONE or the alignment specified by the caller.
+
+This fixes https://bugzilla.kernel.org/show_bug.cgi?id=85031
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Reported-by: Andrei Elovikov <a.elovikov@gmail.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slab.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2224,7 +2224,8 @@ static int __init_refok setup_cpu_cache(
+ int
+ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ {
+- size_t left_over, freelist_size, ralign;
++ size_t left_over, freelist_size;
++ size_t ralign = BYTES_PER_WORD;
+ gfp_t gfp;
+ int err;
+ size_t size = cachep->size;
+@@ -2257,14 +2258,6 @@ __kmem_cache_create (struct kmem_cache *
+ size &= ~(BYTES_PER_WORD - 1);
+ }
+
+- /*
+- * Redzoning and user store require word alignment or possibly larger.
+- * Note this will be overridden by architecture or caller mandated
+- * alignment if either is greater than BYTES_PER_WORD.
+- */
+- if (flags & SLAB_STORE_USER)
+- ralign = BYTES_PER_WORD;
+-
+ if (flags & SLAB_RED_ZONE) {
+ ralign = REDZONE_ALIGN;
+ /* If redzoning, ensure that the second redzone is suitably
--- /dev/null
+From dbab31aa2ceec2d201966fa0b552f151310ba5f4 Mon Sep 17 00:00:00 2001
+From: Peter Feiner <pfeiner@google.com>
+Date: Thu, 25 Sep 2014 16:05:29 -0700
+Subject: mm: softdirty: keep bit when zapping file pte
+
+From: Peter Feiner <pfeiner@google.com>
+
+commit dbab31aa2ceec2d201966fa0b552f151310ba5f4 upstream.
+
+This fixes the same bug as b43790eedd31 ("mm: softdirty: don't forget to
+save file map softdiry bit on unmap") and 9aed8614af5a ("mm/memory.c:
+don't forget to set softdirty on file mapped fault") where the return
+value of pte_*mksoft_dirty was being ignored.
+
+To be sure that no other pte/pmd "mk" function return values were being
+ignored, I annotated the functions in arch/x86/include/asm/pgtable.h
+with __must_check and rebuilt.
+
+The userspace effect of this bug is that the softdirty mark might be
+lost if a file mapped pte gets zapped.
+
+Signed-off-by: Peter Feiner <pfeiner@google.com>
+Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
+Cc: Pavel Emelyanov <xemul@parallels.com>
+Cc: Jamie Liu <jamieliu@google.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1125,7 +1125,7 @@ again:
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(ptent))
+- pte_file_mksoft_dirty(ptfile);
++ ptfile = pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, addr, pte, ptfile);
+ }
+ if (PageAnon(page))
--- /dev/null
+From 56d7acc792c0d98f38f22058671ee715ff197023 Mon Sep 17 00:00:00 2001
+From: Andreas Rohner <andreas.rohner@gmx.net>
+Date: Thu, 25 Sep 2014 16:05:14 -0700
+Subject: nilfs2: fix data loss with mmap()
+
+From: Andreas Rohner <andreas.rohner@gmx.net>
+
+commit 56d7acc792c0d98f38f22058671ee715ff197023 upstream.
+
+This bug leads to reproducible silent data loss, despite the use of
+msync(), sync() and a clean unmount of the file system. It is easily
+reproducible with the following script:
+
+ ----------------[BEGIN SCRIPT]--------------------
+ mkfs.nilfs2 -f /dev/sdb
+ mount /dev/sdb /mnt
+
+ dd if=/dev/zero bs=1M count=30 of=/mnt/testfile
+
+ umount /mnt
+ mount /dev/sdb /mnt
+ CHECKSUM_BEFORE="$(md5sum /mnt/testfile)"
+
+ /root/mmaptest/mmaptest /mnt/testfile 30 10 5
+
+ sync
+ CHECKSUM_AFTER="$(md5sum /mnt/testfile)"
+ umount /mnt
+ mount /dev/sdb /mnt
+ CHECKSUM_AFTER_REMOUNT="$(md5sum /mnt/testfile)"
+ umount /mnt
+
+ echo "BEFORE MMAP:\t$CHECKSUM_BEFORE"
+ echo "AFTER MMAP:\t$CHECKSUM_AFTER"
+ echo "AFTER REMOUNT:\t$CHECKSUM_AFTER_REMOUNT"
+ ----------------[END SCRIPT]--------------------
+
+The mmaptest tool looks something like this (very simplified, with
+error checking removed):
+
+ ----------------[BEGIN mmaptest]--------------------
+ data = mmap(NULL, file_size - file_offset, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, file_offset);
+
+ for (i = 0; i < write_count; ++i) {
+ memcpy(data + i * 4096, buf, sizeof(buf));
+ msync(data, file_size - file_offset, MS_SYNC))
+ }
+ ----------------[END mmaptest]--------------------
+
+The output of the script looks something like this:
+
+ BEFORE MMAP: 281ed1d5ae50e8419f9b978aab16de83 /mnt/testfile
+ AFTER MMAP: 6604a1c31f10780331a6850371b3a313 /mnt/testfile
+ AFTER REMOUNT: 281ed1d5ae50e8419f9b978aab16de83 /mnt/testfile
+
+So it is clear that the changes done using mmap() do not survive a
+remount. This can be reproduced 100% of the time. The problem was
+introduced in commit 136e8770cd5d ("nilfs2: fix issue of
+nilfs_set_page_dirty() for page at EOF boundary").
+
+If the page was read with mpage_readpage() or mpage_readpages() for
+example, then it has no buffers attached to it. In that case
+page_has_buffers(page) in nilfs_set_page_dirty() will be false.
+Therefore nilfs_set_file_dirty() is never called and the pages are never
+collected and never written to disk.
+
+This patch fixes the problem by also calling nilfs_set_file_dirty() if the
+page has no buffers attached to it.
+
+[akpm@linux-foundation.org: s/PAGE_SHIFT/PAGE_CACHE_SHIFT/]
+Signed-off-by: Andreas Rohner <andreas.rohner@gmx.net>
+Tested-by: Andreas Rohner <andreas.rohner@gmx.net>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nilfs2/inode.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -24,6 +24,7 @@
+ #include <linux/buffer_head.h>
+ #include <linux/gfp.h>
+ #include <linux/mpage.h>
++#include <linux/pagemap.h>
+ #include <linux/writeback.h>
+ #include <linux/aio.h>
+ #include "nilfs.h"
+@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *
+
+ static int nilfs_set_page_dirty(struct page *page)
+ {
++ struct inode *inode = page->mapping->host;
+ int ret = __set_page_dirty_nobuffers(page);
+
+ if (page_has_buffers(page)) {
+- struct inode *inode = page->mapping->host;
+ unsigned nr_dirty = 0;
+ struct buffer_head *bh, *head;
+
+@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct p
+
+ if (nr_dirty)
+ nilfs_set_file_dirty(inode, nr_dirty);
++ } else if (ret) {
++ unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
++
++ nilfs_set_file_dirty(inode, nr_dirty);
+ }
+ return ret;
+ }
--- /dev/null
+From 5760a97c7143c208fa3a8f8cad0ed7dd672ebd28 Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@huawei.com>
+Date: Thu, 25 Sep 2014 16:05:16 -0700
+Subject: ocfs2/dlm: do not get resource spinlock if lockres is new
+
+From: Joseph Qi <joseph.qi@huawei.com>
+
+commit 5760a97c7143c208fa3a8f8cad0ed7dd672ebd28 upstream.
+
+There is a deadlock case which was reported by Guozhonghua:
+ https://oss.oracle.com/pipermail/ocfs2-devel/2014-September/010079.html
+
+This case is caused by &res->spinlock and &dlm->master_lock
+misordering in different threads.
+
+It was introduced by commit 8d400b81cc83 ("ocfs2/dlm: Clean up refmap
+helpers"). Since lockres is new, it doesn't not require the
+&res->spinlock. So remove it.
+
+Fixes: 8d400b81cc83 ("ocfs2/dlm: Clean up refmap helpers")
+Signed-off-by: Joseph Qi <joseph.qi@huawei.com>
+Reviewed-by: joyce.xue <xuejiufei@huawei.com>
+Reported-by: Guozhonghua <guozhonghua@h3c.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Mark Fasheh <mfasheh@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -655,12 +655,9 @@ void dlm_lockres_clear_refmap_bit(struct
+ clear_bit(bit, res->refmap);
+ }
+
+-
+-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
++static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+ {
+- assert_spin_locked(&res->spinlock);
+-
+ res->inflight_locks++;
+
+ mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+@@ -668,6 +665,13 @@ void dlm_lockres_grab_inflight_ref(struc
+ __builtin_return_address(0));
+ }
+
++void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
++ struct dlm_lock_resource *res)
++{
++ assert_spin_locked(&res->spinlock);
++ __dlm_lockres_grab_inflight_ref(dlm, res);
++}
++
+ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+ {
+@@ -894,10 +898,8 @@ lookup:
+ /* finally add the lockres to its hash bucket */
+ __dlm_insert_lockres(dlm, res);
+
+- /* Grab inflight ref to pin the resource */
+- spin_lock(&res->spinlock);
+- dlm_lockres_grab_inflight_ref(dlm, res);
+- spin_unlock(&res->spinlock);
++ /* since this lockres is new it does not require the spinlock */
++ __dlm_lockres_grab_inflight_ref(dlm, res);
+
+ /* get an extra ref on the mle in case this is a BLOCK
+ * if so, the creator of the BLOCK may try to put the last
--- /dev/null
+From f0d279654dea22b7a6ad34b9334aee80cda62cde Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 15 Aug 2014 16:06:06 -0400
+Subject: percpu: fix pcpu_alloc_pages() failure path
+
+From: Tejun Heo <tj@kernel.org>
+
+commit f0d279654dea22b7a6ad34b9334aee80cda62cde upstream.
+
+When pcpu_alloc_pages() fails midway, pcpu_free_pages() is invoked to
+free what has already been allocated. The invocation is across the
+whole requested range and pcpu_free_pages() will try to free all
+non-NULL pages; unfortunately, this is incorrect as
+pcpu_get_pages_and_bitmap(), unlike what its comment suggests, doesn't
+clear the pages array and thus the array may have entries from the
+previous invocations, making the partial failure path free incorrect
+pages.
+
+Fix it by open-coding the partial freeing of the already allocated
+pages.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/percpu-vm.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/mm/percpu-vm.c
++++ b/mm/percpu-vm.c
+@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_
+ int page_start, int page_end)
+ {
+ const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+- unsigned int cpu;
++ unsigned int cpu, tcpu;
+ int i;
+
+ for_each_possible_cpu(cpu) {
+@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_
+ struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
+
+ *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
+- if (!*pagep) {
+- pcpu_free_pages(chunk, pages, populated,
+- page_start, page_end);
+- return -ENOMEM;
+- }
++ if (!*pagep)
++ goto err;
+ }
+ }
+ return 0;
++
++err:
++ while (--i >= page_start)
++ __free_page(pages[pcpu_page_idx(cpu, i)]);
++
++ for_each_possible_cpu(tcpu) {
++ if (tcpu == cpu)
++ break;
++ for (i = page_start; i < page_end; i++)
++ __free_page(pages[pcpu_page_idx(tcpu, i)]);
++ }
++ return -ENOMEM;
+ }
+
+ /**
--- /dev/null
+From 3189eddbcafcc4d827f7f19facbeddec4424eba8 Mon Sep 17 00:00:00 2001
+From: Honggang Li <enjoymindful@gmail.com>
+Date: Tue, 12 Aug 2014 21:36:15 +0800
+Subject: percpu: free percpu allocation info for uniprocessor system
+
+From: Honggang Li <enjoymindful@gmail.com>
+
+commit 3189eddbcafcc4d827f7f19facbeddec4424eba8 upstream.
+
+Currently, only SMP systems free the percpu allocation info.
+Uniprocessor systems should free it too. For example, on an x86 UML
+virtual machine with 256MB of memory, the UML kernel wastes one page of
+memory.
+
+Signed-off-by: Honggang Li <enjoymindful@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/percpu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1933,6 +1933,8 @@ void __init setup_per_cpu_areas(void)
+
+ if (pcpu_setup_first_chunk(ai, fc) < 0)
+ panic("Failed to initialize percpu areas.");
++
++ pcpu_free_alloc_info(ai);
+ }
+
+ #endif /* CONFIG_SMP */
--- /dev/null
+From 849f5169097e1ba35b90ac9df76b5bb6f9c0aabd Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 15 Aug 2014 16:06:10 -0400
+Subject: percpu: perform tlb flush after pcpu_map_pages() failure
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 849f5169097e1ba35b90ac9df76b5bb6f9c0aabd upstream.
+
+If pcpu_map_pages() fails midway, it unmaps the already mapped pages.
+Currently, it doesn't flush the TLB after the partial unmapping. This may
+be okay in most cases as the established mapping hasn't been used at
+that point, but it can go wrong, and when it does it'd be
+extremely difficult to track down.
+
+Flush tlb after the partial unmapping.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/percpu-vm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/percpu-vm.c
++++ b/mm/percpu-vm.c
+@@ -272,6 +272,7 @@ err:
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
+ page_end - page_start);
+ }
++ pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
+ return err;
+ }
+
--- /dev/null
+From a5fe8e7695dc3f547e955ad2b662e3e72969e506 Mon Sep 17 00:00:00 2001
+From: Eliad Peller <eliad@wizery.com>
+Date: Wed, 11 Jun 2014 10:23:35 +0300
+Subject: regulatory: add NUL to alpha2
+
+From: Eliad Peller <eliad@wizery.com>
+
+commit a5fe8e7695dc3f547e955ad2b662e3e72969e506 upstream.
+
+alpha2 is defined as a 2-char array, but is used in multiple
+places as a string (e.g. with nla_put_string calls), which
+might leak kernel data.
+
+Solve it by simply adding an extra char for the NUL
+terminator, making such operations safe.
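+
+A plain C illustration of why the extra byte matters (struct names are made
+up; only the alpha2 field mirrors the kernel definition):
+
+#include <stdio.h>
+#include <string.h>
+
+struct regd_old { char alpha2[2]; int n_reg_rules; };	/* no room for '\0' */
+struct regd_new { char alpha2[3]; int n_reg_rules; };	/* terminator fits */
+
+int main(void)
+{
+	struct regd_new rd = { "US", 5 };
+
+	/* String consumers such as nla_put_string() scan for a '\0'.  With
+	 * the old 2-byte field there is none inside alpha2, so the scan
+	 * runs on into neighbouring struct bytes (kernel data).  With 3
+	 * bytes the terminator is part of the field itself. */
+	printf("len=%zu value=%s\n", strlen(rd.alpha2), rd.alpha2);
+	return 0;
+}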
+
+Signed-off-by: Eliad Peller <eliadx.peller@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/regulatory.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
+ struct ieee80211_regdomain {
+ struct rcu_head rcu_head;
+ u32 n_reg_rules;
+- char alpha2[2];
++ char alpha2[3];
+ enum nl80211_dfs_regions dfs_region;
+ struct ieee80211_reg_rule reg_rules[];
+ };
--- /dev/null
+From bb512ad0732232f1d2693bb68f31a76bed8f22ae Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 25 Aug 2014 12:08:09 +0200
+Subject: Revert "mac80211: disable uAPSD if all ACs are under ACM"
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit bb512ad0732232f1d2693bb68f31a76bed8f22ae upstream.
+
+This reverts commit 24aa11ab8ae03292d38ec0dbd9bc2ac49fe8a6dd.
+
+That commit was wrong since it uses data that hasn't even been set
+up yet, but might be a hold-over from a previous connection.
+
+Additionally, it seems like a driver-specific workaround that
+shouldn't have been in mac80211 to start with.
+
+Fixes: 24aa11ab8ae0 ("mac80211: disable uAPSD if all ACs are under ACM")
+Reviewed-by: Luciano Coelho <luciano.coelho@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/mlme.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4355,8 +4355,7 @@ int ieee80211_mgd_assoc(struct ieee80211
+ rcu_read_unlock();
+
+ if (bss->wmm_used && bss->uapsd_supported &&
+- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
+- sdata->wmm_acm != 0xff) {
++ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
+ assoc_data->uapsd = true;
+ ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
+ } else {
--- /dev/null
+From c66517165610b911e4c6d268f28d8c640832dbd1 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Sun, 24 Aug 2014 17:49:43 -0500
+Subject: rtlwifi: rtl8192cu: Add new ID
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit c66517165610b911e4c6d268f28d8c640832dbd1 upstream.
+
+The Sitecom WLA-2102 adapter uses this driver.
+
+Reported-by: Nico Baggus <nico-linux@noci.xs4all.nl>
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Cc: Nico Baggus <nico-linux@noci.xs4all.nl>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
+ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
--- /dev/null
+From 03bd4e1f7265548832a76e7919a81f3137c44fd1 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+Date: Wed, 24 Sep 2014 16:38:05 +0800
+Subject: sched: Fix unreleased llc_shared_mask bit during CPU hotplug
+
+From: Wanpeng Li <wanpeng.li@linux.intel.com>
+
+commit 03bd4e1f7265548832a76e7919a81f3137c44fd1 upstream.
+
+The following bug can be triggered by repeatedly hot-adding and removing a
+large number of Xen domain0 vcpus:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000004 IP: [..] find_busiest_group
+ PGD 5a9d5067 PUD 13067 PMD 0
+ Oops: 0000 [#3] SMP
+ [...]
+ Call Trace:
+ load_balance
+ ? _raw_spin_unlock_irqrestore
+ idle_balance
+ __schedule
+ schedule
+ schedule_timeout
+ ? lock_timer_base
+ schedule_timeout_uninterruptible
+ msleep
+ lock_device_hotplug_sysfs
+ online_store
+ dev_attr_store
+ sysfs_write_file
+ vfs_write
+ SyS_write
+ system_call_fastpath
+
+Last level cache shared mask is built during CPU up and the
+build_sched_domain() routine takes advantage of it to setup
+the sched domain CPU topology.
+
+However, llc_shared_mask is not released during CPU disable,
+which leads to an invalid sched domain CPU topology.
+
+This patch fixes it by releasing the llc_shared_mask correctly
+during CPU disable.
+
+Yasuaki also reported that this can happen on real hardware:
+
+ https://lkml.org/lkml/2014/7/22/1018
+
+His case is here:
+
+ ==
+ Here is an example on my system.
+ My system has 4 sockets and each socket has 15 cores and HT is
+ enabled. In this case, each core of sockets is numbered as
+ follows:
+
+ | CPU#
+ Socket#0 | 0-14 , 60-74
+ Socket#1 | 15-29, 75-89
+ Socket#2 | 30-44, 90-104
+ Socket#3 | 45-59, 105-119
+
+ Then llc_shared_mask of CPU#30 has 0x3fff80000001fffc0000000.
+
+ It means that last level cache of Socket#2 is shared with
+ CPU#30-44 and 90-104.
+
+ When hot-removing socket#2 and #3, each core of sockets is
+ numbered as follows:
+
+ | CPU#
+ Socket#0 | 0-14 , 60-74
+ Socket#1 | 15-29, 75-89
+
+ But llc_shared_mask is not cleared. So llc_shared_mask of CPU#30
+ remains having 0x3fff80000001fffc0000000.
+
+ After that, when hot-adding socket#2 and #3, each core of
+ sockets is numbered as follows:
+
+ | CPU#
+ Socket#0 | 0-14 , 60-74
+ Socket#1 | 15-29, 75-89
+ Socket#2 | 30-59
+ Socket#3 | 90-119
+
+ Then llc_shared_mask of CPU#30 becomes
+ 0x3fff8000fffffffc0000000. It means that last level cache of
+ Socket#2 is shared with CPU#30-59 and 90-104. So the mask has
+ the wrong value.
+
+Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Tested-by: Linn Crosetto <linn@hp.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Toshi Kani <toshi.kani@hp.com>
+Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Steven Rostedt <srostedt@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1411547885-48165-1-git-send-email-wanpeng.li@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/smpboot.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1292,6 +1292,9 @@ static void remove_siblinginfo(int cpu)
+
+ for_each_cpu(sibling, cpu_sibling_mask(cpu))
+ cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
++ for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
++ cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
++ cpumask_clear(cpu_llc_shared_mask(cpu));
+ cpumask_clear(cpu_sibling_mask(cpu));
+ cpumask_clear(cpu_core_mask(cpu));
+ c->phys_proc_id = 0;
iwlwifi-mvm-treat-eapols-like-mgmt-frames-wrt-rate.patch
iwlwifi-mvm-set-mac_filter_in_beacon-correctly-for-sta-p2p-client.patch
workqueue-apply-__wq_ordered-to-create_singlethread_workqueue.patch
+futex-unlock-hb-lock-in-futex_wait_requeue_pi-error-path.patch
+block-fix-dev_t-minor-allocation-lifetime.patch
+dm-cache-fix-race-causing-dirty-blocks-to-be-marked-as-clean.patch
+dm-crypt-fix-access-beyond-the-end-of-allocated-space.patch
+input-serport-add-compat-handling-for-spiocstype-ioctl.patch
+input-synaptics-add-support-for-forcepads.patch
+input-elantech-fix-detection-of-touchpad-on-asus-s301l.patch
+input-atkbd-do-not-try-deactivate-keyboard-on-any-lg-laptops.patch
+input-i8042-add-fujitsu-u574-to-no_timeout-dmi-table.patch
+input-i8042-add-nomux-quirk-for-avatar-aviu-145a6.patch
+hwmon-ds1621-update-zbits-after-conversion-rate-change.patch
+ata_piix-add-device-ids-for-intel-9-series-pch.patch
+gpio-fix-potential-null-handler-data-in-chained-irqchip-handler.patch
+percpu-free-percpu-allocation-info-for-uniprocessor-system.patch
+percpu-fix-pcpu_alloc_pages-failure-path.patch
+percpu-perform-tlb-flush-after-pcpu_map_pages-failure.patch
+regulatory-add-nul-to-alpha2.patch
+rtlwifi-rtl8192cu-add-new-id.patch
+lockd-fix-rpcbind-crash-on-lockd-startup-failure.patch
+lockdep-revert-lockdep-check-in-raw_seqcount_begin.patch
+genhd-fix-leftover-might_sleep-in-blk_free_devt.patch
+usb-dwc3-fix-trb-completion-when-multiple-trbs-are-started.patch
+ftrace-allow-ftrace_ops-to-use-the-hashes-from-other-ops.patch
+ftrace-fix-function_profiler-and-function-tracer-together.patch
+ftrace-update-all-ftrace_ops-for-a-ftrace_hash_ops-update.patch
+revert-mac80211-disable-uapsd-if-all-acs-are-under-acm.patch
+kernel-printk-printk.c-fix-faulty-logic-in-the-case-of-recursive-printk.patch
+eventpoll-fix-uninitialized-variable-in-epoll_ctl.patch
+kcmp-fix-standard-comparison-bug.patch
+fsnotify-fdinfo-use-named-constants-instead-of-hardcoded-values.patch
+fs-notify-don-t-show-f_handle-if-exportfs_encode_inode_fh-failed.patch
+nilfs2-fix-data-loss-with-mmap.patch
+ocfs2-dlm-do-not-get-resource-spinlock-if-lockres-is-new.patch
+mm-slab-initialize-object-alignment-on-cache-creation.patch
+fs-cachefiles-add-missing-n-to-kerror-conversions.patch
+mm-softdirty-keep-bit-when-zapping-file-pte.patch
+sched-fix-unreleased-llc_shared_mask-bit-during-cpu-hotplug.patch
+brcmfmac-handle-if-event-for-p2p_device-interface.patch
+ath9k_htc-fix-random-decryption-failure.patch
--- /dev/null
+From 0b93a4c838fa10370d72f86fe712426ac63804de Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Thu, 4 Sep 2014 10:28:10 -0500
+Subject: usb: dwc3: fix TRB completion when multiple TRBs are started
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit 0b93a4c838fa10370d72f86fe712426ac63804de upstream.
+
+After commit 2ec2a8be (usb: dwc3: gadget:
+always enable IOC on bulk/interrupt transfers)
+we created a situation where it was possible to
+hang a bulk/interrupt endpoint if we had more
+than one pending request in our queue and they
+were both started with a single Start Transfer
+command.
+
+The problem triggers because we had not enabled
+the Transfer In Progress event for those endpoints
+and we were not able to process early giveback
+of requests completed without the LST bit set.
+
+Fix the problem by finally enabling Xfer In Progress
+event for all endpoint types, except control.
+
+Fixes: 2ec2a8be (usb: dwc3: gadget: always
+ enable IOC on bulk/interrupt transfers)
+Reported-by: Pratyush Anand <pratyush.anand@st.com>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/usb/dwc3/gadget.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(str
+ dep->stream_capable = true;
+ }
+
+- if (usb_endpoint_xfer_isoc(desc))
++ if (!usb_endpoint_xfer_control(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+
+ /*
+@@ -2042,12 +2042,6 @@ static void dwc3_endpoint_interrupt(stru
+ dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
+ break;
+ case DWC3_DEPEVT_XFERINPROGRESS:
+- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+- dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+- dep->name);
+- return;
+- }
+-
+ dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
+ break;
+ case DWC3_DEPEVT_XFERNOTREADY: