3.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Tue, 1 Mar 2016 07:33:31 +0000 (23:33 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Tue, 1 Mar 2016 07:33:31 +0000 (23:33 -0800)
added patches:
arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
devres-fix-a-for-loop-bounds-check.patch
dmaengine-dw-convert-to-__ffs.patch
drm-radeon-make-rv770_set_sw_state-failures-non-fatal.patch
lockd-create-nsm-handles-per-net-namespace.patch
mac-validate-mac_partition-is-within-sector.patch
megaraid_sas-do-not-use-page_size-for-max_sectors.patch
megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
mmc-remove-bondage-between-req_meta-and-reliable-write.patch
ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
storvsc-don-t-set-the-srb_flags_queue_action_enable-flag.patch
target-fix-compare_and_write-non-zero-sgl-offset-data-corruption.patch
target-fix-race-for-scf_compare_and_write_post-checking.patch
vfs-avoid-softlockups-with-sendfile-2.patch
virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch

17 files changed:
queue-3.14/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch [new file with mode: 0644]
queue-3.14/devres-fix-a-for-loop-bounds-check.patch [new file with mode: 0644]
queue-3.14/dmaengine-dw-convert-to-__ffs.patch [new file with mode: 0644]
queue-3.14/drm-radeon-make-rv770_set_sw_state-failures-non-fatal.patch [new file with mode: 0644]
queue-3.14/lockd-create-nsm-handles-per-net-namespace.patch [new file with mode: 0644]
queue-3.14/mac-validate-mac_partition-is-within-sector.patch [new file with mode: 0644]
queue-3.14/megaraid_sas-do-not-use-page_size-for-max_sectors.patch [new file with mode: 0644]
queue-3.14/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch [new file with mode: 0644]
queue-3.14/mmc-remove-bondage-between-req_meta-and-reliable-write.patch [new file with mode: 0644]
queue-3.14/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/storvsc-don-t-set-the-srb_flags_queue_action_enable-flag.patch [new file with mode: 0644]
queue-3.14/target-fix-compare_and_write-non-zero-sgl-offset-data-corruption.patch [new file with mode: 0644]
queue-3.14/target-fix-race-for-scf_compare_and_write_post-checking.patch [new file with mode: 0644]
queue-3.14/vfs-avoid-softlockups-with-sendfile-2.patch [new file with mode: 0644]
queue-3.14/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch [new file with mode: 0644]
queue-3.14/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch [new file with mode: 0644]

diff --git a/queue-3.14/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch b/queue-3.14/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
new file mode 100644 (file)
index 0000000..f07b33d
--- /dev/null
@@ -0,0 +1,94 @@
+From 2e22502c080f27afeab5e6f11e618fb7bc7aea53 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Mon, 23 Nov 2015 19:32:51 +0530
+Subject: ARC: dw2 unwind: Remove falllback linear search thru FDE entries
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 2e22502c080f27afeab5e6f11e618fb7bc7aea53 upstream.
+
+Fixes STAR 9000953410: "perf callgraph profiling causing RCU stalls"
+
+| perf record -g -c 15000 -e cycles /sbin/hackbench
+|
+| INFO: rcu_preempt self-detected stall on CPU
+| 1: (1 GPs behind) idle=609/140000000000002/0 softirq=2914/2915 fqs=603
+| Task dump for CPU 1:
+
+The in-kernel dwarf unwinder has a fast binary lookup and a fallback linear
+search (which iterates thru each of ~11K entries), so the fallback takes two
+orders of magnitude longer (~3 million cycles vs. 2000). Routines written in
+hand assembler lack dwarf info (as we don't support assembler CFI pseudo-ops
+yet), so they fail the unwinder binary lookup, hit the linear search, and
+nevertheless fail in the end.
+
+However the linear search is pointless, as the binary lookup tables are
+created from the same entries in the first place. It is impossible for the
+binary lookup to fail while the linear search succeeds. It is a pure waste
+of cycles and is thus removed by this patch.
+
+This manifested as RCU stalls / NMI watchdog splats when running
+hackbench under perf with callgraph profiling. The triggering condition
+was a perf counter overflowing in a routine lacking dwarf info (like memset),
+leading to the pathetic 3 million cycle unwinder slow path; by the time it
+returned, new interrupts were already pending (timer, IPI) and were taken
+right away. The original memset didn't make forward progress, the system kept
+accruing more interrupts and more unwinder delays in a vicious feedback
+loop, ultimately triggering the NMI diagnostic.
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/unwind.c |   37 ++++---------------------------------
+ 1 file changed, 4 insertions(+), 33 deletions(-)
+
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info
+                                                           (const u8 *)(fde +
+                                                                        1) +
+                                                           *fde, ptrType);
+-                              if (pc >= endLoc)
++                              if (pc >= endLoc) {
+                                       fde = NULL;
+-                      } else
+-                              fde = NULL;
+-              }
+-              if (fde == NULL) {
+-                      for (fde = table->address, tableSize = table->size;
+-                           cie = NULL, tableSize > sizeof(*fde)
+-                           && tableSize - sizeof(*fde) >= *fde;
+-                           tableSize -= sizeof(*fde) + *fde,
+-                           fde += 1 + *fde / sizeof(*fde)) {
+-                              cie = cie_for_fde(fde, table);
+-                              if (cie == &bad_cie) {
+                                       cie = NULL;
+-                                      break;
+                               }
+-                              if (cie == NULL
+-                                  || cie == &not_fde
+-                                  || (ptrType = fde_pointer_type(cie)) < 0)
+-                                      continue;
+-                              ptr = (const u8 *)(fde + 2);
+-                              startLoc = read_pointer(&ptr,
+-                                                      (const u8 *)(fde + 1) +
+-                                                      *fde, ptrType);
+-                              if (!startLoc)
+-                                      continue;
+-                              if (!(ptrType & DW_EH_PE_indirect))
+-                                      ptrType &=
+-                                          DW_EH_PE_FORM | DW_EH_PE_signed;
+-                              endLoc =
+-                                  startLoc + read_pointer(&ptr,
+-                                                          (const u8 *)(fde +
+-                                                                       1) +
+-                                                          *fde, ptrType);
+-                              if (pc >= startLoc && pc < endLoc)
+-                                      break;
++                      } else {
++                              fde = NULL;
++                              cie = NULL;
+                       }
+               }
+       }
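As an illustrative aside (synthetic data, not the kernel unwinder): the cost
gap the message refers to is simply linear scan versus binary search over the
same sorted table of roughly 11K entries. The sketch below counts comparisons
for a hypothetical worst-case lookup.

#include <stdio.h>

#define NUM_ENTRIES 11000	/* roughly the FDE count quoted above */

int main(void)
{
	static unsigned long table[NUM_ENTRIES];
	unsigned long target, i, steps, lo, hi;

	for (i = 0; i < NUM_ENTRIES; i++)
		table[i] = i * 64;		/* synthetic, sorted "start addresses" */
	target = table[NUM_ENTRIES - 1];	/* worst case for the linear scan */

	/* linear scan, as the removed fallback did */
	for (steps = 0, i = 0; i < NUM_ENTRIES; i++) {
		steps++;
		if (table[i] == target)
			break;
	}
	printf("linear scan:   %lu comparisons\n", steps);

	/* binary search over the same sorted table */
	lo = 0;
	hi = NUM_ENTRIES;
	steps = 0;
	while (lo < hi) {
		unsigned long mid = lo + (hi - lo) / 2;

		steps++;
		if (table[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}
	printf("binary search: %lu comparisons\n", steps);
	return 0;
}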
diff --git a/queue-3.14/devres-fix-a-for-loop-bounds-check.patch b/queue-3.14/devres-fix-a-for-loop-bounds-check.patch
new file mode 100644 (file)
index 0000000..d8c4e3d
--- /dev/null
@@ -0,0 +1,35 @@
+From 1f35d04a02a652f14566f875aef3a6f2af4cb77b Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 21 Sep 2015 19:21:51 +0300
+Subject: devres: fix a for loop bounds check
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 1f35d04a02a652f14566f875aef3a6f2af4cb77b upstream.
+
+The iomap[] array has PCIM_IOMAP_MAX (6) elements and not
+DEVICE_COUNT_RESOURCE (16).  This bug was found using a static checker.
+It may be that the "if (!(mask & (1 << i)))" check means we never
+actually go past the end of the array in real life.
+
+Fixes: ec04b075843d ('iomap: implement pcim_iounmap_regions()')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/devres.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev
+       if (!iomap)
+               return;
+-      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++      for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+               if (!(mask & (1 << i)))
+                       continue;
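For illustration only, a minimal user-space sketch (not part of the patch) of
the loop-bounds problem fixed above, with the sizes quoted in the commit
message (PCIM_IOMAP_MAX = 6, DEVICE_COUNT_RESOURCE = 16) and a hypothetical
mask with every bit set: the old upper bound walks past the end of the array.

#include <stdio.h>

#define PCIM_IOMAP_MAX        6		/* size of the iomap[] array */
#define DEVICE_COUNT_RESOURCE 16	/* old, too-large loop bound */

int main(void)
{
	void *iomap[PCIM_IOMAP_MAX] = { 0 };
	unsigned int mask = 0xffff;	/* hypothetical: every resource requested */
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!(mask & (1 << i)))
			continue;
		if (i >= PCIM_IOMAP_MAX) {	/* the overrun the fix prevents */
			printf("index %d is out of bounds for iomap[%d]\n",
			       i, PCIM_IOMAP_MAX);
			continue;
		}
		iomap[i] = NULL;		/* stands in for pcim_iounmap() */
	}
	return 0;
}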
diff --git a/queue-3.14/dmaengine-dw-convert-to-__ffs.patch b/queue-3.14/dmaengine-dw-convert-to-__ffs.patch
new file mode 100644 (file)
index 0000000..7d63b08
--- /dev/null
@@ -0,0 +1,79 @@
+From 39416677b95bf1ab8bbfa229ec7e511c96ad5d0c Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 28 Sep 2015 18:57:04 +0300
+Subject: dmaengine: dw: convert to __ffs()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 39416677b95bf1ab8bbfa229ec7e511c96ad5d0c upstream.
+
+We replace __fls() by __ffs() since we have to find a *minimum* data width that
+satisfies both source and destination.
+
+While here, rename dwc_fast_fls() to dwc_fast_ffs(), which is what it really is.
+
+Fixes: 4c2d56c574db (dw_dmac: introduce dwc_fast_fls())
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/core.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -176,7 +176,7 @@ static void dwc_initialize(struct dw_dma
+ /*----------------------------------------------------------------------*/
+-static inline unsigned int dwc_fast_fls(unsigned long long v)
++static inline unsigned int dwc_fast_ffs(unsigned long long v)
+ {
+       /*
+        * We can be a lot more clever here, but this should take care
+@@ -720,7 +720,7 @@ dwc_prep_dma_memcpy(struct dma_chan *cha
+                          dw->data_width[dwc->dst_master]);
+       src_width = dst_width = min_t(unsigned int, data_width,
+-                                    dwc_fast_fls(src | dest | len));
++                                    dwc_fast_ffs(src | dest | len));
+       ctllo = DWC_DEFAULT_CTLLO(chan)
+                       | DWC_CTLL_DST_WIDTH(dst_width)
+@@ -799,7 +799,7 @@ dwc_prep_slave_sg(struct dma_chan *chan,
+       switch (direction) {
+       case DMA_MEM_TO_DEV:
+-              reg_width = __fls(sconfig->dst_addr_width);
++              reg_width = __ffs(sconfig->dst_addr_width);
+               reg = sconfig->dst_addr;
+               ctllo = (DWC_DEFAULT_CTLLO(chan)
+                               | DWC_CTLL_DST_WIDTH(reg_width)
+@@ -819,7 +819,7 @@ dwc_prep_slave_sg(struct dma_chan *chan,
+                       len = sg_dma_len(sg);
+                       mem_width = min_t(unsigned int,
+-                                        data_width, dwc_fast_fls(mem | len));
++                                        data_width, dwc_fast_ffs(mem | len));
+ slave_sg_todev_fill_desc:
+                       desc = dwc_desc_get(dwc);
+@@ -859,7 +859,7 @@ slave_sg_todev_fill_desc:
+               }
+               break;
+       case DMA_DEV_TO_MEM:
+-              reg_width = __fls(sconfig->src_addr_width);
++              reg_width = __ffs(sconfig->src_addr_width);
+               reg = sconfig->src_addr;
+               ctllo = (DWC_DEFAULT_CTLLO(chan)
+                               | DWC_CTLL_SRC_WIDTH(reg_width)
+@@ -879,7 +879,7 @@ slave_sg_todev_fill_desc:
+                       len = sg_dma_len(sg);
+                       mem_width = min_t(unsigned int,
+-                                        data_width, dwc_fast_fls(mem | len));
++                                        data_width, dwc_fast_ffs(mem | len));
+ slave_sg_fromdev_fill_desc:
+                       desc = dwc_desc_get(dwc);
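A hedged aside on why the lowest set bit is the right operation here: OR-ing
the source address, destination address and length together and taking the
lowest set bit yields the largest power-of-two width to which all three are
aligned, i.e. the minimum common data width; the highest set bit (__fls-style)
would overestimate it. The sketch below is illustrative user-space code with
made-up values, not the kernel helpers.

#include <stdio.h>

/* lowest set bit of a non-zero value, like the kernel's __ffs() */
static unsigned int lowest_set_bit(unsigned long v)
{
	unsigned int i = 0;

	while (!(v & 1)) {
		v >>= 1;
		i++;
	}
	return i;
}

/* highest set bit of a non-zero value, like the kernel's __fls() */
static unsigned int highest_set_bit(unsigned long v)
{
	unsigned int i = 0;

	while (v >>= 1)
		i++;
	return i;
}

int main(void)
{
	/* hypothetical transfer: source, destination and length */
	unsigned long src = 0x1000, dst = 0x2004, len = 0x100;

	printf("minimum common width:   %lu bytes\n",
	       1UL << lowest_set_bit(src | dst | len));	/* 4: limited by dst */
	printf("width from highest bit: %lu bytes\n",
	       1UL << highest_set_bit(src | dst | len));	/* 8192: far too wide */
	return 0;
}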
diff --git a/queue-3.14/drm-radeon-make-rv770_set_sw_state-failures-non-fatal.patch b/queue-3.14/drm-radeon-make-rv770_set_sw_state-failures-non-fatal.patch
new file mode 100644 (file)
index 0000000..2a7c53a
--- /dev/null
@@ -0,0 +1,33 @@
+From 4e7697ed79d0c0d5f869c87a6b3ce3d5cd1a07d6 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 23 Nov 2015 16:43:29 -0500
+Subject: drm/radeon: make rv770_set_sw_state failures non-fatal
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 4e7697ed79d0c0d5f869c87a6b3ce3d5cd1a07d6 upstream.
+
+On some cards it takes a relatively long time for the change
+to take place.  Make a timeout non-fatal.
+
+bug:
+https://bugs.freedesktop.org/show_bug.cgi?id=76130
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/rv770_dpm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_devic
+ int rv770_set_sw_state(struct radeon_device *rdev)
+ {
+       if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
+-              return -EINVAL;
++              DRM_ERROR("rv770_set_sw_state failed\n");
+       return 0;
+ }
diff --git a/queue-3.14/lockd-create-nsm-handles-per-net-namespace.patch b/queue-3.14/lockd-create-nsm-handles-per-net-namespace.patch
new file mode 100644 (file)
index 0000000..6ed7d6f
--- /dev/null
@@ -0,0 +1,269 @@
+From 0ad95472bf169a3501991f8f33f5147f792a8116 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Wed, 23 Sep 2015 15:49:29 +0300
+Subject: lockd: create NSM handles per net namespace
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 0ad95472bf169a3501991f8f33f5147f792a8116 upstream.
+
+Commit cb7323fffa85 ("lockd: create and use per-net NSM
+ RPC clients on MON/UNMON requests") introduced per-net
+NSM RPC clients. Unfortunately this doesn't make any sense
+without per-net nsm_handle.
+
+E.g. the following scenario could happen:
+Two hosts (X and Y) in different namespaces (A and B) share
+the same nsm struct.
+
+1. nsm_monitor(host_X) called => NSM rpc client created,
+       nsm->sm_monitored bit set.
+2. nsm_monitor(host_Y) called => nsm->sm_monitored already set,
+       we just exit. Thus in namespace B ln->nsm_clnt == NULL.
+3. host X destroyed => nsm->sm_count decremented to 1
+4. host Y destroyed => nsm_unmonitor() => nsm_mon_unmon() => NULL-ptr
+       dereference of *ln->nsm_clnt
+
+So this could be fixed by making the nsm_handles list per-net,
+instead of global. Thus different net namespaces will not be able to
+share the same nsm_handle.
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/host.c             |    7 ++++---
+ fs/lockd/mon.c              |   36 ++++++++++++++++++++++--------------
+ fs/lockd/netns.h            |    1 +
+ fs/lockd/svc.c              |    1 +
+ fs/lockd/svc4proc.c         |    2 +-
+ fs/lockd/svcproc.c          |    2 +-
+ include/linux/lockd/lockd.h |    9 ++++++---
+ 7 files changed, 36 insertions(+), 22 deletions(-)
+
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(s
+               atomic_inc(&nsm->sm_count);
+       else {
+               host = NULL;
+-              nsm = nsm_get_handle(ni->sap, ni->salen,
++              nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
+                                       ni->hostname, ni->hostname_len);
+               if (unlikely(nsm == NULL)) {
+                       dprintk("lockd: %s failed; no nsm handle\n",
+@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(
+ /**
+  * nlm_host_rebooted - Release all resources held by rebooted host
++ * @net:  network namespace
+  * @info: pointer to decoded results of NLM_SM_NOTIFY call
+  *
+  * We were notified that the specified host has rebooted.  Release
+  * all resources held by that peer.
+  */
+-void nlm_host_rebooted(const struct nlm_reboot *info)
++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
+ {
+       struct nsm_handle *nsm;
+       struct nlm_host *host;
+-      nsm = nsm_reboot_lookup(info);
++      nsm = nsm_reboot_lookup(net, info);
+       if (unlikely(nsm == NULL))
+               return;
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -51,7 +51,6 @@ struct nsm_res {
+ };
+ static const struct rpc_program       nsm_program;
+-static                                LIST_HEAD(nsm_handles);
+ static                                DEFINE_SPINLOCK(nsm_lock);
+ /*
+@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host
+       }
+ }
+-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
+-                                            const size_t len)
++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
++                                      const char *hostname, const size_t len)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (strlen(nsm->sm_name) == len &&
+                   memcmp(nsm->sm_name, hostname, len) == 0)
+                       return nsm;
+       return NULL;
+ }
+-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
++                                      const struct sockaddr *sap)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (rpc_cmp_addr(nsm_addr(nsm), sap))
+                       return nsm;
+       return NULL;
+ }
+-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
++                                      const struct nsm_private *priv)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (memcmp(nsm->sm_priv.data, priv->data,
+                                       sizeof(priv->data)) == 0)
+                       return nsm;
+@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_han
+ /**
+  * nsm_get_handle - Find or create a cached nsm_handle
++ * @net: network namespace
+  * @sap: pointer to socket address of handle to find
+  * @salen: length of socket address
+  * @hostname: pointer to C string containing hostname to find
+@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_han
+  * @hostname cannot be found in the handle cache.  Returns NULL if
+  * an error occurs.
+  */
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++                                const struct sockaddr *sap,
+                                 const size_t salen, const char *hostname,
+                                 const size_t hostname_len)
+ {
+       struct nsm_handle *cached, *new = NULL;
++      struct lockd_net *ln = net_generic(net, lockd_net_id);
+       if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+               if (printk_ratelimit()) {
+@@ -381,9 +385,10 @@ retry:
+       spin_lock(&nsm_lock);
+       if (nsm_use_hostnames && hostname != NULL)
+-              cached = nsm_lookup_hostname(hostname, hostname_len);
++              cached = nsm_lookup_hostname(&ln->nsm_handles,
++                                      hostname, hostname_len);
+       else
+-              cached = nsm_lookup_addr(sap);
++              cached = nsm_lookup_addr(&ln->nsm_handles, sap);
+       if (cached != NULL) {
+               atomic_inc(&cached->sm_count);
+@@ -397,7 +402,7 @@ retry:
+       }
+       if (new != NULL) {
+-              list_add(&new->sm_link, &nsm_handles);
++              list_add(&new->sm_link, &ln->nsm_handles);
+               spin_unlock(&nsm_lock);
+               dprintk("lockd: created nsm_handle for %s (%s)\n",
+                               new->sm_name, new->sm_addrbuf);
+@@ -414,19 +419,22 @@ retry:
+ /**
+  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
++ * @net:  network namespace
+  * @info: pointer to NLMPROC_SM_NOTIFY arguments
+  *
+  * Returns a matching nsm_handle if found in the nsm cache. The returned
+  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
+  * error occurred.
+  */
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++                              const struct nlm_reboot *info)
+ {
+       struct nsm_handle *cached;
++      struct lockd_net *ln = net_generic(net, lockd_net_id);
+       spin_lock(&nsm_lock);
+-      cached = nsm_lookup_priv(&info->priv);
++      cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
+       if (unlikely(cached == NULL)) {
+               spin_unlock(&nsm_lock);
+               dprintk("lockd: never saw rebooted peer '%.*s' before\n",
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -16,6 +16,7 @@ struct lockd_net {
+       spinlock_t nsm_clnt_lock;
+       unsigned int nsm_users;
+       struct rpc_clnt *nsm_clnt;
++      struct list_head nsm_handles;
+ };
+ extern int lockd_net_id;
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *ne
+       INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+       INIT_LIST_HEAD(&ln->grace_list);
+       spin_lock_init(&ln->nsm_clnt_lock);
++      INIT_LIST_HEAD(&ln->nsm_handles);
+       return 0;
+ }
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *
+               return rpc_system_err;
+       }
+-      nlm_host_rebooted(argp);
++      nlm_host_rebooted(SVC_NET(rqstp), argp);
+       return rpc_success;
+ }
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *r
+               return rpc_system_err;
+       }
+-      nlm_host_rebooted(argp);
++      nlm_host_rebooted(SVC_NET(rqstp), argp);
+       return rpc_success;
+ }
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -236,7 +236,8 @@ void                 nlm_rebind_host(struct nlm_host
+ struct nlm_host * nlm_get_host(struct nlm_host *);
+ void            nlm_shutdown_hosts(void);
+ void            nlm_shutdown_hosts_net(struct net *net);
+-void            nlm_host_rebooted(const struct nlm_reboot *);
++void            nlm_host_rebooted(const struct net *net,
++                                      const struct nlm_reboot *);
+ /*
+  * Host monitoring
+@@ -244,11 +245,13 @@ void               nlm_host_rebooted(const struct n
+ int             nsm_monitor(const struct nlm_host *host);
+ void            nsm_unmonitor(const struct nlm_host *host);
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++                                      const struct sockaddr *sap,
+                                       const size_t salen,
+                                       const char *hostname,
+                                       const size_t hostname_len);
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++                                      const struct nlm_reboot *info);
+ void            nsm_release(struct nsm_handle *nsm);
+ /*
diff --git a/queue-3.14/mac-validate-mac_partition-is-within-sector.patch b/queue-3.14/mac-validate-mac_partition-is-within-sector.patch
new file mode 100644 (file)
index 0000000..9657685
--- /dev/null
@@ -0,0 +1,49 @@
+From 02e2a5bfebe99edcf9d694575a75032d53fe1b73 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 19 Nov 2015 17:18:54 -0800
+Subject: mac: validate mac_partition is within sector
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 02e2a5bfebe99edcf9d694575a75032d53fe1b73 upstream.
+
+If md->signature == MAC_DRIVER_MAGIC and md->block_size == 1023, a single
+512 byte sector would be read (secsize / 512). However the partition
+structure would be located past the end of the buffer (secsize % 512).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/partitions/mac.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitio
+       Sector sect;
+       unsigned char *data;
+       int slot, blocks_in_map;
+-      unsigned secsize;
++      unsigned secsize, datasize, partoffset;
+ #ifdef CONFIG_PPC_PMAC
+       int found_root = 0;
+       int found_root_goodness = 0;
+@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitio
+       }
+       secsize = be16_to_cpu(md->block_size);
+       put_dev_sector(sect);
+-      data = read_part_sector(state, secsize/512, &sect);
++      datasize = round_down(secsize, 512);
++      data = read_part_sector(state, datasize / 512, &sect);
+       if (!data)
+               return -1;
+-      part = (struct mac_partition *) (data + secsize%512);
++      partoffset = secsize % 512;
++      if (partoffset + sizeof(*part) > datasize)
++              return -1;
++      part = (struct mac_partition *) (data + partoffset);
+       if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+               put_dev_sector(sect);
+               return 0;               /* not a MacOS disk */
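Purely as a worked example (not part of the patch), the arithmetic behind the
bug described above: with block_size = 1023 only one 512-byte sector of data
is read, yet the partition structure is placed at offset 511, so it extends
past the buffer; the added bounds check rejects exactly this case. The struct
size below is a stand-in for sizeof(struct mac_partition).

#include <stdio.h>

#define SECTOR_SIZE        512
#define MAC_PARTITION_SIZE 512	/* stand-in for sizeof(struct mac_partition) */

int main(void)
{
	unsigned int secsize = 1023;	/* hostile md->block_size from the example */
	unsigned int datasize = secsize / SECTOR_SIZE * SECTOR_SIZE;	/* round_down(): 512 */
	unsigned int partoffset = secsize % SECTOR_SIZE;		/* 511 */

	printf("buffer holds %u bytes, struct starts at offset %u\n",
	       datasize, partoffset);
	if (partoffset + MAC_PARTITION_SIZE > datasize)
		printf("struct would overrun the buffer by %u bytes -> bail out\n",
		       partoffset + MAC_PARTITION_SIZE - datasize);
	return 0;
}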
diff --git a/queue-3.14/megaraid_sas-do-not-use-page_size-for-max_sectors.patch b/queue-3.14/megaraid_sas-do-not-use-page_size-for-max_sectors.patch
new file mode 100644 (file)
index 0000000..b6c7200
--- /dev/null
@@ -0,0 +1,48 @@
+From 357ae967ad66e357f78b5cfb5ab6ca07fb4a7758 Mon Sep 17 00:00:00 2001
+From: "sumit.saxena@avagotech.com" <sumit.saxena@avagotech.com>
+Date: Thu, 15 Oct 2015 13:40:04 +0530
+Subject: megaraid_sas: Do not use PAGE_SIZE for max_sectors
+
+From: sumit.saxena@avagotech.com <sumit.saxena@avagotech.com>
+
+commit 357ae967ad66e357f78b5cfb5ab6ca07fb4a7758 upstream.
+
+Do not use the PAGE_SIZE macro to calculate max_sectors per I/O
+request. The driver code assumes PAGE_SIZE will always be 4096, which can
+lead to a wrongly calculated value if PAGE_SIZE is not 4096. This issue
+was reported in Ubuntu Bugzilla Bug #1475166.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/megaraid/megaraid_sas.h      |    2 ++
+ drivers/scsi/megaraid/megaraid_sas_base.c |    2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -334,6 +334,8 @@ enum MR_EVT_ARGS {
+       MR_EVT_ARGS_GENERIC,
+ };
++
++#define SGE_BUFFER_SIZE       4096
+ /*
+  * define constants for device list query options
+  */
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3821,7 +3821,7 @@ static int megasas_init_fw(struct megasa
+               }
+       }
+       instance->max_sectors_per_req = instance->max_num_sge *
+-                                              PAGE_SIZE / 512;
++                                              SGE_BUFFER_SIZE / 512;
+       if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+               instance->max_sectors_per_req = tmp_sectors;
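A brief illustrative calculation (the SGE count below is hypothetical, not
taken from the driver): the old formula scales max_sectors with the kernel's
page size, so a build with pages larger than 4096 bytes ends up with a
different, wrongly inflated limit compared to the fixed 4096-byte value the
patch introduces.

#include <stdio.h>

#define SECTOR_SIZE     512
#define SGE_BUFFER_SIZE 4096	/* constant added by the patch */

int main(void)
{
	unsigned int max_num_sge = 64;		/* hypothetical adapter SGE limit */
	unsigned long page_size = 65536;	/* e.g. a 64 KiB-page kernel build */

	printf("old formula (PAGE_SIZE):         %lu sectors per request\n",
	       max_num_sge * page_size / SECTOR_SIZE);
	printf("fixed formula (SGE_BUFFER_SIZE): %u sectors per request\n",
	       max_num_sge * SGE_BUFFER_SIZE / SECTOR_SIZE);
	return 0;
}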
diff --git a/queue-3.14/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch b/queue-3.14/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
new file mode 100644 (file)
index 0000000..1a4fdf3
--- /dev/null
@@ -0,0 +1,53 @@
+From 323c4a02c631d00851d8edc4213c4d184ef83647 Mon Sep 17 00:00:00 2001
+From: "sumit.saxena@avagotech.com" <sumit.saxena@avagotech.com>
+Date: Thu, 15 Oct 2015 13:40:54 +0530
+Subject: megaraid_sas : SMAP restriction--do not access user memory from IOCTL code
+
+From: sumit.saxena@avagotech.com <sumit.saxena@avagotech.com>
+
+commit 323c4a02c631d00851d8edc4213c4d184ef83647 upstream.
+
+This is an issue on SMAP-enabled CPUs with 32-bit apps running on a 64-bit
+OS. Do not access user memory directly from kernel code; the SMAP bit
+restricts accessing user memory from kernel code.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/megaraid/megaraid_sas_base.c |   13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5281,6 +5281,9 @@ static int megasas_mgmt_compat_ioctl_fw(
+       int i;
+       int error = 0;
+       compat_uptr_t ptr;
++      unsigned long local_raw_ptr;
++      u32 local_sense_off;
++      u32 local_sense_len;
+       if (clear_user(ioc, sizeof(*ioc)))
+               return -EFAULT;
+@@ -5298,9 +5301,15 @@ static int megasas_mgmt_compat_ioctl_fw(
+        * sense_len is not null, so prepare the 64bit value under
+        * the same condition.
+        */
+-      if (ioc->sense_len) {
++      if (get_user(local_raw_ptr, ioc->frame.raw) ||
++              get_user(local_sense_off, &ioc->sense_off) ||
++              get_user(local_sense_len, &ioc->sense_len))
++              return -EFAULT;
++
++
++      if (local_sense_len) {
+               void __user **sense_ioc_ptr =
+-                      (void __user **)(ioc->frame.raw + ioc->sense_off);
++                      (void __user **)((u8*)local_raw_ptr + local_sense_off);
+               compat_uptr_t *sense_cioc_ptr =
+                       (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+               if (get_user(ptr, sense_cioc_ptr) ||
diff --git a/queue-3.14/mmc-remove-bondage-between-req_meta-and-reliable-write.patch b/queue-3.14/mmc-remove-bondage-between-req_meta-and-reliable-write.patch
new file mode 100644 (file)
index 0000000..465e931
--- /dev/null
@@ -0,0 +1,66 @@
+From d3df0465db00cf4ed9f90d0bfc3b827d32b9c796 Mon Sep 17 00:00:00 2001
+From: Luca Porzio <lporzio@micron.com>
+Date: Fri, 6 Nov 2015 15:12:26 +0000
+Subject: mmc: remove bondage between REQ_META and reliable write
+
+From: Luca Porzio <lporzio@micron.com>
+
+commit d3df0465db00cf4ed9f90d0bfc3b827d32b9c796 upstream.
+
+Anytime a write operation is performed with the Reliable Write flag enabled,
+the eMMC device is forced by the JEDEC specification to bypass the cache and
+write to the underlying NVM device; this causes a performance penalty since
+write operations can't be optimized by the device cache.
+
+In our tests, we replayed a typical mobile daily trace pattern and found a
+~9% overall time reduction in trace replay by using this patch. Also, the
+write ops within the 4KB~64KB chunk size range get a 40~60% performance
+improvement by using the patch (as this range of write chunks is the one
+affected by REQ_META).
+
+This patch has been discussed in the Mobile & Embedded Linux Storage Forum
+and is the result of feedback from many people. We also checked with the
+fsdevel and f2fs mailing list developers that this change in the usage of
+REQ_META does not affect FS behavior, and we got positive feedback.
+Reporting the feedback here:
+http://comments.gmane.org/gmane.linux.file-systems/97219
+http://thread.gmane.org/gmane.linux.file-systems.f2fs/3178/focus=3183
+
+Signed-off-by: Bruce Ford <bford@micron.com>
+Signed-off-by: Luca Porzio <lporzio@micron.com>
+Fixes: ce39f9d17c14 ("mmc: support packed write command for eMMC4.5 devices")
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/card/block.c |   11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block");
+ #define MMC_SANITIZE_REQ_TIMEOUT 240000
+ #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+-#define mmc_req_rel_wr(req)   (((req->cmd_flags & REQ_FUA) || \
+-                                (req->cmd_flags & REQ_META)) && \
++#define mmc_req_rel_wr(req)   ((req->cmd_flags & REQ_FUA) && \
+                                 (rq_data_dir(req) == WRITE))
+ #define PACKED_CMD_VER        0x01
+ #define PACKED_CMD_WR 0x02
+@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mm
+       /*
+        * Reliable writes are used to implement Forced Unit Access and
+-       * REQ_META accesses, and are supported only on MMCs.
+-       *
+-       * XXX: this really needs a good explanation of why REQ_META
+-       * is treated special.
++       * are supported only on MMCs.
+        */
+-      bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+-                        (req->cmd_flags & REQ_META)) &&
++      bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+               (rq_data_dir(req) == WRITE) &&
+               (md->flags & MMC_BLK_REL_WR);
diff --git a/queue-3.14/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch b/queue-3.14/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
new file mode 100644 (file)
index 0000000..5fa0dc6
--- /dev/null
@@ -0,0 +1,62 @@
+From b81f472a208d3e2b4392faa6d17037a89442f4ce Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 23 Nov 2015 10:35:36 -0500
+Subject: ring-buffer: Update read stamp with first real commit on page
+
+From: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
+
+commit b81f472a208d3e2b4392faa6d17037a89442f4ce upstream.
+
+Do not update the read stamp after swapping out the reader page from the
+write buffer. If the reader page is swapped out of the buffer before an
+event is written to it, then the read_stamp may get an out of date
+timestamp, as the page timestamp is updated on the first commit to that
+page.
+
+rb_get_reader_page() only returns a page if it has an event on it, otherwise
+it will return NULL. At that point, check if the page being returned has
+events and has not been read yet. Then at that point update the read_stamp
+to match the time stamp of the reader page.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c |   12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1949,12 +1949,6 @@ rb_set_commit_to_write(struct ring_buffe
+               goto again;
+ }
+-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+-{
+-      cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+-      cpu_buffer->reader_page->read = 0;
+-}
+-
+ static void rb_inc_iter(struct ring_buffer_iter *iter)
+ {
+       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+@@ -3592,7 +3586,7 @@ rb_get_reader_page(struct ring_buffer_pe
+       /* Finally update the reader page to the new head */
+       cpu_buffer->reader_page = reader;
+-      rb_reset_reader_page(cpu_buffer);
++      cpu_buffer->reader_page->read = 0;
+       if (overwrite != cpu_buffer->last_overrun) {
+               cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+@@ -3602,6 +3596,10 @@ rb_get_reader_page(struct ring_buffer_pe
+       goto again;
+  out:
++      /* Update the read_stamp on the first event */
++      if (reader && reader->read == 0)
++              cpu_buffer->read_stamp = reader->page->time_stamp;
++
+       arch_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
diff --git a/queue-3.14/series b/queue-3.14/series
index e62d61b814cc9edd9649a93d46e87d545cd2b9f7..f18b7ee90e55248150762bae11bed28d4e181adc 100644 (file)
--- a/queue-3.14/series
@@ -51,3 +51,19 @@ dts-vt8500-add-sdhc-node-to-dts-file-for-wm8650.patch
 clocksource-drivers-vt8500-increase-the-minimum-delta.patch
 async_tx-use-gfp_nowait-rather-than-gfp_io.patch
 drm-radeon-unconditionally-set-sysfs_initialized.patch
+drm-radeon-make-rv770_set_sw_state-failures-non-fatal.patch
+lockd-create-nsm-handles-per-net-namespace.patch
+devres-fix-a-for-loop-bounds-check.patch
+wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch
+dmaengine-dw-convert-to-__ffs.patch
+megaraid_sas-do-not-use-page_size-for-max_sectors.patch
+megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
+storvsc-don-t-set-the-srb_flags_queue_action_enable-flag.patch
+mmc-remove-bondage-between-req_meta-and-reliable-write.patch
+mac-validate-mac_partition-is-within-sector.patch
+arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
+vfs-avoid-softlockups-with-sendfile-2.patch
+target-fix-race-for-scf_compare_and_write_post-checking.patch
+target-fix-compare_and_write-non-zero-sgl-offset-data-corruption.patch
+ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
+virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
diff --git a/queue-3.14/storvsc-don-t-set-the-srb_flags_queue_action_enable-flag.patch b/queue-3.14/storvsc-don-t-set-the-srb_flags_queue_action_enable-flag.patch
new file mode 100644 (file)
index 0000000..cda4653
--- /dev/null
@@ -0,0 +1,32 @@
+From 8cf308e1225f5f93575f03cc4dbef24516fa81c9 Mon Sep 17 00:00:00 2001
+From: "K. Y. Srinivasan" <kys@microsoft.com>
+Date: Mon, 31 Aug 2015 08:21:54 -0700
+Subject: storvsc: Don't set the SRB_FLAGS_QUEUE_ACTION_ENABLE flag
+
+From: K. Y. Srinivasan <kys@microsoft.com>
+
+commit 8cf308e1225f5f93575f03cc4dbef24516fa81c9 upstream.
+
+Don't set the SRB_FLAGS_QUEUE_ACTION_ENABLE flag since we are not specifying
+tags.  Without this, the qlogic driver doesn't work properly with storvsc.
+
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: James Bottomley <JBottomley@Odin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/storvsc_drv.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct S
+       vm_srb->win8_extension.time_out_value = 60;
+       vm_srb->win8_extension.srb_flags |=
+-              (SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-              SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
++              SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+       /* Build the SRB */
+       switch (scmnd->sc_data_direction) {
diff --git a/queue-3.14/target-fix-compare_and_write-non-zero-sgl-offset-data-corruption.patch b/queue-3.14/target-fix-compare_and_write-non-zero-sgl-offset-data-corruption.patch
new file mode 100644 (file)
index 0000000..e2f9a50
--- /dev/null
@@ -0,0 +1,75 @@
+From d94e5a61357a04938ce14d6033b4d33a3c5fd780 Mon Sep 17 00:00:00 2001
+From: Jan Engelhardt <jengelh@inai.de>
+Date: Mon, 23 Nov 2015 17:46:32 +0100
+Subject: target: fix COMPARE_AND_WRITE non zero SGL offset data corruption
+
+From: Jan Engelhardt <jengelh@inai.de>
+
+commit d94e5a61357a04938ce14d6033b4d33a3c5fd780 upstream.
+
+target_core_sbc's compare_and_write functionality suffers from taking
+data at the wrong memory location when writing a CAW request to disk
+when a SGL offset is non-zero.
+
+This can happen with loopback and vhost-scsi fabric drivers when
+SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is used to map existing user-space
+SGL memory into COMPARE_AND_WRITE READ/WRITE payload buffers.
+
+Given the following sample LIO subtopology,
+
+% targetcli ls /loopback/
+o- loopback ................................. [1 Target]
+  o- naa.6001405ebb8df14a ....... [naa.60014059143ed2b3]
+    o- luns ................................... [2 LUNs]
+      o- lun0 ................ [iblock/ram0 (/dev/ram0)]
+      o- lun1 ................ [iblock/ram1 (/dev/ram1)]
+% lsscsi -g
+[3:0:1:0]    disk    LIO-ORG  IBLOCK           4.0   /dev/sdc   /dev/sg3
+[3:0:1:1]    disk    LIO-ORG  IBLOCK           4.0   /dev/sdd   /dev/sg4
+
+the following bug can be observed in Linux 4.3 and 4.4~rc1:
+
+% perl -e 'print chr$_ for 0..255,reverse 0..255' >rand
+% perl -e 'print "\0" x 512' >zero
+% cat rand >/dev/sdd
+% sg_compare_and_write -i rand -D zero --lba 0 /dev/sdd
+% sg_compare_and_write -i zero -D rand --lba 0 /dev/sdd
+Miscompare reported
+% hexdump -Cn 512 /dev/sdd
+00000000  0f 0e 0d 0c 0b 0a 09 08  07 06 05 04 03 02 01 00
+00000010  00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
+*
+00000200
+
+Rather than writing all-zeroes as instructed with the -D file, it
+corrupts the data in the sector by splicing some of the original
+bytes in. The page of the first entry of cmd->t_data_sg includes the
+CDB, and sg->offset is set to a position past the CDB. I presume that
+sg->offset is also the right choice to use for subsequent sglist
+members.
+
+Signed-off-by: Jan Engelhardt <jengelh@netitwork.de>
+Tested-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_sbc.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -502,11 +502,11 @@ static sense_reason_t compare_and_write_
+               if (block_size < PAGE_SIZE) {
+                       sg_set_page(&write_sg[i], m.page, block_size,
+-                                  block_size);
++                                  m.piter.sg->offset + block_size);
+               } else {
+                       sg_miter_next(&m);
+                       sg_set_page(&write_sg[i], m.page, block_size,
+-                                  0);
++                                  m.piter.sg->offset);
+               }
+               len -= block_size;
+               i++;
diff --git a/queue-3.14/target-fix-race-for-scf_compare_and_write_post-checking.patch b/queue-3.14/target-fix-race-for-scf_compare_and_write_post-checking.patch
new file mode 100644 (file)
index 0000000..246eb7e
--- /dev/null
@@ -0,0 +1,132 @@
+From 057085e522f8bf94c2e691a5b76880f68060f8ba Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Thu, 5 Nov 2015 23:37:59 -0800
+Subject: target: Fix race for SCF_COMPARE_AND_WRITE_POST checking
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 057085e522f8bf94c2e691a5b76880f68060f8ba upstream.
+
+This patch addresses a race + use after free where the first
+stage of COMPARE_AND_WRITE in compare_and_write_callback()
+is rescheduled after the backend sends the secondary WRITE,
+resulting in the second stage compare_and_write_post() callback
+completing in target_complete_ok_work() before the first
+can return.
+
+Because the current code depends on checking se_cmd->se_cmd_flags
+after return from se_cmd->transport_complete_callback(),
+this results in the first stage having SCF_COMPARE_AND_WRITE_POST
+set, which incorrectly falls through into the second stage CAW
+processing code, eventually triggering a NULL pointer
+dereference due to use after free.
+
+To address this bug, pass in a new *post_ret parameter into
+se_cmd->transport_complete_callback(), and depend upon this
+value instead of ->se_cmd_flags to determine when to return
+or fall through into ->queue_status() code for CAW.
+
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_sbc.c       |   13 +++++++++----
+ drivers/target/target_core_transport.c |   16 +++++++++-------
+ include/target/target_core_base.h      |    2 +-
+ 3 files changed, 19 insertions(+), 12 deletions(-)
+
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -314,7 +314,8 @@ sbc_setup_write_same(struct se_cmd *cmd,
+       return 0;
+ }
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
++                                         int *post_ret)
+ {
+       unsigned char *buf, *addr;
+       struct scatterlist *sg;
+@@ -378,7 +379,8 @@ sbc_execute_rw(struct se_cmd *cmd)
+                              cmd->data_direction);
+ }
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
++                                           int *post_ret)
+ {
+       struct se_device *dev = cmd->se_dev;
+@@ -388,8 +390,10 @@ static sense_reason_t compare_and_write_
+        * sent to the backend driver.
+        */
+       spin_lock_irq(&cmd->t_state_lock);
+-      if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
++      if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+               cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
++              *post_ret = 1;
++      }
+       spin_unlock_irq(&cmd->t_state_lock);
+       /*
+@@ -401,7 +405,8 @@ static sense_reason_t compare_and_write_
+       return TCM_NO_SENSE;
+ }
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
++                                               int *post_ret)
+ {
+       struct se_device *dev = cmd->se_dev;
+       struct scatterlist *write_sg = NULL, *sg;
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1581,7 +1581,7 @@ bool target_stop_cmd(struct se_cmd *cmd,
+ void transport_generic_request_failure(struct se_cmd *cmd,
+               sense_reason_t sense_reason)
+ {
+-      int ret = 0;
++      int ret = 0, post_ret = 0;
+       pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+               " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+@@ -1604,7 +1604,7 @@ void transport_generic_request_failure(s
+        */
+       if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+            cmd->transport_complete_callback)
+-              cmd->transport_complete_callback(cmd, false);
++              cmd->transport_complete_callback(cmd, false, &post_ret);
+       switch (sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+@@ -1940,11 +1940,13 @@ static void target_complete_ok_work(stru
+        */
+       if (cmd->transport_complete_callback) {
+               sense_reason_t rc;
+-
+-              rc = cmd->transport_complete_callback(cmd, true);
+-              if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+-                      if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+-                          !cmd->data_length)
++              bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
++              bool zero_dl = !(cmd->data_length);
++              int post_ret = 0;
++
++              rc = cmd->transport_complete_callback(cmd, true, &post_ret);
++              if (!rc && !post_ret) {
++                      if (caw && zero_dl)
+                               goto queue_rsp;
+                       return;
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -513,7 +513,7 @@ struct se_cmd {
+       sense_reason_t          (*execute_cmd)(struct se_cmd *);
+       sense_reason_t          (*execute_rw)(struct se_cmd *, struct scatterlist *,
+                                             u32, enum dma_data_direction);
+-      sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
++      sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
+       unsigned char           *t_task_cdb;
+       unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
diff --git a/queue-3.14/vfs-avoid-softlockups-with-sendfile-2.patch b/queue-3.14/vfs-avoid-softlockups-with-sendfile-2.patch
new file mode 100644 (file)
index 0000000..1ef7057
--- /dev/null
@@ -0,0 +1,40 @@
+From c2489e07c0a71a56fb2c84bc0ee66cddfca7d068 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 23 Nov 2015 13:09:51 +0100
+Subject: vfs: Avoid softlockups with sendfile(2)
+
+From: Jan Kara <jack@suse.cz>
+
+commit c2489e07c0a71a56fb2c84bc0ee66cddfca7d068 upstream.
+
+The following test program from Dmitry can cause softlockups or RCU
+stalls as it copies 1GB from tmpfs into eventfd and we don't have any
+scheduling point in that path of the sendfile(2) implementation:
+
+        int r1 = eventfd(0, 0);
+        int r2 = memfd_create("", 0);
+        unsigned long n = 1<<30;
+        fallocate(r2, 0, 0, n);
+        sendfile(r1, r2, 0, n);
+
+Add cond_resched() into __splice_from_pipe() to fix the problem.
+
+CC: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/splice.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_i
+       splice_from_pipe_begin(sd);
+       do {
++              cond_resched();
+               ret = splice_from_pipe_next(pipe, sd);
+               if (ret > 0)
+                       ret = splice_from_pipe_feed(pipe, sd, actor);
diff --git a/queue-3.14/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch b/queue-3.14/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
new file mode 100644 (file)
index 0000000..c95c70b
--- /dev/null
@@ -0,0 +1,39 @@
+From c13f99b7e945dad5273a8b7ee230f4d1f22d3354 Mon Sep 17 00:00:00 2001
+From: Suman Anna <s-anna@ti.com>
+Date: Wed, 16 Sep 2015 19:29:17 -0500
+Subject: virtio: fix memory leak of virtio ida cache layers
+
+From: Suman Anna <s-anna@ti.com>
+
+commit c13f99b7e945dad5273a8b7ee230f4d1f22d3354 upstream.
+
+The virtio core uses a static ida named virtio_index_ida for
+assigning index numbers to virtio devices during registration.
+The ida core may allocate some internal idr cache layers and
+an ida bitmap upon any ida allocation, and all these layers are
+truly freed only upon the ida destruction. The virtio_index_ida
+is not destroyed at present, leading to a memory leak when using
+the virtio core as a module and at least one virtio device is
+registered and unregistered.
+
+Fix this by invoking ida_destroy() in the virtio core module
+exit.
+
+Signed-off-by: Suman Anna <s-anna@ti.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -249,6 +249,7 @@ static int virtio_init(void)
+ static void __exit virtio_exit(void)
+ {
+       bus_unregister(&virtio_bus);
++      ida_destroy(&virtio_index_ida);
+ }
+ core_initcall(virtio_init);
+ module_exit(virtio_exit);
diff --git a/queue-3.14/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch b/queue-3.14/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch
new file mode 100644 (file)
index 0000000..c53f9df
--- /dev/null
@@ -0,0 +1,53 @@
+From 90adf98d9530054b8e665ba5a928de4307231d84 Mon Sep 17 00:00:00 2001
+From: Valentin Rothberg <valentinrothberg@gmail.com>
+Date: Tue, 22 Sep 2015 19:00:40 +0200
+Subject: wm831x_power: Use IRQF_ONESHOT to request threaded IRQs
+
+From: Valentin Rothberg <valentinrothberg@gmail.com>
+
+commit 90adf98d9530054b8e665ba5a928de4307231d84 upstream.
+
+Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests")
+threaded IRQs without a primary handler need to be requested with
+IRQF_ONESHOT, otherwise the request will fail.
+
+scripts/coccinelle/misc/irqf_oneshot.cocci detected this issue.
+
+Fixes: b5874f33bbaf ("wm831x_power: Use genirq")
+Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
+Signed-off-by: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/wm831x_power.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/power/wm831x_power.c
++++ b/drivers/power/wm831x_power.c
+@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct pla
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
+       ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+-                                 IRQF_TRIGGER_RISING, "System power low",
++                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
+                                  power);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
+@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct pla
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
+       ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+-                                 IRQF_TRIGGER_RISING, "Power source",
++                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
+                                  power);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
+@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct pla
+                                platform_get_irq_byname(pdev,
+                                                        wm831x_bat_irqs[i]));
+               ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+-                                         IRQF_TRIGGER_RISING,
++                                         IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                          wm831x_bat_irqs[i],
+                                          power);
+               if (ret != 0) {