git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Mar 2016 07:33:26 +0000 (23:33 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Mar 2016 07:33:26 +0000 (23:33 -0800)
added patches:
arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
devres-fix-a-for-loop-bounds-check.patch
lockd-create-nsm-handles-per-net-namespace.patch
mac-validate-mac_partition-is-within-sector.patch
megaraid_sas-do-not-use-page_size-for-max_sectors.patch
megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
mmc-remove-bondage-between-req_meta-and-reliable-write.patch
ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
vfs-avoid-softlockups-with-sendfile-2.patch
virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch

12 files changed:
queue-3.10/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch [new file with mode: 0644]
queue-3.10/devres-fix-a-for-loop-bounds-check.patch [new file with mode: 0644]
queue-3.10/lockd-create-nsm-handles-per-net-namespace.patch [new file with mode: 0644]
queue-3.10/mac-validate-mac_partition-is-within-sector.patch [new file with mode: 0644]
queue-3.10/megaraid_sas-do-not-use-page_size-for-max_sectors.patch [new file with mode: 0644]
queue-3.10/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch [new file with mode: 0644]
queue-3.10/mmc-remove-bondage-between-req_meta-and-reliable-write.patch [new file with mode: 0644]
queue-3.10/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/vfs-avoid-softlockups-with-sendfile-2.patch [new file with mode: 0644]
queue-3.10/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch [new file with mode: 0644]
queue-3.10/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch [new file with mode: 0644]

diff --git a/queue-3.10/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch b/queue-3.10/arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
new file mode 100644 (file)
index 0000000..815de8d
--- /dev/null
@@ -0,0 +1,94 @@
+From 2e22502c080f27afeab5e6f11e618fb7bc7aea53 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Mon, 23 Nov 2015 19:32:51 +0530
+Subject: ARC: dw2 unwind: Remove falllback linear search thru FDE entries
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 2e22502c080f27afeab5e6f11e618fb7bc7aea53 upstream.
+
+Fixes STAR 9000953410: "perf callgraph profiling causing RCU stalls"
+
+| perf record -g -c 15000 -e cycles /sbin/hackbench
+|
+| INFO: rcu_preempt self-detected stall on CPU
+| 1: (1 GPs behind) idle=609/140000000000002/0 softirq=2914/2915 fqs=603
+| Task dump for CPU 1:
+
+The in-kernel dwarf unwinder has a fast binary lookup and a fallback linear
+search (which iterates through each of the ~11K entries) and thus takes two
+orders of magnitude longer (~3 million cycles vs. 2000). Routines written in
+hand assembler lack dwarf info (as we don't support assembler CFI pseudo-ops
+yet), so they fail the unwinder binary lookup, hit the linear search, and
+nevertheless fail in the end.
+
+However, the linear search is pointless, as the binary lookup tables are
+created from it in the first place. It is impossible for the binary lookup
+to fail while the linear search succeeds. It is a pure waste of cycles and
+is thus removed by this patch.
+
+This manifested as RCU stalls / NMI watchdog splats when running
+hackbench under perf with callgraph profiling. The triggering condition
+was a perf counter overflowing in a routine lacking dwarf info (like memset),
+leading to the pathetic 3 million cycle unwinder slow path; by the time it
+returned, new interrupts were already pending (Timer, IPI) and were taken
+right away. The original memset didn't make forward progress, the system kept
+accruing more interrupts and more unwinder delays in a vicious feedback
+loop, ultimately triggering the NMI diagnostic.
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/unwind.c |   37 ++++---------------------------------
+ 1 file changed, 4 insertions(+), 33 deletions(-)
+
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -984,42 +984,13 @@ int arc_unwind(struct unwind_frame_info
+                                                           (const u8 *)(fde +
+                                                                        1) +
+                                                           *fde, ptrType);
+-                              if (pc >= endLoc)
++                              if (pc >= endLoc) {
+                                       fde = NULL;
+-                      } else
+-                              fde = NULL;
+-              }
+-              if (fde == NULL) {
+-                      for (fde = table->address, tableSize = table->size;
+-                           cie = NULL, tableSize > sizeof(*fde)
+-                           && tableSize - sizeof(*fde) >= *fde;
+-                           tableSize -= sizeof(*fde) + *fde,
+-                           fde += 1 + *fde / sizeof(*fde)) {
+-                              cie = cie_for_fde(fde, table);
+-                              if (cie == &bad_cie) {
+                                       cie = NULL;
+-                                      break;
+                               }
+-                              if (cie == NULL
+-                                  || cie == &not_fde
+-                                  || (ptrType = fde_pointer_type(cie)) < 0)
+-                                      continue;
+-                              ptr = (const u8 *)(fde + 2);
+-                              startLoc = read_pointer(&ptr,
+-                                                      (const u8 *)(fde + 1) +
+-                                                      *fde, ptrType);
+-                              if (!startLoc)
+-                                      continue;
+-                              if (!(ptrType & DW_EH_PE_indirect))
+-                                      ptrType &=
+-                                          DW_EH_PE_FORM | DW_EH_PE_signed;
+-                              endLoc =
+-                                  startLoc + read_pointer(&ptr,
+-                                                          (const u8 *)(fde +
+-                                                                       1) +
+-                                                          *fde, ptrType);
+-                              if (pc >= startLoc && pc < endLoc)
+-                                      break;
++                      } else {
++                              fde = NULL;
++                              cie = NULL;
+                       }
+               }
+       }
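
As an illustrative aside (not part of the patch itself): the message above argues that the sorted binary-lookup table already covers every FDE, so a PC the binary search cannot find has no dwarf data at all and a linear scan could not find it either. A minimal user-space sketch of that lookup, with a made-up table layout rather than the real arch/arc/kernel/unwind.c structures:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative stand-in for one unwind-table entry (not the real struct). */
    struct fde_range {
        unsigned long start;    /* first PC covered by this FDE */
        unsigned long end;      /* one past the last covered PC */
    };

    /*
     * Binary search over a table sorted by start address.  A PC inside a
     * hand-written assembly routine (no CFI data) has no entry at all, so a
     * linear scan over the same entries could not succeed either, which is
     * why the removed fallback was pure overhead.
     */
    static const struct fde_range *lookup_fde(const struct fde_range *tbl,
                                              size_t n, unsigned long pc)
    {
        size_t lo = 0, hi = n;

        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;

            if (pc < tbl[mid].start)
                hi = mid;
            else if (pc >= tbl[mid].end)
                lo = mid + 1;
            else
                return &tbl[mid];
        }
        return NULL;
    }

    int main(void)
    {
        static const struct fde_range tbl[] = {
            { 0x1000, 0x1080 }, { 0x1080, 0x1200 }, { 0x2000, 0x2400 },
        };

        printf("%p\n", (void *)lookup_fde(tbl, 3, 0x10c0)); /* covered PC */
        printf("%p\n", (void *)lookup_fde(tbl, 3, 0x3000)); /* no FDE: NULL */
        return 0;
    }
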
diff --git a/queue-3.10/devres-fix-a-for-loop-bounds-check.patch b/queue-3.10/devres-fix-a-for-loop-bounds-check.patch
new file mode 100644 (file)
index 0000000..d8c4e3d
--- /dev/null
@@ -0,0 +1,35 @@
+From 1f35d04a02a652f14566f875aef3a6f2af4cb77b Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 21 Sep 2015 19:21:51 +0300
+Subject: devres: fix a for loop bounds check
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 1f35d04a02a652f14566f875aef3a6f2af4cb77b upstream.
+
+The iomap[] array has PCIM_IOMAP_MAX (6) elements and not
+DEVICE_COUNT_RESOURCE (16).  This bug was found using a static checker.
+It may be that the "if (!(mask & (1 << i)))" check means we never
+actually go past the end of the array in real life.
+
+Fixes: ec04b075843d ('iomap: implement pcim_iounmap_regions()')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/devres.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev
+       if (!iomap)
+               return;
+-      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++      for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+               if (!(mask & (1 << i)))
+                       continue;
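
As an illustrative aside (not part of the patch itself): the bug is a loop bound taken from the wrong constant, 16 PCI resources versus a 6-element iomap[] array. A small stand-alone sketch of the corrected bound, using made-up values rather than the real pcim_iomap_devres state:

    #include <stdio.h>

    #define PCIM_IOMAP_MAX        6   /* size of the iomap[] array */
    #define DEVICE_COUNT_RESOURCE 16  /* number of PCI resources: larger! */

    int main(void)
    {
        void *iomap[PCIM_IOMAP_MAX] = { 0 };
        unsigned int mask = 0x3;      /* caller asks to unmap BARs 0 and 1 */
        int i;

        /*
         * Bounding the loop by DEVICE_COUNT_RESOURCE would index
         * iomap[6..15], past the end of the array; bounding it by the
         * array size cannot overrun, and the mask test still skips
         * regions that were never requested.
         */
        for (i = 0; i < PCIM_IOMAP_MAX; i++) {
            if (!(mask & (1 << i)))
                continue;
            printf("would iounmap BAR %d (%p)\n", i, iomap[i]);
        }
        return 0;
    }
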
diff --git a/queue-3.10/lockd-create-nsm-handles-per-net-namespace.patch b/queue-3.10/lockd-create-nsm-handles-per-net-namespace.patch
new file mode 100644 (file)
index 0000000..6ed7d6f
--- /dev/null
@@ -0,0 +1,269 @@
+From 0ad95472bf169a3501991f8f33f5147f792a8116 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Wed, 23 Sep 2015 15:49:29 +0300
+Subject: lockd: create NSM handles per net namespace
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 0ad95472bf169a3501991f8f33f5147f792a8116 upstream.
+
+Commit cb7323fffa85 ("lockd: create and use per-net NSM
+ RPC clients on MON/UNMON requests") introduced per-net
+NSM RPC clients. Unfortunately this doesn't make any sense
+without per-net nsm_handle.
+
+E.g. the following scenario could happen:
+Two hosts (X and Y) in different namespaces (A and B) share
+the same nsm struct.
+
+1. nsm_monitor(host_X) called => NSM rpc client created,
+       nsm->sm_monitored bit set.
+2. nsm_monitor(host_Y) called => nsm->sm_monitored already set,
+       we just exit. Thus in namespace B ln->nsm_clnt == NULL.
+3. host X destroyed => nsm->sm_count decremented to 1
+4. host Y destroyed => nsm_unmonitor() => nsm_mon_unmon() => NULL-ptr
+       dereference of *ln->nsm_clnt
+
+So this can be fixed by making the nsm_handles list per-net
+instead of global. Thus different net namespaces will not be able to
+share the same nsm_handle.
+
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/host.c             |    7 ++++---
+ fs/lockd/mon.c              |   36 ++++++++++++++++++++++--------------
+ fs/lockd/netns.h            |    1 +
+ fs/lockd/svc.c              |    1 +
+ fs/lockd/svc4proc.c         |    2 +-
+ fs/lockd/svcproc.c          |    2 +-
+ include/linux/lockd/lockd.h |    9 ++++++---
+ 7 files changed, 36 insertions(+), 22 deletions(-)
+
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(s
+               atomic_inc(&nsm->sm_count);
+       else {
+               host = NULL;
+-              nsm = nsm_get_handle(ni->sap, ni->salen,
++              nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
+                                       ni->hostname, ni->hostname_len);
+               if (unlikely(nsm == NULL)) {
+                       dprintk("lockd: %s failed; no nsm handle\n",
+@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(
+ /**
+  * nlm_host_rebooted - Release all resources held by rebooted host
++ * @net:  network namespace
+  * @info: pointer to decoded results of NLM_SM_NOTIFY call
+  *
+  * We were notified that the specified host has rebooted.  Release
+  * all resources held by that peer.
+  */
+-void nlm_host_rebooted(const struct nlm_reboot *info)
++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
+ {
+       struct nsm_handle *nsm;
+       struct nlm_host *host;
+-      nsm = nsm_reboot_lookup(info);
++      nsm = nsm_reboot_lookup(net, info);
+       if (unlikely(nsm == NULL))
+               return;
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -51,7 +51,6 @@ struct nsm_res {
+ };
+ static const struct rpc_program       nsm_program;
+-static                                LIST_HEAD(nsm_handles);
+ static                                DEFINE_SPINLOCK(nsm_lock);
+ /*
+@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host
+       }
+ }
+-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
+-                                            const size_t len)
++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
++                                      const char *hostname, const size_t len)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (strlen(nsm->sm_name) == len &&
+                   memcmp(nsm->sm_name, hostname, len) == 0)
+                       return nsm;
+       return NULL;
+ }
+-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
++                                      const struct sockaddr *sap)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (rpc_cmp_addr(nsm_addr(nsm), sap))
+                       return nsm;
+       return NULL;
+ }
+-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
++                                      const struct nsm_private *priv)
+ {
+       struct nsm_handle *nsm;
+-      list_for_each_entry(nsm, &nsm_handles, sm_link)
++      list_for_each_entry(nsm, nsm_handles, sm_link)
+               if (memcmp(nsm->sm_priv.data, priv->data,
+                                       sizeof(priv->data)) == 0)
+                       return nsm;
+@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_han
+ /**
+  * nsm_get_handle - Find or create a cached nsm_handle
++ * @net: network namespace
+  * @sap: pointer to socket address of handle to find
+  * @salen: length of socket address
+  * @hostname: pointer to C string containing hostname to find
+@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_han
+  * @hostname cannot be found in the handle cache.  Returns NULL if
+  * an error occurs.
+  */
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++                                const struct sockaddr *sap,
+                                 const size_t salen, const char *hostname,
+                                 const size_t hostname_len)
+ {
+       struct nsm_handle *cached, *new = NULL;
++      struct lockd_net *ln = net_generic(net, lockd_net_id);
+       if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+               if (printk_ratelimit()) {
+@@ -381,9 +385,10 @@ retry:
+       spin_lock(&nsm_lock);
+       if (nsm_use_hostnames && hostname != NULL)
+-              cached = nsm_lookup_hostname(hostname, hostname_len);
++              cached = nsm_lookup_hostname(&ln->nsm_handles,
++                                      hostname, hostname_len);
+       else
+-              cached = nsm_lookup_addr(sap);
++              cached = nsm_lookup_addr(&ln->nsm_handles, sap);
+       if (cached != NULL) {
+               atomic_inc(&cached->sm_count);
+@@ -397,7 +402,7 @@ retry:
+       }
+       if (new != NULL) {
+-              list_add(&new->sm_link, &nsm_handles);
++              list_add(&new->sm_link, &ln->nsm_handles);
+               spin_unlock(&nsm_lock);
+               dprintk("lockd: created nsm_handle for %s (%s)\n",
+                               new->sm_name, new->sm_addrbuf);
+@@ -414,19 +419,22 @@ retry:
+ /**
+  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
++ * @net:  network namespace
+  * @info: pointer to NLMPROC_SM_NOTIFY arguments
+  *
+  * Returns a matching nsm_handle if found in the nsm cache. The returned
+  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
+  * error occurred.
+  */
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++                              const struct nlm_reboot *info)
+ {
+       struct nsm_handle *cached;
++      struct lockd_net *ln = net_generic(net, lockd_net_id);
+       spin_lock(&nsm_lock);
+-      cached = nsm_lookup_priv(&info->priv);
++      cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
+       if (unlikely(cached == NULL)) {
+               spin_unlock(&nsm_lock);
+               dprintk("lockd: never saw rebooted peer '%.*s' before\n",
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -16,6 +16,7 @@ struct lockd_net {
+       spinlock_t nsm_clnt_lock;
+       unsigned int nsm_users;
+       struct rpc_clnt *nsm_clnt;
++      struct list_head nsm_handles;
+ };
+ extern int lockd_net_id;
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *ne
+       INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+       INIT_LIST_HEAD(&ln->grace_list);
+       spin_lock_init(&ln->nsm_clnt_lock);
++      INIT_LIST_HEAD(&ln->nsm_handles);
+       return 0;
+ }
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *
+               return rpc_system_err;
+       }
+-      nlm_host_rebooted(argp);
++      nlm_host_rebooted(SVC_NET(rqstp), argp);
+       return rpc_success;
+ }
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *r
+               return rpc_system_err;
+       }
+-      nlm_host_rebooted(argp);
++      nlm_host_rebooted(SVC_NET(rqstp), argp);
+       return rpc_success;
+ }
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -236,7 +236,8 @@ void                 nlm_rebind_host(struct nlm_host
+ struct nlm_host * nlm_get_host(struct nlm_host *);
+ void            nlm_shutdown_hosts(void);
+ void            nlm_shutdown_hosts_net(struct net *net);
+-void            nlm_host_rebooted(const struct nlm_reboot *);
++void            nlm_host_rebooted(const struct net *net,
++                                      const struct nlm_reboot *);
+ /*
+  * Host monitoring
+@@ -244,11 +245,13 @@ void               nlm_host_rebooted(const struct n
+ int             nsm_monitor(const struct nlm_host *host);
+ void            nsm_unmonitor(const struct nlm_host *host);
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++                                      const struct sockaddr *sap,
+                                       const size_t salen,
+                                       const char *hostname,
+                                       const size_t hostname_len);
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++                                      const struct nlm_reboot *info);
+ void            nsm_release(struct nsm_handle *nsm);
+ /*
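
As an illustrative aside (not part of the patch itself): the structural change is moving a global list into the per-namespace lockd_net structure that net_generic() hands back, so each namespace initializes and walks only its own nsm_handles. A hedged kernel-style sketch of that per-net pattern, with made-up names rather than the real fs/lockd code:

    #include <linux/module.h>
    #include <linux/list.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    static int demo_net_id;

    struct demo_net {
        struct list_head handles;       /* per-namespace list, not a global */
    };

    static int demo_init_net(struct net *net)
    {
        struct demo_net *dn = net_generic(net, demo_net_id);

        INIT_LIST_HEAD(&dn->handles);   /* each namespace gets its own list */
        return 0;
    }

    static struct pernet_operations demo_net_ops = {
        .init = demo_init_net,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net),
    };

    static int __init demo_init(void)
    {
        return register_pernet_subsys(&demo_net_ops);
    }

    static void __exit demo_exit(void)
    {
        unregister_pernet_subsys(&demo_net_ops);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
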
diff --git a/queue-3.10/mac-validate-mac_partition-is-within-sector.patch b/queue-3.10/mac-validate-mac_partition-is-within-sector.patch
new file mode 100644 (file)
index 0000000..9657685
--- /dev/null
@@ -0,0 +1,49 @@
+From 02e2a5bfebe99edcf9d694575a75032d53fe1b73 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 19 Nov 2015 17:18:54 -0800
+Subject: mac: validate mac_partition is within sector
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 02e2a5bfebe99edcf9d694575a75032d53fe1b73 upstream.
+
+If md->signature == MAC_DRIVER_MAGIC and md->block_size == 1023, a single
+512 byte sector would be read (secsize / 512). However the partition
+structure would be located past the end of the buffer (secsize % 512).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/partitions/mac.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitio
+       Sector sect;
+       unsigned char *data;
+       int slot, blocks_in_map;
+-      unsigned secsize;
++      unsigned secsize, datasize, partoffset;
+ #ifdef CONFIG_PPC_PMAC
+       int found_root = 0;
+       int found_root_goodness = 0;
+@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitio
+       }
+       secsize = be16_to_cpu(md->block_size);
+       put_dev_sector(sect);
+-      data = read_part_sector(state, secsize/512, &sect);
++      datasize = round_down(secsize, 512);
++      data = read_part_sector(state, datasize / 512, &sect);
+       if (!data)
+               return -1;
+-      part = (struct mac_partition *) (data + secsize%512);
++      partoffset = secsize % 512;
++      if (partoffset + sizeof(*part) > datasize)
++              return -1;
++      part = (struct mac_partition *) (data + partoffset);
+       if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+               put_dev_sector(sect);
+               return 0;               /* not a MacOS disk */
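
As an illustrative aside (not part of the patch itself): the fix reads only a whole 512-byte sector's worth of data and rejects block sizes that would place the partition record past the end of that buffer. The arithmetic in plain C, with an illustrative record size instead of the real sizeof(struct mac_partition):

    #include <stdio.h>

    #define SECTOR_SIZE 512

    int main(void)
    {
        unsigned secsize  = 1023;  /* hostile block_size from the label          */
        unsigned partsize = 64;    /* stand-in for sizeof(struct mac_partition)  */

        unsigned datasize   = (secsize / SECTOR_SIZE) * SECTOR_SIZE; /* round_down -> 512 */
        unsigned partoffset = secsize % SECTOR_SIZE;                 /* 511 */

        /* The old code used data + partoffset with only 512 bytes read: overflow. */
        if (partoffset + partsize > datasize)
            printf("rejected: record would end at byte %u of a %u-byte buffer\n",
                   partoffset + partsize, datasize);
        else
            printf("ok: record fits inside the data that was actually read\n");
        return 0;
    }
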
diff --git a/queue-3.10/megaraid_sas-do-not-use-page_size-for-max_sectors.patch b/queue-3.10/megaraid_sas-do-not-use-page_size-for-max_sectors.patch
new file mode 100644 (file)
index 0000000..f0f23e1
--- /dev/null
@@ -0,0 +1,48 @@
+From 357ae967ad66e357f78b5cfb5ab6ca07fb4a7758 Mon Sep 17 00:00:00 2001
+From: "sumit.saxena@avagotech.com" <sumit.saxena@avagotech.com>
+Date: Thu, 15 Oct 2015 13:40:04 +0530
+Subject: megaraid_sas: Do not use PAGE_SIZE for max_sectors
+
+From: sumit.saxena@avagotech.com <sumit.saxena@avagotech.com>
+
+commit 357ae967ad66e357f78b5cfb5ab6ca07fb4a7758 upstream.
+
+Do not use the PAGE_SIZE macro to calculate max_sectors per I/O
+request. The driver code assumes PAGE_SIZE will always be 4096, which can
+lead to a wrongly calculated value if PAGE_SIZE is not 4096. This issue
+was reported in Ubuntu Bugzilla Bug #1475166.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/megaraid/megaraid_sas.h      |    2 ++
+ drivers/scsi/megaraid/megaraid_sas_base.c |    2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -300,6 +300,8 @@ enum MR_EVT_ARGS {
+       MR_EVT_ARGS_GENERIC,
+ };
++
++#define SGE_BUFFER_SIZE       4096
+ /*
+  * define constants for device list query options
+  */
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3602,7 +3602,7 @@ static int megasas_init_fw(struct megasa
+       }
+       instance->max_sectors_per_req = instance->max_num_sge *
+-                                              PAGE_SIZE / 512;
++                                              SGE_BUFFER_SIZE / 512;
+       if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+               instance->max_sectors_per_req = tmp_sectors;
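
As an illustrative aside (not part of the patch itself): max_sectors_per_req is derived from the per-SGE buffer size, which the firmware interface fixes at 4096 bytes; tying it to PAGE_SIZE over-reports the limit on kernels with larger pages. The arithmetic, with example values:

    #include <stdio.h>

    #define SGE_BUFFER_SIZE 4096      /* fixed per-SGE buffer assumed by the firmware */

    int main(void)
    {
        unsigned max_num_sge = 64;        /* example SGE count from the controller */
        unsigned long page_size = 65536;  /* e.g. a 64K-page kernel                 */

        /* Old formula: scales with PAGE_SIZE, wrong when pages are not 4K. */
        printf("PAGE_SIZE based:       %lu sectors\n", max_num_sge * page_size / 512);

        /* Fixed formula: independent of the kernel's page size. */
        printf("SGE_BUFFER_SIZE based: %u sectors\n", max_num_sge * SGE_BUFFER_SIZE / 512);
        return 0;
    }
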
diff --git a/queue-3.10/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch b/queue-3.10/megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
new file mode 100644 (file)
index 0000000..1c10b33
--- /dev/null
@@ -0,0 +1,53 @@
+From 323c4a02c631d00851d8edc4213c4d184ef83647 Mon Sep 17 00:00:00 2001
+From: "sumit.saxena@avagotech.com" <sumit.saxena@avagotech.com>
+Date: Thu, 15 Oct 2015 13:40:54 +0530
+Subject: megaraid_sas : SMAP restriction--do not access user memory from IOCTL code
+
+From: sumit.saxena@avagotech.com <sumit.saxena@avagotech.com>
+
+commit 323c4a02c631d00851d8edc4213c4d184ef83647 upstream.
+
+This is an issue on SMAP-enabled CPUs with 32-bit apps running on a 64-bit
+OS. Do not access user memory from kernel code; the SMAP bit restricts
+the kernel from accessing user memory directly.
+
+Signed-off-by: Sumit Saxena <sumit.saxena@avagotech.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@avagotech.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/megaraid/megaraid_sas_base.c |   13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5051,6 +5051,9 @@ static int megasas_mgmt_compat_ioctl_fw(
+       int i;
+       int error = 0;
+       compat_uptr_t ptr;
++      unsigned long local_raw_ptr;
++      u32 local_sense_off;
++      u32 local_sense_len;
+       if (clear_user(ioc, sizeof(*ioc)))
+               return -EFAULT;
+@@ -5068,9 +5071,15 @@ static int megasas_mgmt_compat_ioctl_fw(
+        * sense_len is not null, so prepare the 64bit value under
+        * the same condition.
+        */
+-      if (ioc->sense_len) {
++      if (get_user(local_raw_ptr, ioc->frame.raw) ||
++              get_user(local_sense_off, &ioc->sense_off) ||
++              get_user(local_sense_len, &ioc->sense_len))
++              return -EFAULT;
++
++
++      if (local_sense_len) {
+               void __user **sense_ioc_ptr =
+-                      (void __user **)(ioc->frame.raw + ioc->sense_off);
++                      (void __user **)((u8*)local_raw_ptr + local_sense_off);
+               compat_uptr_t *sense_cioc_ptr =
+                       (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+               if (get_user(ptr, sense_cioc_ptr) ||
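
As an illustrative aside (not part of the patch itself): the hunk follows the standard SMAP-safe pattern of copying the needed fields from the user-space structure into kernel locals with get_user() and then computing only with those locals, instead of dereferencing the __user pointer in place. A hedged kernel-style sketch of the pattern; the structure and fields are made up, not the megaraid ioctl ABI:

    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_ioc {                  /* illustrative layout only */
        unsigned long raw_ptr;
        u32 sense_off;
        u32 sense_len;
    };

    static int demo_handle_ioctl(struct demo_ioc __user *ioc)
    {
        unsigned long local_raw_ptr;
        u32 local_sense_off, local_sense_len;

        /*
         * Never dereference ioc directly from kernel code: fetch each
         * field through get_user(), which goes through the proper
         * user-access machinery (and so is allowed under SMAP).
         */
        if (get_user(local_raw_ptr, &ioc->raw_ptr) ||
            get_user(local_sense_off, &ioc->sense_off) ||
            get_user(local_sense_len, &ioc->sense_len))
            return -EFAULT;

        if (local_sense_len) {
            /*
             * The real driver rebuilds the 64-bit sense pointer from
             * local_raw_ptr + local_sense_off here; the point is that
             * only values already copied into the kernel are combined.
             */
        }
        return 0;
    }
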
diff --git a/queue-3.10/mmc-remove-bondage-between-req_meta-and-reliable-write.patch b/queue-3.10/mmc-remove-bondage-between-req_meta-and-reliable-write.patch
new file mode 100644 (file)
index 0000000..d52d4e7
--- /dev/null
@@ -0,0 +1,66 @@
+From d3df0465db00cf4ed9f90d0bfc3b827d32b9c796 Mon Sep 17 00:00:00 2001
+From: Luca Porzio <lporzio@micron.com>
+Date: Fri, 6 Nov 2015 15:12:26 +0000
+Subject: mmc: remove bondage between REQ_META and reliable write
+
+From: Luca Porzio <lporzio@micron.com>
+
+commit d3df0465db00cf4ed9f90d0bfc3b827d32b9c796 upstream.
+
+Anytime a write operation is performed with the Reliable Write flag enabled,
+the eMMC device is forced by the JEDEC specification to bypass the cache and
+write to the underlying NVM device; this causes a performance penalty, since
+write operations can't be optimized by the device cache.
+
+In our tests, we replayed a typical mobile daily trace pattern and found
+~9% overall time reduction in trace replay by using this patch. Also the
+write ops within 4KB~64KB chunk size range get a 40~60% performance
+improvement by using the patch (as this range of write chunks are the ones
+affected by REQ_META).
+
+This patch has been discussed in the Mobile & Embedded Linux Storage Forum
+and it is the result of feedback from many people. We also checked with
+fsdevel and f2fs mailing list developers that this change in the usage of
+REQ_META does not affect FS behavior, and we got positive feedback.
+Reporting here the feedbacks:
+http://comments.gmane.org/gmane.linux.file-systems/97219
+http://thread.gmane.org/gmane.linux.file-systems.f2fs/3178/focus=3183
+
+Signed-off-by: Bruce Ford <bford@micron.com>
+Signed-off-by: Luca Porzio <lporzio@micron.com>
+Fixes: ce39f9d17c14 ("mmc: support packed write command for eMMC4.5 devices")
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/card/block.c |   11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -59,8 +59,7 @@ MODULE_ALIAS("mmc:block");
+ #define INAND_CMD38_ARG_SECTRIM2 0x88
+ #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
+-#define mmc_req_rel_wr(req)   (((req->cmd_flags & REQ_FUA) || \
+-                                (req->cmd_flags & REQ_META)) && \
++#define mmc_req_rel_wr(req)   ((req->cmd_flags & REQ_FUA) && \
+                                 (rq_data_dir(req) == WRITE))
+ #define PACKED_CMD_VER        0x01
+ #define PACKED_CMD_WR 0x02
+@@ -1300,13 +1299,9 @@ static void mmc_blk_rw_rq_prep(struct mm
+       /*
+        * Reliable writes are used to implement Forced Unit Access and
+-       * REQ_META accesses, and are supported only on MMCs.
+-       *
+-       * XXX: this really needs a good explanation of why REQ_META
+-       * is treated special.
++       * are supported only on MMCs.
+        */
+-      bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+-                        (req->cmd_flags & REQ_META)) &&
++      bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+               (rq_data_dir(req) == WRITE) &&
+               (md->flags & MMC_BLK_REL_WR);
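
As an illustrative aside (not part of the patch itself): after this change only a FUA write is treated as a reliable write; REQ_META alone no longer triggers one. The predicate, isolated as a tiny sketch with stand-in flag values:

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the block layer request flags. */
    #define REQ_FUA   (1u << 0)
    #define REQ_META  (1u << 1)
    enum data_dir { READ_DIR, WRITE_DIR };

    struct req { unsigned cmd_flags; enum data_dir dir; };

    /* New rule from the patch: reliable write only for FUA writes. */
    static bool mmc_req_rel_wr(const struct req *r)
    {
        return (r->cmd_flags & REQ_FUA) && r->dir == WRITE_DIR;
    }

    int main(void)
    {
        struct req meta_write = { REQ_META, WRITE_DIR };
        struct req fua_write  = { REQ_FUA,  WRITE_DIR };

        printf("metadata write reliable? %d\n", mmc_req_rel_wr(&meta_write)); /* 0 */
        printf("FUA write reliable?      %d\n", mmc_req_rel_wr(&fua_write));  /* 1 */
        return 0;
    }
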
diff --git a/queue-3.10/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch b/queue-3.10/ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
new file mode 100644 (file)
index 0000000..5111477
--- /dev/null
@@ -0,0 +1,62 @@
+From b81f472a208d3e2b4392faa6d17037a89442f4ce Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 23 Nov 2015 10:35:36 -0500
+Subject: ring-buffer: Update read stamp with first real commit on page
+
+From: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
+
+commit b81f472a208d3e2b4392faa6d17037a89442f4ce upstream.
+
+Do not update the read stamp after swapping out the reader page from the
+write buffer. If the reader page is swapped out of the buffer before an
+event is written to it, then the read_stamp may get an out of date
+timestamp, as the page timestamp is updated on the first commit to that
+page.
+
+rb_get_reader_page() only returns a page if it has an event on it, otherwise
+it will return NULL. At that point, check if the page being returned has
+events and has not been read yet. Then at that point update the read_stamp
+to match the time stamp of the reader page.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c |   12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1948,12 +1948,6 @@ rb_set_commit_to_write(struct ring_buffe
+               goto again;
+ }
+-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+-{
+-      cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+-      cpu_buffer->reader_page->read = 0;
+-}
+-
+ static void rb_inc_iter(struct ring_buffer_iter *iter)
+ {
+       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+@@ -3591,7 +3585,7 @@ rb_get_reader_page(struct ring_buffer_pe
+       /* Finally update the reader page to the new head */
+       cpu_buffer->reader_page = reader;
+-      rb_reset_reader_page(cpu_buffer);
++      cpu_buffer->reader_page->read = 0;
+       if (overwrite != cpu_buffer->last_overrun) {
+               cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+@@ -3601,6 +3595,10 @@ rb_get_reader_page(struct ring_buffer_pe
+       goto again;
+  out:
++      /* Update the read_stamp on the first event */
++      if (reader && reader->read == 0)
++              cpu_buffer->read_stamp = reader->page->time_stamp;
++
+       arch_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
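
As an illustrative aside (not part of the patch itself): a reader page's timestamp is only filled in by the first commit to it, so copying it into read_stamp at swap time can record a stale value; copying it only when a page that actually holds unread events is handed out records the right one. A toy simulation of that ordering (simplified fields, not the real ring-buffer structures):

    #include <stdio.h>

    struct page_stub { unsigned long time_stamp; int events; };

    int main(void)
    {
        struct page_stub reader = { 0, 0 };  /* swapped in before any commit */
        unsigned long read_stamp;

        /* Old behaviour: stamp copied at swap time -> stale zero. */
        read_stamp = reader.time_stamp;

        /* The first commit to the page sets its timestamp afterwards. */
        reader.time_stamp = 1000;
        reader.events = 1;
        printf("stale read_stamp: %lu\n", read_stamp);

        /* New behaviour: copy the stamp only when the page is handed to a
         * reader and is known to carry events that have not been read yet. */
        if (reader.events)
            read_stamp = reader.time_stamp;
        printf("fixed read_stamp: %lu\n", read_stamp);
        return 0;
    }
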
diff --git a/queue-3.10/series b/queue-3.10/series
index 41b3e7efe0016dba89ce3996d5eb45db6a29c778..f0af1565e6ac6c50375546847fa83c1814b0897b 100644 (file)
@@ -22,3 +22,14 @@ unix-correctly-track-in-flight-fds-in-sending-process-user_struct.patch
 genirq-prevent-chip-buslock-deadlock.patch
 dts-vt8500-add-sdhc-node-to-dts-file-for-wm8650.patch
 clocksource-drivers-vt8500-increase-the-minimum-delta.patch
+lockd-create-nsm-handles-per-net-namespace.patch
+devres-fix-a-for-loop-bounds-check.patch
+wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch
+megaraid_sas-do-not-use-page_size-for-max_sectors.patch
+megaraid_sas-smap-restriction-do-not-access-user-memory-from-ioctl-code.patch
+mmc-remove-bondage-between-req_meta-and-reliable-write.patch
+mac-validate-mac_partition-is-within-sector.patch
+arc-dw2-unwind-remove-falllback-linear-search-thru-fde-entries.patch
+vfs-avoid-softlockups-with-sendfile-2.patch
+ring-buffer-update-read-stamp-with-first-real-commit-on-page.patch
+virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
diff --git a/queue-3.10/vfs-avoid-softlockups-with-sendfile-2.patch b/queue-3.10/vfs-avoid-softlockups-with-sendfile-2.patch
new file mode 100644 (file)
index 0000000..1ef7057
--- /dev/null
@@ -0,0 +1,40 @@
+From c2489e07c0a71a56fb2c84bc0ee66cddfca7d068 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 23 Nov 2015 13:09:51 +0100
+Subject: vfs: Avoid softlockups with sendfile(2)
+
+From: Jan Kara <jack@suse.cz>
+
+commit c2489e07c0a71a56fb2c84bc0ee66cddfca7d068 upstream.
+
+The following test program from Dmitry can cause softlockups or RCU
+stalls as it copies 1GB from tmpfs into eventfd and we don't have any
+scheduling point at that path in sendfile(2) implementation:
+
+        int r1 = eventfd(0, 0);
+        int r2 = memfd_create("", 0);
+        unsigned long n = 1<<30;
+        fallocate(r2, 0, 0, n);
+        sendfile(r1, r2, 0, n);
+
+Add cond_resched() into __splice_from_pipe() to fix the problem.
+
+CC: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/splice.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_i
+       splice_from_pipe_begin(sd);
+       do {
++              cond_resched();
+               ret = splice_from_pipe_next(pipe, sd);
+               if (ret > 0)
+                       ret = splice_from_pipe_feed(pipe, sd, actor);
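
As an illustrative aside (not part of the patch itself): the one added line is the usual remedy for a long-running kernel loop: call cond_resched() once per iteration so other tasks and RCU get a chance to run during a huge splice. A hedged kernel-style sketch of the pattern; demo_process_chunk() stands in for the splice_from_pipe_next()/splice_from_pipe_feed() pair:

    #include <linux/sched.h>

    /* Drain `chunks` units of work, yielding the CPU between units. */
    static int demo_drain(long chunks, int (*demo_process_chunk)(long))
    {
        long i;
        int ret = 0;

        for (i = 0; i < chunks && ret >= 0; i++) {
            /*
             * Without this, copying ~1GB inside a single sendfile(2) call
             * provides no scheduling point on non-preemptible kernels and
             * can trigger softlockup / RCU-stall warnings.
             */
            cond_resched();
            ret = demo_process_chunk(i);
        }
        return ret;
    }
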
diff --git a/queue-3.10/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch b/queue-3.10/virtio-fix-memory-leak-of-virtio-ida-cache-layers.patch
new file mode 100644 (file)
index 0000000..b1a178a
--- /dev/null
@@ -0,0 +1,39 @@
+From c13f99b7e945dad5273a8b7ee230f4d1f22d3354 Mon Sep 17 00:00:00 2001
+From: Suman Anna <s-anna@ti.com>
+Date: Wed, 16 Sep 2015 19:29:17 -0500
+Subject: virtio: fix memory leak of virtio ida cache layers
+
+From: Suman Anna <s-anna@ti.com>
+
+commit c13f99b7e945dad5273a8b7ee230f4d1f22d3354 upstream.
+
+The virtio core uses a static ida named virtio_index_ida for
+assigning index numbers to virtio devices during registration.
+The ida core may allocate some internal idr cache layers and
+an ida bitmap upon any ida allocation, and all these layers are
+truly freed only upon the ida destruction. The virtio_index_ida
+is not destroyed at present, leading to a memory leak when using
+the virtio core as a module and at least one virtio device is
+registered and unregistered.
+
+Fix this by invoking ida_destroy() in the virtio core module
+exit.
+
+Signed-off-by: Suman Anna <s-anna@ti.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -238,6 +238,7 @@ static int virtio_init(void)
+ static void __exit virtio_exit(void)
+ {
+       bus_unregister(&virtio_bus);
++      ida_destroy(&virtio_index_ida);
+ }
+ core_initcall(virtio_init);
+ module_exit(virtio_exit);
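
As an illustrative aside (not part of the patch itself): the rule being applied is that a statically defined ida owned by a module must be torn down with ida_destroy() on module exit, otherwise the idr layers and bitmap it cached internally are leaked on unload. A hedged sketch of the pattern with a made-up ida, analogous to virtio_index_ida:

    #include <linux/module.h>
    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDA(demo_ida);

    static int __init demo_init(void)
    {
        /* The first allocation may create internal cache layers. */
        int id = ida_simple_get(&demo_ida, 0, 0, GFP_KERNEL);

        return id < 0 ? id : 0;
    }

    static void __exit demo_exit(void)
    {
        /* Without this, the layers cached inside the ida outlive the module. */
        ida_destroy(&demo_ida);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
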
diff --git a/queue-3.10/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch b/queue-3.10/wm831x_power-use-irqf_oneshot-to-request-threaded-irqs.patch
new file mode 100644 (file)
index 0000000..c53f9df
--- /dev/null
@@ -0,0 +1,53 @@
+From 90adf98d9530054b8e665ba5a928de4307231d84 Mon Sep 17 00:00:00 2001
+From: Valentin Rothberg <valentinrothberg@gmail.com>
+Date: Tue, 22 Sep 2015 19:00:40 +0200
+Subject: wm831x_power: Use IRQF_ONESHOT to request threaded IRQs
+
+From: Valentin Rothberg <valentinrothberg@gmail.com>
+
+commit 90adf98d9530054b8e665ba5a928de4307231d84 upstream.
+
+Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests")
+threaded IRQs without a primary handler need to be requested with
+IRQF_ONESHOT, otherwise the request will fail.
+
+scripts/coccinelle/misc/irqf_oneshot.cocci detected this issue.
+
+Fixes: b5874f33bbaf ("wm831x_power: Use genirq")
+Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
+Signed-off-by: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/wm831x_power.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/power/wm831x_power.c
++++ b/drivers/power/wm831x_power.c
+@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct pla
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
+       ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+-                                 IRQF_TRIGGER_RISING, "System power low",
++                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
+                                  power);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
+@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct pla
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
+       ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+-                                 IRQF_TRIGGER_RISING, "Power source",
++                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
+                                  power);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
+@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct pla
+                                platform_get_irq_byname(pdev,
+                                                        wm831x_bat_irqs[i]));
+               ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+-                                         IRQF_TRIGGER_RISING,
++                                         IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                          wm831x_bat_irqs[i],
+                                          power);
+               if (ret != 0) {
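
As an illustrative aside (not part of the patch itself): since genirq rejects a threaded request that has no primary handler unless IRQF_ONESHOT is set, every request_threaded_irq(irq, NULL, thread_fn, ...) call needs the flag, which is all this patch adds. A hedged kernel-style sketch of the corrected call; the handler and device name are made up:

    #include <linux/interrupt.h>

    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
        /* slow, sleepable handling would go here */
        return IRQ_HANDLED;
    }

    static int demo_request(unsigned int irq, void *power)
    {
        /*
         * No primary handler (NULL), so IRQF_ONESHOT is mandatory: the
         * line stays masked until the thread finishes, and since commit
         * 1c6c69525b40 genirq refuses the request without it.
         */
        return request_threaded_irq(irq, NULL, demo_thread_fn,
                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                    "demo power irq", power);
    }
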