git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Jul 2017 01:33:48 +0000 (18:33 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Jul 2017 01:33:48 +0000 (18:33 -0700)
added patches:
af_key-add-lock-to-key-dump.patch
pstore-make-spinlock-per-zone-instead-of-global.patch

queue-4.9/af_key-add-lock-to-key-dump.patch [new file with mode: 0644]
queue-4.9/pstore-make-spinlock-per-zone-instead-of-global.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/af_key-add-lock-to-key-dump.patch b/queue-4.9/af_key-add-lock-to-key-dump.patch
new file mode 100644
index 0000000..3d93b90
--- /dev/null
@@ -0,0 +1,147 @@
+From 89e357d83c06b6fac581c3ca7f0ee3ae7e67109e Mon Sep 17 00:00:00 2001
+From: Yuejie Shi <syjcnss@gmail.com>
+Date: Fri, 31 Mar 2017 15:10:20 +0800
+Subject: af_key: Add lock to key dump
+
+From: Yuejie Shi <syjcnss@gmail.com>
+
+commit 89e357d83c06b6fac581c3ca7f0ee3ae7e67109e upstream.
+
+A dump may come in the middle of another dump, modifying its dump
+structure members. This race condition will result in a NULL pointer
+dereference in the kernel, so add a lock to prevent that race.
+
+Fixes: 83321d6b9872 ("[AF_KEY]: Dump SA/SP entries non-atomically")
+Signed-off-by: Yuejie Shi <syjcnss@gmail.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Mark Salyzyn <salyzyn@android.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/key/af_key.c |   46 ++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 38 insertions(+), 8 deletions(-)
+
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -63,6 +63,7 @@ struct pfkey_sock {
+               } u;
+               struct sk_buff  *skb;
+       } dump;
++      struct mutex dump_lock;
+ };
+ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+@@ -143,6 +144,7 @@ static int pfkey_create(struct net *net,
+ {
+       struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
+       struct sock *sk;
++      struct pfkey_sock *pfk;
+       int err;
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+@@ -157,6 +159,9 @@ static int pfkey_create(struct net *net,
+       if (sk == NULL)
+               goto out;
++      pfk = pfkey_sk(sk);
++      mutex_init(&pfk->dump_lock);
++
+       sock->ops = &pfkey_ops;
+       sock_init_data(sock, sk);
+@@ -285,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_so
+       struct sadb_msg *hdr;
+       int rc;
++      mutex_lock(&pfk->dump_lock);
++      if (!pfk->dump.dump) {
++              rc = 0;
++              goto out;
++      }
++
+       rc = pfk->dump.dump(pfk);
+-      if (rc == -ENOBUFS)
+-              return 0;
++      if (rc == -ENOBUFS) {
++              rc = 0;
++              goto out;
++      }
+       if (pfk->dump.skb) {
+-              if (!pfkey_can_dump(&pfk->sk))
+-                      return 0;
++              if (!pfkey_can_dump(&pfk->sk)) {
++                      rc = 0;
++                      goto out;
++              }
+               hdr = (struct sadb_msg *) pfk->dump.skb->data;
+               hdr->sadb_msg_seq = 0;
+@@ -302,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_so
+       }
+       pfkey_terminate_dump(pfk);
++
++out:
++      mutex_unlock(&pfk->dump_lock);
+       return rc;
+ }
+@@ -1806,19 +1824,26 @@ static int pfkey_dump(struct sock *sk, s
+       struct xfrm_address_filter *filter = NULL;
+       struct pfkey_sock *pfk = pfkey_sk(sk);
+-      if (pfk->dump.dump != NULL)
++      mutex_lock(&pfk->dump_lock);
++      if (pfk->dump.dump != NULL) {
++              mutex_unlock(&pfk->dump_lock);
+               return -EBUSY;
++      }
+       proto = pfkey_satype2proto(hdr->sadb_msg_satype);
+-      if (proto == 0)
++      if (proto == 0) {
++              mutex_unlock(&pfk->dump_lock);
+               return -EINVAL;
++      }
+       if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+               struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+               filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+-              if (filter == NULL)
++              if (filter == NULL) {
++                      mutex_unlock(&pfk->dump_lock);
+                       return -ENOMEM;
++              }
+               memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
+                      sizeof(xfrm_address_t));
+@@ -1834,6 +1859,7 @@ static int pfkey_dump(struct sock *sk, s
+       pfk->dump.dump = pfkey_dump_sa;
+       pfk->dump.done = pfkey_dump_sa_done;
+       xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
++      mutex_unlock(&pfk->dump_lock);
+       return pfkey_do_dump(pfk);
+ }
+@@ -2693,14 +2719,18 @@ static int pfkey_spddump(struct sock *sk
+ {
+       struct pfkey_sock *pfk = pfkey_sk(sk);
+-      if (pfk->dump.dump != NULL)
++      mutex_lock(&pfk->dump_lock);
++      if (pfk->dump.dump != NULL) {
++              mutex_unlock(&pfk->dump_lock);
+               return -EBUSY;
++      }
+       pfk->dump.msg_version = hdr->sadb_msg_version;
+       pfk->dump.msg_portid = hdr->sadb_msg_pid;
+       pfk->dump.dump = pfkey_dump_sp;
+       pfk->dump.done = pfkey_dump_sp_done;
+       xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
++      mutex_unlock(&pfk->dump_lock);
+       return pfkey_do_dump(pfk);
+ }
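
The af_key patch above applies a check-and-claim pattern: pfkey_dump() and
pfkey_spddump() test pfk->dump.dump under the new per-socket mutex and return
-EBUSY if a dump is already in flight, and pfkey_do_dump() re-checks the
pointer under the same mutex because a concurrent completion may already have
run pfkey_terminate_dump(). The following is a minimal user-space sketch of
that pattern, not the kernel code itself: a pthread mutex stands in for
pfk->dump_lock, and the names dump_state, start_dump, and continue_dump are
invented for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct dump_state {
	pthread_mutex_t lock;              /* plays the role of pfk->dump_lock */
	int (*dump)(struct dump_state *);  /* non-NULL while a dump is active */
};

/* Stand-in for one dump iteration; 0 means "dump finished". */
static int do_one_chunk(struct dump_state *d)
{
	(void)d;
	return 0;
}

/* Mirrors pfkey_dump(): claim the dump slot under the lock or fail. */
static int start_dump(struct dump_state *d)
{
	pthread_mutex_lock(&d->lock);
	if (d->dump) {                     /* another dump is in flight */
		pthread_mutex_unlock(&d->lock);
		return -EBUSY;
	}
	d->dump = do_one_chunk;            /* claim while still holding the lock */
	pthread_mutex_unlock(&d->lock);
	return 0;
}

/* Mirrors pfkey_do_dump(): re-check under the lock, since a concurrent
 * completion may already have cleared d->dump. */
static int continue_dump(struct dump_state *d)
{
	int rc = 0;

	pthread_mutex_lock(&d->lock);
	if (d->dump)
		rc = d->dump(d);
	if (rc <= 0)
		d->dump = NULL;            /* terminate: finished or failed */
	pthread_mutex_unlock(&d->lock);
	return rc;
}

int main(void)
{
	struct dump_state d = { PTHREAD_MUTEX_INITIALIZER, NULL };

	printf("first start:  %d\n", start_dump(&d));    /* 0 */
	printf("second start: %d\n", start_dump(&d));    /* -EBUSY */
	printf("continue:     %d\n", continue_dump(&d)); /* 0, slot cleared */
	printf("restart:      %d\n", start_dump(&d));    /* 0 again */
	return 0;
}

Built with cc -pthread, the second start_dump() reports -EBUSY while the
first dump is registered and succeeds again once continue_dump() has cleared
the slot, which is the serialization the patch adds around the pfkey dump
state.
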
diff --git a/queue-4.9/pstore-make-spinlock-per-zone-instead-of-global.patch b/queue-4.9/pstore-make-spinlock-per-zone-instead-of-global.patch
new file mode 100644
index 0000000..92786d0
--- /dev/null
@@ -0,0 +1,93 @@
+From 109704492ef637956265ec2eb72ae7b3b39eb6f4 Mon Sep 17 00:00:00 2001
+From: Joel Fernandes <joelaf@google.com>
+Date: Thu, 20 Oct 2016 00:34:00 -0700
+Subject: pstore: Make spinlock per zone instead of global
+
+From: Joel Fernandes <joelaf@google.com>
+
+commit 109704492ef637956265ec2eb72ae7b3b39eb6f4 upstream.
+
+Currently pstore has a global spinlock for all zones. Since the zones
+are independent and modify different areas of memory, there is no need
+for a global lock, so use a per-zone lock as introduced here. Also,
+when a FTRACE_PER_CPU flag is introduced later for ramoops's ftrace
+use-case, splitting the ftrace memory area into one zone per CPU, those
+per-CPU zones will need no locking at all. In preparation for this,
+make the locking optional.
+
+Signed-off-by: Joel Fernandes <joelaf@google.com>
+[kees: updated commit message]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Cc: Leo Yan <leo.yan@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c       |   11 +++++------
+ include/linux/pstore_ram.h |    1 +
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -48,8 +48,6 @@ static inline size_t buffer_start(struct
+       return atomic_read(&prz->buffer->start);
+ }
+-static DEFINE_RAW_SPINLOCK(buffer_lock);
+-
+ /* increase and wrap the start pointer, returning the old value */
+ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ {
+@@ -57,7 +55,7 @@ static size_t buffer_start_add(struct pe
+       int new;
+       unsigned long flags;
+-      raw_spin_lock_irqsave(&buffer_lock, flags);
++      raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+       old = atomic_read(&prz->buffer->start);
+       new = old + a;
+@@ -65,7 +63,7 @@ static size_t buffer_start_add(struct pe
+               new -= prz->buffer_size;
+       atomic_set(&prz->buffer->start, new);
+-      raw_spin_unlock_irqrestore(&buffer_lock, flags);
++      raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+       return old;
+ }
+@@ -77,7 +75,7 @@ static void buffer_size_add(struct persi
+       size_t new;
+       unsigned long flags;
+-      raw_spin_lock_irqsave(&buffer_lock, flags);
++      raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+       old = atomic_read(&prz->buffer->size);
+       if (old == prz->buffer_size)
+@@ -89,7 +87,7 @@ static void buffer_size_add(struct persi
+       atomic_set(&prz->buffer->size, new);
+ exit:
+-      raw_spin_unlock_irqrestore(&buffer_lock, flags);
++      raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+ }
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+@@ -493,6 +491,7 @@ static int persistent_ram_post_init(stru
+       prz->buffer->sig = sig;
+       persistent_ram_zap(prz);
++      prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
+       return 0;
+ }
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -40,6 +40,7 @@ struct persistent_ram_zone {
+       void *vaddr;
+       struct persistent_ram_buffer *buffer;
+       size_t buffer_size;
++      raw_spinlock_t buffer_lock;
+       /* ECC correction */
+       char *par_buffer;
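
The pstore patch above moves the lock into the object it protects: each
persistent_ram_zone now carries its own buffer_lock, initialized in
persistent_ram_post_init(), so writers to independent zones no longer contend
on one global spinlock. Below is a minimal user-space sketch of that
per-object lock pattern, under stated assumptions: a pthread spinlock stands
in for raw_spinlock_t, and struct zone, zone_init(), and zone_start_add() are
hypothetical names, not the kernel API.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for struct persistent_ram_zone: the lock lives inside
 * the zone it protects, so different zones never share a lock. */
struct zone {
	pthread_spinlock_t lock;   /* plays the role of prz->buffer_lock */
	size_t start;              /* wrapping write position */
	size_t size;               /* capacity of this zone's buffer */
};

/* Mirrors persistent_ram_post_init(): every zone initializes its own lock. */
static void zone_init(struct zone *z, size_t size)
{
	pthread_spin_init(&z->lock, PTHREAD_PROCESS_PRIVATE);
	z->start = 0;
	z->size = size;
}

/* Mirrors buffer_start_add(): advance and wrap the start pointer while
 * holding only this zone's lock, returning the old value. */
static size_t zone_start_add(struct zone *z, size_t a)
{
	size_t old;

	pthread_spin_lock(&z->lock);
	old = z->start;
	z->start = (old + a) % z->size;
	pthread_spin_unlock(&z->lock);
	return old;
}

int main(void)
{
	struct zone dmesg_zone, ftrace_zone;

	zone_init(&dmesg_zone, 4096);
	zone_init(&ftrace_zone, 1024);

	/* Updates to different zones take different locks and can proceed
	 * concurrently, unlike with the old global buffer_lock. */
	printf("dmesg old start:  %zu\n", zone_start_add(&dmesg_zone, 100));
	printf("ftrace old start: %zu\n", zone_start_add(&ftrace_zone, 2000));
	return 0;
}

The design point mirrored here is that lock granularity follows data
ownership: once each zone owns its lock, contention is bounded by sharing of
a single zone, which is what later lets a one-zone-per-CPU ftrace layout drop
locking entirely.
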
diff --git a/queue-4.9/series b/queue-4.9/series
index 363bf4a361bd097af933be31fa6758228cb62465..4c9e8d0eac5c5e8ddcd0a94d06c18b969326d222 100644
--- a/queue-4.9/series
@@ -1 +1,2 @@
 af_key-add-lock-to-key-dump.patch
+pstore-make-spinlock-per-zone-instead-of-global.patch