git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 4 Aug 2017 19:56:28 +0000 (12:56 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 4 Aug 2017 19:56:28 +0000 (12:56 -0700)
added patches:
net-skb_needs_check-accepts-checksum_none-for-tx.patch
pstore-allow-prz-to-control-need-for-locking.patch
pstore-correctly-initialize-spinlock-and-flags.patch
pstore-use-dynamic-spinlock-initializer.patch

queue-4.9/net-skb_needs_check-accepts-checksum_none-for-tx.patch [new file with mode: 0644]
queue-4.9/pstore-allow-prz-to-control-need-for-locking.patch [new file with mode: 0644]
queue-4.9/pstore-correctly-initialize-spinlock-and-flags.patch [new file with mode: 0644]
queue-4.9/pstore-use-dynamic-spinlock-initializer.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/net-skb_needs_check-accepts-checksum_none-for-tx.patch b/queue-4.9/net-skb_needs_check-accepts-checksum_none-for-tx.patch
new file mode 100644 (file)
index 0000000..db986ec
--- /dev/null
@@ -0,0 +1,42 @@
+From 6e7bc478c9a006c701c14476ec9d389a484b4864 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 3 Feb 2017 14:29:42 -0800
+Subject: net: skb_needs_check() accepts CHECKSUM_NONE for tx
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 6e7bc478c9a006c701c14476ec9d389a484b4864 upstream.
+
+My recent change missed the fact that UFO would perform a complete
+UDP checksum before segmenting into frags.
+
+In this case skb->ip_summed is set to CHECKSUM_NONE.
+
+We need to add this valid case to skb_needs_check().
+
+Fixes: b2504a5dbef3 ("net: reduce skb_warn_bad_offload() noise")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/dev.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2702,9 +2702,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
+ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ {
+       if (tx_path)
+-              return skb->ip_summed != CHECKSUM_PARTIAL;
+-      else
+-              return skb->ip_summed == CHECKSUM_NONE;
++              return skb->ip_summed != CHECKSUM_PARTIAL &&
++                     skb->ip_summed != CHECKSUM_NONE;
++
++      return skb->ip_summed == CHECKSUM_NONE;
+ }
+ /**
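
For reference, a reconstruction of the helper as it reads once this hunk is
applied (pieced together from the context and "+" lines above): on the tx
path, both CHECKSUM_PARTIAL and the CHECKSUM_NONE left behind by a completed
UFO checksum are now accepted without triggering skb_warn_bad_offload().

    static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
    {
    	if (tx_path)
    		return skb->ip_summed != CHECKSUM_PARTIAL &&
    		       skb->ip_summed != CHECKSUM_NONE;

    	return skb->ip_summed == CHECKSUM_NONE;
    }
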
diff --git a/queue-4.9/pstore-allow-prz-to-control-need-for-locking.patch b/queue-4.9/pstore-allow-prz-to-control-need-for-locking.patch
new file mode 100644 (file)
index 0000000..344b5cb
--- /dev/null
@@ -0,0 +1,162 @@
+From 663deb47880f2283809669563c5a52ac7c6aef1a Mon Sep 17 00:00:00 2001
+From: Joel Fernandes <joelaf@google.com>
+Date: Thu, 20 Oct 2016 00:34:01 -0700
+Subject: pstore: Allow prz to control need for locking
+
+From: Joel Fernandes <joelaf@google.com>
+
+commit 663deb47880f2283809669563c5a52ac7c6aef1a upstream.
+
+In preparation for not locking at all for certain buffers, depending on
+whether there's contention, make locking optional depending on how the
+prz is initialized.
+
+Signed-off-by: Joel Fernandes <joelaf@google.com>
+[kees: moved locking flag into prz instead of via caller arguments]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram.c            |    5 +++--
+ fs/pstore/ram_core.c       |   24 +++++++++++++++---------
+ include/linux/pstore_ram.h |   10 +++++++++-
+ 3 files changed, 27 insertions(+), 12 deletions(-)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -434,7 +434,7 @@ static int ramoops_init_przs(struct devi
+       for (i = 0; i < cxt->max_dump_cnt; i++) {
+               cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
+                                                 &cxt->ecc_info,
+-                                                cxt->memtype);
++                                                cxt->memtype, 0);
+               if (IS_ERR(cxt->przs[i])) {
+                       err = PTR_ERR(cxt->przs[i]);
+                       dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+@@ -471,7 +471,8 @@ static int ramoops_init_prz(struct devic
+               return -ENOMEM;
+       }
+-      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
++      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
++                                cxt->memtype, 0);
+       if (IS_ERR(*prz)) {
+               int err = PTR_ERR(*prz);
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -53,9 +53,10 @@ static size_t buffer_start_add(struct pe
+ {
+       int old;
+       int new;
+-      unsigned long flags;
++      unsigned long flags = 0;
+-      raw_spin_lock_irqsave(&prz->buffer_lock, flags);
++      if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++              raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+       old = atomic_read(&prz->buffer->start);
+       new = old + a;
+@@ -63,7 +64,8 @@ static size_t buffer_start_add(struct pe
+               new -= prz->buffer_size;
+       atomic_set(&prz->buffer->start, new);
+-      raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
++      if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++              raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+       return old;
+ }
+@@ -73,9 +75,10 @@ static void buffer_size_add(struct persi
+ {
+       size_t old;
+       size_t new;
+-      unsigned long flags;
++      unsigned long flags = 0;
+-      raw_spin_lock_irqsave(&prz->buffer_lock, flags);
++      if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++              raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+       old = atomic_read(&prz->buffer->size);
+       if (old == prz->buffer_size)
+@@ -87,7 +90,8 @@ static void buffer_size_add(struct persi
+       atomic_set(&prz->buffer->size, new);
+ exit:
+-      raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
++      if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++              raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+ }
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+@@ -463,7 +467,8 @@ static int persistent_ram_buffer_map(phy
+ }
+ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+-                                  struct persistent_ram_ecc_info *ecc_info)
++                                  struct persistent_ram_ecc_info *ecc_info,
++                                  unsigned long flags)
+ {
+       int ret;
+@@ -492,6 +497,7 @@ static int persistent_ram_post_init(stru
+       prz->buffer->sig = sig;
+       persistent_ram_zap(prz);
+       prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++      prz->flags = flags;
+       return 0;
+ }
+@@ -516,7 +522,7 @@ void persistent_ram_free(struct persiste
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+-                      unsigned int memtype)
++                      unsigned int memtype, u32 flags)
+ {
+       struct persistent_ram_zone *prz;
+       int ret = -ENOMEM;
+@@ -531,7 +537,7 @@ struct persistent_ram_zone *persistent_r
+       if (ret)
+               goto err;
+-      ret = persistent_ram_post_init(prz, sig, ecc_info);
++      ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
+       if (ret)
+               goto err;
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -24,6 +24,13 @@
+ #include <linux/list.h>
+ #include <linux/types.h>
++/*
++ * Choose whether access to the RAM zone requires locking or not.  If a zone
++ * can be written to from different CPUs like with ftrace for example, then
++ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
++ */
++#define PRZ_FLAG_NO_LOCK      BIT(0)
++
+ struct persistent_ram_buffer;
+ struct rs_control;
+@@ -40,6 +47,7 @@ struct persistent_ram_zone {
+       void *vaddr;
+       struct persistent_ram_buffer *buffer;
+       size_t buffer_size;
++      u32 flags;
+       raw_spinlock_t buffer_lock;
+       /* ECC correction */
+@@ -56,7 +64,7 @@ struct persistent_ram_zone {
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+-                      unsigned int memtype);
++                      unsigned int memtype, u32 flags);
+ void persistent_ram_free(struct persistent_ram_zone *prz);
+ void persistent_ram_zap(struct persistent_ram_zone *prz);
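
As an illustration only (not part of this backport, where every caller shown
above still passes 0 for the new flags argument): a hypothetical follow-up
caller whose zone already has its own serialization — the per-CPU ftrace case
mentioned in the header comment — could opt out of buffer_lock like this,
mirroring the ramoops_init_prz() call shape from the hunk above:

    	/* Hypothetical caller: zone has a single writer per CPU, so
    	 * skip buffer_lock by passing PRZ_FLAG_NO_LOCK. */
    	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
    				  cxt->memtype, PRZ_FLAG_NO_LOCK);
    	if (IS_ERR(*prz))
    		return PTR_ERR(*prz);
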
diff --git a/queue-4.9/pstore-correctly-initialize-spinlock-and-flags.patch b/queue-4.9/pstore-correctly-initialize-spinlock-and-flags.patch
new file mode 100644 (file)
index 0000000..a732b0c
--- /dev/null
@@ -0,0 +1,113 @@
+From 76d5692a58031696e282384cbd893832bc92bd76 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 9 Feb 2017 15:43:44 -0800
+Subject: pstore: Correctly initialize spinlock and flags
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 76d5692a58031696e282384cbd893832bc92bd76 upstream.
+
+The ram backend wasn't always initializing its spinlock correctly. Since
+it was coming from kzalloc memory, though, it was harmless on
+architectures that initialize unlocked spinlocks to 0 (at least x86 and
+ARM). This also fixes a possibly ignored flag setting.
+
+When running under CONFIG_DEBUG_SPINLOCK, the following Oops was visible:
+
+[    0.760836] persistent_ram: found existing buffer, size 29988, start 29988
+[    0.765112] persistent_ram: found existing buffer, size 30105, start 30105
+[    0.769435] persistent_ram: found existing buffer, size 118542, start 118542
+[    0.785960] persistent_ram: found existing buffer, size 0, start 0
+[    0.786098] persistent_ram: found existing buffer, size 0, start 0
+[    0.786131] pstore: using zlib compression
+[    0.790716] BUG: spinlock bad magic on CPU#0, swapper/0/1
+[    0.790729]  lock: 0xffffffc0d1ca9bb0, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
+[    0.790742] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.10.0-rc2+ #913
+[    0.790747] Hardware name: Google Kevin (DT)
+[    0.790750] Call trace:
+[    0.790768] [<ffffff900808ae88>] dump_backtrace+0x0/0x2bc
+[    0.790780] [<ffffff900808b164>] show_stack+0x20/0x28
+[    0.790794] [<ffffff9008460ee0>] dump_stack+0xa4/0xcc
+[    0.790809] [<ffffff9008113cfc>] spin_dump+0xe0/0xf0
+[    0.790821] [<ffffff9008113d3c>] spin_bug+0x30/0x3c
+[    0.790834] [<ffffff9008113e28>] do_raw_spin_lock+0x50/0x1b8
+[    0.790846] [<ffffff9008a2d2ec>] _raw_spin_lock_irqsave+0x54/0x6c
+[    0.790862] [<ffffff90083ac3b4>] buffer_size_add+0x48/0xcc
+[    0.790875] [<ffffff90083acb34>] persistent_ram_write+0x60/0x11c
+[    0.790888] [<ffffff90083aab1c>] ramoops_pstore_write_buf+0xd4/0x2a4
+[    0.790900] [<ffffff90083a9d3c>] pstore_console_write+0xf0/0x134
+[    0.790912] [<ffffff900811c304>] console_unlock+0x48c/0x5e8
+[    0.790923] [<ffffff900811da18>] register_console+0x3b0/0x4d4
+[    0.790935] [<ffffff90083aa7d0>] pstore_register+0x1a8/0x234
+[    0.790947] [<ffffff90083ac250>] ramoops_probe+0x6b8/0x7d4
+[    0.790961] [<ffffff90085ca548>] platform_drv_probe+0x7c/0xd0
+[    0.790972] [<ffffff90085c76ac>] driver_probe_device+0x1b4/0x3bc
+[    0.790982] [<ffffff90085c7ac8>] __device_attach_driver+0xc8/0xf4
+[    0.790996] [<ffffff90085c4bfc>] bus_for_each_drv+0xb4/0xe4
+[    0.791006] [<ffffff90085c7414>] __device_attach+0xd0/0x158
+[    0.791016] [<ffffff90085c7b18>] device_initial_probe+0x24/0x30
+[    0.791026] [<ffffff90085c648c>] bus_probe_device+0x50/0xe4
+[    0.791038] [<ffffff90085c35b8>] device_add+0x3a4/0x76c
+[    0.791051] [<ffffff90087d0e84>] of_device_add+0x74/0x84
+[    0.791062] [<ffffff90087d19b8>] of_platform_device_create_pdata+0xc0/0x100
+[    0.791073] [<ffffff90087d1a2c>] of_platform_device_create+0x34/0x40
+[    0.791086] [<ffffff900903c910>] of_platform_default_populate_init+0x58/0x78
+[    0.791097] [<ffffff90080831fc>] do_one_initcall+0x88/0x160
+[    0.791109] [<ffffff90090010ac>] kernel_init_freeable+0x264/0x31c
+[    0.791123] [<ffffff9008a25bd0>] kernel_init+0x18/0x11c
+[    0.791133] [<ffffff9008082ec0>] ret_from_fork+0x10/0x50
+[    0.793717] console [pstore-1] enabled
+[    0.797845] pstore: Registered ramoops as persistent store backend
+[    0.804647] ramoops: attached 0x100000@0xf7edc000, ecc: 0/0
+
+Fixes: 663deb47880f ("pstore: Allow prz to control need for locking")
+Fixes: 109704492ef6 ("pstore: Make spinlock per zone instead of global")
+Reported-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -467,8 +467,7 @@ static int persistent_ram_buffer_map(phy
+ }
+ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+-                                  struct persistent_ram_ecc_info *ecc_info,
+-                                  unsigned long flags)
++                                  struct persistent_ram_ecc_info *ecc_info)
+ {
+       int ret;
+@@ -494,10 +493,9 @@ static int persistent_ram_post_init(stru
+                        prz->buffer->sig);
+       }
++      /* Rewind missing or invalid memory area. */
+       prz->buffer->sig = sig;
+       persistent_ram_zap(prz);
+-      prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
+-      prz->flags = flags;
+       return 0;
+ }
+@@ -533,11 +531,15 @@ struct persistent_ram_zone *persistent_r
+               goto err;
+       }
++      /* Initialize general buffer state. */
++      prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++      prz->flags = flags;
++
+       ret = persistent_ram_buffer_map(start, size, prz, memtype);
+       if (ret)
+               goto err;
+-      ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
++      ret = persistent_ram_post_init(prz, sig, ecc_info);
+       if (ret)
+               goto err;
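
Reconstructed from the hunks above, the relevant part of persistent_ram_new()
now reads as follows; the lock and flags are set up before mapping and
post-init, rather than on a post-init path that could be skipped when an
existing valid buffer was found (the "found existing buffer" + "spinlock bad
magic" sequence in the log above):

    	/* Initialize general buffer state before buffer_map/post_init,
    	 * so no code path can take an uninitialized buffer_lock. */
    	prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
    	prz->flags = flags;

    	ret = persistent_ram_buffer_map(start, size, prz, memtype);
    	if (ret)
    		goto err;

    	ret = persistent_ram_post_init(prz, sig, ecc_info);
    	if (ret)
    		goto err;
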
diff --git a/queue-4.9/pstore-use-dynamic-spinlock-initializer.patch b/queue-4.9/pstore-use-dynamic-spinlock-initializer.patch
new file mode 100644 (file)
index 0000000..6300145
--- /dev/null
@@ -0,0 +1,33 @@
+From e9a330c4289f2ba1ca4bf98c2b430ab165a8931b Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Sun, 5 Mar 2017 22:08:58 -0800
+Subject: pstore: Use dynamic spinlock initializer
+
+From: Kees Cook <keescook@chromium.org>
+
+commit e9a330c4289f2ba1ca4bf98c2b430ab165a8931b upstream.
+
+The per-prz spinlock should be using the dynamic initializer so that
+lockdep can correctly track it. Without this, under lockdep, we get a
+warning at boot that the lock is in non-static memory.
+
+Fixes: 109704492ef6 ("pstore: Make spinlock per zone instead of global")
+Fixes: 76d5692a5803 ("pstore: Correctly initialize spinlock and flags")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -532,7 +532,7 @@ struct persistent_ram_zone *persistent_r
+       }
+       /* Initialize general buffer state. */
+-      prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++      raw_spin_lock_init(&prz->buffer_lock);
+       prz->flags = flags;
+       ret = persistent_ram_buffer_map(start, size, prz, memtype);
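
A minimal sketch of the general rule this follows (the kzalloc() line is from
persistent_ram_new() upstream, not from this hunk): a lock embedded in
dynamically allocated memory should use the *_init() form so lockdep can
register a lock class key for it, while __RAW_SPIN_LOCK_UNLOCKED() is meant
for locks with static storage.

    	struct persistent_ram_zone *prz;

    	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
    	if (!prz)
    		return NULL;

    	/* Dynamic init: gives lockdep a key for this heap-allocated lock. */
    	raw_spin_lock_init(&prz->buffer_lock);
    	prz->flags = flags;
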
diff --git a/queue-4.9/series b/queue-4.9/series
index 698cdc5f6a76dd115cee037e79a8b4c9eade3e50..ce39558381627d9e7042bf0d789d2b595e14f8d5 100644 (file)
--- a/queue-4.9/series
@@ -38,3 +38,7 @@ rdma-uverbs-fix-the-check-for-port-number.patch
 ipmi-watchdog-fix-watchdog-timeout-set-on-reboot.patch
 dentry-name-snapshots.patch
 v4l-s5c73m3-fix-negation-operator.patch
+pstore-allow-prz-to-control-need-for-locking.patch
+pstore-correctly-initialize-spinlock-and-flags.patch
+pstore-use-dynamic-spinlock-initializer.patch
+net-skb_needs_check-accepts-checksum_none-for-tx.patch