git.ipfire.org Git - thirdparty/kernel/stable-queue.git / commitdiff
Fixes for 4.4
author     Sasha Levin <sashal@kernel.org>
           Sat, 5 Sep 2020 16:56:27 +0000 (12:56 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Sat, 5 Sep 2020 16:56:27 +0000 (12:56 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.4/ceph-don-t-allow-setlease-on-cephfs.patch [new file with mode: 0644]
queue-4.4/hwmon-applesmc-check-status-earlier.patch [new file with mode: 0644]
queue-4.4/s390-don-t-trace-preemption-in-percpu-macros.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/xen-xenbus-fix-granting-of-vmalloc-d-memory.patch [new file with mode: 0644]

diff --git a/queue-4.4/ceph-don-t-allow-setlease-on-cephfs.patch b/queue-4.4/ceph-don-t-allow-setlease-on-cephfs.patch
new file mode 100644
index 0000000..2798228
--- /dev/null
+++ b/queue-4.4/ceph-don-t-allow-setlease-on-cephfs.patch
@@ -0,0 +1,39 @@
+From e2b042d890bbcf6c0ff7bd834f525beb371f0426 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 11:00:26 -0400
+Subject: ceph: don't allow setlease on cephfs
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit 496ceaf12432b3d136dcdec48424312e71359ea7 ]
+
+Leases don't currently work correctly on kcephfs, as they are not broken
+when caps are revoked. They could eventually be implemented similarly to
+how we did them in libcephfs, but for now don't allow them.
+
+[ idryomov: no need for simple_nosetlease() in ceph_dir_fops and
+  ceph_snapdir_fops ]
+
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/file.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index c8222bfe1e566..3e6ebe40f06fb 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1433,6 +1433,7 @@ const struct file_operations ceph_file_fops = {
+       .mmap = ceph_mmap,
+       .fsync = ceph_fsync,
+       .lock = ceph_lock,
++      .setlease = simple_nosetlease,
+       .flock = ceph_flock,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
+-- 
+2.25.1
+
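
What the one-line ceph change above means in practice, shown as a minimal user-space sketch (the /mnt/cephfs mount point and file name are hypothetical, not taken from the patch): simple_nosetlease() rejects every lease request with -EINVAL, so with this backport an F_SETLEASE on a kcephfs file is expected to fail instead of granting a lease that would never be broken when caps are revoked.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical cephfs mount point and file name, for illustration only. */
	int fd = open("/mnt/cephfs/leasetest", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With .setlease wired to simple_nosetlease(), this should fail with EINVAL. */
	if (fcntl(fd, F_SETLEASE, F_WRLCK) == -1)
		printf("F_SETLEASE rejected: %s\n", strerror(errno));
	else
		printf("lease granted (kernel without this fix)\n");

	close(fd);
	return 0;
}
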
diff --git a/queue-4.4/hwmon-applesmc-check-status-earlier.patch b/queue-4.4/hwmon-applesmc-check-status-earlier.patch
new file mode 100644
index 0000000..6fa6502
--- /dev/null
+++ b/queue-4.4/hwmon-applesmc-check-status-earlier.patch
@@ -0,0 +1,123 @@
+From 158dcf8dd78cf1a59ffc2aacd88afbe96e0ce803 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 06:19:32 -0700
+Subject: hwmon: (applesmc) check status earlier.
+
+From: Tom Rix <trix@redhat.com>
+
+[ Upstream commit cecf7560f00a8419396a2ed0f6e5d245ccb4feac ]
+
+clang static analysis reports this representative problem
+
+applesmc.c:758:10: warning: 1st function call argument is an
+  uninitialized value
+        left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
+               ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+buffer is filled by the earlier call
+
+       ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, ...
+
+This problem is reported because a goto skips the status check.
+Other similar problems use data from applesmc_read_key before checking
+the status.  So move the checks to before the use.
+
+Signed-off-by: Tom Rix <trix@redhat.com>
+Reviewed-by: Henrik Rydberg <rydberg@bitmath.org>
+Link: https://lore.kernel.org/r/20200820131932.10590-1-trix@redhat.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/applesmc.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 0af7fd311979d..587fc5c686b3c 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -758,15 +758,18 @@ static ssize_t applesmc_light_show(struct device *dev,
+       }
+       ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
++      if (ret)
++              goto out;
+       /* newer macbooks report a single 10-bit bigendian value */
+       if (data_length == 10) {
+               left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
+               goto out;
+       }
+       left = buffer[2];
++
++      ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
+       if (ret)
+               goto out;
+-      ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
+       right = buffer[2];
+ out:
+@@ -814,12 +817,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
+       sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+       ret = applesmc_read_key(newkey, buffer, 2);
+-      speed = ((buffer[0] << 8 | buffer[1]) >> 2);
+-
+       if (ret)
+               return ret;
+-      else
+-              return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
++
++      speed = ((buffer[0] << 8 | buffer[1]) >> 2);
++      return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
+ }
+ static ssize_t applesmc_store_fan_speed(struct device *dev,
+@@ -854,12 +856,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
+       u8 buffer[2];
+       ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
+-      manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+-
+       if (ret)
+               return ret;
+-      else
+-              return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
++
++      manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
++      return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
+ }
+ static ssize_t applesmc_store_fan_manual(struct device *dev,
+@@ -875,10 +876,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
+               return -EINVAL;
+       ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
+-      val = (buffer[0] << 8 | buffer[1]);
+       if (ret)
+               goto out;
++      val = (buffer[0] << 8 | buffer[1]);
++
+       if (input)
+               val = val | (0x01 << to_index(attr));
+       else
+@@ -954,13 +956,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
+       u32 count;
+       ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
+-      count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
+-                                              ((u32)buffer[2]<<8) + buffer[3];
+-
+       if (ret)
+               return ret;
+-      else
+-              return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
++
++      count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
++                                              ((u32)buffer[2]<<8) + buffer[3];
++      return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
+ }
+ static ssize_t applesmc_key_at_index_read_show(struct device *dev,
+-- 
+2.25.1
+
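
The applesmc patch above repeats one pattern across several sysfs show routines: the return code of applesmc_read_key() must be checked before the buffer it fills is decoded. A distilled stand-alone sketch of that pattern (read_key() and the key string are illustrative stand-ins, not the driver's real API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for applesmc_read_key(): may fail and leave buf uninitialized. */
static int read_key(const char *key, uint8_t *buf, int len)
{
	(void)key; (void)buf; (void)len;
	return -1;		/* simulate an SMC read failure */
}

static int show_fan_speed(char *out, size_t outlen)
{
	uint8_t buffer[2];
	unsigned int speed;
	int ret = read_key("F0Ac", buffer, 2);

	if (ret)		/* check the status first ...               */
		return ret;
	/* ... only now is buffer known to hold valid data */
	speed = (buffer[0] << 8 | buffer[1]) >> 2;
	return snprintf(out, outlen, "%u\n", speed);
}

int main(void)
{
	char out[16];

	printf("show_fan_speed() returned %d\n", show_fan_speed(out, sizeof(out)));
	return 0;
}
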
diff --git a/queue-4.4/s390-don-t-trace-preemption-in-percpu-macros.patch b/queue-4.4/s390-don-t-trace-preemption-in-percpu-macros.patch
new file mode 100644
index 0000000..b03e555
--- /dev/null
+++ b/queue-4.4/s390-don-t-trace-preemption-in-percpu-macros.patch
@@ -0,0 +1,139 @@
+From 73b1514318909b3745bfb14067f1764e01b4ac72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 09:48:23 +0200
+Subject: s390: don't trace preemption in percpu macros
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+[ Upstream commit 1196f12a2c960951d02262af25af0bb1775ebcc2 ]
+
+Since commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context}
+to per-cpu variables") the lockdep code itself uses percpu variables. This
+leads to recursions because the percpu macros are calling preempt_enable()
+which might call trace_preempt_on().
+
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/percpu.h | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
+index 6d6556ca24aa2..f715419a72cf0 100644
+--- a/arch/s390/include/asm/percpu.h
++++ b/arch/s390/include/asm/percpu.h
+@@ -28,7 +28,7 @@
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ old__, new__, prev__;                                \
+       pcp_op_T__ *ptr__;                                              \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                                    \
+       prev__ = *ptr__;                                                \
+       do {                                                            \
+@@ -36,7 +36,7 @@
+               new__ = old__ op (val);                                 \
+               prev__ = cmpxchg(ptr__, old__, new__);                  \
+       } while (prev__ != old__);                                      \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+       new__;                                                          \
+ })
+@@ -67,7 +67,7 @@
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                            \
+       if (__builtin_constant_p(val__) &&                              \
+           ((szcast)val__ > -129) && ((szcast)val__ < 128)) {          \
+@@ -83,7 +83,7 @@
+                       : [val__] "d" (val__)                           \
+                       : "cc");                                        \
+       }                                                               \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+ }
+ #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+@@ -94,14 +94,14 @@
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                                    \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                               \
+       old__ + val__;                                                  \
+ })
+@@ -113,14 +113,14 @@
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                                    \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+ }
+ #define this_cpu_and_4(pcp, val)      arch_this_cpu_to_op(pcp, val, "lan")
+@@ -135,10 +135,10 @@
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ ret__;                                               \
+       pcp_op_T__ *ptr__;                                              \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                                    \
+       ret__ = cmpxchg(ptr__, oval, nval);                             \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+       ret__;                                                          \
+ })
+@@ -151,10 +151,10 @@
+ ({                                                                    \
+       typeof(pcp) *ptr__;                                             \
+       typeof(pcp) ret__;                                              \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       ptr__ = raw_cpu_ptr(&(pcp));                                    \
+       ret__ = xchg(ptr__, nval);                                      \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+       ret__;                                                          \
+ })
+@@ -170,11 +170,11 @@
+       typeof(pcp1) *p1__;                                             \
+       typeof(pcp2) *p2__;                                             \
+       int ret__;                                                      \
+-      preempt_disable();                                              \
++      preempt_disable_notrace();                                      \
+       p1__ = raw_cpu_ptr(&(pcp1));                                    \
+       p2__ = raw_cpu_ptr(&(pcp2));                                    \
+       ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__);   \
+-      preempt_enable();                                               \
++      preempt_enable_notrace();                                       \
+       ret__;                                                          \
+ })
+-- 
+2.25.1
+
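
The recursion that the s390 patch above breaks can be reproduced in miniature: an instrumentation hook that itself uses the primitive it instruments will re-enter itself unless a "notrace" variant bypasses the hook. A small user-space analogy (plain C, not kernel code; all function names here are invented for illustration):

#include <stdio.h>

static int depth;

static void trace_hook(void);

/* Traced variant: fires the hook, like preempt_enable() -> trace_preempt_on(). */
static void enable(void)
{
	trace_hook();
}

/* "notrace" variant: same semantics, but never calls into the tracer. */
static void enable_notrace(void)
{
}

static void trace_hook(void)
{
	if (++depth > 3) {
		puts("recursion detected, bailing out");
		return;
	}
	/* The tracer itself needs the primitive, so the traced variant re-enters. */
	enable();
}

int main(void)
{
	depth = 0;
	enable();
	printf("traced path re-entered the hook %d times\n", depth);

	depth = 0;
	enable_notrace();
	printf("notrace path re-entered the hook %d times\n", depth);
	return 0;
}
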
diff --git a/queue-4.4/series b/queue-4.4/series
index 026c903bf434427134b3c481f99d8f4ea26a9420..886f9ce51aba12fbc4f36edf85f6e9e823fcf57d 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -2,3 +2,7 @@ hid-core-correctly-handle-reportsize-being-zero.patch
 hid-core-sanitize-event-code-and-type-when-mapping-input.patch
 perf-record-stat-explicitly-call-out-event-modifiers-in-the-documentation.patch
 mm-page_alloc-remove-unnecessary-variable-from-free_pcppages_bulk.patch
+hwmon-applesmc-check-status-earlier.patch
+ceph-don-t-allow-setlease-on-cephfs.patch
+s390-don-t-trace-preemption-in-percpu-macros.patch
+xen-xenbus-fix-granting-of-vmalloc-d-memory.patch
diff --git a/queue-4.4/xen-xenbus-fix-granting-of-vmalloc-d-memory.patch b/queue-4.4/xen-xenbus-fix-granting-of-vmalloc-d-memory.patch
new file mode 100644
index 0000000..0490279
--- /dev/null
+++ b/queue-4.4/xen-xenbus-fix-granting-of-vmalloc-d-memory.patch
@@ -0,0 +1,47 @@
+From aca44ae7d2b14277fead2142c4d9a6e07efae2ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 11:31:52 +0200
+Subject: xen/xenbus: Fix granting of vmalloc'd memory
+
+From: Simon Leiner <simon@leiner.me>
+
+[ Upstream commit d742db70033c745e410523e00522ee0cfe2aa416 ]
+
+On some architectures (like ARM), virt_to_gfn cannot be used for
+vmalloc'd memory because of its reliance on virt_to_phys. This patch
+introduces a check for vmalloc'd addresses and obtains the PFN using
+vmalloc_to_pfn in that case.
+
+Signed-off-by: Simon Leiner <simon@leiner.me>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Link: https://lore.kernel.org/r/20200825093153.35500-1-simon@leiner.me
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xenbus/xenbus_client.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index df27cefb2fa35..266f446ba331c 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -384,8 +384,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+       int i, j;
+       for (i = 0; i < nr_pages; i++) {
+-              err = gnttab_grant_foreign_access(dev->otherend_id,
+-                                                virt_to_gfn(vaddr), 0);
++              unsigned long gfn;
++
++              if (is_vmalloc_addr(vaddr))
++                      gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
++              else
++                      gfn = virt_to_gfn(vaddr);
++
++              err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
+               if (err < 0) {
+                       xenbus_dev_fatal(dev, err,
+                                        "granting access to ring page");
+-- 
+2.25.1
+
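
The address-type dispatch that the xenbus patch above adds inline can be pictured as a tiny helper. This is a kernel-context sketch, not a stand-alone program: gfn_of() is a hypothetical name used only here, while is_vmalloc_addr(), vmalloc_to_pfn(), pfn_to_gfn() and virt_to_gfn() are the real kernel/Xen helpers already used in the hunk.

/* Kernel-context sketch; gfn_of() is a hypothetical helper name. */
static unsigned long gfn_of(void *vaddr)
{
	/*
	 * vmalloc'd pages are not in the linear mapping, so virt_to_phys()
	 * (and hence virt_to_gfn()) would return a bogus frame number on
	 * architectures like ARM; look the page up via vmalloc_to_pfn()
	 * instead and convert that PFN to a GFN.
	 */
	if (is_vmalloc_addr(vaddr))
		return pfn_to_gfn(vmalloc_to_pfn(vaddr));

	return virt_to_gfn(vaddr);
}

In the patch itself the check is done inline inside the grant loop rather than through such a helper, but the logic per page is the same.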