git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.20-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Mar 2019 12:25:53 +0000 (13:25 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Mar 2019 12:25:53 +0000 (13:25 +0100)
added patches:
cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
staging-erofs-fix-mis-acted-tail-merging-behavior.patch

queue-4.20/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch [new file with mode: 0644]
queue-4.20/staging-erofs-fix-mis-acted-tail-merging-behavior.patch [new file with mode: 0644]

diff --git a/queue-4.20/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch b/queue-4.20/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
new file mode 100644 (file)
index 0000000..499c422
--- /dev/null
@@ -0,0 +1,184 @@
+From 625c85a62cb7d3c79f6e16de3cfa972033658250 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Fri, 25 Jan 2019 12:53:07 +0530
+Subject: cpufreq: Use struct kobj_attribute instead of struct global_attr
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 625c85a62cb7d3c79f6e16de3cfa972033658250 upstream.
+
+The cpufreq_global_kobject is created using the kobject_create_and_add()
+helper, which assigns dynamic_kobj_ktype as the kobj_type and sets the
+show/store routines to kobj_attr_show() and kobj_attr_store().
+
+These routines pass a struct kobj_attribute as an argument to the
+show/store callbacks, but all the cpufreq files created using the
+cpufreq_global_kobject expect the argument to be of type struct
+attribute. Things currently work fine as no one accesses the "attr"
+argument. We may not see issues even if the argument is used, as struct
+kobj_attribute has struct attribute as its first element, so both
+pointers resolve to the same address.
+
+But this is logically incorrect, so the cpufreq core and drivers
+should use struct kobj_attribute instead of struct global_attr, and
+the show/store callbacks should take a struct kobj_attribute pointer
+as their argument.
+
+This bug was caught by CFI clang builds of the Android kernel, which
+catch mismatched function prototypes for such callbacks.
+
+Reported-by: Donghee Han <dh.han@samsung.com>
+Reported-by: Sangkyu Kim <skwith.kim@samsung.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c      |    6 +++---
+ drivers/cpufreq/intel_pstate.c |   23 ++++++++++++-----------
+ include/linux/cpufreq.h        |   12 ++----------
+ 3 files changed, 17 insertions(+), 24 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transit
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-                               struct attribute *attr, char *buf)
++                        struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+-                                const char *buf, size_t count)
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++                         const char *buf, size_t count)
+ {
+       int ret, enable;
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -873,7 +873,7 @@ static void intel_pstate_update_policies
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)                                   \
+       static ssize_t show_##file_name                                 \
+-      (struct kobject *kobj, struct attribute *attr, char *buf)       \
++      (struct kobject *kobj, struct kobj_attribute *attr, char *buf)  \
+       {                                                               \
+               return sprintf(buf, "%u\n", global.object);             \
+       }
+@@ -882,7 +882,7 @@ static ssize_t intel_pstate_show_status(
+ static int intel_pstate_update_status(const char *buf, size_t size);
+ static ssize_t show_status(struct kobject *kobj,
+-                         struct attribute *attr, char *buf)
++                         struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+@@ -893,7 +893,7 @@ static ssize_t show_status(struct kobjec
+       return ret;
+ }
+-static ssize_t store_status(struct kobject *a, struct attribute *b,
++static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+                           const char *buf, size_t count)
+ {
+       char *p = memchr(buf, '\n', count);
+@@ -907,7 +907,7 @@ static ssize_t store_status(struct kobje
+ }
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total, no_turbo, turbo_pct;
+@@ -933,7 +933,7 @@ static ssize_t show_turbo_pct(struct kob
+ }
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total;
+@@ -954,7 +954,7 @@ static ssize_t show_num_pstates(struct k
+ }
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-                           struct attribute *attr, char *buf)
++                           struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+@@ -976,7 +976,7 @@ static ssize_t show_no_turbo(struct kobj
+       return ret;
+ }
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+                             const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1023,7 +1023,7 @@ static ssize_t store_no_turbo(struct kob
+       return count;
+ }
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1053,7 +1053,7 @@ static ssize_t store_max_perf_pct(struct
+       return count;
+ }
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1085,12 +1085,13 @@ static ssize_t store_min_perf_pct(struct
+ }
+ static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%u\n", hwp_boost);
+ }
+-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
++static ssize_t store_hwp_dynamic_boost(struct kobject *a,
++                                     struct kobj_attribute *b,
+                                      const char *buf, size_t count)
+ {
+       unsigned int input;
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_
+ static struct freq_attr _name =                       \
+ __ATTR(_name, 0200, NULL, store_##_name)
+-struct global_attr {
+-      struct attribute attr;
+-      ssize_t (*show)(struct kobject *kobj,
+-                      struct attribute *attr, char *buf);
+-      ssize_t (*store)(struct kobject *a, struct attribute *b,
+-                       const char *c, size_t count);
+-};
+-
+ #define define_one_global_ro(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0444, show_##_name, NULL)
+ #define define_one_global_rw(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0644, show_##_name, store_##_name)
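
The changelog above turns on a layout detail: struct kobj_attribute embeds
struct attribute as its first member, so a pointer to either has the same
address. A minimal standalone C sketch of that property follows; the two
structs are simplified stand-ins, not the kernel definitions.

/*
 * Standalone sketch, not kernel code: simplified stand-ins for
 * struct attribute and struct kobj_attribute, showing that a pointer
 * to the containing struct equals a pointer to its first member.
 */
#include <stddef.h>
#include <stdio.h>

struct attribute {
	const char *name;
	unsigned short mode;
};

struct kobj_attribute {
	struct attribute attr;	/* first member: shares the struct's address */
	/* show/store function pointers omitted for brevity */
};

int main(void)
{
	struct kobj_attribute ka = {
		.attr = { .name = "boost", .mode = 0644 },
	};

	/* offsetof(..., attr) is 0, so the two pointers compare equal. */
	printf("offsetof = %zu, same address = %d\n",
	       offsetof(struct kobj_attribute, attr),
	       (void *)&ka == (void *)&ka.attr);

	/*
	 * A callback declared to take "struct attribute *" therefore
	 * dereferences valid memory when handed a "struct kobj_attribute *",
	 * but calling through a mismatched prototype is undefined behavior,
	 * and clang's CFI rejects exactly such indirect calls.
	 */
	return 0;
}

This is why the fix is purely a prototype change: behavior is unchanged for
correct callers, but the indirect-call types now line up for CFI.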
diff --git a/queue-4.20/staging-erofs-fix-mis-acted-tail-merging-behavior.patch b/queue-4.20/staging-erofs-fix-mis-acted-tail-merging-behavior.patch
new file mode 100644 (file)
index 0000000..edbc734
--- /dev/null
@@ -0,0 +1,175 @@
+From a112152f6f3a2a88caa6f414d540bd49e406af60 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 27 Feb 2019 13:33:32 +0800
+Subject: staging: erofs: fix mis-acted TAIL merging behavior
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit a112152f6f3a2a88caa6f414d540bd49e406af60 upstream.
+
+EROFS has an optimized path called TAIL merging, which is designed
+to merge multiple reads and the corresponding decompressions into
+one if these requests read contiguous pages at almost the same time.
+
+In general, it behaves as follows:
+ ________________________________________________________________
+  ... |  TAIL  .  HEAD  |  PAGE  |  PAGE  |  TAIL    . HEAD | ...
+ _____|_combined page A_|________|________|_combined page B_|____
+        1  ]  ->  [  2                          ]  ->  [ 3
+If the above three reads are requested in the order 1-2-3, a single
+large work chain is generated rather than 3 individual work chains,
+which reduces scheduling overhead and boosts sequential reads.
+
+However, if Read 2 is processed slightly earlier than Read 1, two
+individual work chains (chains 1 and 2) are still generated, yet
+in-place decompression is done for combined page A. If chain 2 then
+decompresses ahead of chain 1, the two chains race and the
+decompressed page is corrupted. This patch fixes it.
+
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle.c |   70 ++++++++++++++++++++++++--------------
+ 1 file changed, 45 insertions(+), 25 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -59,15 +59,30 @@ enum z_erofs_vle_work_role {
+       Z_EROFS_VLE_WORK_SECONDARY,
+       Z_EROFS_VLE_WORK_PRIMARY,
+       /*
+-       * The current work has at least been linked with the following
+-       * processed chained works, which means if the processing page
+-       * is the tail partial page of the work, the current work can
+-       * safely use the whole page, as illustrated below:
+-       * +--------------+-------------------------------------------+
+-       * |  tail page   |      head page (of the previous work)     |
+-       * +--------------+-------------------------------------------+
+-       *   /\  which belongs to the current work
+-       * [  (*) this page can be used for the current work itself.  ]
++       * The current work was the tail of an existing chain, and all the
++       * previously processed chained works will be hooked up to it.
++       * A new chain should be created for the remaining unprocessed works,
++       * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
++       * the next work cannot reuse the whole page in the following scenario:
++       *  ________________________________________________________________
++       * |      tail (partial) page     |       head (partial) page       |
++       * |  (belongs to the next work)  |  (belongs to the current work)  |
++       * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
++       */
++      Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
++      /*
++       * The current work has been linked with the processed chained works,
++       * and could also be linked with potential remaining works, which
++       * means if the processing page is the tail partial page of the work,
++       * the current work can safely use the whole page (since the next work
++       * is under control) for in-place decompression, as illustrated below:
++       *  ________________________________________________________________
++       * |  tail (partial) page  |          head (partial) page           |
++       * | (of the current work) |         (of the previous work)         |
++       * |  PRIMARY_FOLLOWED or  |                                        |
++       * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
++       *
++       * [  (*) the above page can be used for the current work itself.  ]
+        */
+       Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+       Z_EROFS_VLE_WORK_MAX
+@@ -236,10 +251,10 @@ static int z_erofs_vle_work_add_page(
+       return ret ? 0 : -EAGAIN;
+ }
+-static inline bool try_to_claim_workgroup(
+-      struct z_erofs_vle_workgroup *grp,
+-      z_erofs_vle_owned_workgrp_t *owned_head,
+-      bool *hosted)
++static enum z_erofs_vle_work_role
++try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
++                     z_erofs_vle_owned_workgrp_t *owned_head,
++                     bool *hosted)
+ {
+       DBG_BUGON(*hosted == true);
+@@ -253,6 +268,9 @@ retry:
+               *owned_head = grp;
+               *hosted = true;
++              /* lucky, I am the followee :) */
++              return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
++
+       } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
+               /*
+                * type 2, link to the end of a existing open chain,
+@@ -262,12 +280,11 @@ retry:
+               if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
+                       Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
+                       goto retry;
+-
+               *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
+-      } else
+-              return false;   /* :( better luck next time */
++              return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
++      }
+-      return true;    /* lucky, I am the followee :) */
++      return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
+ }
+ struct z_erofs_vle_work_finder {
+@@ -345,12 +362,9 @@ z_erofs_vle_work_lookup(const struct z_e
+       *f->hosted = false;
+       if (!primary)
+               *f->role = Z_EROFS_VLE_WORK_SECONDARY;
+-      /* claim the workgroup if possible */
+-      else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
+-              *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+-      else
+-              *f->role = Z_EROFS_VLE_WORK_PRIMARY;
+-
++      else    /* claim the workgroup if possible */
++              *f->role = try_to_claim_workgroup(grp, f->owned_head,
++                                                f->hosted);
+       return work;
+ }
+@@ -410,6 +424,9 @@ z_erofs_vle_work_register(const struct z
+       return work;
+ }
++#define builder_is_hooked(builder) \
++      ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
++
+ #define builder_is_followed(builder) \
+       ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+@@ -584,7 +601,7 @@ static int z_erofs_do_read_page(struct z
+       struct z_erofs_vle_work_builder *const builder = &fe->builder;
+       const loff_t offset = page_offset(page);
+-      bool tight = builder_is_followed(builder);
++      bool tight = builder_is_hooked(builder);
+       struct z_erofs_vle_work *work = builder->work;
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+@@ -649,7 +666,7 @@ repeat:
+               builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+ #endif
+-      tight &= builder_is_followed(builder);
++      tight &= builder_is_hooked(builder);
+       work = builder->work;
+ hitted:
+       cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+@@ -664,6 +681,9 @@ hitted:
+                       (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+                               Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
++      if (cur)
++              tight &= builder_is_followed(builder);
++
+ retry:
+       err = z_erofs_vle_work_add_page(builder, page, page_type);
+       /* should allocate an additional staging page for pagevec */
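
As a reading aid for the hunks above: after this patch,
try_to_claim_workgroup() returns one of three roles instead of a bool. Below
is a simplified userspace model of that three-way outcome, not the driver
code; it assumes GCC/clang's __atomic_compare_exchange_n() in place of the
kernel's cmpxchg() and plain pointers in place of z_erofs_vle_owned_workgrp_t.

/*
 * Simplified model of try_to_claim_workgroup() after this patch.
 * Not kernel code: __atomic_compare_exchange_n() stands in for
 * cmpxchg(), and sentinel pointers stand in for the NIL/TAIL values
 * of z_erofs_vle_owned_workgrp_t.
 */
#include <stdio.h>

#define WORKGRP_NIL	((struct workgroup *)0x0)	/* not on any chain */
#define WORKGRP_TAIL	((struct workgroup *)0x1)	/* open end of a chain */

enum work_role {
	WORK_PRIMARY,		/* chain already owned: better luck next time */
	WORK_PRIMARY_HOOKED,	/* hooked onto the tail of an existing chain */
	WORK_PRIMARY_FOLLOWED,	/* followee: the next work is under control */
};

struct workgroup {
	struct workgroup *next;
};

static enum work_role
try_to_claim(struct workgroup *grp, struct workgroup **owned_head)
{
retry:
	if (grp->next == WORKGRP_NIL) {
		/* type 1: a free workgroup; try to start a chain at it */
		struct workgroup *expected = WORKGRP_NIL;

		if (!__atomic_compare_exchange_n(&grp->next, &expected,
						 *owned_head, 0,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST))
			goto retry;
		*owned_head = grp;
		return WORK_PRIMARY_FOLLOWED;	/* lucky, I am the followee */
	} else if (grp->next == WORKGRP_TAIL) {
		/* type 2: hook onto the open end of an existing chain */
		struct workgroup *expected = WORKGRP_TAIL;

		if (!__atomic_compare_exchange_n(&grp->next, &expected,
						 *owned_head, 0,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST))
			goto retry;
		*owned_head = WORKGRP_TAIL;
		return WORK_PRIMARY_HOOKED;
	}
	return WORK_PRIMARY;	/* someone else owns the chain */
}

int main(void)
{
	struct workgroup grp = { .next = WORKGRP_NIL };
	struct workgroup *owned_head = WORKGRP_TAIL;

	/* the first claimer of a free workgroup becomes the followee */
	printf("role = %d (FOLLOWED = %d)\n",
	       try_to_claim(&grp, &owned_head), WORK_PRIMARY_FOLLOWED);
	return 0;
}

The three-way result is what the fix hinges on: a HOOKED work sits at the
junction of two chains, so z_erofs_do_read_page() only treats the shared tail
page as exclusive once builder_is_followed() holds, while the weaker
builder_is_hooked() keeps the chains linked, which prevents the in-place
decompression race described in the changelog.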