From 7d0228a900fe9c22d34a5fce0b726a9a761a4ade Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 7 Mar 2019 13:25:37 +0100
Subject: [PATCH] 4.19-stable patches

added patches:
	cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
	staging-erofs-fix-mis-acted-tail-merging-behavior.patch
---
 ...ribute-instead-of-struct-global_attr.patch | 184 ++++++++++++++++++
 ...-fix-mis-acted-tail-merging-behavior.patch | 175 +++++++++++++++++
 2 files changed, 359 insertions(+)
 create mode 100644 queue-4.19/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
 create mode 100644 queue-4.19/staging-erofs-fix-mis-acted-tail-merging-behavior.patch

diff --git a/queue-4.19/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch b/queue-4.19/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
new file mode 100644
index 00000000000..a2160505c5c
--- /dev/null
+++ b/queue-4.19/cpufreq-use-struct-kobj_attribute-instead-of-struct-global_attr.patch
@@ -0,0 +1,184 @@
+From 625c85a62cb7d3c79f6e16de3cfa972033658250 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar
+Date: Fri, 25 Jan 2019 12:53:07 +0530
+Subject: cpufreq: Use struct kobj_attribute instead of struct global_attr
+
+From: Viresh Kumar
+
+commit 625c85a62cb7d3c79f6e16de3cfa972033658250 upstream.
+
+The cpufreq_global_kobject is created using the kobject_create_and_add()
+helper, which assigns the kobj_type as dynamic_kobj_ktype and sets the
+show/store routines to kobj_attr_show() and kobj_attr_store().
+
+These routines pass struct kobj_attribute as an argument to the
+show/store callbacks, but all the cpufreq files created using the
+cpufreq_global_kobject expect the argument to be of type struct
+attribute. Things work fine currently because no one accesses the
+"attr" argument. We may not see issues even if the argument were
+used, since struct kobj_attribute has struct attribute as its first
+element, so both would resolve to the same address.
+
+But this is logically incorrect, so use struct kobj_attribute instead
+of struct global_attr in the cpufreq core and drivers, and make the
+show/store callbacks take struct kobj_attribute as their argument.
+
+This bug was caught by CFI (clang) builds of the Android kernel, which
+flag mismatched function prototypes for such callbacks.
+
+Reported-by: Donghee Han
+Reported-by: Sangkyu Kim
+Signed-off-by: Viresh Kumar
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/cpufreq/cpufreq.c      |    6 +++---
+ drivers/cpufreq/intel_pstate.c |   23 ++++++++++++-----------
+ include/linux/cpufreq.h        |   12 ++----------
+ 3 files changed, 17 insertions(+), 24 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transit
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-			  struct attribute *attr, char *buf)
++			  struct kobj_attribute *attr, char *buf)
+ {
+ 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+ 
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+-			   const char *buf, size_t count)
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++			   const char *buf, size_t count)
+ {
+ 	int ret, enable;
+ 
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -833,7 +833,7 @@ static void intel_pstate_update_policies
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)					\
+ 	static ssize_t show_##file_name					\
+-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
++	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
+ 	{								\
+ 		return sprintf(buf, "%u\n", global.object);		\
+ 	}
+@@ -842,7 +842,7 @@ static ssize_t intel_pstate_show_status(
+ static int intel_pstate_update_status(const char *buf, size_t size);
+ 
+ static ssize_t show_status(struct kobject *kobj,
+-			   struct attribute *attr, char *buf)
++			   struct kobj_attribute *attr, char *buf)
+ {
+ 	ssize_t ret;
+ 
+@@ -853,7 +853,7 @@ static ssize_t show_status(struct kobjec
+ 	return ret;
+ }
+ 
+-static ssize_t store_status(struct kobject *a, struct attribute *b,
++static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ 			    const char *buf, size_t count)
+ {
+ 	char *p = memchr(buf, '\n', count);
+@@ -867,7 +867,7 @@ static ssize_t store_status(struct kobje
+ }
+ 
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-			      struct attribute *attr, char *buf)
++			      struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpudata *cpu;
+ 	int total, no_turbo, turbo_pct;
+@@ -893,7 +893,7 @@ static ssize_t show_turbo_pct(struct kob
+ }
+ 
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-				struct attribute *attr, char *buf)
++				struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpudata *cpu;
+ 	int total;
+@@ -914,7 +914,7 @@ static ssize_t show_num_pstates(struct k
+ }
+ 
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-			     struct attribute *attr, char *buf)
++			     struct kobj_attribute *attr, char *buf)
+ {
+ 	ssize_t ret;
+ 
+@@ -936,7 +936,7 @@ static ssize_t show_no_turbo(struct kobj
+ 	return ret;
+ }
+ 
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+ 			      const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -983,7 +983,7 @@ static ssize_t store_no_turbo(struct kob
+ 	return count;
+ }
+ 
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ 				  const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -1013,7 +1013,7 @@ static ssize_t store_max_perf_pct(struct
+ 	return count;
+ }
+ 
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ 				  const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -1045,12 +1045,13 @@ static ssize_t store_min_perf_pct(struct
+ }
+ 
+ static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+-				      struct attribute *attr, char *buf)
++				      struct kobj_attribute *attr, char *buf)
+ {
+ 	return sprintf(buf, "%u\n", hwp_boost);
+ }
+ 
+-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
++static ssize_t store_hwp_dynamic_boost(struct kobject *a,
++				       struct kobj_attribute *b,
+ 				       const char *buf, size_t count)
+ {
+ 	unsigned int input;
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_
+ static struct freq_attr _name =			\
+ __ATTR(_name, 0200, NULL, store_##_name)
+ 
+-struct global_attr {
+-	struct attribute attr;
+-	ssize_t (*show)(struct kobject *kobj,
+-			struct attribute *attr, char *buf);
+-	ssize_t (*store)(struct kobject *a, struct attribute *b,
+-			 const char *c, size_t count);
+-};
+-
+ #define define_one_global_ro(_name)		\
+-static struct global_attr _name =		\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define define_one_global_rw(_name)		\
+-static struct global_attr _name =		\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+ 
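Why the old prototypes appeared to work is worth spelling out. Below is a
minimal user-space sketch of the first-member aliasing described in the
changelog above; the struct layouts are simplified stand-ins for the kernel
definitions, not the real ones:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

struct attribute {
	const char *name;
	unsigned short mode;
};

/* like the kernel's struct kobj_attribute: struct attribute comes first */
struct kobj_attribute {
	struct attribute attr;
	ssize_t (*show)(void *kobj, struct kobj_attribute *attr, char *buf);
};

int main(void)
{
	struct kobj_attribute kattr = { .attr = { "boost", 0644 } };

	/* first member sits at offset 0: both pointers hold one address */
	assert((void *)&kattr == (void *)&kattr.attr);
	printf("offsetof(attr) = %zu\n", offsetof(struct kobj_attribute, attr));

	/*
	 * So a callback wrongly declared to take 'struct attribute *'
	 * still receives a usable pointer at run time -- but its type
	 * differs from what kobj_attr_show() calls through, which is
	 * exactly the prototype mismatch that clang's CFI rejects.
	 */
	return 0;
}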
diff --git a/queue-4.19/staging-erofs-fix-mis-acted-tail-merging-behavior.patch b/queue-4.19/staging-erofs-fix-mis-acted-tail-merging-behavior.patch
new file mode 100644
index 00000000000..34cc1bed804
--- /dev/null
+++ b/queue-4.19/staging-erofs-fix-mis-acted-tail-merging-behavior.patch
@@ -0,0 +1,175 @@
+From a112152f6f3a2a88caa6f414d540bd49e406af60 Mon Sep 17 00:00:00 2001
+From: Gao Xiang
+Date: Wed, 27 Feb 2019 13:33:32 +0800
+Subject: staging: erofs: fix mis-acted TAIL merging behavior
+
+From: Gao Xiang
+
+commit a112152f6f3a2a88caa6f414d540bd49e406af60 upstream.
+
+EROFS has an optimized path called TAIL merging, which is designed
+to merge multiple reads and the corresponding decompressions into
+one if these requests read continuous pages almost at the same time.
+
+In general, it behaves as follows:
+ ________________________________________________________________
+   ... |  TAIL  .  HEAD  |  PAGE  |  PAGE  |  TAIL  .  HEAD  | ...
+ _____|_combined page A_|________|________|_combined page B_|____
+      1  ]  ->  [  2                       ]  ->  [  3
+If the above three reads are requested in the order 1-2-3, they are
+merged into one large work chain rather than three individual work
+chains, which reduces scheduling overhead and speeds up sequential
+reads.
+
+However, if Read 2 is processed slightly earlier than Read 1, the
+current code still generates two individual work chains (chains 1
+and 2) while doing in-place decompression for combined page A. If
+chain 2 then decompresses ahead of chain 1, the two chains race and
+the decompressed page can end up corrupted. This patch fixes that.
+
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Cc: # 4.19+
+Signed-off-by: Gao Xiang
+Reviewed-by: Chao Yu
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ drivers/staging/erofs/unzip_vle.c |   69 ++++++++++++++++++++++++--------------
+ 1 file changed, 44 insertions(+), 25 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -57,15 +57,30 @@ enum z_erofs_vle_work_role {
+ 	Z_EROFS_VLE_WORK_SECONDARY,
+ 	Z_EROFS_VLE_WORK_PRIMARY,
+ 	/*
+-	 * The current work has at least been linked with the following
+-	 * processed chained works, which means if the processing page
+-	 * is the tail partial page of the work, the current work can
+-	 * safely use the whole page, as illustrated below:
+-	 * +--------------+-------------------------------------------+
+-	 * |  tail page   |     head page (of the previous work)      |
+-	 * +--------------+-------------------------------------------+
+-	 *   /\  which belongs to the current work
+-	 * [  (*) this page can be used for the current work itself.  ]
++	 * The current work was the tail of an existing chain, and the
++	 * previously processed chained works have all been decided to
++	 * hook up to it. A new chain should be created for the remaining
++	 * unprocessed works; therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
++	 * the next work cannot reuse the whole page in the following scenario:
++	 *  ________________________________________________________________
++	 * |      tail (partial) page     |       head (partial) page       |
++	 * |  (belongs to the next work)  |  (belongs to the current work)  |
++	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
++	 */
++	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
++	/*
++	 * The current work has been linked with the processed chained works,
++	 * and could also be linked with the potential remaining works, which
++	 * means if the processing page is the tail partial page of the work,
++	 * the current work can safely use the whole page (since the next work
++	 * is under control) for in-place decompression, as illustrated below:
++	 *  ________________________________________________________________
++	 * |  tail (partial) page  |          head (partial) page           |
++	 * |  (of the current work)|         (of the previous work)         |
++	 * |  PRIMARY_FOLLOWED or  |                                        |
++	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
++	 *
++	 * [  (*) the above page can be used for the current work itself.  ]
+ 	 */
+ 	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+ 	Z_EROFS_VLE_WORK_MAX
+@@ -234,10 +249,10 @@ static int z_erofs_vle_work_add_page(
+ 	return ret ? 0 : -EAGAIN;
+ }
+ 
+-static inline bool try_to_claim_workgroup(
+-	struct z_erofs_vle_workgroup *grp,
+-	z_erofs_vle_owned_workgrp_t *owned_head,
+-	bool *hosted)
++static enum z_erofs_vle_work_role
++try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
++		       z_erofs_vle_owned_workgrp_t *owned_head,
++		       bool *hosted)
+ {
+ 	DBG_BUGON(*hosted == true);
+ 
+@@ -251,6 +266,9 @@ retry:
+ 
+ 		*owned_head = grp;
+ 		*hosted = true;
++		/* lucky, I am the followee :) */
++		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
++
+ 	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
+ 		/*
+ 		 * type 2, link to the end of an existing open chain,
+ 		 *
+ 		 */
+ 		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
+ 			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
+ 			goto retry;
+-
+ 		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
+-	} else
+-		return false; /* :( better luck next time */
++		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
++	}
+ 
+-	return true; /* lucky, I am the followee :) */
++	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
+ }
+ 
+ static struct z_erofs_vle_work *
+@@ -337,12 +354,8 @@ z_erofs_vle_work_lookup(struct super_blo
+ 	*hosted = false;
+ 	if (!primary)
+ 		*role = Z_EROFS_VLE_WORK_SECONDARY;
+-	/* claim the workgroup if possible */
+-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
+-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+-	else
+-		*role = Z_EROFS_VLE_WORK_PRIMARY;
+-
++	else	/* claim the workgroup if possible */
++		*role = try_to_claim_workgroup(grp, owned_head, hosted);
+ 	return work;
+ }
+ 
+@@ -419,6 +432,9 @@ static inline void __update_workgrp_llen
+ 	}
+ }
+ 
++#define builder_is_hooked(builder) \
++	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
++
+ #define builder_is_followed(builder) \
+ 	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+ 
+@@ -583,7 +599,7 @@ static int z_erofs_do_read_page(struct z
+ 	struct z_erofs_vle_work_builder *const builder = &fe->builder;
+ 	const loff_t offset = page_offset(page);
+ 
+-	bool tight = builder_is_followed(builder);
++	bool tight = builder_is_hooked(builder);
+ 	struct z_erofs_vle_work *work = builder->work;
+ 
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+@@ -646,7 +662,7 @@ repeat:
+ 	builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+ #endif
+ 
+-	tight &= builder_is_followed(builder);
++	tight &= builder_is_hooked(builder);
+ 	work = builder->work;
+ hitted:
+ 	cur = end - min_t(unsigned, offset + end - map->m_la, end);
+@@ -661,6 +677,9 @@ hitted:
+ 		(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ 			Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+ 
++	if (cur)
++		tight &= builder_is_followed(builder);
++
+ retry:
+ 	err = z_erofs_vle_work_add_page(builder, page, page_type);
+ 	/* should allocate an additional staging page for pagevec */
-- 
2.47.3
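For readers following the chain-claiming logic above, here is a simplified,
self-contained sketch of the three-way claim decision the patch introduces.
It uses ordinary C11 atomics in place of the kernel's cmpxchg(), and names
such as WORKGRP_NIL, WORKGRP_TAIL and try_to_claim() are illustrative
stand-ins, not the erofs symbols:

#include <stdatomic.h>
#include <stdio.h>

#define WORKGRP_NIL  ((void *)0)	/* closed end of a chain */
#define WORKGRP_TAIL ((void *)1)	/* open end of a chain */

enum work_role { WORK_PRIMARY, WORK_PRIMARY_HOOKED, WORK_PRIMARY_FOLLOWED };

struct workgroup { _Atomic(void *) next; };

static enum work_role try_to_claim(struct workgroup *grp, void **owned_head)
{
	void *expected;
retry:
	/* type 1: chain is closed (NIL); claim it and become the followee */
	expected = WORKGRP_NIL;
	if (atomic_compare_exchange_strong(&grp->next, &expected, *owned_head)) {
		*owned_head = grp;
		return WORK_PRIMARY_FOLLOWED;
	}
	/* type 2: chain is still open (TAIL); hook onto its end */
	if (expected == WORKGRP_TAIL) {
		if (!atomic_compare_exchange_strong(&grp->next, &expected,
						    *owned_head))
			goto retry;	/* raced; re-evaluate from scratch */
		*owned_head = WORKGRP_TAIL;
		return WORK_PRIMARY_HOOKED;
	}
	return WORK_PRIMARY;	/* chain already owned elsewhere */
}

int main(void)
{
	struct workgroup g = { .next = WORKGRP_NIL };
	void *head = WORKGRP_TAIL;	/* a fresh read request's chain head */

	printf("first claim:  %d (FOLLOWED)\n", try_to_claim(&g, &head));
	printf("second claim: %d (HOOKED)\n", try_to_claim(&g, &head));
	return 0;
}

The point mirrored from the patch: only a FOLLOWED claim leaves the next
work "under control", so only then may the tail partial page be reused for
in-place decompression; a HOOKED claim merely appends to an open chain, which
is why builder_is_hooked() alone no longer permits tight page reuse.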