From: Greg Kroah-Hartman Date: Fri, 26 Jul 2019 14:42:33 +0000 (+0200) Subject: fix up a bunch of build errors all over the place X-Git-Tag: v5.2.4~4 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=66af8147ac36f55fbc089873ee50ec03358c5308;p=thirdparty%2Fkernel%2Fstable-queue.git fix up a bunch of build errors all over the place --- diff --git a/queue-4.14/perf-core-fix-exclusive-events-grouping.patch b/queue-4.14/perf-core-fix-exclusive-events-grouping.patch deleted file mode 100644 index 46a7dabde28..00000000000 --- a/queue-4.14/perf-core-fix-exclusive-events-grouping.patch +++ /dev/null @@ -1,171 +0,0 @@ -From 8a58ddae23796c733c5dfbd717538d89d036c5bd Mon Sep 17 00:00:00 2001 -From: Alexander Shishkin -Date: Mon, 1 Jul 2019 14:07:55 +0300 -Subject: perf/core: Fix exclusive events' grouping - -From: Alexander Shishkin - -commit 8a58ddae23796c733c5dfbd717538d89d036c5bd upstream. - -So far, we tried to disallow grouping exclusive events for the fear of -complications they would cause with moving between contexts. Specifically, -moving a software group to a hardware context would violate the exclusivity -rules if both groups contain matching exclusive events. - -This attempt was, however, unsuccessful: the check that we have in the -perf_event_open() syscall is both wrong (looks at wrong PMU) and -insufficient (group leader may still be exclusive), as can be illustrated -by running: - - $ perf record -e '{intel_pt//,cycles}' uname - $ perf record -e '{cycles,intel_pt//}' uname - -ultimately successfully. - -Furthermore, we are completely free to trigger the exclusivity violation -by: - - perf -e '{cycles,intel_pt//}' -e '{intel_pt//,instructions}' - -even though the helpful perf record will not allow that, the ABI will. - -The warning later in the perf_event_open() path will also not trigger, because -it's also wrong. - -Fix all this by validating the original group before moving, getting rid -of broken safeguards and placing a useful one to perf_install_in_context(). - -Signed-off-by: Alexander Shishkin -Signed-off-by: Peter Zijlstra (Intel) -Cc: -Cc: Arnaldo Carvalho de Melo -Cc: Jiri Olsa -Cc: Linus Torvalds -Cc: Peter Zijlstra -Cc: Stephane Eranian -Cc: Thomas Gleixner -Cc: Vince Weaver -Cc: mathieu.poirier@linaro.org -Cc: will.deacon@arm.com -Fixes: bed5b25ad9c8a ("perf: Add a pmu capability for "exclusive" events") -Link: https://lkml.kernel.org/r/20190701110755.24646-1-alexander.shishkin@linux.intel.com -Signed-off-by: Ingo Molnar -Signed-off-by: Greg Kroah-Hartman - ---- - include/linux/perf_event.h | 5 +++++ - kernel/events/core.c | 34 ++++++++++++++++++++++------------ - 2 files changed, 27 insertions(+), 12 deletions(-) - ---- a/include/linux/perf_event.h -+++ b/include/linux/perf_event.h -@@ -1017,6 +1017,11 @@ static inline int is_software_event(stru - return event->event_caps & PERF_EV_CAP_SOFTWARE; - } - -+static inline int is_exclusive_pmu(struct pmu *pmu) -+{ -+ return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; -+} -+ - extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; - - extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -2442,6 +2442,9 @@ unlock: - return ret; - } - -+static bool exclusive_event_installable(struct perf_event *event, -+ struct perf_event_context *ctx); -+ - /* - * Attach a performance event to a context. 
- * -@@ -2456,6 +2459,8 @@ perf_install_in_context(struct perf_even - - lockdep_assert_held(&ctx->mutex); - -+ WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); -+ - if (event->cpu != -1) - event->cpu = cpu; - -@@ -4132,7 +4137,7 @@ static int exclusive_event_init(struct p - { - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ if (!is_exclusive_pmu(pmu)) - return 0; - - /* -@@ -4163,7 +4168,7 @@ static void exclusive_event_destroy(stru - { - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ if (!is_exclusive_pmu(pmu)) - return; - - /* see comment in exclusive_event_init() */ -@@ -4183,14 +4188,15 @@ static bool exclusive_event_match(struct - return false; - } - --/* Called under the same ctx::mutex as perf_install_in_context() */ - static bool exclusive_event_installable(struct perf_event *event, - struct perf_event_context *ctx) - { - struct perf_event *iter_event; - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ lockdep_assert_held(&ctx->mutex); -+ -+ if (!is_exclusive_pmu(pmu)) - return true; - - list_for_each_entry(iter_event, &ctx->event_list, event_entry) { -@@ -10155,11 +10161,6 @@ SYSCALL_DEFINE5(perf_event_open, - goto err_alloc; - } - -- if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { -- err = -EBUSY; -- goto err_context; -- } -- - /* - * Look up the group leader (we will attach this event to it): - */ -@@ -10247,6 +10248,18 @@ SYSCALL_DEFINE5(perf_event_open, - move_group = 0; - } - } -+ -+ /* -+ * Failure to create exclusive events returns -EBUSY. -+ */ -+ err = -EBUSY; -+ if (!exclusive_event_installable(group_leader, ctx)) -+ goto err_locked; -+ -+ for_each_sibling_event(sibling, group_leader) { -+ if (!exclusive_event_installable(sibling, ctx)) -+ goto err_locked; -+ } - } else { - mutex_lock(&ctx->mutex); - } -@@ -10283,9 +10296,6 @@ SYSCALL_DEFINE5(perf_event_open, - * because we need to serialize with concurrent event creation. - */ - if (!exclusive_event_installable(event, ctx)) { -- /* exclusive and group stuff are assumed mutually exclusive */ -- WARN_ON_ONCE(move_group); -- - err = -EBUSY; - goto err_locked; - } diff --git a/queue-4.14/series b/queue-4.14/series index de8d35b4892..239b20a9557 100644 --- a/queue-4.14/series +++ b/queue-4.14/series @@ -205,7 +205,6 @@ net-bridge-stp-don-t-cache-eth-dest-pointer-before-skb-pull.patch dma-buf-balance-refcount-inbalance.patch dma-buf-discard-old-fence_excl-on-retrying-get_fences_rcu-for-realloc.patch mips-lb60-fix-pin-mappings.patch -perf-core-fix-exclusive-events-grouping.patch ext4-don-t-allow-any-modifications-to-an-immutable-file.patch ext4-enforce-the-immutable-flag-on-open-files.patch mm-add-filemap_fdatawait_range_keep_errors.patch diff --git a/queue-4.19/perf-core-fix-race-between-close-and-fork.patch b/queue-4.19/perf-core-fix-race-between-close-and-fork.patch index 0c37be8bb4f..88b38aea981 100644 --- a/queue-4.19/perf-core-fix-race-between-close-and-fork.patch +++ b/queue-4.19/perf-core-fix-race-between-close-and-fork.patch @@ -176,7 +176,7 @@ Signed-off-by: Greg Kroah-Hartman + * + * Wait for all events to drop their context reference. 
+ */ -+ wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); ++ wait_var_event(&ctx->refcount, atomic_read(&ctx->refcount) == 1); + put_ctx(ctx); /* must be last */ } } diff --git a/queue-4.9/ext4-don-t-allow-any-modifications-to-an-immutable-file.patch b/queue-4.9/ext4-don-t-allow-any-modifications-to-an-immutable-file.patch deleted file mode 100644 index a7527e133f7..00000000000 --- a/queue-4.9/ext4-don-t-allow-any-modifications-to-an-immutable-file.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 2e53840362771c73eb0a5ff71611507e64e8eecd Mon Sep 17 00:00:00 2001 -From: "Darrick J. Wong" -Date: Sun, 9 Jun 2019 21:41:41 -0400 -Subject: ext4: don't allow any modifications to an immutable file - -From: Darrick J. Wong - -commit 2e53840362771c73eb0a5ff71611507e64e8eecd upstream. - -Don't allow any modifications to a file that's marked immutable, which -means that we have to flush all the writable pages to make the readonly -and we have to check the setattr/setflags parameters more closely. - -Signed-off-by: Darrick J. Wong -Signed-off-by: Theodore Ts'o -Cc: stable@kernel.org -Signed-off-by: Greg Kroah-Hartman - ---- - fs/ext4/ioctl.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 45 insertions(+), 1 deletion(-) - ---- a/fs/ext4/ioctl.c -+++ b/fs/ext4/ioctl.c -@@ -201,6 +201,29 @@ static int uuid_is_zero(__u8 u[16]) - return 1; - } - -+/* -+ * If immutable is set and we are not clearing it, we're not allowed to change -+ * anything else in the inode. Don't error out if we're only trying to set -+ * immutable on an immutable file. -+ */ -+static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid, -+ unsigned int flags) -+{ -+ struct ext4_inode_info *ei = EXT4_I(inode); -+ unsigned int oldflags = ei->i_flags; -+ -+ if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL)) -+ return 0; -+ -+ if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL)) -+ return -EPERM; -+ if (ext4_has_feature_project(inode->i_sb) && -+ __kprojid_val(ei->i_projid) != new_projid) -+ return -EPERM; -+ -+ return 0; -+} -+ - static int ext4_ioctl_setflags(struct inode *inode, - unsigned int flags) - { -@@ -251,6 +274,20 @@ static int ext4_ioctl_setflags(struct in - } else if (oldflags & EXT4_EOFBLOCKS_FL) - ext4_truncate(inode); - -+ /* -+ * Wait for all pending directio and then flush all the dirty pages -+ * for this file. The flush marks all the pages readonly, so any -+ * subsequent attempt to write to the file (particularly mmap pages) -+ * will come through the filesystem and fail. 
-+ */ -+ if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) && -+ (flags & EXT4_IMMUTABLE_FL)) { -+ inode_dio_wait(inode); -+ err = filemap_write_and_wait(inode->i_mapping); -+ if (err) -+ goto flags_out; -+ } -+ - handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); - if (IS_ERR(handle)) { - err = PTR_ERR(handle); -@@ -462,7 +499,11 @@ long ext4_ioctl(struct file *filp, unsig - flags = ext4_mask_flags(inode->i_mode, flags); - - inode_lock(inode); -- err = ext4_ioctl_setflags(inode, flags); -+ err = ext4_ioctl_check_immutable(inode, -+ from_kprojid(&init_user_ns, ei->i_projid), -+ flags); -+ if (!err) -+ err = ext4_ioctl_setflags(inode, flags); - inode_unlock(inode); - mnt_drop_write_file(filp); - return err; -@@ -884,6 +925,9 @@ resizefs_out: - inode_lock(inode); - flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) | - (flags & EXT4_FL_XFLAG_VISIBLE); -+ err = ext4_ioctl_check_immutable(inode, fa.fsx_projid, flags); -+ if (err) -+ goto out; - err = ext4_ioctl_setflags(inode, flags); - inode_unlock(inode); - mnt_drop_write_file(filp); diff --git a/queue-4.9/perf-core-fix-exclusive-events-grouping.patch b/queue-4.9/perf-core-fix-exclusive-events-grouping.patch deleted file mode 100644 index 47dec77c046..00000000000 --- a/queue-4.9/perf-core-fix-exclusive-events-grouping.patch +++ /dev/null @@ -1,171 +0,0 @@ -From 8a58ddae23796c733c5dfbd717538d89d036c5bd Mon Sep 17 00:00:00 2001 -From: Alexander Shishkin -Date: Mon, 1 Jul 2019 14:07:55 +0300 -Subject: perf/core: Fix exclusive events' grouping - -From: Alexander Shishkin - -commit 8a58ddae23796c733c5dfbd717538d89d036c5bd upstream. - -So far, we tried to disallow grouping exclusive events for the fear of -complications they would cause with moving between contexts. Specifically, -moving a software group to a hardware context would violate the exclusivity -rules if both groups contain matching exclusive events. - -This attempt was, however, unsuccessful: the check that we have in the -perf_event_open() syscall is both wrong (looks at wrong PMU) and -insufficient (group leader may still be exclusive), as can be illustrated -by running: - - $ perf record -e '{intel_pt//,cycles}' uname - $ perf record -e '{cycles,intel_pt//}' uname - -ultimately successfully. - -Furthermore, we are completely free to trigger the exclusivity violation -by: - - perf -e '{cycles,intel_pt//}' -e '{intel_pt//,instructions}' - -even though the helpful perf record will not allow that, the ABI will. - -The warning later in the perf_event_open() path will also not trigger, because -it's also wrong. - -Fix all this by validating the original group before moving, getting rid -of broken safeguards and placing a useful one to perf_install_in_context(). 
- -Signed-off-by: Alexander Shishkin -Signed-off-by: Peter Zijlstra (Intel) -Cc: -Cc: Arnaldo Carvalho de Melo -Cc: Jiri Olsa -Cc: Linus Torvalds -Cc: Peter Zijlstra -Cc: Stephane Eranian -Cc: Thomas Gleixner -Cc: Vince Weaver -Cc: mathieu.poirier@linaro.org -Cc: will.deacon@arm.com -Fixes: bed5b25ad9c8a ("perf: Add a pmu capability for "exclusive" events") -Link: https://lkml.kernel.org/r/20190701110755.24646-1-alexander.shishkin@linux.intel.com -Signed-off-by: Ingo Molnar -Signed-off-by: Greg Kroah-Hartman - ---- - include/linux/perf_event.h | 5 +++++ - kernel/events/core.c | 34 ++++++++++++++++++++++------------ - 2 files changed, 27 insertions(+), 12 deletions(-) - ---- a/include/linux/perf_event.h -+++ b/include/linux/perf_event.h -@@ -1016,6 +1016,11 @@ static inline int is_software_event(stru - return event->event_caps & PERF_EV_CAP_SOFTWARE; - } - -+static inline int is_exclusive_pmu(struct pmu *pmu) -+{ -+ return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; -+} -+ - extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; - - extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -2324,6 +2324,9 @@ unlock: - return ret; - } - -+static bool exclusive_event_installable(struct perf_event *event, -+ struct perf_event_context *ctx); -+ - /* - * Attach a performance event to a context. - * -@@ -2338,6 +2341,8 @@ perf_install_in_context(struct perf_even - - lockdep_assert_held(&ctx->mutex); - -+ WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); -+ - if (event->cpu != -1) - event->cpu = cpu; - -@@ -3994,7 +3999,7 @@ static int exclusive_event_init(struct p - { - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ if (!is_exclusive_pmu(pmu)) - return 0; - - /* -@@ -4025,7 +4030,7 @@ static void exclusive_event_destroy(stru - { - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ if (!is_exclusive_pmu(pmu)) - return; - - /* see comment in exclusive_event_init() */ -@@ -4045,14 +4050,15 @@ static bool exclusive_event_match(struct - return false; - } - --/* Called under the same ctx::mutex as perf_install_in_context() */ - static bool exclusive_event_installable(struct perf_event *event, - struct perf_event_context *ctx) - { - struct perf_event *iter_event; - struct pmu *pmu = event->pmu; - -- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) -+ lockdep_assert_held(&ctx->mutex); -+ -+ if (!is_exclusive_pmu(pmu)) - return true; - - list_for_each_entry(iter_event, &ctx->event_list, event_entry) { -@@ -9833,11 +9839,6 @@ SYSCALL_DEFINE5(perf_event_open, - goto err_alloc; - } - -- if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { -- err = -EBUSY; -- goto err_context; -- } -- - /* - * Look up the group leader (we will attach this event to it): - */ -@@ -9925,6 +9926,18 @@ SYSCALL_DEFINE5(perf_event_open, - move_group = 0; - } - } -+ -+ /* -+ * Failure to create exclusive events returns -EBUSY. -+ */ -+ err = -EBUSY; -+ if (!exclusive_event_installable(group_leader, ctx)) -+ goto err_locked; -+ -+ for_each_sibling_event(sibling, group_leader) { -+ if (!exclusive_event_installable(sibling, ctx)) -+ goto err_locked; -+ } - } else { - mutex_lock(&ctx->mutex); - } -@@ -9944,9 +9957,6 @@ SYSCALL_DEFINE5(perf_event_open, - * because we need to serialize with concurrent event creation. 
- */ - if (!exclusive_event_installable(event, ctx)) { -- /* exclusive and group stuff are assumed mutually exclusive */ -- WARN_ON_ONCE(move_group); -- - err = -EBUSY; - goto err_locked; - } diff --git a/queue-4.9/series b/queue-4.9/series index a2e60ad0245..288373029a2 100644 --- a/queue-4.9/series +++ b/queue-4.9/series @@ -125,6 +125,4 @@ dm-bufio-fix-deadlock-with-loop-device.patch compiler.h-kasan-avoid-duplicating-__read_once_size_.patch compiler.h-add-read_word_at_a_time-function.patch lib-strscpy-shut-up-kasan-false-positives-in-strscpy.patch -perf-core-fix-exclusive-events-grouping.patch -ext4-don-t-allow-any-modifications-to-an-immutable-file.patch ext4-allow-directory-holes.patch
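
The perf/core commit message quoted above points out that, even though perf record refuses such groupings, the raw perf_event_open() ABI accepts them, and that the queued fix validates the group leader and its siblings and fails the syscall with -EBUSY instead of letting a conflicting group be installed. The following is a minimal, hypothetical user-space sketch (not part of any queued patch) of that grouping ABI: a sibling event is attached to a group leader through the group_fd argument. Both events here are ordinary hardware counters so the program stays self-contained; the exclusive-PMU conflicts the patch guards against involve events from an exclusive PMU such as intel_pt, whose dynamic attr.type would have to be read from /sys/bus/event_source/devices/intel_pt/type.

  /*
   * Hypothetical illustration only: create a two-event group via
   * perf_event_open().  The second call passes the leader's fd as
   * group_fd, which is the grouping path the quoted patch validates
   * for exclusive-PMU conflicts (failing with -EBUSY).
   */
  #define _GNU_SOURCE
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <string.h>
  #include <stdio.h>

  static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                 int cpu, int group_fd, unsigned long flags)
  {
          return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int leader, sibling;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.disabled = 1;
          attr.exclude_kernel = 1;

          /* Group leader: measure the calling task on any CPU. */
          leader = sys_perf_event_open(&attr, 0, -1, -1, 0);
          if (leader < 0) {
                  perror("perf_event_open (leader)");
                  return 1;
          }

          /* Sibling: passing the leader's fd as group_fd puts this
           * event in the same group. */
          attr.config = PERF_COUNT_HW_INSTRUCTIONS;
          sibling = sys_perf_event_open(&attr, 0, -1, leader, 0);
          if (sibling < 0)
                  perror("perf_event_open (sibling)");

          if (sibling >= 0)
                  close(sibling);
          close(leader);
          return 0;
  }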