From: Greg Kroah-Hartman
Date: Fri, 23 Oct 2015 17:41:53 +0000 (-0700)
Subject: 3.14-stable patches
X-Git-Tag: v3.14.56~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=532053b204eaec2b18370020834fbc9f1e2a8113;p=thirdparty%2Fkernel%2Fstable-queue.git

3.14-stable patches

added patches:
	rbd-fix-double-free-on-rbd_dev-header_name.patch
	sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
	sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch
---

diff --git a/queue-3.14/rbd-fix-double-free-on-rbd_dev-header_name.patch b/queue-3.14/rbd-fix-double-free-on-rbd_dev-header_name.patch
new file mode 100644
index 00000000000..379a51aa392
--- /dev/null
+++ b/queue-3.14/rbd-fix-double-free-on-rbd_dev-header_name.patch
@@ -0,0 +1,35 @@
+From 3ebe138ac642a195c7f2efdb918f464734421fd6 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov
+Date: Mon, 31 Aug 2015 15:21:39 +0300
+Subject: rbd: fix double free on rbd_dev->header_name
+
+From: Ilya Dryomov
+
+commit 3ebe138ac642a195c7f2efdb918f464734421fd6 upstream.
+
+If rbd_dev_image_probe() in rbd_dev_probe_parent() fails, header_name
+is freed twice: once in rbd_dev_probe_parent() and then in its caller
+rbd_dev_image_probe() (rbd_dev_image_probe() is called recursively to
+handle parent images).
+
+rbd_dev_probe_parent() is responsible for probing the parent, so it
+shouldn't muck with the clone's fields.
+
+Signed-off-by: Ilya Dryomov
+Reviewed-by: Alex Elder
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/block/rbd.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -4851,7 +4851,6 @@ static int rbd_dev_probe_parent(struct r
+ out_err:
+ 	if (parent) {
+ 		rbd_dev_unparent(rbd_dev);
+-		kfree(rbd_dev->header_name);
+ 		rbd_dev_destroy(parent);
+ 	} else {
+ 		rbd_put_client(rbdc);
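To make the ownership point in the changelog concrete, here is a minimal,
hypothetical user-space sketch of the same double-free pattern, with plain
malloc()/free() standing in for kfree() and all names invented (this is not
the actual rbd code). A helper frees a field it does not own on its error
path; the owning caller then frees it again. Deleting the helper's free, as
the patch does, leaves exactly one free by the owner:

    #include <stdlib.h>
    #include <string.h>

    struct dev {
        char *header_name;  /* owned by image_probe(), not the helper */
    };

    /* Stand-in for rbd_dev_probe_parent(): fails and takes its error path. */
    static int probe_parent(struct dev *d)
    {
        /*
         * BUG (before the patch): the helper also did
         *     free(d->header_name);
         * here, freeing a field it does not own.
         */
        return -1;
    }

    /* Stand-in for rbd_dev_image_probe(): allocates and owns header_name. */
    static int image_probe(struct dev *d)
    {
        d->header_name = strdup("example_header");
        if (probe_parent(d)) {
            free(d->header_name);  /* the single legitimate free */
            d->header_name = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };

        return image_probe(&d) ? 1 : 0;  /* header_name freed exactly once */
    }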
diff --git a/queue-3.14/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch b/queue-3.14/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
new file mode 100644
index 00000000000..2364f626309
--- /dev/null
+++ b/queue-3.14/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
@@ -0,0 +1,175 @@
+From fe32d3cd5e8eb0f82e459763374aa80797023403 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov
+Date: Wed, 15 Jul 2015 12:52:04 +0300
+Subject: sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
+
+From: Konstantin Khlebnikov
+
+commit fe32d3cd5e8eb0f82e459763374aa80797023403 upstream.
+
+These functions check should_resched() before unlocking a spinlock or
+re-enabling bottom halves, where preempt_count is always non-zero, so
+should_resched() always returns false there. cond_resched_lock() worked
+only when spin_needbreak was set.
+
+This patch adds a "preempt_offset" argument to should_resched(), with
+preempt_count offset constants for it:
+
+   PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
+   PREEMPT_LOCK_OFFSET     - offset after spin_lock()
+   SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
+   SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()
+
+Signed-off-by: Konstantin Khlebnikov
+Signed-off-by: Peter Zijlstra (Intel)
+Cc: Alexander Graf
+Cc: Boris Ostrovsky
+Cc: David Vrabel
+Cc: Linus Torvalds
+Cc: Mike Galbraith
+Cc: Paul Mackerras
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
+Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
+Signed-off-by: Ingo Molnar
+Signed-off-by: Mike Galbraith
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/preempt.h |    4 ++--
+ include/asm-generic/preempt.h  |    5 +++--
+ include/linux/preempt.h        |    5 +++--
+ include/linux/preempt_mask.h   |   14 +++++++++++---
+ include/linux/sched.h          |    6 ------
+ kernel/sched/core.c            |    6 +++---
+ 6 files changed, 22 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -105,9 +105,9 @@ static __always_inline bool __preempt_co
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-	return unlikely(!__this_cpu_read_4(__preempt_count));
++	return unlikely(__this_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -74,9 +74,10 @@ static __always_inline bool __preempt_co
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-	return unlikely(!preempt_count() && tif_need_resched());
++	return unlikely(preempt_count() == preempt_offset &&
++			tif_need_resched());
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -22,7 +22,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++	({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val)	__preempt_count_add(val)
+ #define preempt_count_sub(val)	__preempt_count_sub(val)
+@@ -61,7 +62,7 @@ do { \
+ 
+ #define preempt_check_resched() \
+ do { \
+-	if (should_resched()) \
++	if (should_resched(0)) \
+ 		__preempt_schedule(); \
+ } while (0)
+ 
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -71,13 +71,21 @@
+  */
+ #define in_nmi()	(preempt_count() & NMI_MASK)
+ 
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_DISABLE_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
+ #else
+-# define PREEMPT_DISABLE_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET	0
+ #endif
+ 
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
++
++/*
+  * The preempt_count offset needed for things like:
+  *
+  *  spin_lock_bh()
+@@ -90,7 +98,7 @@
+  *
+  * Work as expected.
+  */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+ 
+ /*
+  * Are we running in atomic context? WARNING: this macro cannot
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2647,12 +2647,6 @@ extern int _cond_resched(void);
+ 
+ extern int __cond_resched_lock(spinlock_t *lock);
+ 
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET	0
+-#endif
+-
+ #define cond_resched_lock(lock) ({				\
+ 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+ 	__cond_resched_lock(lock);				\
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4113,7 +4113,7 @@ static void __cond_resched(void)
+ 
+ int __sched _cond_resched(void)
+ {
+-	if (should_resched()) {
++	if (should_resched(0)) {
+ 		__cond_resched();
+ 		return 1;
+ 	}
+@@ -4131,7 +4131,7 @@ EXPORT_SYMBOL(_cond_resched);
+  */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+-	int resched = should_resched();
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ 	int ret = 0;
+ 
+ 	lockdep_assert_held(lock);
+@@ -4153,7 +4153,7 @@ int __sched __cond_resched_softirq(void)
+ {
+ 	BUG_ON(!in_softirq());
+ 
+-	if (should_resched()) {
++	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ 		local_bh_enable();
+ 		__cond_resched();
+ 		local_bh_disable();
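As a rough model of what the new argument buys, here is a small,
self-contained user-space sketch (plain globals standing in for the per-CPU
preempt_count and for TIF_NEED_RESCHED; constants and behaviour simplified,
not the kernel implementation). The old check compared the count against
zero, which can never match while a held lock keeps it at
PREEMPT_LOCK_OFFSET; the new check compares against the offset the caller
expects:

    #include <stdbool.h>
    #include <stdio.h>

    #define PREEMPT_OFFSET      1
    #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET

    static int preempt_count;           /* per-CPU in the real kernel */
    static bool need_resched = true;    /* pretend TIF_NEED_RESCHED is set */

    /* New form: true when the count is exactly the expected offset. */
    static bool should_resched(int preempt_offset)
    {
        return preempt_count == preempt_offset && need_resched;
    }

    int main(void)
    {
        preempt_count += PREEMPT_LOCK_OFFSET;   /* as after spin_lock() */

        /* Old behaviour, should_resched(void): compared against 0,
         * so it could never fire with a lock held. Prints 0. */
        printf("old-style check: %d\n", should_resched(0));

        /* New behaviour: pass the offset for the locks we hold. Prints 1. */
        printf("with PREEMPT_LOCK_OFFSET: %d\n",
               should_resched(PREEMPT_LOCK_OFFSET));
        return 0;
    }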
diff --git a/queue-3.14/sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch b/queue-3.14/sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch
new file mode 100644
index 00000000000..1dce62bbc94
--- /dev/null
+++ b/queue-3.14/sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch
@@ -0,0 +1,60 @@
+From 90b62b5129d5cb50f62f40e684de7a1961e57197 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker
+Date: Tue, 12 May 2015 16:41:48 +0200
+Subject: sched/preempt: Rename PREEMPT_CHECK_OFFSET to PREEMPT_DISABLE_OFFSET
+
+From: Frederic Weisbecker
+
+commit 90b62b5129d5cb50f62f40e684de7a1961e57197 upstream.
+
+"CHECK" suggests it's only used as a comparison mask. But now it's used
+further as a config-conditional preempt disabler offset. Let's
+disambiguate this name.
+
+Signed-off-by: Frederic Weisbecker
+Signed-off-by: Peter Zijlstra (Intel)
+Cc: Linus Torvalds
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Link: http://lkml.kernel.org/r/1431441711-29753-4-git-send-email-fweisbec@gmail.com
+Signed-off-by: Ingo Molnar
+Signed-off-by: Mike Galbraith
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/preempt_mask.h |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -72,9 +72,9 @@
+ #define in_nmi()	(preempt_count() & NMI_MASK)
+ 
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_CHECK_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET 1
+ #else
+-# define PREEMPT_CHECK_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+ 
+ /*
+@@ -90,7 +90,7 @@
+  *
+  * Work as expected.
+  */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+ 
+ /*
+  * Are we running in atomic context? WARNING: this macro cannot
+@@ -106,7 +106,7 @@
+  * (used by the scheduler, *after* releasing the kernel lock)
+  */
+ #define in_atomic_preempt_off() \
+-		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
++		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+ 
+ #ifdef CONFIG_PREEMPT_COUNT
+ # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
diff --git a/queue-3.14/series b/queue-3.14/series
index 1f61661d067..281b0595176 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -20,3 +20,6 @@ workqueue-make-sure-delayed-work-run-in-local-cpu.patch
 drm-nouveau-fbcon-take-runpm-reference-when-userspace-has-an-open-fd.patch
 drm-radeon-add-pm-sysfs-files-late.patch
 dm-thin-fix-missing-pool-reference-count-decrement-in-pool_ctr-error-path.patch
+rbd-fix-double-free-on-rbd_dev-header_name.patch
+sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch
+sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
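For context, the caller-side pattern the cond_resched_lock() fix matters for
is a long traversal under a spinlock with periodic reschedule points. Below
is a hypothetical user-space model of that pattern (a pthread mutex and
sched_yield() standing in for the kernel spinlock and __cond_resched(); the
spin_needbreak() side of the real __cond_resched_lock() is omitted). It is a
sketch of the idea, not kernel code:

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PREEMPT_LOCK_OFFSET 1

    static int preempt_count;           /* per-CPU in the real kernel */
    static bool need_resched = true;    /* pretend TIF_NEED_RESCHED is set */

    static bool should_resched(int preempt_offset)
    {
        return preempt_count == preempt_offset && need_resched;
    }

    /* Model of __cond_resched_lock() after the fix. */
    static int cond_resched_lock(pthread_mutex_t *lock)
    {
        /* The fix: compare against PREEMPT_LOCK_OFFSET, not 0. */
        if (should_resched(PREEMPT_LOCK_OFFSET)) {
            pthread_mutex_unlock(lock);  /* drop the lock ... */
            sched_yield();               /* ... let something else run ... */
            need_resched = false;
            pthread_mutex_lock(lock);    /* ... and re-take it */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&lock);
        preempt_count = PREEMPT_LOCK_OFFSET;    /* as after spin_lock() */

        for (int i = 0; i < 3; i++)
            if (cond_resched_lock(&lock))
                printf("rescheduled at iteration %d\n", i);

        preempt_count = 0;
        pthread_mutex_unlock(&lock);
        return 0;
    }

Before the fix, the should_resched(0) check inside such a helper could never
be true while the lock was held, so loops like this silently lost their
reschedule points; that is the regression the Fixes: tag above points at.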