From: Greg Kroah-Hartman Date: Thu, 17 May 2018 09:38:52 +0000 (+0200) Subject: 4.4-stable patches X-Git-Tag: v4.16.10~22 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7eb50402f9c98953a8c4647d5d38548e5d7434b6;p=thirdparty%2Fkernel%2Fstable-queue.git 4.4-stable patches added patches: alsa-timer-call-notifier-in-the-same-spinlock.patch arm64-add-work-around-for-arm-cortex-a55-erratum-1024718.patch arm64-introduce-mov_q-macro-to-move-a-constant-into-a-64-bit-register.patch audit-move-calcs-after-alloc-and-check-when-logging-set-loginuid.patch futex-remove-duplicated-code-and-fix-undefined-behaviour.patch futex-remove-unnecessary-warning-from-get_futex_key.patch --- diff --git a/queue-4.4/alsa-timer-call-notifier-in-the-same-spinlock.patch b/queue-4.4/alsa-timer-call-notifier-in-the-same-spinlock.patch new file mode 100644 index 00000000000..00766f80ed8 --- /dev/null +++ b/queue-4.4/alsa-timer-call-notifier-in-the-same-spinlock.patch @@ -0,0 +1,347 @@ +From f65e0d299807d8a11812845c972493c3f9a18e10 Mon Sep 17 00:00:00 2001 +From: Takashi Iwai +Date: Wed, 10 Feb 2016 12:47:03 +0100 +Subject: ALSA: timer: Call notifier in the same spinlock + +From: Takashi Iwai + +commit f65e0d299807d8a11812845c972493c3f9a18e10 upstream. + +snd_timer_notify1() is called outside the spinlock and it retakes the +lock after the unlock. This is rather racy, and it's safer to move +snd_timer_notify() call inside the main spinlock. + +The patch also contains a slight refactoring / cleanup of the code. +Now all start/stop/continue/pause look more symmetric and a bit better +readable. + +Signed-off-by: Takashi Iwai +Cc: Ben Hutchings +Signed-off-by: Greg Kroah-Hartman + +--- + sound/core/timer.c | 220 ++++++++++++++++++++++++----------------------------- + 1 file changed, 102 insertions(+), 118 deletions(-) + +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_inst + return 0; + } + +-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event); +- + /* + * close a timer instance + */ +@@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struc + static void snd_timer_notify1(struct snd_timer_instance *ti, int event) + { + struct snd_timer *timer; +- unsigned long flags; + unsigned long resolution = 0; + struct snd_timer_instance *ts; + struct timespec tstamp; +@@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd + return; + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) + return; +- spin_lock_irqsave(&timer->lock, flags); + list_for_each_entry(ts, &ti->slave_active_head, active_list) + if (ts->ccallback) + ts->ccallback(ts, event + 100, &tstamp, resolution); +- spin_unlock_irqrestore(&timer->lock, flags); + } + +-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, +- unsigned long sticks) ++/* start/continue a master timer */ ++static int snd_timer_start1(struct snd_timer_instance *timeri, ++ bool start, unsigned long ticks) + { ++ struct snd_timer *timer; ++ int result; ++ unsigned long flags; ++ ++ timer = timeri->timer; ++ if (!timer) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&timer->lock, flags); ++ if (timer->card && timer->card->shutdown) { ++ result = -ENODEV; ++ goto unlock; ++ } ++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | ++ SNDRV_TIMER_IFLG_START)) { ++ result = -EBUSY; ++ goto unlock; ++ } ++ ++ if (start) ++ timeri->ticks = timeri->cticks = ticks; ++ else if (!timeri->cticks) ++ timeri->cticks = 1; ++ timeri->pticks = 0; ++ + list_move_tail(&timeri->active_list, 
&timer->active_list_head); + if (timer->running) { + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) + goto __start_now; + timer->flags |= SNDRV_TIMER_FLG_RESCHED; + timeri->flags |= SNDRV_TIMER_IFLG_START; +- return 1; /* delayed start */ ++ result = 1; /* delayed start */ + } else { +- timer->sticks = sticks; ++ if (start) ++ timer->sticks = ticks; + timer->hw.start(timer); + __start_now: + timer->running++; + timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; +- return 0; ++ result = 0; + } ++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START : ++ SNDRV_TIMER_EVENT_CONTINUE); ++ unlock: ++ spin_unlock_irqrestore(&timer->lock, flags); ++ return result; + } + +-static int snd_timer_start_slave(struct snd_timer_instance *timeri) ++/* start/continue a slave timer */ ++static int snd_timer_start_slave(struct snd_timer_instance *timeri, ++ bool start) + { + unsigned long flags; + +@@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct + spin_lock(&timeri->timer->lock); + list_add_tail(&timeri->active_list, + &timeri->master->slave_active_head); ++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START : ++ SNDRV_TIMER_EVENT_CONTINUE); + spin_unlock(&timeri->timer->lock); + } + spin_unlock_irqrestore(&slave_active_lock, flags); + return 1; /* delayed start */ + } + +-/* +- * start the timer instance +- */ +-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) +-{ +- struct snd_timer *timer; +- int result = -EINVAL; +- unsigned long flags; +- +- if (timeri == NULL || ticks < 1) +- return -EINVAL; +- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { +- result = snd_timer_start_slave(timeri); +- if (result >= 0) +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); +- return result; +- } +- timer = timeri->timer; +- if (timer == NULL) +- return -EINVAL; +- if (timer->card && timer->card->shutdown) +- return -ENODEV; +- spin_lock_irqsave(&timer->lock, flags); +- if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | +- SNDRV_TIMER_IFLG_START)) { +- result = -EBUSY; +- goto unlock; +- } +- timeri->ticks = timeri->cticks = ticks; +- timeri->pticks = 0; +- result = snd_timer_start1(timer, timeri, ticks); +- unlock: +- spin_unlock_irqrestore(&timer->lock, flags); +- if (result >= 0) +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); +- return result; +-} +- +-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) ++/* stop/pause a master timer */ ++static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) + { + struct snd_timer *timer; ++ int result = 0; + unsigned long flags; + +- if (snd_BUG_ON(!timeri)) +- return -ENXIO; +- +- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { +- spin_lock_irqsave(&slave_active_lock, flags); +- if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { +- spin_unlock_irqrestore(&slave_active_lock, flags); +- return -EBUSY; +- } +- if (timeri->timer) +- spin_lock(&timeri->timer->lock); +- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; +- list_del_init(&timeri->ack_list); +- list_del_init(&timeri->active_list); +- if (timeri->timer) +- spin_unlock(&timeri->timer->lock); +- spin_unlock_irqrestore(&slave_active_lock, flags); +- goto __end; +- } + timer = timeri->timer; + if (!timer) + return -EINVAL; + spin_lock_irqsave(&timer->lock, flags); + if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | + SNDRV_TIMER_IFLG_START))) { +- spin_unlock_irqrestore(&timer->lock, flags); +- return -EBUSY; ++ result = -EBUSY; ++ goto unlock; + } + list_del_init(&timeri->ack_list); + list_del_init(&timeri->active_list); +- if (timer->card && 
timer->card->shutdown) { +- spin_unlock_irqrestore(&timer->lock, flags); +- return 0; ++ if (timer->card && timer->card->shutdown) ++ goto unlock; ++ if (stop) { ++ timeri->cticks = timeri->ticks; ++ timeri->pticks = 0; + } + if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && + !(--timer->running)) { +@@ -569,35 +547,60 @@ static int _snd_timer_stop(struct snd_ti + } + } + timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); ++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : ++ SNDRV_TIMER_EVENT_CONTINUE); ++ unlock: + spin_unlock_irqrestore(&timer->lock, flags); +- __end: +- if (event != SNDRV_TIMER_EVENT_RESOLUTION) +- snd_timer_notify1(timeri, event); ++ return result; ++} ++ ++/* stop/pause a slave timer */ ++static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&slave_active_lock, flags); ++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { ++ spin_unlock_irqrestore(&slave_active_lock, flags); ++ return -EBUSY; ++ } ++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; ++ if (timeri->timer) { ++ spin_lock(&timeri->timer->lock); ++ list_del_init(&timeri->ack_list); ++ list_del_init(&timeri->active_list); ++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : ++ SNDRV_TIMER_EVENT_CONTINUE); ++ spin_unlock(&timeri->timer->lock); ++ } ++ spin_unlock_irqrestore(&slave_active_lock, flags); + return 0; + } + + /* ++ * start the timer instance ++ */ ++int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) ++{ ++ if (timeri == NULL || ticks < 1) ++ return -EINVAL; ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) ++ return snd_timer_start_slave(timeri, true); ++ else ++ return snd_timer_start1(timeri, true, ticks); ++} ++ ++/* + * stop the timer instance. + * + * do not call this from the timer callback! + */ + int snd_timer_stop(struct snd_timer_instance *timeri) + { +- struct snd_timer *timer; +- unsigned long flags; +- int err; +- +- err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP); +- if (err < 0) +- return err; +- timer = timeri->timer; +- if (!timer) +- return -EINVAL; +- spin_lock_irqsave(&timer->lock, flags); +- timeri->cticks = timeri->ticks; +- timeri->pticks = 0; +- spin_unlock_irqrestore(&timer->lock, flags); +- return 0; ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) ++ return snd_timer_stop_slave(timeri, true); ++ else ++ return snd_timer_stop1(timeri, true); + } + + /* +@@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_inst + */ + int snd_timer_continue(struct snd_timer_instance *timeri) + { +- struct snd_timer *timer; +- int result = -EINVAL; +- unsigned long flags; +- +- if (timeri == NULL) +- return result; + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) +- return snd_timer_start_slave(timeri); +- timer = timeri->timer; +- if (! 
timer) +- return -EINVAL; +- if (timer->card && timer->card->shutdown) +- return -ENODEV; +- spin_lock_irqsave(&timer->lock, flags); +- if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { +- result = -EBUSY; +- goto unlock; +- } +- if (!timeri->cticks) +- timeri->cticks = 1; +- timeri->pticks = 0; +- result = snd_timer_start1(timer, timeri, timer->sticks); +- unlock: +- spin_unlock_irqrestore(&timer->lock, flags); +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); +- return result; ++ return snd_timer_start_slave(timeri, false); ++ else ++ return snd_timer_start1(timeri, false, 0); + } + + /* +@@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_ + */ + int snd_timer_pause(struct snd_timer_instance * timeri) + { +- return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE); ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) ++ return snd_timer_stop_slave(timeri, false); ++ else ++ return snd_timer_stop1(timeri, false); + } + + /* diff --git a/queue-4.4/arm64-add-work-around-for-arm-cortex-a55-erratum-1024718.patch b/queue-4.4/arm64-add-work-around-for-arm-cortex-a55-erratum-1024718.patch new file mode 100644 index 00000000000..7fd79b927e6 --- /dev/null +++ b/queue-4.4/arm64-add-work-around-for-arm-cortex-a55-erratum-1024718.patch @@ -0,0 +1,156 @@ +From ece1397cbc89c51914fae1aec729539cfd8bd62b Mon Sep 17 00:00:00 2001 +From: Suzuki K Poulose +Date: Mon, 26 Mar 2018 15:12:49 +0100 +Subject: arm64: Add work around for Arm Cortex-A55 Erratum 1024718 + +From: Suzuki K Poulose + +commit ece1397cbc89c51914fae1aec729539cfd8bd62b upstream. + +Some variants of the Arm Cortex-55 cores (r0p0, r0p1, r1p0) suffer +from an erratum 1024718, which causes incorrect updates when DBM/AP +bits in a page table entry is modified without a break-before-make +sequence. The work around is to skip enabling the hardware DBM feature +on the affected cores. The hardware Access Flag management features +is not affected. There are some other cores suffering from this +errata, which could be added to the midr_list to trigger the work +around. + +Cc: Catalin Marinas +Cc: ckadabi@codeaurora.org +Reviewed-by: Dave Martin +Signed-off-by: Suzuki K Poulose +Signed-off-by: Will Deacon +Signed-off-by: Suzuki K Poulose +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/Kconfig | 14 ++++++++++++ + arch/arm64/include/asm/assembler.h | 40 +++++++++++++++++++++++++++++++++++++ + arch/arm64/include/asm/cputype.h | 11 ++++++++++ + arch/arm64/mm/proc.S | 5 ++++ + 4 files changed, 70 insertions(+) + +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -375,6 +375,20 @@ config ARM64_ERRATUM_843419 + + If unsure, say Y. + ++config ARM64_ERRATUM_1024718 ++ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" ++ default y ++ help ++ This option adds work around for Arm Cortex-A55 Erratum 1024718. ++ ++ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect ++ update of the hardware dirty bit when the DBM/AP bits are updated ++ without a break-before-make. The work around is to disable the usage ++ of hardware DBM locally on the affected cores. CPUs not affected by ++ erratum will continue to use the feature. ++ ++ If unsure, say Y. 
++ + config CAVIUM_ERRATUM_22375 + bool "Cavium erratum 22375, 24313" + default y +--- a/arch/arm64/include/asm/assembler.h ++++ b/arch/arm64/include/asm/assembler.h +@@ -23,6 +23,7 @@ + #ifndef __ASM_ASSEMBLER_H + #define __ASM_ASSEMBLER_H + ++#include + #include + #include + +@@ -224,4 +225,43 @@ lr .req x30 // link register + movk \reg, :abs_g0_nc:\val + .endm + ++/* ++ * Check the MIDR_EL1 of the current CPU for a given model and a range of ++ * variant/revision. See asm/cputype.h for the macros used below. ++ * ++ * model: MIDR_CPU_PART of CPU ++ * rv_min: Minimum of MIDR_CPU_VAR_REV() ++ * rv_max: Maximum of MIDR_CPU_VAR_REV() ++ * res: Result register. ++ * tmp1, tmp2, tmp3: Temporary registers ++ * ++ * Corrupts: res, tmp1, tmp2, tmp3 ++ * Returns: 0, if the CPU id doesn't match. Non-zero otherwise ++ */ ++ .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3 ++ mrs \res, midr_el1 ++ mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK) ++ mov_q \tmp2, MIDR_CPU_PART_MASK ++ and \tmp3, \res, \tmp2 // Extract model ++ and \tmp1, \res, \tmp1 // rev & variant ++ mov_q \tmp2, \model ++ cmp \tmp3, \tmp2 ++ cset \res, eq ++ cbz \res, .Ldone\@ // Model matches ? ++ ++ .if (\rv_min != 0) // Skip min check if rv_min == 0 ++ mov_q \tmp3, \rv_min ++ cmp \tmp1, \tmp3 ++ cset \res, ge ++ .endif // \rv_min != 0 ++ /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */ ++ .if ((\rv_min != \rv_max) || \rv_min == 0) ++ mov_q \tmp2, \rv_max ++ cmp \tmp1, \tmp2 ++ cset \tmp2, le ++ and \res, \res, \tmp2 ++ .endif ++.Ldone\@: ++ .endm ++ + #endif /* __ASM_ASSEMBLER_H */ +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -57,6 +57,14 @@ + #define MIDR_IMPLEMENTOR(midr) \ + (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) + ++#define MIDR_CPU_VAR_REV(var, rev) \ ++ (((var) << MIDR_VARIANT_SHIFT) | (rev)) ++ ++#define MIDR_CPU_PART_MASK \ ++ (MIDR_IMPLEMENTOR_MASK | \ ++ MIDR_ARCHITECTURE_MASK | \ ++ MIDR_PARTNUM_MASK) ++ + #define MIDR_CPU_PART(imp, partnum) \ + (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ + (0xf << MIDR_ARCHITECTURE_SHIFT) | \ +@@ -70,11 +78,14 @@ + #define ARM_CPU_PART_FOUNDATION 0xD00 + #define ARM_CPU_PART_CORTEX_A57 0xD07 + #define ARM_CPU_PART_CORTEX_A53 0xD03 ++#define ARM_CPU_PART_CORTEX_A55 0xD05 + + #define APM_CPU_PART_POTENZA 0x000 + + #define CAVIUM_CPU_PART_THUNDERX 0x0A1 + ++#define MIDR_CORTEX_A55 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) ++ + #ifndef __ASSEMBLY__ + + /* +--- a/arch/arm64/mm/proc.S ++++ b/arch/arm64/mm/proc.S +@@ -221,6 +221,11 @@ ENTRY(__cpu_setup) + cbz x9, 2f + cmp x9, #2 + b.lt 1f ++#ifdef CONFIG_ARM64_ERRATUM_1024718 ++ /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */ ++ cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4 ++ cbnz x1, 1f ++#endif + orr x10, x10, #TCR_HD // hardware Dirty flag update + 1: orr x10, x10, #TCR_HA // hardware Access flag update + 2: diff --git a/queue-4.4/arm64-introduce-mov_q-macro-to-move-a-constant-into-a-64-bit-register.patch b/queue-4.4/arm64-introduce-mov_q-macro-to-move-a-constant-into-a-64-bit-register.patch new file mode 100644 index 00000000000..b4bb17ae2ee --- /dev/null +++ b/queue-4.4/arm64-introduce-mov_q-macro-to-move-a-constant-into-a-64-bit-register.patch @@ -0,0 +1,50 @@ +From 30b5ba5cf333cc650e474eaf2cc1ae91bc7cf89f Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel +Date: Mon, 18 Apr 2016 17:09:44 +0200 +Subject: arm64: introduce mov_q macro to move a constant into a 64-bit register + 
+From: Ard Biesheuvel + +commit 30b5ba5cf333cc650e474eaf2cc1ae91bc7cf89f upstream. + +Implement a macro mov_q that can be used to move an immediate constant +into a 64-bit register, using between 2 and 4 movz/movk instructions +(depending on the operand) + +Acked-by: Catalin Marinas +Signed-off-by: Ard Biesheuvel +Signed-off-by: Will Deacon +Signed-off-by: Suzuki K Poulose +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/include/asm/assembler.h | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +--- a/arch/arm64/include/asm/assembler.h ++++ b/arch/arm64/include/asm/assembler.h +@@ -204,4 +204,24 @@ lr .req x30 // link register + .size __pi_##x, . - x; \ + ENDPROC(x) + ++ /* ++ * mov_q - move an immediate constant into a 64-bit register using ++ * between 2 and 4 movz/movk instructions (depending on the ++ * magnitude and sign of the operand) ++ */ ++ .macro mov_q, reg, val ++ .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff) ++ movz \reg, :abs_g1_s:\val ++ .else ++ .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff) ++ movz \reg, :abs_g2_s:\val ++ .else ++ movz \reg, :abs_g3:\val ++ movk \reg, :abs_g2_nc:\val ++ .endif ++ movk \reg, :abs_g1_nc:\val ++ .endif ++ movk \reg, :abs_g0_nc:\val ++ .endm ++ + #endif /* __ASM_ASSEMBLER_H */ diff --git a/queue-4.4/audit-move-calcs-after-alloc-and-check-when-logging-set-loginuid.patch b/queue-4.4/audit-move-calcs-after-alloc-and-check-when-logging-set-loginuid.patch new file mode 100644 index 00000000000..7d689018725 --- /dev/null +++ b/queue-4.4/audit-move-calcs-after-alloc-and-check-when-logging-set-loginuid.patch @@ -0,0 +1,44 @@ +From 76a658c20efd541a62838d9ff68ce94170d7a549 Mon Sep 17 00:00:00 2001 +From: Richard Guy Briggs +Date: Tue, 28 Jun 2016 12:06:58 -0400 +Subject: audit: move calcs after alloc and check when logging set loginuid + +From: Richard Guy Briggs + +commit 76a658c20efd541a62838d9ff68ce94170d7a549 upstream. + +Move the calculations of values after the allocation in case the +allocation fails. This avoids wasting effort in the rare case that it +fails, but more importantly saves us extra logic to release the tty +ref. 
+ +Signed-off-by: Richard Guy Briggs +Signed-off-by: Paul Moore +Cc: Ben Hutchings +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/auditsc.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1981,14 +1981,15 @@ static void audit_log_set_loginuid(kuid_ + if (!audit_enabled) + return; + ++ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); ++ if (!ab) ++ return; ++ + uid = from_kuid(&init_user_ns, task_uid(current)); + oldloginuid = from_kuid(&init_user_ns, koldloginuid); + loginuid = from_kuid(&init_user_ns, kloginuid), + tty = audit_get_tty(current); + +- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); +- if (!ab) +- return; + audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); + audit_log_task_context(ab); + audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d", diff --git a/queue-4.4/futex-remove-duplicated-code-and-fix-undefined-behaviour.patch b/queue-4.4/futex-remove-duplicated-code-and-fix-undefined-behaviour.patch new file mode 100644 index 00000000000..96c8be003e5 --- /dev/null +++ b/queue-4.4/futex-remove-duplicated-code-and-fix-undefined-behaviour.patch @@ -0,0 +1,1087 @@ +From 30d6e0a4190d37740e9447e4e4815f06992dd8c3 Mon Sep 17 00:00:00 2001 +From: Jiri Slaby +Date: Thu, 24 Aug 2017 09:31:05 +0200 +Subject: futex: Remove duplicated code and fix undefined behaviour + +From: Jiri Slaby + +commit 30d6e0a4190d37740e9447e4e4815f06992dd8c3 upstream. + +There is code duplicated over all architecture's headers for +futex_atomic_op_inuser. Namely op decoding, access_ok check for uaddr, +and comparison of the result. + +Remove this duplication and leave up to the arches only the needed +assembly which is now in arch_futex_atomic_op_inuser. + +This effectively distributes the Will Deacon's arm64 fix for undefined +behaviour reported by UBSAN to all architectures. The fix was done in +commit 5f16a046f8e1 (arm64: futex: Fix undefined behaviour with +FUTEX_OP_OPARG_SHIFT usage). Look there for an example dump. + +And as suggested by Thomas, check for negative oparg too, because it was +also reported to cause undefined behaviour report. + +Note that s390 removed access_ok check in d12a29703 ("s390/uaccess: +remove pointless access_ok() checks") as access_ok there returns true. +We introduce it back to the helper for the sake of simplicity (it gets +optimized away anyway). + +Signed-off-by: Jiri Slaby +Signed-off-by: Thomas Gleixner +Acked-by: Russell King +Acked-by: Michael Ellerman (powerpc) +Acked-by: Heiko Carstens [s390] +Acked-by: Chris Metcalf [for tile] +Reviewed-by: Darren Hart (VMware) +Reviewed-by: Will Deacon [core/arm64] +Cc: linux-mips@linux-mips.org +Cc: Rich Felker +Cc: linux-ia64@vger.kernel.org +Cc: linux-sh@vger.kernel.org +Cc: peterz@infradead.org +Cc: Benjamin Herrenschmidt +Cc: Max Filippov +Cc: Paul Mackerras +Cc: sparclinux@vger.kernel.org +Cc: Jonas Bonn +Cc: linux-s390@vger.kernel.org +Cc: linux-arch@vger.kernel.org +Cc: Yoshinori Sato +Cc: linux-hexagon@vger.kernel.org +Cc: Helge Deller +Cc: "James E.J. 
Bottomley" +Cc: Catalin Marinas +Cc: Matt Turner +Cc: linux-snps-arc@lists.infradead.org +Cc: Fenghua Yu +Cc: Arnd Bergmann +Cc: linux-xtensa@linux-xtensa.org +Cc: Stefan Kristiansson +Cc: openrisc@lists.librecores.org +Cc: Ivan Kokshaysky +Cc: Stafford Horne +Cc: linux-arm-kernel@lists.infradead.org +Cc: Richard Henderson +Cc: Chris Zankel +Cc: Michal Simek +Cc: Tony Luck +Cc: linux-parisc@vger.kernel.org +Cc: Vineet Gupta +Cc: Ralf Baechle +Cc: Richard Kuo +Cc: linux-alpha@vger.kernel.org +Cc: Martin Schwidefsky +Cc: linuxppc-dev@lists.ozlabs.org +Cc: "David S. Miller" +Link: http://lkml.kernel.org/r/20170824073105.3901-1-jslaby@suse.cz +Cc: Ben Hutchings +Signed-off-by: Greg Kroah-Hartman + +--- + arch/alpha/include/asm/futex.h | 26 +++--------------- + arch/arc/include/asm/futex.h | 40 +++------------------------- + arch/arm/include/asm/futex.h | 26 ++---------------- + arch/arm64/include/asm/futex.h | 26 ++---------------- + arch/frv/include/asm/futex.h | 3 +- + arch/frv/kernel/futex.c | 27 ++----------------- + arch/hexagon/include/asm/futex.h | 38 ++------------------------- + arch/ia64/include/asm/futex.h | 25 ++---------------- + arch/microblaze/include/asm/futex.h | 38 ++------------------------- + arch/mips/include/asm/futex.h | 25 ++---------------- + arch/parisc/include/asm/futex.h | 25 ++---------------- + arch/powerpc/include/asm/futex.h | 26 +++--------------- + arch/s390/include/asm/futex.h | 23 +++------------- + arch/sh/include/asm/futex.h | 26 ++---------------- + arch/sparc/include/asm/futex_64.h | 26 +++--------------- + arch/tile/include/asm/futex.h | 40 +++------------------------- + arch/x86/include/asm/futex.h | 40 +++------------------------- + arch/xtensa/include/asm/futex.h | 27 +++---------------- + include/asm-generic/futex.h | 50 ++++++------------------------------ + kernel/futex.c | 39 ++++++++++++++++++++++++++++ + 20 files changed, 126 insertions(+), 470 deletions(-) + +--- a/arch/alpha/include/asm/futex.h ++++ b/arch/alpha/include/asm/futex.h +@@ -29,18 +29,10 @@ + : "r" (uaddr), "r"(oparg) \ + : "memory") + +-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/arc/include/asm/futex.h ++++ b/arch/arc/include/asm/futex.h +@@ -73,20 +73,11 @@ + + #endif + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = 
(encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) +- return -EFAULT; +- + #ifndef CONFIG_ARC_HAS_LLSC + preempt_disable(); /* to guarantee atomic r-m-w of futex op */ + #endif +@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser + preempt_enable(); + #endif + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: +- ret = (oldval == cmparg); +- break; +- case FUTEX_OP_CMP_NE: +- ret = (oldval != cmparg); +- break; +- case FUTEX_OP_CMP_LT: +- ret = (oldval < cmparg); +- break; +- case FUTEX_OP_CMP_GE: +- ret = (oldval >= cmparg); +- break; +- case FUTEX_OP_CMP_LE: +- ret = (oldval <= cmparg); +- break; +- case FUTEX_OP_CMP_GT: +- ret = (oldval > cmparg); +- break; +- default: +- ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/arm/include/asm/futex.h ++++ b/arch/arm/include/asm/futex.h +@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, + #endif /* !SMP */ + + static inline int +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tmp; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + #ifndef CONFIG_SMP + preempt_disable(); + #endif +@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, + preempt_enable(); + #endif + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/arm64/include/asm/futex.h ++++ b/arch/arm64/include/asm/futex.h +@@ -53,20 +53,10 @@ + : "memory") + + static inline int +-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (int)(encoded_op << 8) >> 20; +- int cmparg = (int)(encoded_op << 20) >> 20; + int oldval = 0, ret, tmp; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1U << (oparg & 0x1f); +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + pagefault_disable(); + + switch (op) { +@@ -96,17 +86,9 @@ futex_atomic_op_inuser(unsigned int enco + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/frv/include/asm/futex.h ++++ b/arch/frv/include/asm/futex.h +@@ -7,7 +7,8 @@ 
+ #include + #include + +-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); ++extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr); + + static inline int + futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +--- a/arch/frv/kernel/futex.c ++++ b/arch/frv/kernel/futex.c +@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_x + /* + * do the futex operations + */ +-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + pagefault_disable(); + + switch (op) { +@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_o + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; break; +- } +- } ++ if (!ret) ++ *oval = oldval; + + return ret; + +-} /* end futex_atomic_op_inuser() */ ++} /* end arch_futex_atomic_op_inuser() */ +--- a/arch/hexagon/include/asm/futex.h ++++ b/arch/hexagon/include/asm/futex.h +@@ -31,18 +31,9 @@ + + + static inline int +-futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) +- return -EFAULT; + + pagefault_disable(); + +@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, i + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: +- ret = (oldval == cmparg); +- break; +- case FUTEX_OP_CMP_NE: +- ret = (oldval != cmparg); +- break; +- case FUTEX_OP_CMP_LT: +- ret = (oldval < cmparg); +- break; +- case FUTEX_OP_CMP_GE: +- ret = (oldval >= cmparg); +- break; +- case FUTEX_OP_CMP_LE: +- ret = (oldval <= cmparg); +- break; +- case FUTEX_OP_CMP_GT: +- ret = (oldval > cmparg); +- break; +- default: +- ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/ia64/include/asm/futex.h ++++ b/arch/ia64/include/asm/futex.h +@@ -45,18 +45,9 @@ do { \ + } while (0) + + static inline int +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/microblaze/include/asm/futex.h ++++ b/arch/microblaze/include/asm/futex.h +@@ -29,18 +29,9 @@ + }) + + static inline int +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: +- ret = (oldval == cmparg); +- break; +- case FUTEX_OP_CMP_NE: +- ret = (oldval != cmparg); +- break; +- case FUTEX_OP_CMP_LT: +- ret = (oldval < cmparg); +- break; +- case FUTEX_OP_CMP_GE: +- ret = (oldval >= cmparg); +- break; +- case FUTEX_OP_CMP_LE: +- ret = (oldval <= cmparg); +- break; +- case FUTEX_OP_CMP_GT: +- ret = (oldval > cmparg); +- break; +- default: +- ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/mips/include/asm/futex.h ++++ b/arch/mips/include/asm/futex.h +@@ -83,18 +83,9 @@ + } + + static inline int +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/parisc/include/asm/futex.h ++++ b/arch/parisc/include/asm/futex.h +@@ -32,20 +32,11 @@ _futex_spin_unlock_irqrestore(u32 __user + } + + static inline int +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + { + unsigned long int flags; + u32 val; +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr))) +- return -EFAULT; + + pagefault_disable(); + +@@ -98,17 +89,9 @@ futex_atomic_op_inuser (int encoded_op, + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/powerpc/include/asm/futex.h ++++ b/arch/powerpc/include/asm/futex.h +@@ -31,18 +31,10 @@ + : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ + : "cr0", "memory") + +-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/s390/include/asm/futex.h ++++ b/arch/s390/include/asm/futex.h +@@ -21,17 +21,12 @@ + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ + "m" (*uaddr) : "cc"); + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, newval, ret; + + load_kernel_asce(); +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; + + pagefault_disable(); + switch (op) { +@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser + } + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/sh/include/asm/futex.h ++++ b/arch/sh/include/asm/futex.h +@@ -10,20 +10,11 @@ + /* XXX: UP variants, fix for SH-4A and SMP.. 
*/ + #include + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + pagefault_disable(); + + switch (op) { +@@ -49,17 +40,8 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; + + return ret; + } +--- a/arch/sparc/include/asm/futex_64.h ++++ b/arch/sparc/include/asm/futex_64.h +@@ -29,22 +29,14 @@ + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ + : "memory") + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tem; + +- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) +- return -EFAULT; + if (unlikely((((unsigned long) uaddr) & 0x3UL))) + return -EINVAL; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- + pagefault_disable(); + + switch (op) { +@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/tile/include/asm/futex.h ++++ b/arch/tile/include/asm/futex.h +@@ -106,12 +106,9 @@ + lock = __atomic_hashed_lock((int __force *)uaddr) + #endif + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int uninitialized_var(val), ret; + + __futex_prolog(); +@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser + /* The 32-bit futex code makes this assumption, so validate it here. 
*/ + BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int)); + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + pagefault_disable(); + switch (op) { + case FUTEX_OP_SET: +@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser + } + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: +- ret = (val == cmparg); +- break; +- case FUTEX_OP_CMP_NE: +- ret = (val != cmparg); +- break; +- case FUTEX_OP_CMP_LT: +- ret = (val < cmparg); +- break; +- case FUTEX_OP_CMP_GE: +- ret = (val >= cmparg); +- break; +- case FUTEX_OP_CMP_LE: +- ret = (val <= cmparg); +- break; +- case FUTEX_OP_CMP_GT: +- ret = (val > cmparg); +- break; +- default: +- ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = val; ++ + return ret; + } + +--- a/arch/x86/include/asm/futex.h ++++ b/arch/x86/include/asm/futex.h +@@ -41,20 +41,11 @@ + "+m" (*uaddr), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tem; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; +- + pagefault_disable(); + + switch (op) { +@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: +- ret = (oldval == cmparg); +- break; +- case FUTEX_OP_CMP_NE: +- ret = (oldval != cmparg); +- break; +- case FUTEX_OP_CMP_LT: +- ret = (oldval < cmparg); +- break; +- case FUTEX_OP_CMP_GE: +- ret = (oldval >= cmparg); +- break; +- case FUTEX_OP_CMP_LE: +- ret = (oldval <= cmparg); +- break; +- case FUTEX_OP_CMP_GT: +- ret = (oldval > cmparg); +- break; +- default: +- ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/arch/xtensa/include/asm/futex.h ++++ b/arch/xtensa/include/asm/futex.h +@@ -44,18 +44,10 @@ + : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \ + : "memory") + +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ++ u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + #if !XCHAL_HAVE_S32C1I + return -ENOSYS; +@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser + + pagefault_enable(); + +- if (ret) +- return ret; +- +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: return (oldval == cmparg); +- case FUTEX_OP_CMP_NE: return (oldval != cmparg); +- case FUTEX_OP_CMP_LT: return (oldval < cmparg); +- case FUTEX_OP_CMP_GE: return (oldval >= cmparg); +- case FUTEX_OP_CMP_LE: return (oldval <= cmparg); +- case FUTEX_OP_CMP_GT: return (oldval > cmparg); +- } ++ if (!ret) ++ *oval = oldval; + +- return -ENOSYS; ++ return ret; + } + + static inline int +--- a/include/asm-generic/futex.h ++++ b/include/asm-generic/futex.h +@@ -13,7 +13,7 @@ + */ + + /** +- * 
futex_atomic_op_inuser() - Atomic arithmetic operation with constant ++ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * argument and comparison of the previous + * futex value with another constant. + * +@@ -25,18 +25,11 @@ + * <0 - On error + */ + static inline int +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval, ret; + u32 tmp; + +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- + preempt_disable(); + pagefault_disable(); + +@@ -74,17 +67,9 @@ out_pagefault_enable: + pagefault_enable(); + preempt_enable(); + +- if (ret == 0) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (ret == 0) ++ *oval = oldval; ++ + return ret; + } + +@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, + + #else + static inline int +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) ++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) + { +- int op = (encoded_op >> 28) & 7; +- int cmp = (encoded_op >> 24) & 15; +- int oparg = (encoded_op << 8) >> 20; +- int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) +- oparg = 1 << oparg; +- +- if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) +- return -EFAULT; + + pagefault_disable(); + +@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, + + pagefault_enable(); + +- if (!ret) { +- switch (cmp) { +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; +- default: ret = -ENOSYS; +- } +- } ++ if (!ret) ++ *oval = oldval; ++ + return ret; + } + +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1453,6 +1453,45 @@ out: + return ret; + } + ++static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) ++{ ++ unsigned int op = (encoded_op & 0x70000000) >> 28; ++ unsigned int cmp = (encoded_op & 0x0f000000) >> 24; ++ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12); ++ int cmparg = sign_extend32(encoded_op & 0x00000fff, 12); ++ int oldval, ret; ++ ++ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { ++ if (oparg < 0 || oparg > 31) ++ return -EINVAL; ++ oparg = 1 << oparg; ++ } ++ ++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) ++ return -EFAULT; ++ ++ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); ++ if (ret) ++ return ret; ++ ++ switch (cmp) { ++ case FUTEX_OP_CMP_EQ: ++ return oldval == cmparg; ++ case FUTEX_OP_CMP_NE: ++ return oldval != cmparg; ++ case FUTEX_OP_CMP_LT: ++ return oldval < cmparg; ++ case FUTEX_OP_CMP_GE: ++ return oldval >= cmparg; ++ case FUTEX_OP_CMP_LE: ++ return oldval <= cmparg; ++ case FUTEX_OP_CMP_GT: ++ return oldval > cmparg; ++ default: ++ return -ENOSYS; ++ } ++} ++ + /* + * Wake up all waiters hashed on the physical page that is mapped + * to this virtual address: diff --git a/queue-4.4/futex-remove-unnecessary-warning-from-get_futex_key.patch b/queue-4.4/futex-remove-unnecessary-warning-from-get_futex_key.patch new file mode 100644 index 00000000000..2b421706aaf --- /dev/null +++ b/queue-4.4/futex-remove-unnecessary-warning-from-get_futex_key.patch @@ -0,0 +1,125 @@ +From 48fb6f4db940e92cfb16cd878cddd59ea6120d06 Mon Sep 17 00:00:00 2001 +From: Mel Gorman +Date: Wed, 9 Aug 2017 08:27:11 +0100 +Subject: futex: Remove unnecessary warning from get_futex_key + +From: Mel Gorman + +commit 48fb6f4db940e92cfb16cd878cddd59ea6120d06 upstream. + +Commit 65d8fc777f6d ("futex: Remove requirement for lock_page() in +get_futex_key()") removed an unnecessary lock_page() with the +side-effect that page->mapping needed to be treated very carefully. + +Two defensive warnings were added in case any assumption was missed and +the first warning assumed a correct application would not alter a +mapping backing a futex key. Since merging, it has not triggered for +any unexpected case but Mark Rutland reported the following bug +triggering due to the first warning. + + kernel BUG at kernel/futex.c:679! 
+ Internal error: Oops - BUG: 0 [#1] PREEMPT SMP + Modules linked in: + CPU: 0 PID: 3695 Comm: syz-executor1 Not tainted 4.13.0-rc3-00020-g307fec773ba3 #3 + Hardware name: linux,dummy-virt (DT) + task: ffff80001e271780 task.stack: ffff000010908000 + PC is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679 + LR is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679 + pc : [] lr : [] pstate: 80000145 + +The fact that it's a bug instead of a warning was due to an unrelated +arm64 problem, but the warning itself triggered because the underlying +mapping changed. + +This is an application issue but from a kernel perspective it's a +recoverable situation and the warning is unnecessary so this patch +removes the warning. The warning may potentially be triggered with the +following test program from Mark although it may be necessary to adjust +NR_FUTEX_THREADS to be a value smaller than the number of CPUs in the +system. + + #include + #include + #include + #include + #include + #include + #include + #include + + #define NR_FUTEX_THREADS 16 + pthread_t threads[NR_FUTEX_THREADS]; + + void *mem; + + #define MEM_PROT (PROT_READ | PROT_WRITE) + #define MEM_SIZE 65536 + + static int futex_wrapper(int *uaddr, int op, int val, + const struct timespec *timeout, + int *uaddr2, int val3) + { + syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3); + } + + void *poll_futex(void *unused) + { + for (;;) { + futex_wrapper(mem, FUTEX_CMP_REQUEUE_PI, 1, NULL, mem + 4, 1); + } + } + + int main(int argc, char *argv[]) + { + int i; + + mem = mmap(NULL, MEM_SIZE, MEM_PROT, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + + printf("Mapping @ %p\n", mem); + + printf("Creating futex threads...\n"); + + for (i = 0; i < NR_FUTEX_THREADS; i++) + pthread_create(&threads[i], NULL, poll_futex, NULL); + + printf("Flipping mapping...\n"); + for (;;) { + mmap(mem, MEM_SIZE, MEM_PROT, + MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS, -1, 0); + } + + return 0; + } + +Reported-and-tested-by: Mark Rutland +Signed-off-by: Mel Gorman +Acked-by: Peter Zijlstra (Intel) +Cc: stable@vger.kernel.org # 4.7+ +Signed-off-by: Linus Torvalds +Cc: Ben Hutchings +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/futex.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -666,13 +666,14 @@ again: + * this reference was taken by ihold under the page lock + * pinning the inode in place so i_lock was unnecessary. The + * only way for this check to fail is if the inode was +- * truncated in parallel so warn for now if this happens. ++ * truncated in parallel which is almost certainly an ++ * application bug. In such a case, just retry. + * + * We are not calling into get_futex_key_refs() in file-backed + * cases, therefore a successful atomic_inc return below will + * guarantee that get_futex_key() will still imply smp_mb(); (B). 
+ */ +- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { ++ if (!atomic_inc_not_zero(&inode->i_count)) { + rcu_read_unlock(); + put_page(page_head); + diff --git a/queue-4.4/series b/queue-4.4/series index 3131a27faf7..1ba29d05b8b 100644 --- a/queue-4.4/series +++ b/queue-4.4/series @@ -18,3 +18,9 @@ tcp-ignore-fast-open-on-repair-mode.patch sctp-fix-the-issue-that-the-cookie-ack-with-auth-can-t-get-processed.patch sctp-delay-the-authentication-for-the-duplicated-cookie-echo-chunk.patch net-ethernet-ti-cpsw-fix-packet-leaking-in-dual_mac-mode.patch +alsa-timer-call-notifier-in-the-same-spinlock.patch +audit-move-calcs-after-alloc-and-check-when-logging-set-loginuid.patch +arm64-introduce-mov_q-macro-to-move-a-constant-into-a-64-bit-register.patch +arm64-add-work-around-for-arm-cortex-a55-erratum-1024718.patch +futex-remove-unnecessary-warning-from-get_futex_key.patch +futex-remove-duplicated-code-and-fix-undefined-behaviour.patch
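The futex patch queued above centralizes the encoded_op decoding in a single kernel/futex.c helper and rejects shift counts outside 0..31 instead of performing an undefined shift. As an illustration only (not part of the queued patches), the sketch below packs the same fields from user space with the FUTEX_OP() macro from the UAPI <linux/futex.h> and unpacks them with the masks shown in the hunk above (0x70000000 = op, 0x0f000000 = cmp, 0x00fff000 = oparg, 0x00000fff = cmparg); the sext12() helper and the demo values are assumptions of this sketch, and it presumes a Linux system with the kernel UAPI headers installed.

/* Illustration of the FUTEX_WAKE_OP encoded_op field layout; sketch only. */
#include <linux/futex.h>
#include <stdio.h>

/* Portable sign extension of a 12-bit two's-complement field (hypothetical helper). */
static int sext12(unsigned int v)
{
	return (int)((v & 0xfff) ^ 0x800) - 0x800;
}

int main(void)
{
	/* Encode: add 1 to *uaddr2, then also wake uaddr2 waiters if its old value was > 0. */
	unsigned int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	/* Unpack with the same masks the new kernel helper uses. */
	unsigned int op     = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp    = (encoded_op & 0x0f000000) >> 24;
	int          oparg  = sext12((encoded_op & 0x00fff000) >> 12);
	int          cmparg = sext12(encoded_op & 0x00000fff);

	printf("encoded_op=%#x op=%u cmp=%u oparg=%d cmparg=%d\n",
	       encoded_op, op, cmp, oparg, cmparg);

	/* With FUTEX_OP_OPARG_SHIFT, oparg becomes a shift count; the patch
	 * rejects counts outside 0..31 rather than shifting by a negative or
	 * oversized amount, which is the undefined behaviour being fixed. */
	return 0;
}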