--- /dev/null
+From 97b9410643475d6557d2517c2aff9fd2221141a9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 24 Sep 2013 21:50:23 +0200
+Subject: clockevents: Sanitize ticks to nsec conversion
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 97b9410643475d6557d2517c2aff9fd2221141a9 upstream.
+
+Marc Kleine-Budde pointed out, that commit 77cc982 "clocksource: use
+clockevents_config_and_register() where possible" caused a regression
+for some of the converted subarchs.
+
+The reason is, that the clockevents core code converts the minimal
+hardware tick delta to a nanosecond value for core internal
+usage. This conversion is affected by integer math rounding loss, so
+the backwards conversion to hardware ticks will likely result in a
+value which is less than the configured hardware limitation. The
+affected subarchs used their own workaround (SIGH!) which got lost in
+the conversion.
+
+The solution for the issue at hand is simple: adding evt->mult - 1 to
+the shifted value before the integer division in the core conversion
+function takes care of it. But this only works for the case where for
+the scaled math mult/shift pair "mult <= 1 << shift" is true. For the
+case where "mult > 1 << shift" we can apply the rounding add only for
+the minimum delta value to make sure that the backward conversion is
+not less than the given hardware limit. For the upper bound we need to
+omit the rounding add, because the backwards conversion is always
+larger than the original latch value. That would violate the upper
+bound of the hardware device.
+
+Though looking closer at the details of that function reveals another
+bogosity: The upper bounds check is broken as well. Checking for a
+resulting "clc" value greater than KTIME_MAX after the conversion is
+pointless. The conversion does:
+
+ u64 clc = (latch << evt->shift) / evt->mult;
+
+So there is no sanity check for (latch << evt->shift) exceeding the
+64bit boundary. The latch argument is "unsigned long", so on a 64bit
+arch the handed in argument could easily lead to an unnoticed shift
+overflow. With the above rounding fix applied the calculation before
+the division is:
+
+ u64 clc = (latch << evt->shift) + evt->mult - 1;
+
+So we need to make sure, that neither the shift nor the rounding add
+is overflowing the u64 boundary.
+
+[ukl: move assignment to rnd after eventually changing mult, fix build
+ issue and correct comment with the right math]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: nicolas.ferre@atmel.com
+Cc: Marc Pignat <marc.pignat@hevs.ch>
+Cc: john.stultz@linaro.org
+Cc: kernel@pengutronix.de
+Cc: Ronald Wahl <ronald.wahl@raritan.com>
+Cc: LAK <linux-arm-kernel@lists.infradead.org>
+Cc: Ludovic Desroches <ludovic.desroches@atmel.com>
+Link: http://lkml.kernel.org/r/1380052223-24139-1-git-send-email-u.kleine-koenig@pengutronix.de
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/clockevents.c | 65 +++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 50 insertions(+), 15 deletions(-)
+
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -30,29 +30,64 @@ static RAW_NOTIFIER_HEAD(clockevents_cha
+ /* Protection for the above */
+ static DEFINE_RAW_SPINLOCK(clockevents_lock);
+
+-/**
+- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+- * @latch: value to convert
+- * @evt: pointer to clock event device descriptor
+- *
+- * Math helper, returns latch value converted to nanoseconds (bound checked)
+- */
+-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
++static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
++ bool ismax)
+ {
+ u64 clc = (u64) latch << evt->shift;
++ u64 rnd;
+
+ if (unlikely(!evt->mult)) {
+ evt->mult = 1;
+ WARN_ON(1);
+ }
++ rnd = (u64) evt->mult - 1;
++
++ /*
++ * Upper bound sanity check. If the backwards conversion is
++ * not equal latch, we know that the above shift overflowed.
++ */
++ if ((clc >> evt->shift) != (u64)latch)
++ clc = ~0ULL;
++
++ /*
++ * Scaled math oddities:
++ *
++ * For mult <= (1 << shift) we can safely add mult - 1 to
++ * prevent integer rounding loss. So the backwards conversion
++ * from nsec to device ticks will be correct.
++ *
++ * For mult > (1 << shift), i.e. device frequency is > 1GHz we
++ * need to be careful. Adding mult - 1 will result in a value
++ * which when converted back to device ticks can be larger
++ * than latch by up to (mult - 1) >> shift. For the min_delta
++ * calculation we still want to apply this in order to stay
++ * above the minimum device ticks limit. For the upper limit
++ * we would end up with a latch value larger than the upper
++ * limit of the device, so we omit the add to stay below the
++ * device upper boundary.
++ *
++ * Also omit the add if it would overflow the u64 boundary.
++ */
++ if ((~0ULL - clc > rnd) &&
++ (!ismax || evt->mult <= (1U << evt->shift)))
++ clc += rnd;
+
+ do_div(clc, evt->mult);
+- if (clc < 1000)
+- clc = 1000;
+- if (clc > KTIME_MAX)
+- clc = KTIME_MAX;
+
+- return clc;
++ /* Deltas less than 1usec are pointless noise */
++ return clc > 1000 ? clc : 1000;
++}
++
++/**
++ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
++ * @latch: value to convert
++ * @evt: pointer to clock event device descriptor
++ *
++ * Math helper, returns latch value converted to nanoseconds (bound checked)
++ */
++u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
++{
++ return cev_delta2ns(latch, evt, false);
+ }
+ EXPORT_SYMBOL_GPL(clockevent_delta2ns);
+
+@@ -318,8 +353,8 @@ static void clockevents_config(struct cl
+ sec = 600;
+
+ clockevents_calc_mult_shift(dev, freq, sec);
+- dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
+- dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
++ dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
++ dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
+ }
+
+ /**
--- /dev/null
+From f13e220161e738c2710b9904dcb3cf8bb0bcce61 Mon Sep 17 00:00:00 2001
+From: Gwendal Grignou <gwendal@google.com>
+Date: Fri, 7 Aug 2009 16:17:49 -0700
+Subject: libata: make ata_eh_qc_retry() bump scmd->allowed on bogus failures
+
+From: Gwendal Grignou <gwendal@google.com>
+
+commit f13e220161e738c2710b9904dcb3cf8bb0bcce61 upstream.
+
+libata EH decrements scmd->retries when the command failed for reasons
+unrelated to the command itself so that, for example, commands aborted
+due to suspend / resume cycle don't get penalized; however,
+decrementing scmd->retries isn't enough for ATA passthrough commands.
+
+Without this fix, ATA passthrough commands are not resent to the
+drive, and no error is signalled to the caller because:
+
+- allowed retry count is 1
+- ata_eh_qc_complete fills the sense data, so the result is valid
+- sense data is filled with untouched ATA registers.
+
+Signed-off-by: Gwendal Grignou <gwendal@google.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-eh.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1287,14 +1287,14 @@ void ata_eh_qc_complete(struct ata_queue
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+- * scmd->retries is decremented for commands which get retried
++ * scmd->allowed is incremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *scmd = qc->scsicmd;
+- if (!qc->err_mask && scmd->retries)
+- scmd->retries--;
++ if (!qc->err_mask)
++ scmd->allowed++;
+ __ata_eh_qc_complete(qc);
+ }
+
--- /dev/null
+From 905b0297a9533d7a6ee00a01a990456636877dd6 Mon Sep 17 00:00:00 2001
+From: Bian Yu <bianyu@kedacom.com>
+Date: Sat, 12 Oct 2013 01:10:03 -0400
+Subject: md: avoid deadlock when md_set_badblocks.
+
+From: Bian Yu <bianyu@kedacom.com>
+
+commit 905b0297a9533d7a6ee00a01a990456636877dd6 upstream.
+
+When operating the hard disk and hitting errors, md_set_badblocks is called
+after scsi_restart_operations, which has already disabled interrupts. But
+md_set_badblocks will call write_sequnlock_irq and re-enable interrupts, so a
+softirq can preempt the current thread and that may cause a deadlock. I think
+this situation should use write_sequnlock_irqsave/irqrestore instead.
+
+I met the situation and the call trace is below:
+[ 638.919974] BUG: spinlock recursion on CPU#0, scsi_eh_13/1010
+[ 638.921923] lock: 0xffff8800d4d51fc8, .magic: dead4ead, .owner: scsi_eh_13/1010, .owner_cpu: 0
+[ 638.923890] CPU: 0 PID: 1010 Comm: scsi_eh_13 Not tainted 3.12.0-rc5+ #37
+[ 638.925844] Hardware name: To be filled by O.E.M. To be filled by O.E.M./MAHOBAY, BIOS 4.6.5 03/05/2013
+[ 638.927816] ffff880037ad4640 ffff880118c03d50 ffffffff8172ff85 0000000000000007
+[ 638.929829] ffff8800d4d51fc8 ffff880118c03d70 ffffffff81730030 ffff8800d4d51fc8
+[ 638.931848] ffffffff81a72eb0 ffff880118c03d90 ffffffff81730056 ffff8800d4d51fc8
+[ 638.933884] Call Trace:
+[ 638.935867] <IRQ> [<ffffffff8172ff85>] dump_stack+0x55/0x76
+[ 638.937878] [<ffffffff81730030>] spin_dump+0x8a/0x8f
+[ 638.939861] [<ffffffff81730056>] spin_bug+0x21/0x26
+[ 638.941836] [<ffffffff81336de4>] do_raw_spin_lock+0xa4/0xc0
+[ 638.943801] [<ffffffff8173f036>] _raw_spin_lock+0x66/0x80
+[ 638.945747] [<ffffffff814a73ed>] ? scsi_device_unbusy+0x9d/0xd0
+[ 638.947672] [<ffffffff8173fb1b>] ? _raw_spin_unlock+0x2b/0x50
+[ 638.949595] [<ffffffff814a73ed>] scsi_device_unbusy+0x9d/0xd0
+[ 638.951504] [<ffffffff8149ec47>] scsi_finish_command+0x37/0xe0
+[ 638.953388] [<ffffffff814a75e8>] scsi_softirq_done+0xa8/0x140
+[ 638.955248] [<ffffffff8130e32b>] blk_done_softirq+0x7b/0x90
+[ 638.957116] [<ffffffff8104fddd>] __do_softirq+0xfd/0x330
+[ 638.958987] [<ffffffff810b964f>] ? __lock_release+0x6f/0x100
+[ 638.960861] [<ffffffff8174a5cc>] call_softirq+0x1c/0x30
+[ 638.962724] [<ffffffff81004c7d>] do_softirq+0x8d/0xc0
+[ 638.964565] [<ffffffff8105024e>] irq_exit+0x10e/0x150
+[ 638.966390] [<ffffffff8174ad4a>] smp_apic_timer_interrupt+0x4a/0x60
+[ 638.968223] [<ffffffff817499af>] apic_timer_interrupt+0x6f/0x80
+[ 638.970079] <EOI> [<ffffffff810b964f>] ? __lock_release+0x6f/0x100
+[ 638.971899] [<ffffffff8173fa6a>] ? _raw_spin_unlock_irq+0x3a/0x50
+[ 638.973691] [<ffffffff8173fa60>] ? _raw_spin_unlock_irq+0x30/0x50
+[ 638.975475] [<ffffffff81562393>] md_set_badblocks+0x1f3/0x4a0
+[ 638.977243] [<ffffffff81566e07>] rdev_set_badblocks+0x27/0x80
+[ 638.978988] [<ffffffffa00d97bb>] raid5_end_read_request+0x36b/0x4e0 [raid456]
+[ 638.980723] [<ffffffff811b5a1d>] bio_endio+0x1d/0x40
+[ 638.982463] [<ffffffff81304ff3>] req_bio_endio.isra.65+0x83/0xa0
+[ 638.984214] [<ffffffff81306b9f>] blk_update_request+0x7f/0x350
+[ 638.985967] [<ffffffff81306ea1>] blk_update_bidi_request+0x31/0x90
+[ 638.987710] [<ffffffff813085e0>] __blk_end_bidi_request+0x20/0x50
+[ 638.989439] [<ffffffff8130862f>] __blk_end_request_all+0x1f/0x30
+[ 638.991149] [<ffffffff81308746>] blk_peek_request+0x106/0x250
+[ 638.992861] [<ffffffff814a62a9>] ? scsi_kill_request.isra.32+0xe9/0x130
+[ 638.994561] [<ffffffff814a633a>] scsi_request_fn+0x4a/0x3d0
+[ 638.996251] [<ffffffff813040a7>] __blk_run_queue+0x37/0x50
+[ 638.997900] [<ffffffff813045af>] blk_run_queue+0x2f/0x50
+[ 638.999553] [<ffffffff814a5750>] scsi_run_queue+0xe0/0x1c0
+[ 639.001185] [<ffffffff814a7721>] scsi_run_host_queues+0x21/0x40
+[ 639.002798] [<ffffffff814a2e87>] scsi_restart_operations+0x177/0x200
+[ 639.004391] [<ffffffff814a4fe9>] scsi_error_handler+0xc9/0xe0
+[ 639.005996] [<ffffffff814a4f20>] ? scsi_unjam_host+0xd0/0xd0
+[ 639.007600] [<ffffffff81072f6b>] kthread+0xdb/0xe0
+[ 639.009205] [<ffffffff81072e90>] ? flush_kthread_worker+0x170/0x170
+[ 639.010821] [<ffffffff81748cac>] ret_from_fork+0x7c/0xb0
+[ 639.012437] [<ffffffff81072e90>] ? flush_kthread_worker+0x170/0x170
+
+This bug was introduced in commit 2e8ac30312973dd20e68073653
+(the first time rdev_set_badblock was call from interrupt context),
+so this patch is appropriate for 3.5 and subsequent kernels.
+
+Signed-off-by: Bian Yu <bianyu@kedacom.com>
+Reviewed-by: Jianpeng Ma <majianpeng@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7780,6 +7780,7 @@ static int md_set_badblocks(struct badbl
+ u64 *p;
+ int lo, hi;
+ int rv = 1;
++ unsigned long flags;
+
+ if (bb->shift < 0)
+ /* badblocks are disabled */
+@@ -7794,7 +7795,7 @@ static int md_set_badblocks(struct badbl
+ sectors = next - s;
+ }
+
+- write_seqlock_irq(&bb->lock);
++ write_seqlock_irqsave(&bb->lock, flags);
+
+ p = bb->page;
+ lo = 0;
+@@ -7910,7 +7911,7 @@ static int md_set_badblocks(struct badbl
+ bb->changed = 1;
+ if (!acknowledged)
+ bb->unacked_exist = 1;
+- write_sequnlock_irq(&bb->lock);
++ write_sequnlock_irqrestore(&bb->lock, flags);
+
+ return rv;
+ }
--- /dev/null
+From 61e4947c99c4494336254ec540c50186d186150b Mon Sep 17 00:00:00 2001
+From: Lukasz Dorau <lukasz.dorau@intel.com>
+Date: Thu, 24 Oct 2013 12:55:17 +1100
+Subject: md: Fix skipping recovery for read-only arrays.
+
+From: Lukasz Dorau <lukasz.dorau@intel.com>
+
+commit 61e4947c99c4494336254ec540c50186d186150b upstream.
+
+Since:
+ commit 7ceb17e87bde79d285a8b988cfed9eaeebe60b86
+ md: Allow devices to be re-added to a read-only array.
+
+spares are activated on a read-only array. In the case of the raid1 and
+raid10 personalities this causes not-in-sync devices to be marked in-sync
+without checking whether recovery has finished.
+
+If a read-only array is degraded and one of its devices is not in-sync
+(because the array has been only partially recovered) recovery will be skipped.
+
+This patch adds checking if recovery has been finished before marking a device
+in-sync for raid1 and raid10 personalities. In case of raid5 personality
+such condition is already present (at raid5.c:6029).
+
+Bug was introduced in 3.10 and causes data corruption.
+
+Signed-off-by: Pawel Baldysiak <pawel.baldysiak@intel.com>
+Signed-off-by: Lukasz Dorau <lukasz.dorau@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 1 +
+ drivers/md/raid10.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1357,6 +1357,7 @@ static int raid1_spare_active(struct mdd
+ }
+ }
+ if (rdev
++ && rdev->recovery_offset == MaxSector
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_and_set_bit(In_sync, &rdev->flags)) {
+ count++;
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1534,6 +1534,7 @@ static int raid10_spare_active(struct md
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
++ && tmp->rdev->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+ count++;
--- /dev/null
+From 54e181e073fc1415e41917d725ebdbd7de956455 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 26 Oct 2013 23:19:25 +0200
+Subject: parisc: Do not crash 64bit SMP kernels on machines with >= 4GB RAM
+
+From: Helge Deller <deller@gmx.de>
+
+commit 54e181e073fc1415e41917d725ebdbd7de956455 upstream.
+
+Since the beginning of the parisc-linux port, sometimes 64bit SMP kernels were
+not able to bring up other CPUs than the monarch CPU and instead crashed the
+kernel. The reason was unclear, esp. since it involved various machines (e.g.
+J5600, J6750 and SuperDome). Testing showed that those crashes didn't happen
+when less than 4GB were installed, or if a 32bit Linux kernel was booted.
+
+In the end, the fix for those SMP problems is trivial:
+During the early phase of the initialization of the CPUs, including the monarch
+CPU, the PDC_PSW firmware function to enable WIDE (=64bit) mode is called.
+It's documented that this firmware function may clobber various registers, and
+one of those possibly clobbered registers is %cr30 which holds the task
+thread info pointer.
+
+Now, if %cr30 would always have been clobbered, then this bug would have been
+detected much earlier. But lots of testing finally showed, that - at least for
+%cr30 - on some machines only the upper 32bits of the 64bit register suddenly
+turned zero after the firmware call.
+
+So, after finding the root cause, the explanation for the various crashes
+became clear:
+- On 32bit SMP Linux kernels all upper 32bit were zero, so we didn't face this
+ problem.
+- Monarch CPUs in 64bit mode always booted successfully, because the initial
+  task thread info pointer was below 4GB.
+- Secondary CPUs booted successfully on machines with less than 4GB RAM because
+  the upper 32bit were zero anyway.
+- Secondary CPUs failed to boot if we had more than 4GB RAM and the task thread
+  info pointer was located above the 4GB boundary.
+
+Finally, the patch to fix this problem is trivial by saving the %cr30 register
+before the firmware call and restoring it afterwards.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/head.S | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -195,6 +195,8 @@ common_stext:
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
++ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
++
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+@@ -203,6 +205,8 @@ common_stext:
+ copy %r0,%arg3
+
+ stext_pdc_ret:
++ mtctl %r6,%cr30 /* restore task thread info */
++
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
rtlwifi-rtl8192cu-fix-error-in-pointer-arithmetic.patch
jfs-fix-error-path-in-ialloc.patch
can-flexcan-flexcan_chip_start-fix-regression-mark-one-mb-for-tx-and-abort-pending-tx.patch
+libata-make-ata_eh_qc_retry-bump-scmd-allowed-on-bogus-failures.patch
+md-avoid-deadlock-when-md_set_badblocks.patch
+md-fix-skipping-recovery-for-read-only-arrays.patch
+clockevents-sanitize-ticks-to-nsec-conversion.patch
+parisc-do-not-crash-64bit-smp-kernels-on-machines-with-4gb-ram.patch