--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Date: Mon, 4 Aug 2014 23:47:41 +0300
+Subject: arch/sparc/math-emu/math_32.c: drop stray break operator
+
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+
+[ Upstream commit 093758e3daede29cb4ce6aedb111becf9d4bfc57 ]
+
+This commit is guesswork, but it seems to make sense to drop this
+break: otherwise the following line is never executed and becomes
+dead code. And that following line actually stores the result of the
+local calculation through the pointer given as a function argument. So the
+proposed change makes sense if this code as a whole makes sense (but I
+am unable to analyze it as a whole).
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=81641
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/math-emu/math_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/math-emu/math_32.c
++++ b/arch/sparc/math-emu/math_32.c
+@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsi
+ case 0: fsr = *pfsr;
+ if (IR == -1) IR = 2;
+ /* fcc is always fcc0 */
+- fsr &= ~0xc00; fsr |= (IR << 10); break;
++ fsr &= ~0xc00; fsr |= (IR << 10);
+ *pfsr = fsr;
+ break;
+ case 1: rd->s = IR; break;
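A minimal user-space sketch of the problem described above (illustration only,
with made-up names; not part of the patch): a stray break before the write-back
statement turns it into dead code, so the caller never sees the computed value.

    #include <stdio.h>

    /* Mimics the shape of the fixed "case 0" branch above. */
    static void update_fsr(int sel, unsigned int result, unsigned int *pfsr)
    {
            unsigned int fsr;

            switch (sel) {
            case 0:
                    fsr = *pfsr;
                    fsr &= ~0xc00;
                    fsr |= (result << 10);
                    /* With a stray "break;" right here, the next line is unreachable. */
                    *pfsr = fsr;    /* write the result back through the pointer */
                    break;
            default:
                    break;
            }
    }

    int main(void)
    {
            unsigned int fsr = 0xfff;

            update_fsr(0, 2, &fsr);
            printf("fsr = 0x%x\n", fsr);    /* prints 0xbff; stays 0xfff if the stray break remained */
            return 0;
    }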
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+Date: Sun, 3 Aug 2014 15:44:52 +0200
+Subject: bbc-i2c: Fix BBC I2C envctrl on SunBlade 2000
+
+From: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+
+[ Upstream commit 5cdceab3d5e02eb69ea0f5d8fa9181800baf6f77 ]
+
+Fix regression in bbc i2c temperature and fan control on some Sun systems
+that causes the driver to refuse to load due to the bbc_i2c_bussel resource not
+being present on the (second) i2c bus where the temperature sensors and fan
+control are located. (The check for the number of resources was removed when
+the driver was ported to a pure OF driver in mid 2008.)
+
+Signed-off-by: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/sbus/char/bbc_envctrl.c | 6 ++++++
+ drivers/sbus/char/bbc_i2c.c | 11 ++++++++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/sbus/char/bbc_envctrl.c
++++ b/drivers/sbus/char/bbc_envctrl.c
+@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i
+ if (!tp)
+ return;
+
++ INIT_LIST_HEAD(&tp->bp_list);
++ INIT_LIST_HEAD(&tp->glob_list);
++
+ tp->client = bbc_i2c_attach(bp, op);
+ if (!tp->client) {
+ kfree(tp);
+@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2
+ if (!fp)
+ return;
+
++ INIT_LIST_HEAD(&fp->bp_list);
++ INIT_LIST_HEAD(&fp->glob_list);
++
+ fp->client = bbc_i2c_attach(bp, op);
+ if (!fp->client) {
+ kfree(fp);
+--- a/drivers/sbus/char/bbc_i2c.c
++++ b/drivers/sbus/char/bbc_i2c.c
+@@ -301,13 +301,18 @@ static struct bbc_i2c_bus * __init attac
+ if (!bp)
+ return NULL;
+
++ INIT_LIST_HEAD(&bp->temps);
++ INIT_LIST_HEAD(&bp->fans);
++
+ bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
+ if (!bp->i2c_control_regs)
+ goto fail;
+
+- bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
+- if (!bp->i2c_bussel_reg)
+- goto fail;
++ if (op->num_resources == 2) {
++ bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
++ if (!bp->i2c_bussel_reg)
++ goto fail;
++ }
+
+ bp->waiting = 0;
+ init_waitqueue_head(&bp->wq);
net-correctly-set-segment-mac_len-in-skb_segment.patch
iovec-make-sure-the-caller-actually-wants-anything-in-memcpy_fromiovecend.patch
sctp-fix-possible-seqlock-seadlock-in-sctp_packet_transmit.patch
+sparc64-fix-argument-sign-extension-for-compat_sys_futex.patch
+sparc64-make-itc_sync_lock-raw.patch
+sparc64-handle-32-bit-tasks-properly-in-compute_effective_address.patch
+sparc64-fix-top-level-fault-handling-bugs.patch
+sparc64-don-t-bark-so-loudly-about-32-bit-tasks-generating-64-bit-fault-addresses.patch
+sparc64-fix-huge-tsb-mapping-on-pre-ultrasparc-iii-cpus.patch
+sparc64-add-membar-to-niagara2-memcpy-code.patch
+sparc64-do-not-insert-non-valid-ptes-into-the-tsb-hash-table.patch
+sparc64-guard-against-flushing-openfirmware-mappings.patch
+bbc-i2c-fix-bbc-i2c-envctrl-on-sunblade-2000.patch
+sunsab-fix-detection-of-break-on-sunsab-serial-console.patch
+sparc64-ldc_connect-should-not-return-einval-when-handshake-is-in-progress.patch
+arch-sparc-math-emu-math_32.c-drop-stray-break-operator.patch
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sat, 17 May 2014 11:28:05 -0700
+Subject: sparc64: Add membar to Niagara2 memcpy code.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 5aa4ecfd0ddb1e6dcd1c886e6c49677550f581aa ]
+
+This is to prevent previous stores from overlapping the block stores
+done by the memcpy loop.
+
+Based upon a glibc patch by Jose E. Marchesi
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/lib/NG2memcpy.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/sparc/lib/NG2memcpy.S
++++ b/arch/sparc/lib/NG2memcpy.S
+@@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len
+ */
+ VISEntryHalf
+
++ membar #Sync
+ alignaddr %o1, %g0, %g0
+
+ add %o1, (64 - 1), %o4
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Mon, 4 Aug 2014 16:34:01 -0700
+Subject: sparc64: Do not insert non-valid PTEs into the TSB hash table.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 18f38132528c3e603c66ea464727b29e9bbcb91b ]
+
+The assumption was that update_mmu_cache() (and the equivalent for PMDs) would
+only be called when the PTE being installed will be accessible by the user.
+
+This is not true for code paths originating from remove_migration_pte().
+
+There are dire consequences for placing a non-valid PTE into the TSB. The TLB
+miss framework assumes that when a TSB entry matches we can just load it into
+the TLB and return from the TLB miss trap.
+
+So if a non-valid PTE is in there, we will deadlock taking the TLB miss over
+and over, never satisfying the miss.
+
+Just exit early from update_mmu_cache() and friends in this situation.
+
+Based upon a report and patch from Christopher Alexander Tobias Schulze.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/init_64.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -308,6 +308,10 @@ void update_mmu_cache(struct vm_area_str
+ tsb_index = MM_TSB_BASE;
+ tsb_hash_shift = PAGE_SHIFT;
+
++ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
++ if (!(pte_val(pte) & _PAGE_VALID))
++ return;
++
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ #ifdef CONFIG_HUGETLB_PAGE
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 6 May 2014 21:27:37 -0700
+Subject: sparc64: Don't bark so loudly about 32-bit tasks generating 64-bit fault addresses.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit e5c460f46ae7ee94831cb55cb980f942aa9e5a85 ]
+
+This was found using Dave Jones' trinity tool.
+
+When a user process which is 32-bit performs a load or a store, the
+cpu chops off the top 32-bits of the effective address before
+translating it.
+
+This is because we run 32-bit tasks with the PSTATE_AM (address
+masking) bit set.
+
+We can't run the kernel with that bit set, so when the kernel accesses
+userspace no address masking occurs.
+
+Since a 32-bit process will have no mappings in that region we will
+properly fault, so we don't try to handle this using access_ok(),
+which can safely just be a NOP on sparc64.
+
+Real faults from 32-bit processes should never generate such addresses
+so a bug check was added long ago, and it barks in the logs if this
+happens.
+
+But it also barks when a kernel user access causes this condition, and
+that _can_ happen. For example, if a pointer passed into a system call
+is "0xfffffffc" and the kernel access 4 bytes offset from that pointer.
+
+Just handle such faults normally via the exception entries.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/fault_64.c | 16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -282,18 +282,6 @@ static void noinline __kprobes bogus_32b
+ show_regs(regs);
+ }
+
+-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+- unsigned long addr)
+-{
+- static int times;
+-
+- if (times++ < 10)
+- printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
+- "reports 64-bit fault address [%lx]\n",
+- current->comm, current->pid, addr);
+- show_regs(regs);
+-}
+-
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fau
+ goto intr_or_no_mm;
+ }
+ }
+- if (unlikely((address >> 32) != 0)) {
+- bogus_32bit_fault_address(regs, address);
++ if (unlikely((address >> 32) != 0))
+ goto intr_or_no_mm;
+- }
+ }
+
+ if (regs->tstate & TSTATE_PRIV) {
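A hedged illustration of the scenario in the changelog (ordinary user-space C,
not kernel code; values chosen for the example): 0xfffffffc plus a small offset
carries into the upper 32 bits, which is exactly what the (address >> 32) != 0
test above catches.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Pointer passed in by a 32-bit task. */
            uint64_t user_ptr = 0xfffffffcULL;

            /* The kernel runs without PSTATE_AM, so the addition is full 64-bit. */
            uint64_t access = user_ptr + 4;

            printf("access = 0x%llx, upper 32 bits = 0x%llx\n",
                   (unsigned long long)access,
                   (unsigned long long)(access >> 32));
            /* Prints access = 0x100000000, upper 32 bits = 0x1 -- it looks like a
             * "64-bit fault address" even though the task is 32-bit. */
            return 0;
    }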
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 30 Apr 2014 19:37:48 -0700
+Subject: sparc64: Fix argument sign extension for compat_sys_futex().
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit aa3449ee9c87d9b7660dd1493248abcc57769e31 ]
+
+Only the second argument, 'op', is signed.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/sys32.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -87,7 +87,7 @@ SIGN1(sys32_io_submit, compat_sys_io_sub
+ SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
+ SIGN1(sys32_select, compat_sys_select, %o0)
+ SIGN1(sys32_mkdir, sys_mkdir, %o1)
+-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
++SIGN1(sys32_futex, compat_sys_futex, %o1)
+ SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
+ SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
+ SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
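A user-space sketch of why only %o1 ('op', a signed int) needs the SIGN
treatment while the other arguments do not. This assumes the 32-bit caller's
value arrives zero-extended in a 64-bit register; it is an illustration, not
kernel code.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* A 32-bit caller passes op = -1; the low 32 bits of the 64-bit
             * argument register then hold 0xffffffff. */
            uint64_t reg = 0xffffffffULL;

            int64_t without = (int64_t)reg;                        /* 4294967295: wrong for a signed arg */
            int64_t with    = (int64_t)(int32_t)(reg & 0xffffffffULL); /* -1: what sign extension restores */

            printf("without: %lld, with sign extension: %lld\n",
                   (long long)without, (long long)with);
            return 0;
    }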
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 7 May 2014 14:07:32 -0700
+Subject: sparc64: Fix huge TSB mapping on pre-UltraSPARC-III cpus.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit b18eb2d779240631a098626cb6841ee2dd34fda0 ]
+
+Access to the TSB hash tables during TLB misses requires that there be
+an atomic 128-bit quad load available so that we fetch a matching TAG
+and DATA field at the same time.
+
+On cpus prior to UltraSPARC-III only virtual address based quad loads
+are available. UltraSPARC-III and later provide physical address
+based variants which are easier to use.
+
+When we only have virtual address based quad loads available this
+means that we have to lock the TSB into the TLB at a fixed virtual
+address on each cpu when it runs that process. We can't just access
+the PAGE_OFFSET based aliased mapping of these TSBs because we cannot
+take a recursive TLB miss inside of the TLB miss handler without
+risking running out of hardware trap levels (some trap combinations
+can be deep, such as those generated by register window spill and fill
+traps).
+
+Without huge pages this works perfectly fine, but when the huge TSB
+was added, another chunk of fixed virtual address space was not
+allocated for this second TSB mapping.
+
+So we were mapping both the 8K and 4MB TSBs to the same exact virtual
+address, causing multiple TLB matches which gives undefined behavior.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/pgtable_64.h | 6 ++++--
+ arch/sparc/mm/tsb.c | 14 +++++++++++++-
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -24,7 +24,8 @@
+
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+ * The page copy blockops can use 0x6000000 to 0x8000000.
+- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
++ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
++ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
+ * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
+ * The vmalloc area spans 0x100000000 to 0x200000000.
+ * Since modules need to be in the lowest 32-bits of the address space,
+@@ -33,7 +34,8 @@
+ * 0x400000000.
+ */
+ #define TLBTEMP_BASE _AC(0x0000000006000000,UL)
+-#define TSBMAP_BASE _AC(0x0000000008000000,UL)
++#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
++#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
+ #define MODULES_VADDR _AC(0x0000000010000000,UL)
+ #define MODULES_LEN _AC(0x00000000e0000000,UL)
+ #define MODULES_END _AC(0x00000000f0000000,UL)
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -150,7 +150,19 @@ static void setup_tsb_params(struct mm_s
+ mm->context.tsb_block[tsb_idx].tsb_nentries =
+ tsb_bytes / sizeof(struct tsb);
+
+- base = TSBMAP_BASE;
++ switch (tsb_idx) {
++ case MM_TSB_BASE:
++ base = TSBMAP_8K_BASE;
++ break;
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++ case MM_TSB_HUGE:
++ base = TSBMAP_4M_BASE;
++ break;
++#endif
++ default:
++ BUG();
++ }
++
+ tte = pgprot_val(PAGE_KERNEL_LOCKED);
+ tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
+ BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Mon, 28 Apr 2014 23:52:11 -0700
+Subject: sparc64: Fix top-level fault handling bugs.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 70ffc6ebaead783ac8dafb1e87df0039bb043596 ]
+
+Make get_user_insn() able to cope with huge PMDs.
+
+Next, make do_fault_siginfo() more robust when get_user_insn() can't
+actually fetch the instruction. In particular, use the MMU announced
+fault address when that happens, instead of calling
+compute_effective_address() and computing garbage.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/fault_64.c | 84 +++++++++++++++++++++++++++++------------------
+ 1 file changed, 53 insertions(+), 31 deletions(-)
+
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -95,38 +95,51 @@ static unsigned int get_user_insn(unsign
+ pte_t *ptep, pte;
+ unsigned long pa;
+ u32 insn = 0;
+- unsigned long pstate;
+
+- if (pgd_none(*pgdp))
+- goto outret;
++ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
++ goto out;
+ pudp = pud_offset(pgdp, tpc);
+- if (pud_none(*pudp))
+- goto outret;
+- pmdp = pmd_offset(pudp, tpc);
+- if (pmd_none(*pmdp))
+- goto outret;
+-
+- /* This disables preemption for us as well. */
+- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+- __asm__ __volatile__("wrpr %0, %1, %%pstate"
+- : : "r" (pstate), "i" (PSTATE_IE));
+- ptep = pte_offset_map(pmdp, tpc);
+- pte = *ptep;
+- if (!pte_present(pte))
++ if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+ goto out;
+
+- pa = (pte_pfn(pte) << PAGE_SHIFT);
+- pa += (tpc & ~PAGE_MASK);
++ /* This disables preemption for us as well. */
++ local_irq_disable();
+
+- /* Use phys bypass so we don't pollute dtlb/dcache. */
+- __asm__ __volatile__("lduwa [%1] %2, %0"
+- : "=r" (insn)
+- : "r" (pa), "i" (ASI_PHYS_USE_EC));
++ pmdp = pmd_offset(pudp, tpc);
++ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
++ goto out_irq_enable;
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ if (pmd_trans_huge(*pmdp)) {
++ if (pmd_trans_splitting(*pmdp))
++ goto out_irq_enable;
++
++ pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
++ pa += tpc & ~HPAGE_MASK;
++
++ /* Use phys bypass so we don't pollute dtlb/dcache. */
++ __asm__ __volatile__("lduwa [%1] %2, %0"
++ : "=r" (insn)
++ : "r" (pa), "i" (ASI_PHYS_USE_EC));
++ } else
++#endif
++ {
++ ptep = pte_offset_map(pmdp, tpc);
++ pte = *ptep;
++ if (pte_present(pte)) {
++ pa = (pte_pfn(pte) << PAGE_SHIFT);
++ pa += (tpc & ~PAGE_MASK);
++
++ /* Use phys bypass so we don't pollute dtlb/dcache. */
++ __asm__ __volatile__("lduwa [%1] %2, %0"
++ : "=r" (insn)
++ : "r" (pa), "i" (ASI_PHYS_USE_EC));
++ }
++ pte_unmap(ptep);
++ }
++out_irq_enable:
++ local_irq_enable();
+ out:
+- pte_unmap(ptep);
+- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+-outret:
+ return insn;
+ }
+
+@@ -154,7 +167,8 @@ show_signal_msg(struct pt_regs *regs, in
+ extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
+
+ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+- unsigned int insn, int fault_code)
++ unsigned long fault_addr, unsigned int insn,
++ int fault_code)
+ {
+ unsigned long addr;
+ siginfo_t info;
+@@ -162,10 +176,18 @@ static void do_fault_siginfo(int code, i
+ info.si_code = code;
+ info.si_signo = sig;
+ info.si_errno = 0;
+- if (fault_code & FAULT_CODE_ITLB)
++ if (fault_code & FAULT_CODE_ITLB) {
+ addr = regs->tpc;
+- else
+- addr = compute_effective_address(regs, insn, 0);
++ } else {
++ /* If we were able to probe the faulting instruction, use it
++ * to compute a precise fault address. Otherwise use the fault
++ * time provided address which may only have page granularity.
++ */
++ if (insn)
++ addr = compute_effective_address(regs, insn, 0);
++ else
++ addr = fault_addr;
++ }
+ info.si_addr = (void __user *) addr;
+ info.si_trapno = 0;
+
+@@ -240,7 +262,7 @@ static void __kprobes do_kernel_fault(st
+ /* The si_code was set to make clear whether
+ * this was a SEGV_MAPERR or SEGV_ACCERR fault.
+ */
+- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
++ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
+ return;
+ }
+
+@@ -515,7 +537,7 @@ do_sigbus:
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
++ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (regs->tstate & TSTATE_PRIV)
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Mon, 4 Aug 2014 20:07:37 -0700
+Subject: sparc64: Guard against flushing openfirmware mappings.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 4ca9a23765da3260058db3431faf5b4efd8cf926 ]
+
+Based almost entirely upon a patch by Christopher Alexander Tobias
+Schulze.
+
+In commit db64fe02258f1507e13fe5212a989922323685ce ("mm: rewrite vmap
+layer") lazy VMAP tlb flushing was added to the vmalloc layer. This
+causes problems on sparc64.
+
+Sparc64 has two VMAP mapped regions and they are not contiguous with
+each other. First we have the module mapping area, then another
+unrelated region, then the vmalloc region.
+
+This "another unrelated region" is where the firmware is mapped.
+
+If the lazy TLB flushing logic in the vmalloc code triggers after
+we've had both a module unload and a vfree or similar, it will pass an
+address range that goes from somewhere inside the module region to
+somewhere inside the vmalloc region, thus covering the
+openfirmware area entirely.
+
+The sparc64 kernel learns about openfirmware's dynamic mappings in
+this region early in the boot, and then services TLB misses in this
+area. But openfirmware has some locked TLB entries which are not
+mentioned in those dynamic mappings and we should thus not disturb
+them.
+
+These huge lazy TLB flush ranges cause those openfirmware locked TLB
+entries to be removed, resulting in all kinds of problems including
+hard hangs and crashes during reboot/reset.
+
+Besides causing problems like this, such huge TLB flush ranges are
+also incredibly inefficient. A plea has been made with the author of
+the VMAP lazy TLB flushing code, but for now we'll put a safety guard
+into our flush_tlb_kernel_range() implementation.
+
+Since the implementation has become non-trivial, stop defining it as a
+macro and instead make it a function in a C source file.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/tlbflush_64.h | 12 ++----------
+ arch/sparc/mm/init_64.c | 23 +++++++++++++++++++++++
+ 2 files changed, 25 insertions(+), 10 deletions(-)
+
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -35,6 +35,8 @@ static inline void flush_tlb_range(struc
+ {
+ }
+
++void flush_tlb_kernel_range(unsigned long start, unsigned long end);
++
+ #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+ extern void flush_tlb_pending(void);
+@@ -49,11 +51,6 @@ extern void __flush_tlb_kernel_range(uns
+
+ #ifndef CONFIG_SMP
+
+-#define flush_tlb_kernel_range(start,end) \
+-do { flush_tsb_kernel_range(start,end); \
+- __flush_tlb_kernel_range(start,end); \
+-} while (0)
+-
+ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+ {
+ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+@@ -64,11 +61,6 @@ static inline void global_flush_tlb_page
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+
+-#define flush_tlb_kernel_range(start, end) \
+-do { flush_tsb_kernel_range(start,end); \
+- smp_flush_tlb_kernel_range(start, end); \
+-} while (0)
+-
+ #define global_flush_tlb_page(mm, vaddr) \
+ smp_flush_tlb_page(mm, vaddr)
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2421,3 +2421,26 @@ void __flush_tlb_all(void)
+ __asm__ __volatile__("wrpr %0, 0, %%pstate"
+ : : "r" (pstate));
+ }
++
++#ifdef CONFIG_SMP
++#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
++#else
++#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
++#endif
++
++void flush_tlb_kernel_range(unsigned long start, unsigned long end)
++{
++ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
++ if (start < LOW_OBP_ADDRESS) {
++ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
++ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
++ }
++ if (end > HI_OBP_ADDRESS) {
++ flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
++ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++ }
++ } else {
++ flush_tsb_kernel_range(start, end);
++ do_flush_tlb_kernel_range(start, end);
++ }
++}
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: "David S. Miller" <davem@davemloft.net>
+Date: Mon, 28 Apr 2014 23:50:08 -0700
+Subject: sparc64: Handle 32-bit tasks properly in compute_effective_address().
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit d037d16372bbe4d580342bebbb8826821ad9edf0 ]
+
+If we have a 32-bit task we must chop off the top 32-bits of the
+64-bit value just as the cpu would.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/unaligned_64.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -156,17 +156,23 @@ static unsigned long *fetch_reg_addr(uns
+ unsigned long compute_effective_address(struct pt_regs *regs,
+ unsigned int insn, unsigned int rd)
+ {
++ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ unsigned int rs1 = (insn >> 14) & 0x1f;
+ unsigned int rs2 = insn & 0x1f;
+- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
++ unsigned long addr;
+
+ if (insn & 0x2000) {
+ maybe_flush_windows(rs1, 0, rd, from_kernel);
+- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
++ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ } else {
+ maybe_flush_windows(rs1, rs2, rd, from_kernel);
+- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
++ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ }
++
++ if (!from_kernel && test_thread_flag(TIF_32BIT))
++ addr &= 0xffffffff;
++
++ return addr;
+ }
+
+ /* This is just to make gcc think die_if_kernel does return... */
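A hedged user-space illustration of the masking being added (not kernel code;
example values are made up): a register-plus-immediate computation done in 64
bits can carry past bit 31, while the cpu, running the 32-bit task with
PSTATE_AM set, would have discarded the upper half.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t rs1 = 0xfffffff0ULL;   /* register value from a 32-bit task */
            int64_t simm13 = 0x20;          /* sign-extended 13-bit immediate */

            uint64_t raw = rs1 + simm13;
            uint64_t masked = raw & 0xffffffffULL;  /* what compute_effective_address() now returns */

            printf("raw = 0x%llx, masked = 0x%llx\n",
                   (unsigned long long)raw, (unsigned long long)masked);
            /* raw = 0x100000010, masked = 0x10 -- matching what the hardware
             * does for the 32-bit task itself. */
            return 0;
    }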
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Date: Fri, 1 Aug 2014 09:50:40 -0400
+Subject: sparc64: ldc_connect() should not return EINVAL when handshake is in progress.
+
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+
+[ Upstream commit 4ec1b01029b4facb651b8ef70bc20a4be4cebc63 ]
+
+The LDC handshake could have been asynchronously triggered
+after ldc_bind() enables the ldc_rx() receive interrupt-handler
+(and thus intercepts incoming control packets)
+and before vio_port_up() calls ldc_connect(). If that is the case,
+ldc_connect() should return 0 and let the state-machine
+progress.
+
+Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Acked-by: Karl Volz <karl.volz@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/ldc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1339,7 +1339,7 @@ int ldc_connect(struct ldc_channel *lp)
+ if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
+ !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
+ lp->hs_state != LDC_HS_OPEN)
+- err = -EINVAL;
++ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
+ else
+ err = start_handshake(lp);
+
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: Kirill Tkhai <tkhai@yandex.ru>
+Date: Thu, 17 Apr 2014 00:45:24 +0400
+Subject: sparc64: Make itc_sync_lock raw
+
+From: Kirill Tkhai <tkhai@yandex.ru>
+
+[ Upstream commit 49b6c01f4c1de3b5e5427ac5aba80f9f6d27837a ]
+
+This is one more place where we must not be
+preempted or interrupted in RT.
+
+Always actually disable interrupts during the
+synchronization cycle.
+
+Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/smp_64.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -151,7 +151,7 @@ void cpu_panic(void)
+ #define NUM_ROUNDS 64 /* magic value */
+ #define NUM_ITERS 5 /* likewise */
+
+-static DEFINE_SPINLOCK(itc_sync_lock);
++static DEFINE_RAW_SPINLOCK(itc_sync_lock);
+ static unsigned long go[SLAVE + 1];
+
+ #define DEBUG_TICK_SYNC 0
+@@ -259,7 +259,7 @@ static void smp_synchronize_one_tick(int
+ go[MASTER] = 0;
+ membar_safe("#StoreLoad");
+
+- spin_lock_irqsave(&itc_sync_lock, flags);
++ raw_spin_lock_irqsave(&itc_sync_lock, flags);
+ {
+ for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
+ while (!go[MASTER])
+@@ -270,7 +270,7 @@ static void smp_synchronize_one_tick(int
+ membar_safe("#StoreLoad");
+ }
+ }
+- spin_unlock_irqrestore(&itc_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
+ }
+
+ #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
--- /dev/null
+From foo@baz Thu Aug 7 22:33:35 PDT 2014
+From: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+Date: Sun, 3 Aug 2014 16:01:53 +0200
+Subject: sunsab: Fix detection of BREAK on sunsab serial console
+
+From: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+
+[ Upstream commit fe418231b195c205701c0cc550a03f6c9758fd9e ]
+
+Fix detection of BREAK on sunsab serial console: BREAK detection was only
+performed when there were also serial characters received simultaneously.
+To handle all BREAKs correctly, the check for BREAK and the corresponding
+call to uart_handle_break() must also be done if count == 0, therefore
+duplicate this code fragment and pull it out of the loop over the received
+characters.
+
+Patch applies to 3.16-rc6.
+
+Signed-off-by: Christopher Alexander Tobias Schulze <cat.schulze@alice-dsl.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/sunsab.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *u
+ (up->port.line == up->port.cons->index))
+ saw_console_brk = 1;
+
++ if (count == 0) {
++ if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
++ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
++ SAB82532_ISR0_FERR);
++ up->port.icount.brk++;
++ uart_handle_break(&up->port);
++ }
++ }
++
+ for (i = 0; i < count; i++) {
+ unsigned char ch = buf[i], flag;
+