alsa-hda-workaround-for-silent-output-on-sony-vaio-vgc-ln51jgb-with-alc889.patch
alsa-hda-hdmi-eld-shouldn-t-be-valid-after-unplug.patch
gfs2-get-a-block-reservation-before-resizing-a-file.patch
+sunvdc-fix-off-by-one-in-generic_request.patch
+sparc64-add-missing-have_arch_transparent_hugepage.patch
+sparc64-fix-get_user_pages_fast-wrt.-thp.patch
+sparc64-fix-gfp_flags-setting-in-tsb_grow.patch
+sparc64-handle-hugepage-tsb-being-null.patch
+sparc64-fix-tsb_grow-in-atomic-context.patch
+sparc64-fix-huge-pmd-to-pte-translation-for-sun4u-in-tlb-miss-handler.patch
--- /dev/null
+From 08636a30f5393585bf9adae2ffc97755b90e47c2 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 13 Feb 2013 12:15:08 -0800
+Subject: sparc64: Add missing HAVE_ARCH_TRANSPARENT_HUGEPAGE.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit b9156ebb7beef015745f917f373abc137efc3400 ]
+
+This got missed in the cleanups done for the S390 THP
+support.
+
+CC: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -61,6 +61,7 @@ config SPARC64
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_SYSCALL_WRAPPERS
++ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_SYSCALL_TRACEPOINTS
--- /dev/null
+From e53e72f560c09679cc191fc2d1ab26d108f9b7de Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 13 Feb 2013 12:21:06 -0800
+Subject: sparc64: Fix get_user_pages_fast() wrt. THP.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 89a77915e0f56dc7b9f9082ba787895b6a83f809 ]
+
+Mostly mirrors the s390 logic, as unlike x86 we don't need the
+SetPageReferenced() bits.
+
+On sparc64 we also lack a user/privileged bit in the huge PMDs.
+
+In order to make this work for THP and non-THP builds, some header
+file adjustments were necessary. Namely, provide the PMD_HUGE_* bit
+defines and the pmd_large() inline unconditionally rather than
+protected by TRANSPARENT_HUGEPAGE.
+
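+In condensed form, the lockless sequence the new gup_huge_pmd()
+below follows is (a sketch of the hunk further down, not additional
+code):
+
+	pmd_t pmd = *pmdp;		/* snapshot the huge PMD */
+	struct page *head = pmd_page(pmd);
+
+	/* Record the small pages covered by [addr, end), then take
+	 * all of the references in one atomic step.
+	 */
+	if (!page_cache_add_speculative(head, refs))
+		return 0;		/* compound page was being freed */
+
+	/* Re-check the PMD: if it changed while we were unlocked, a
+	 * THP split or unmap raced with us, so drop the references
+	 * and let the slow path handle it.
+	 */
+	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+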
+Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/pgtable_64.h | 14 +++-----
+ arch/sparc/mm/gup.c | 59 ++++++++++++++++++++++++++++++++++--
+ 2 files changed, 63 insertions(+), 10 deletions(-)
+
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -71,7 +71,6 @@
+ #define PMD_PADDR _AC(0xfffffffe,UL)
+ #define PMD_PADDR_SHIFT _AC(11,UL)
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define PMD_ISHUGE _AC(0x00000001,UL)
+
+ /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge
+@@ -86,7 +85,6 @@
+ #define PMD_HUGE_ACCESSED _AC(0x00000080,UL)
+ #define PMD_HUGE_EXEC _AC(0x00000040,UL)
+ #define PMD_HUGE_SPLITTING _AC(0x00000020,UL)
+-#endif
+
+ /* PGDs point to PMD tables which are 8K aligned. */
+ #define PGD_PADDR _AC(0xfffffffc,UL)
+@@ -622,6 +620,12 @@ static inline unsigned long pte_special(
+ return pte_val(pte) & _PAGE_SPECIAL;
+ }
+
++static inline int pmd_large(pmd_t pmd)
++{
++ return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
++ (PMD_ISHUGE | PMD_HUGE_PRESENT);
++}
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static inline int pmd_young(pmd_t pmd)
+ {
+@@ -640,12 +644,6 @@ static inline unsigned long pmd_pfn(pmd_
+ return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
+ }
+
+-static inline int pmd_large(pmd_t pmd)
+-{
+- return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+- (PMD_ISHUGE | PMD_HUGE_PRESENT);
+-}
+-
+ static inline int pmd_trans_splitting(pmd_t pmd)
+ {
+ return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t
+ return 1;
+ }
+
++static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
++ unsigned long end, int write, struct page **pages,
++ int *nr)
++{
++ struct page *head, *page, *tail;
++ u32 mask;
++ int refs;
++
++ mask = PMD_HUGE_PRESENT;
++ if (write)
++ mask |= PMD_HUGE_WRITE;
++ if ((pmd_val(pmd) & mask) != mask)
++ return 0;
++
++ refs = 0;
++ head = pmd_page(pmd);
++ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ tail = page;
++ do {
++ VM_BUG_ON(compound_head(page) != head);
++ pages[*nr] = page;
++ (*nr)++;
++ page++;
++ refs++;
++ } while (addr += PAGE_SIZE, addr != end);
++
++ if (!page_cache_add_speculative(head, refs)) {
++ *nr -= refs;
++ return 0;
++ }
++
++ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
++ *nr -= refs;
++ while (refs--)
++ put_page(head);
++ return 0;
++ }
++
++	/* Any tail pages need their mapcount reference taken before we
++ * return.
++ */
++ while (refs--) {
++ if (PageTail(tail))
++ get_huge_page_tail(tail);
++ tail++;
++ }
++
++ return 1;
++}
++
+ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+ {
+@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsi
+ pmd_t pmd = *pmdp;
+
+ next = pmd_addr_end(addr, end);
+- if (pmd_none(pmd))
++ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
++ if (unlikely(pmd_large(pmd))) {
++ if (!gup_huge_pmd(pmdp, pmd, addr, next,
++ write, pages, nr))
++ return 0;
++ } else if (!gup_pte_range(pmd, addr, next, write,
++ pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
--- /dev/null
+From 156cad2c15016b185b3eb91bdd1ee19a689fd921 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 19 Feb 2013 12:56:18 -0800
+Subject: sparc64: Fix gfp_flags setting in tsb_grow().
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit a55ee1ff751f88252207160087d8197bb7538d4c ]
+
+We should "|= more_flags" rather than "= more_flags".
+
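+With plain assignment, the GFP_KERNEL base flags chosen just above
+are thrown away, so TSBs larger than two pages were effectively
+allocated with:
+
+	gfp_flags = __GFP_NOWARN | __GFP_NORETRY;	/* no GFP_KERNEL */
+
+rather than GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY.
+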
+Reported-by: David Rientjes <rientjes@google.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/tsb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsi
+ retry_tsb_alloc:
+ gfp_flags = GFP_KERNEL;
+ if (new_size > (PAGE_SIZE * 2))
+- gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
++ gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+ new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
+ gfp_flags, numa_node_id());
--- /dev/null
+From 586a59231597a362fd0c2eb8e62d2fdd42eb7342 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 20 Feb 2013 12:38:40 -0800
+Subject: sparc64: Fix huge PMD to PTE translation for sun4u in TLB miss handler.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 76968ad2eac6456270353de168b21f04f4b3d1d3 ]
+
+When we set the sun4u version of the PTE execute bit, it's:
+
+ or REG, _PAGE_EXEC_4U, REG
+
+_PAGE_EXEC_4U is 0x1000; unfortunately, the immediate field of the
+'or' instruction is a signed 13-bit value. So the above actually
+assembles into:
+
+ or REG, -4096, REG
+
+completely corrupting the final PTE value.
+
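+For reference, the arithmetic behind the corruption:
+
+	signed 13-bit immediate range:	-4096 ... 4095
+	_PAGE_EXEC_4U:			0x1000 (4096, does not fit)
+	13-bit encoding of 0x1000:	only the sign bit set, which
+					sign-extends to -4096
+
+so the 'or' sets bits 12-63 of the PTE instead of just bit 12.
+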
+Set it with a:
+
+ sethi %hi(_PAGE_EXEC_4U), TMP
+ or REG, TMP, REG
+
+sequence instead.
+
+This fixes "git gc" crashes on sun4u machines.
+
+Reported-by: Meelis Roos <mroos@linux.ee>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/tsb.h | 28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/arch/sparc/include/asm/tsb.h
++++ b/arch/sparc/include/asm/tsb.h
+@@ -157,17 +157,26 @@ extern struct tsb_phys_patch_entry __tsb
+ andn REG2, 0x7, REG2; \
+ add REG1, REG2, REG1;
+
+- /* This macro exists only to make the PMD translator below easier
+- * to read. It hides the ELF section switch for the sun4v code
+- * patching.
++	/* These macros exist only to make the PMD translator below
++	 * easier to read.  They hide the ELF section switch for the
++ * sun4v code patching.
+ */
+-#define OR_PTE_BIT(REG, NAME) \
++#define OR_PTE_BIT_1INSN(REG, NAME) \
+ 661: or REG, _PAGE_##NAME##_4U, REG; \
+ .section .sun4v_1insn_patch, "ax"; \
+ .word 661b; \
+ or REG, _PAGE_##NAME##_4V, REG; \
+ .previous;
+
++#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \
++661: sethi %hi(_PAGE_##NAME##_4U), TMP; \
++ or REG, TMP, REG; \
++ .section .sun4v_2insn_patch, "ax"; \
++ .word 661b; \
++ mov -1, TMP; \
++ or REG, _PAGE_##NAME##_4V, REG; \
++ .previous;
++
+ /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
+ #define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
+ 661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
+@@ -214,12 +223,13 @@ extern struct tsb_phys_patch_entry __tsb
+ andn REG1, PMD_HUGE_PROTBITS, REG2; \
+ sllx REG2, PMD_PADDR_SHIFT, REG2; \
+ /* REG2 now holds PFN << PAGE_SHIFT */ \
+- andcc REG1, PMD_HUGE_EXEC, %g0; \
+- bne,a,pt %xcc, 1f; \
+- OR_PTE_BIT(REG2, EXEC); \
+-1: andcc REG1, PMD_HUGE_WRITE, %g0; \
++ andcc REG1, PMD_HUGE_WRITE, %g0; \
+ bne,a,pt %xcc, 1f; \
+- OR_PTE_BIT(REG2, W); \
++ OR_PTE_BIT_1INSN(REG2, W); \
++1: andcc REG1, PMD_HUGE_EXEC, %g0; \
++ be,pt %xcc, 1f; \
++ nop; \
++ OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \
+ /* REG1 can now be clobbered, build final PTE */ \
+ 1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
+ ba,pt %xcc, PTE_LABEL; \
--- /dev/null
+From bbcc6e195b62cfbac72569f3bd2c2268cd6a60a4 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 19 Feb 2013 22:34:10 -0800
+Subject: sparc64: Fix tsb_grow() in atomic context.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 0fbebed682ff2788dee58e8d7f7dda46e33aa10b ]
+
+If our first THP installation for an MM is via the set_pmd_at() done
+during khugepaged's collapsing, we'll end up in tsb_grow() trying to do
+a GFP_KERNEL allocation with several locks held.
+
+Simply using GFP_ATOMIC in this situation is not the best option
+because we really can't have this fail, so we'd really like to keep
+this an order 0 GFP_KERNEL allocation if possible.
+
+Also, doing the TSB allocation from khugepaged is a really bad idea
+because we'll allocate it potentially from the wrong NUMA node in that
+context.
+
+So what we do is defer the hugepage TSB allocation until the first TLB
+miss we take on a hugepage. This is slightly tricky because we have
+to handle two unusual cases:
+
+1) Taking the first hugepage TLB miss in the window trap handler.
+ We'll call the winfix_trampoline when that is detected.
+
+2) An initial TSB allocation via TLB miss races with a hugetlb
+ fault on another cpu running the same MM. We handle this by
+ unconditionally loading the TSB we see into the current cpu
+ even if it's non-NULL at hugetlb_setup time.
+
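+A condensed sketch of the deferred path (simplified from the
+hugetlb_setup() hunk below; the new TLB-miss assembly builds a trap
+frame and calls this):
+
+	void hugetlb_setup(struct pt_regs *regs)
+	{
+		struct mm_struct *mm = current->mm;
+
+		if (in_atomic() || !mm) {
+			/* A kernel access covered by an exception
+			 * table entry can be redirected to its
+			 * fixup; anything else is a kernel bug.
+			 */
+			const struct exception_table_entry *entry;
+
+			entry = search_exception_tables(regs->tpc);
+			if (entry) {
+				regs->tpc = entry->fixup;
+				regs->tnpc = regs->tpc + 4;
+				return;
+			}
+			die_if_kernel("HugeTSB in atomic", regs);
+		}
+
+		if (mm->context.tsb_block[MM_TSB_HUGE].tsb == NULL)
+			tsb_grow(mm, MM_TSB_HUGE, 0);
+
+		/* Load unconditionally to handle the race in case 2)
+		 * above: another cpu may have installed the TSB first.
+		 */
+		tsb_context_switch(mm);
+		smp_tsb_sync(mm);
+	}
+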
+Reported-by: Meelis Roos <mroos@ut.ee>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/hugetlb.h | 1 -
+ arch/sparc/include/asm/page_64.h | 4 ++--
+ arch/sparc/kernel/tsb.S | 39 +++++++++++++++++++++++++++++++++++----
+ arch/sparc/mm/fault_64.c | 9 +++++++--
+ arch/sparc/mm/init_64.c | 24 +++++++++++++++++++-----
+ arch/sparc/mm/tlb.c | 11 +++++++++--
+ 6 files changed, 72 insertions(+), 16 deletions(-)
+
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -12,7 +12,6 @@ pte_t huge_ptep_get_and_clear(struct mm_
+
+ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+ {
+- hugetlb_setup(mm);
+ }
+
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+--- a/arch/sparc/include/asm/page_64.h
++++ b/arch/sparc/include/asm/page_64.h
+@@ -27,8 +27,8 @@
+ #ifndef __ASSEMBLY__
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-struct mm_struct;
+-extern void hugetlb_setup(struct mm_struct *mm);
++struct pt_regs;
++extern void hugetlb_setup(struct pt_regs *regs);
+ #endif
+
+ #define WANT_PAGE_VIRTUAL
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath:
+ nop
+
+ /* It is a huge page, use huge page TSB entry address we
+- * calculated above.
++ * calculated above. If the huge page TSB has not been
++ * allocated, setup a trap stack and call hugetlb_setup()
++ * to do so, then return from the trap to replay the TLB
++ * miss.
++ *
++ * This is necessary to handle the case of transparent huge
++ * pages where we don't really have a non-atomic context
++ * in which to allocate the hugepage TSB hash table. When
++ * the 'mm' faults in the hugepage for the first time, we
++ * thus handle it here. This also makes sure that we can
++ * allocate the TSB hash table on the correct NUMA node.
+ */
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+- ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+- cmp %g2, -1
+- movne %xcc, %g2, %g1
++ ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
++ cmp %g1, -1
++ bne,pt %xcc, 60f
++ nop
++
++661: rdpr %pstate, %g5
++ wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
++ .section .sun4v_2insn_patch, "ax"
++ .word 661b
++ SET_GL(1)
++ nop
++ .previous
++
++ rdpr %tl, %g3
++ cmp %g3, 1
++ bne,pn %xcc, winfix_trampoline
++ nop
++ ba,pt %xcc, etrap
++ rd %pc, %g7
++ call hugetlb_setup
++ add %sp, PTREGS_OFF, %o0
++ ba,pt %xcc, rtrap
++ nop
++
+ 60:
+ #endif
+
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -472,8 +472,13 @@ good_area:
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ mm_rss = mm->context.huge_pte_count;
+ if (unlikely(mm_rss >
+- mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+- tsb_grow(mm, MM_TSB_HUGE, mm_rss);
++ mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
++ if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
++ tsb_grow(mm, MM_TSB_HUGE, mm_rss);
++ else
++ hugetlb_setup(regs);
++
++ }
+ #endif
+ return;
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2718,14 +2718,28 @@ static void context_reload(void *__data)
+ load_secondary_context(mm);
+ }
+
+-void hugetlb_setup(struct mm_struct *mm)
++void hugetlb_setup(struct pt_regs *regs)
+ {
+- struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
++ struct mm_struct *mm = current->mm;
++ struct tsb_config *tp;
+
+- if (likely(tp->tsb != NULL))
+- return;
++ if (in_atomic() || !mm) {
++ const struct exception_table_entry *entry;
++
++ entry = search_exception_tables(regs->tpc);
++ if (entry) {
++ regs->tpc = entry->fixup;
++ regs->tnpc = regs->tpc + 4;
++ return;
++ }
++ pr_alert("Unexpected HugeTLB setup in atomic context.\n");
++ die_if_kernel("HugeTSB in atomic", regs);
++ }
++
++ tp = &mm->context.tsb_block[MM_TSB_HUGE];
++ if (likely(tp->tsb == NULL))
++ tsb_grow(mm, MM_TSB_HUGE, 0);
+
+- tsb_grow(mm, MM_TSB_HUGE, 0);
+ tsb_context_switch(mm);
+ smp_tsb_sync(mm);
+
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, un
+ mm->context.huge_pte_count++;
+ else
+ mm->context.huge_pte_count--;
+- if (mm->context.huge_pte_count == 1)
+- hugetlb_setup(mm);
++
++ /* Do not try to allocate the TSB hash table if we
++ * don't have one already. We have various locks held
++ * and thus we'll end up doing a GFP_KERNEL allocation
++ * in an atomic context.
++ *
++ * Instead, we let the first TLB miss on a hugepage
++ * take care of this.
++ */
+ }
+
+ if (!pmd_none(orig)) {
--- /dev/null
+From 890b015b620ca3c2d14c798ce5303cf830d31afd Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 19 Feb 2013 13:20:08 -0800
+Subject: sparc64: Handle hugepage TSB being NULL.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit bcd896bae0166b4443503482a26ecf84d9ba60ab ]
+
+Accommodate the possibility that the TSB might be NULL at
+the point that update_mmu_cache() is invoked. This is
+necessary because we will sometimes need to defer the TSB
+allocation to the first fault that happens in the 'mm'.
+
+Separate out the hugepage PTE test into a separate function
+so that the logic is clearer.
+
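+The resulting dispatch in update_mmu_cache(), condensed from the
+hunks below:
+
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
+
+with __update_mmu_tsb_insert() now bailing out early when the chosen
+TSB is still NULL.
+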
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/init_64.c | 38 ++++++++++++++++++++++----------------
+ 1 file changed, 22 insertions(+), 16 deletions(-)
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(stru
+ struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
+ unsigned long tag;
+
++ if (unlikely(!tsb))
++ return;
++
+ tsb += ((address >> tsb_hash_shift) &
+ (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
+ tag = (address >> 22UL);
+ tsb_insert(tsb, tag, tte);
+ }
+
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++static inline bool is_hugetlb_pte(pte_t pte)
++{
++ if ((tlb_type == hypervisor &&
++ (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
++ (tlb_type != hypervisor &&
++ (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
++ return true;
++ return false;
++}
++#endif
++
+ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+ {
+- unsigned long tsb_index, tsb_hash_shift, flags;
+ struct mm_struct *mm;
++ unsigned long flags;
+ pte_t pte = *ptep;
+
+ if (tlb_type != hypervisor) {
+@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_str
+
+ mm = vma->vm_mm;
+
+- tsb_index = MM_TSB_BASE;
+- tsb_hash_shift = PAGE_SHIFT;
+-
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+- if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+- if ((tlb_type == hypervisor &&
+- (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+- (tlb_type != hypervisor &&
+- (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+- tsb_index = MM_TSB_HUGE;
+- tsb_hash_shift = HPAGE_SHIFT;
+- }
+- }
++ if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
++ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
++ address, pte_val(pte));
++ else
+ #endif
+-
+- __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
+- address, pte_val(pte));
++ __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
++ address, pte_val(pte));
+
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+ }
--- /dev/null
+From fb0e27172e8e7ba84486ec94cc574412df0bf2f8 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Thu, 14 Feb 2013 11:49:01 -0800
+Subject: sunvdc: Fix off-by-one in generic_request().
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit f4d9605434c0fd4cc8639bf25cfc043418c52362 ]
+
+The 'operations' bitmap corresponds one-for-one with the operation
+codes; no adjustment is necessary.
+
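+Concretely: with the old "1 << ((u64)op - 1)" test, an operation
+with code N was checked against bit N-1 of the server's advertised
+bitmap, so every capability probe looked one bit lower than where
+the operation is actually advertised.
+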
+Reported-by: Mark Kettenis <mark.kettenis@xs4all.nl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/sunvdc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_po
+ int op_len, err;
+ void *req_buf;
+
+- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++ if (!(((u64)1 << (u64)op) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {