git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
Merge branch 'for-next/c1-pro-erratum-4193714' into for-next/core
author Catalin Marinas <catalin.marinas@arm.com>
Mon, 20 Apr 2026 12:12:35 +0000 (13:12 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Mon, 20 Apr 2026 12:12:35 +0000 (13:12 +0100)
* for-next/c1-pro-erratum-4193714:
  : Work around C1-Pro erratum 4193714 (CVE-2026-0995)
  arm64: errata: Work around early CME DVMSync acknowledgement
  arm64: cputype: Add C1-Pro definitions
  arm64: tlb: Pass the corresponding mm to __tlbi_sync_s1ish()
  arm64: tlb: Introduce __tlbi_sync_s1ish_{kernel,batch}() for TLB maintenance

1  2 
Documentation/arch/arm64/silicon-errata.rst
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/process.c
arch/arm64/kernel/sys_compat.c
arch/arm64/tools/cpucaps

Simple merge
Simple merge
index 47fa4d39a461914de4d2688bf0030541e7f8373b,4aae42b83049017bdf49a1ceb075646d35d5c2de..c0bf5b39804119e64ca2a8c194859f72ac9e83e0
@@@ -526,41 -579,17 +605,41 @@@ static __always_inline void __do_flush_
                return;
        }
  
 -      dsb(ishst);
 +      if (!(flags & TLBF_NOBROADCAST))
 +              dsb(ishst);
 +      else
 +              dsb(nshst);
 +
        asid = ASID(mm);
  
 -      if (last_level)
 -              __flush_tlb_range_op(vale1is, start, pages, stride, asid,
 -                                   tlb_level, true, lpa2_is_enabled());
 -      else
 -              __flush_tlb_range_op(vae1is, start, pages, stride, asid,
 -                                   tlb_level, true, lpa2_is_enabled());
 +      switch (flags & (TLBF_NOWALKCACHE | TLBF_NOBROADCAST)) {
 +      case TLBF_NONE:
 +              __flush_s1_tlb_range_op(vae1is, start, pages, stride,
 +                                      asid, tlb_level);
 +              break;
 +      case TLBF_NOWALKCACHE:
 +              __flush_s1_tlb_range_op(vale1is, start, pages, stride,
 +                                      asid, tlb_level);
 +              break;
 +      case TLBF_NOBROADCAST:
 +              /* Combination unused */
 +              BUG();
 +              break;
 +      case TLBF_NOWALKCACHE | TLBF_NOBROADCAST:
 +              __flush_s1_tlb_range_op(vale1, start, pages, stride,
 +                                      asid, tlb_level);
 +              break;
 +      }
 +
 +      if (!(flags & TLBF_NONOTIFY))
 +              mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
  
 -      mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 +      if (!(flags & TLBF_NOSYNC)) {
 +              if (!(flags & TLBF_NOBROADCAST))
-                       __tlbi_sync_s1ish();
++                      __tlbi_sync_s1ish(mm);
 +              else
 +                      dsb(nsh);
 +      }
  }
  
  static inline void __flush_tlb_range(struct vm_area_struct *vma,
@@@ -616,9 -645,9 +695,9 @@@ static inline void flush_tlb_kernel_ran
        }
  
        dsb(ishst);
 -      __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
 -                           TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
 +      __flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
 +                              TLBI_TTL_UNKNOWN);
-       __tlbi_sync_s1ish();
+       __tlbi_sync_s1ish_kernel();
        isb();
  }
  
@@@ -639,10 -668,8 +718,11 @@@ static inline void __flush_tlb_kernel_p
  static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                struct mm_struct *mm, unsigned long start, unsigned long end)
  {
 -      __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
 +      struct vm_area_struct vma = { .vm_mm = mm, .vm_flags = 0 };
 +
 +      __flush_tlb_range(&vma, start, end, PAGE_SIZE, 3,
 +                        TLBF_NOWALKCACHE | TLBF_NOSYNC);
+       sme_dvmsync_add_pending(batch, mm);
  }
  
  static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
Simple merge
Simple merge
index 7e9860143add85f1d465e60830462052f066e601,03fde2677d5be74c58a89202b27cfc3c55f286d2..0451f96c2c3ff17050eb7ae1bafcc074ea88bf17
@@@ -36,8 -36,8 +36,8 @@@ __do_compat_cache_op(unsigned long star
                         * The workaround requires an inner-shareable tlbi.
                         * We pick the reserved-ASID to minimise the impact.
                         */
 -                      __tlbi(aside1is, __TLBI_VADDR(0, 0));
 +                      __tlbi(aside1is, 0UL);
-                       __tlbi_sync_s1ish();
+                       __tlbi_sync_s1ish(current->mm);
                }
  
                ret = caches_clean_inval_user_pou(start, start + chunk);
Simple merge