--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 13 Dec 2017 17:07:20 +0000
+Subject: arm64: don't open code page table entry creation
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 193383043f14a398393dc18bae8380f7fe665ec3 upstream.
+
+Instead of open coding the generation of page table entries, use the
+macros/functions that exist for this - pfn_p*d and p*d_populate. Most
+code in the kernel already uses these macros, this patch tries to fix
+up the few places that don't. This is useful for the next patch in this
+series, which needs to change the page table entry logic, and it's
+better to have that logic in one place.
+
+The KVM extended ID map is special, since we're creating a level above
+CONFIG_PGTABLE_LEVELS and the required function isn't available. Leave
+it as is and add a comment to explain it. (The normal kernel ID map code
+doesn't need this change because its page tables are created in assembly
+(__create_page_tables)).
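+
+As a rough sketch (not part of this patch's diff), the transformation
+has this shape, replacing a hand-built entry value with the generic
+helper (the vmemmap case from the hunks below):
+
+	/* before: OR the physical address and attributes by hand */
+	set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
+
+	/* after: build the same entry through the helper */
+	pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));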
+
+Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Tested-by: Bob Picco <bob.picco@oracle.com>
+Reviewed-by: Bob Picco <bob.picco@oracle.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[bwh: Backported to 4.9: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_mmu.h | 5 +++++
+ arch/arm64/include/asm/pgtable.h | 1 +
+ arch/arm64/kernel/hibernate.c | 3 +--
+ arch/arm64/mm/mmu.c | 14 +++++++++-----
+ 4 files changed, 16 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -300,6 +300,11 @@ static inline bool __kvm_cpu_uses_extend
+ return __cpu_uses_extended_idmap();
+ }
+
++/*
++ * Can't use pgd_populate here, because the extended idmap adds an extra level
++ * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
++ * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
++ */
+ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+ pgd_t *hyp_pgd,
+ pgd_t *merged_hyp_pgd,
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -353,6 +353,7 @@ static inline int pmd_protnone(pmd_t pmd
+
+ #define pud_write(pud) pte_write(pud_pte(pud))
+ #define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
++#define pfn_pud(pfn,prot) (__pud(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+ #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -247,8 +247,7 @@ static int create_safe_exec_page(void *s
+ }
+
+ pte = pte_offset_kernel(pmd, dst_addr);
+- set_pte(pte, __pte(virt_to_phys((void *)dst) |
+- pgprot_val(PAGE_KERNEL_EXEC)));
++ set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -495,8 +495,8 @@ static void __init map_kernel(pgd_t *pgd
+ * entry instead.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
++ pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
++ lm_alias(bm_pmd));
+ pud_clear_fixmap();
+ } else {
+ BUG();
+@@ -611,7 +611,7 @@ int __meminit vmemmap_populate(unsigned
+ if (!p)
+ return -ENOMEM;
+
+- set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
++ pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
+ } else
+ vmemmap_verify((pte_t *)pmd, node, addr, next);
+ } while (addr = next, addr != end);
+@@ -797,15 +797,19 @@ int __init arch_ioremap_pmd_supported(vo
+
+ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+ {
++ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
++ pgprot_val(mk_sect_prot(prot)));
+ BUG_ON(phys & ~PUD_MASK);
+- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
++ set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
+ return 1;
+ }
+
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+ {
++ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
++ pgprot_val(mk_sect_prot(prot)));
+ BUG_ON(phys & ~PMD_MASK);
+- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
++ set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+ return 1;
+ }
+
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 21 Feb 2018 12:59:27 +0000
+Subject: arm64: Enforce BBM for huge IO/VMAP mappings
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 15122ee2c515a253b0c66a3e618bc7ebe35105eb upstream.
+
+ioremap_page_range doesn't honour break-before-make and attempts to put
+down huge mappings (using p*d_set_huge) over the top of pre-existing
+table entries. This leads to us leaking page table memory and also gives
+rise to TLB conflicts and spurious aborts, which have been seen in
+practice on Cortex-A75.
+
+Until this has been resolved, refuse to put down block mappings when the
+existing entry is found to be present.
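+
+For reference, replacing a live entry with break-before-make would need
+roughly the sequence below, which ioremap_page_range does not perform
+(a sketch only; addr stands for the virtual address being remapped):
+
+	pmd_clear(pmdp);				/* 1. break */
+	flush_tlb_kernel_range(addr, addr + PMD_SIZE);	/* 2. flush */
+	set_pmd(pmdp, new_pmd);				/* 3. make  */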
+
+Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings")
+Reported-by: Hanjun Guo <hanjun.guo@linaro.org>
+Reported-by: Lei Li <lious.lilei@hisilicon.com>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -799,6 +799,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_
+ {
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
++
++ /* ioremap_page_range doesn't honour BBM */
++ if (pud_present(READ_ONCE(*pudp)))
++ return 0;
++
+ BUG_ON(phys & ~PUD_MASK);
+ set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
+ return 1;
+@@ -808,6 +813,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_
+ {
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
++
++ /* ioremap_page_range doesn't honour BBM */
++ if (pmd_present(READ_ONCE(*pmdp)))
++ return 0;
++
+ BUG_ON(phys & ~PMD_MASK);
+ set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+ return 1;
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Laura Abbott <labbott@redhat.com>
+Date: Wed, 23 May 2018 11:43:46 -0700
+Subject: arm64: Make sure permission updates happen for pmd/pud
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 82034c23fcbc2389c73d97737f61fa2dd6526413 upstream.
+
+Commit 15122ee2c515 ("arm64: Enforce BBM for huge IO/VMAP mappings")
+disallowed block mappings for ioremap since that code does not honor
+break-before-make. The same APIs are also used for permission updates,
+though, and the extra checks prevent the permission updates from
+happening, even though this should be permitted. This results in
+read-only permissions not being fully applied. Visibly, this can
+occasionally be seen as a failure in the built-in rodata test when the
+test data ends up in a section, or as an odd RW gap in the page table
+dump. Fix this by using
+pgattr_change_is_safe instead of p*d_present for determining if the
+change is permitted.
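+
+pgattr_change_is_safe(), introduced earlier in this series, accepts a
+change only when an entry is being created or torn down, or when
+nothing but the permission bits differ; in outline:
+
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+	safe = old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;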
+
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Tested-by: Peter Robinson <pbrobinson@gmail.com>
+Reported-by: Peter Robinson <pbrobinson@gmail.com>
+Fixes: 15122ee2c515 ("arm64: Enforce BBM for huge IO/VMAP mappings")
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -799,13 +799,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_
+ {
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
++ pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
+
+- /* ioremap_page_range doesn't honour BBM */
+- if (pud_present(READ_ONCE(*pudp)))
++ /* Only allow permission changes for now */
++ if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
++ pud_val(new_pud)))
+ return 0;
+
+ BUG_ON(phys & ~PUD_MASK);
+- set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
++ set_pud(pudp, new_pud);
+ return 1;
+ }
+
+@@ -813,13 +815,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_
+ {
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
++ pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
+
+- /* ioremap_page_range doesn't honour BBM */
+- if (pmd_present(READ_ONCE(*pmdp)))
++ /* Only allow permission changes for now */
++ if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
++ pmd_val(new_pmd)))
+ return 0;
+
+ BUG_ON(phys & ~PMD_MASK);
+- set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
++ set_pmd(pmdp, new_pmd);
+ return 1;
+ }
+
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Fri, 21 Oct 2016 12:22:56 +0100
+Subject: arm64: mm: BUG on unsupported manipulations of live kernel mappings
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit e98216b52176ba2bfa4bdb02f178f4d08832d465 upstream.
+
+Now that we take care not to manipulate the live kernel page tables in a
+way that may lead to TLB conflicts, the case where a table mapping is
+replaced by a block mapping can no longer occur. So remove the handling
+of this at the PUD and PMD levels, and instead, BUG() on any occurrence
+of live kernel page table manipulations that modify anything other than
+the permission bits.
+
+Since mark_rodata_ro() is the only caller where the kernel mappings that
+are being manipulated are actually live, drop the various conditional
+flush_tlb_all() invocations, and add a single flush_tlb_all() call to
+mark_rodata_ro() instead.
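+
+Each level now snapshots the old entry and checks the new one against
+it; for the PMD block-mapping case the pattern is (taken from the hunks
+below):
+
+	pmd_t old_pmd = *pmd;
+
+	pmd_set_huge(pmd, phys, prot);
+	BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), pmd_val(*pmd)));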
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 70 +++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 43 insertions(+), 27 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -28,8 +28,6 @@
+ #include <linux/memblock.h>
+ #include <linux/fs.h>
+ #include <linux/io.h>
+-#include <linux/slab.h>
+-#include <linux/stop_machine.h>
+
+ #include <asm/barrier.h>
+ #include <asm/cputype.h>
+@@ -95,6 +93,17 @@ static phys_addr_t __init early_pgtable_
+ return phys;
+ }
+
++static bool pgattr_change_is_safe(u64 old, u64 new)
++{
++ /*
++ * The following mapping attributes may be updated in live
++ * kernel mappings without the need for break-before-make.
++ */
++ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
++
++ return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
++}
++
+ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn,
+ pgprot_t prot,
+@@ -115,8 +124,17 @@ static void alloc_init_pte(pmd_t *pmd, u
+
+ pte = pte_set_fixmap_offset(pmd, addr);
+ do {
++ pte_t old_pte = *pte;
++
+ set_pte(pte, pfn_pte(pfn, prot));
+ pfn++;
++
++ /*
++ * After the PTE entry has been populated once, we
++ * only allow updates to the permission attributes.
++ */
++ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
++
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ pte_clear_fixmap();
+@@ -146,27 +164,27 @@ static void alloc_init_pmd(pud_t *pud, u
+
+ pmd = pmd_set_fixmap_offset(pud, addr);
+ do {
++ pmd_t old_pmd = *pmd;
++
+ next = pmd_addr_end(addr, end);
++
+ /* try section mapping first */
+ if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ allow_block_mappings) {
+- pmd_t old_pmd =*pmd;
+ pmd_set_huge(pmd, phys, prot);
++
+ /*
+- * Check for previous table entries created during
+- * boot (__create_page_tables) and flush them.
++ * After the PMD entry has been populated once, we
++ * only allow updates to the permission attributes.
+ */
+- if (!pmd_none(old_pmd)) {
+- flush_tlb_all();
+- if (pmd_table(old_pmd)) {
+- phys_addr_t table = pmd_page_paddr(old_pmd);
+- if (!WARN_ON_ONCE(slab_is_available()))
+- memblock_free(table, PAGE_SIZE);
+- }
+- }
++ BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
++ pmd_val(*pmd)));
+ } else {
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+ prot, pgtable_alloc);
++
++ BUG_ON(pmd_val(old_pmd) != 0 &&
++ pmd_val(old_pmd) != pmd_val(*pmd));
+ }
+ phys += next - addr;
+ } while (pmd++, addr = next, addr != end);
+@@ -204,33 +222,28 @@ static void alloc_init_pud(pgd_t *pgd, u
+
+ pud = pud_set_fixmap_offset(pgd, addr);
+ do {
++ pud_t old_pud = *pud;
++
+ next = pud_addr_end(addr, end);
+
+ /*
+ * For 4K granule only, attempt to put down a 1GB block
+ */
+ if (use_1G_block(addr, next, phys) && allow_block_mappings) {
+- pud_t old_pud = *pud;
+ pud_set_huge(pud, phys, prot);
+
+ /*
+- * If we have an old value for a pud, it will
+- * be pointing to a pmd table that we no longer
+- * need (from swapper_pg_dir).
+- *
+- * Look up the old pmd table and free it.
++ * After the PUD entry has been populated once, we
++ * only allow updates to the permission attributes.
+ */
+- if (!pud_none(old_pud)) {
+- flush_tlb_all();
+- if (pud_table(old_pud)) {
+- phys_addr_t table = pud_page_paddr(old_pud);
+- if (!WARN_ON_ONCE(slab_is_available()))
+- memblock_free(table, PAGE_SIZE);
+- }
+- }
++ BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
++ pud_val(*pud)));
+ } else {
+ alloc_init_pmd(pud, addr, next, phys, prot,
+ pgtable_alloc, allow_block_mappings);
++
++ BUG_ON(pud_val(old_pud) != 0 &&
++ pud_val(old_pud) != pud_val(*pud));
+ }
+ phys += next - addr;
+ } while (pud++, addr = next, addr != end);
+@@ -396,6 +409,9 @@ void mark_rodata_ro(void)
+ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
+ create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+ section_size, PAGE_KERNEL_RO);
++
++ /* flush the TLBs after updating live kernel mappings */
++ flush_tlb_all();
+ }
+
+ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Date: Tue, 14 Jan 2020 15:44:11 +0000
+Subject: arm64: mm: Change page table pointer name in p[md]_set_huge()
+
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+
+This is preparation for the following backported fixes. It was done
+upstream as part of commit 20a004e7b017 "arm64: mm: Use
+READ_ONCE/WRITE_ONCE when accessing page tables", the rest of which
+does not seem suitable for stable.
+
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -795,21 +795,21 @@ int __init arch_ioremap_pmd_supported(vo
+ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+ }
+
+-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
++int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
+ {
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
+ BUG_ON(phys & ~PUD_MASK);
+- set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
++ set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
+ return 1;
+ }
+
+-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
++int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
+ {
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
+ BUG_ON(phys & ~PMD_MASK);
+- set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
++ set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+ return 1;
+ }
+
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Dedy Lansky <dlansky@codeaurora.org>
+Date: Sun, 29 Jul 2018 14:59:16 +0300
+Subject: cfg80211/mac80211: make ieee80211_send_layer2_update a public function
+
+From: Dedy Lansky <dlansky@codeaurora.org>
+
+commit 30ca1aa536211f5ac3de0173513a7a99a98a97f3 upstream.
+
+Make ieee80211_send_layer2_update() a common function so other drivers
+can re-use it.
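+
+A driver that manages its own station entries can then invoke it once
+an entry is installed, e.g. (hypothetical driver snippet; dev and
+sta_addr are placeholders):
+
+	cfg80211_send_layer2_update(dev, sta_addr);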
+
+Signed-off-by: Dedy Lansky <dlansky@codeaurora.org>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[bwh: Backported to 4.9 as dependency of commit 3e493173b784
+ "mac80211: Do not send Layer 2 Update frame before authorization":
+ - Retain type-casting of skb_put() return value
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/cfg80211.h | 11 +++++++++++
+ net/mac80211/cfg.c | 48 ++----------------------------------------------
+ net/wireless/util.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 58 insertions(+), 46 deletions(-)
+
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -4182,6 +4182,17 @@ const u8 *cfg80211_find_vendor_ie(unsign
+ const u8 *ies, int len);
+
+ /**
++ * cfg80211_send_layer2_update - send layer 2 update frame
++ *
++ * @dev: network device
++ * @addr: STA MAC address
++ *
++ * Wireless drivers can use this function to update forwarding tables in bridge
++ * devices upon STA association.
++ */
++void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
++
++/**
+ * DOC: Regulatory enforcement infrastructure
+ *
+ * TODO
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1048,50 +1048,6 @@ static int ieee80211_stop_ap(struct wiph
+ return 0;
+ }
+
+-/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
+-struct iapp_layer2_update {
+- u8 da[ETH_ALEN]; /* broadcast */
+- u8 sa[ETH_ALEN]; /* STA addr */
+- __be16 len; /* 6 */
+- u8 dsap; /* 0 */
+- u8 ssap; /* 0 */
+- u8 control;
+- u8 xid_info[3];
+-} __packed;
+-
+-static void ieee80211_send_layer2_update(struct sta_info *sta)
+-{
+- struct iapp_layer2_update *msg;
+- struct sk_buff *skb;
+-
+- /* Send Level 2 Update Frame to update forwarding tables in layer 2
+- * bridge devices */
+-
+- skb = dev_alloc_skb(sizeof(*msg));
+- if (!skb)
+- return;
+- msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg));
+-
+- /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
+- * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
+-
+- eth_broadcast_addr(msg->da);
+- memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
+- msg->len = htons(6);
+- msg->dsap = 0;
+- msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
+- msg->control = 0xaf; /* XID response lsb.1111F101.
+- * F=0 (no poll command; unsolicited frame) */
+- msg->xid_info[0] = 0x81; /* XID format identifier */
+- msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
+- msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
+-
+- skb->dev = sta->sdata->dev;
+- skb->protocol = eth_type_trans(skb, sta->sdata->dev);
+- memset(skb->cb, 0, sizeof(skb->cb));
+- netif_rx_ni(skb);
+-}
+-
+ static int sta_apply_auth_flags(struct ieee80211_local *local,
+ struct sta_info *sta,
+ u32 mask, u32 set)
+@@ -1455,7 +1411,7 @@ static int ieee80211_add_station(struct
+ }
+
+ if (layer2_update)
+- ieee80211_send_layer2_update(sta);
++ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
+
+ rcu_read_unlock();
+
+@@ -1565,7 +1521,7 @@ static int ieee80211_change_station(stru
+ atomic_inc(&sta->sdata->bss->num_mcast_sta);
+ }
+
+- ieee80211_send_layer2_update(sta);
++ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
+ }
+
+ err = sta_apply_parameters(local, sta, params);
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1794,3 +1794,48 @@ EXPORT_SYMBOL(rfc1042_header);
+ const unsigned char bridge_tunnel_header[] __aligned(2) =
+ { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+ EXPORT_SYMBOL(bridge_tunnel_header);
++
++/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
++struct iapp_layer2_update {
++ u8 da[ETH_ALEN]; /* broadcast */
++ u8 sa[ETH_ALEN]; /* STA addr */
++ __be16 len; /* 6 */
++ u8 dsap; /* 0 */
++ u8 ssap; /* 0 */
++ u8 control;
++ u8 xid_info[3];
++} __packed;
++
++void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
++{
++ struct iapp_layer2_update *msg;
++ struct sk_buff *skb;
++
++ /* Send Level 2 Update Frame to update forwarding tables in layer 2
++ * bridge devices */
++
++ skb = dev_alloc_skb(sizeof(*msg));
++ if (!skb)
++ return;
++ msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg));
++
++ /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
++ * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
++
++ eth_broadcast_addr(msg->da);
++ ether_addr_copy(msg->sa, addr);
++ msg->len = htons(6);
++ msg->dsap = 0;
++ msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
++ msg->control = 0xaf; /* XID response lsb.1111F101.
++ * F=0 (no poll command; unsolicited frame) */
++ msg->xid_info[0] = 0x81; /* XID format identifier */
++ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
++ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
++
++ skb->dev = dev;
++ skb->protocol = eth_type_trans(skb, dev);
++ memset(skb->cb, 0, sizeof(skb->cb));
++ netif_rx_ni(skb);
++}
++EXPORT_SYMBOL(cfg80211_send_layer2_update);
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Mon, 1 Apr 2019 09:35:54 +0800
+Subject: dccp: Fix memleak in __feat_register_sp
+
+From: YueHaibing <yuehaibing@huawei.com>
+
+commit 1d3ff0950e2b40dc861b1739029649d03f591820 upstream.
+
+If dccp_feat_push_change fails, we forget to free the memory
+allocated by kmemdup in dccp_feat_clone_sp_val.
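+
+In outline, fval.sp.vec is allocated by dccp_feat_clone_sp_val() and is
+only handed off to the feature-negotiation list on a successful push,
+so the failure path must free it (this is exactly the hunk below):
+
+	if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
+		return -ENOMEM;		/* clone failed, nothing to free */
+	if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+		kfree(fval.sp.vec);	/* push failed, we still own it  */
+		return -ENOMEM;
+	}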
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: e8ef967a54f4 ("dccp: Registration routines for changing feature values")
+Reviewed-by: Mukesh Ojha <mojha@codeaurora.org>
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/feat.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -738,7 +738,12 @@ static int __feat_register_sp(struct lis
+ if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
+ return -ENOMEM;
+
+- return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
++ if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
++ kfree(fval.sp.vec);
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+ /**
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 7 Nov 2019 21:43:41 -0500
+Subject: ext4: add more paranoia checking in ext4_expand_extra_isize handling
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 4ea99936a1630f51fc3a2d61a58ec4a1c4b7d55a upstream.
+
+It's possible to specify a non-zero s_want_extra_isize via a debugging
+option, and this can cause bad things(tm) to happen when using a file
+system with an inode size of 128 bytes.
+
+Add better checking when the file system is mounted, as well as when
+we are actually trying to do the inode expansion.
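+
+As a worked example of the bounds involved: with 256-byte inodes the
+in-inode extra space is at most 256 - EXT4_GOOD_OLD_INODE_SIZE (128) =
+128 bytes, so a valid new_extra_isize must lie in the range [4, 128]
+and be no smaller than the current i_extra_isize, which itself must be
+4-byte aligned; with 128-byte inodes there is no extra space at all.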
+
+Link: https://lore.kernel.org/r/20191110121510.GH23325@mit.edu
+Reported-by: syzbot+f8d6f8386ceacdbfff57@syzkaller.appspotmail.com
+Reported-by: syzbot+33d7ea72e47de3bdf4e1@syzkaller.appspotmail.com
+Reported-by: syzbot+44b6763edfc17144296f@syzkaller.appspotmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[bwh: Backported to 4.9: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 15 +++++++++++++++
+ fs/ext4/super.c | 20 ++++++++++++--------
+ 2 files changed, 27 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5472,10 +5472,25 @@ static int ext4_expand_extra_isize(struc
+ {
+ struct ext4_inode *raw_inode;
+ struct ext4_xattr_ibody_header *header;
++ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
++ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+ return 0;
+
++ /* this was checked at iget time, but double check for good measure */
++ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
++ (ei->i_extra_isize & 3)) {
++ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
++ ei->i_extra_isize,
++ EXT4_INODE_SIZE(inode->i_sb));
++ return -EFSCORRUPTED;
++ }
++ if ((new_extra_isize < ei->i_extra_isize) ||
++ (new_extra_isize < 4) ||
++ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
++ return -EINVAL; /* Should never happen */
++
+ raw_inode = ext4_raw_inode(&iloc);
+
+ header = IHDR(inode, raw_inode);
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3346,11 +3346,15 @@ static void ext4_clamp_want_extra_isize(
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
++ unsigned def_extra_isize = sizeof(struct ext4_inode) -
++ EXT4_GOOD_OLD_INODE_SIZE;
+
+- /* determine the minimum size of new large inodes, if present */
+- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+- EXT4_GOOD_OLD_INODE_SIZE;
++ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
++ sbi->s_want_extra_isize = 0;
++ return;
++ }
++ if (sbi->s_want_extra_isize < 4) {
++ sbi->s_want_extra_isize = def_extra_isize;
+ if (ext4_has_feature_extra_isize(sb)) {
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_want_extra_isize))
+@@ -3363,10 +3367,10 @@ static void ext4_clamp_want_extra_isize(
+ }
+ }
+ /* Check if enough inode space is available */
+- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+- sbi->s_inode_size) {
+- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+- EXT4_GOOD_OLD_INODE_SIZE;
++ if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
++ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
++ sbi->s_inode_size)) {
++ sbi->s_want_extra_isize = def_extra_isize;
+ ext4_msg(sb, KERN_INFO,
+ "required extra inode space not available");
+ }
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Barret Rhoden <brho@google.com>
+Date: Thu, 25 Apr 2019 11:55:50 -0400
+Subject: ext4: fix use-after-free race with debug_want_extra_isize
+
+From: Barret Rhoden <brho@google.com>
+
+commit 7bc04c5c2cc467c5b40f2b03ba08da174a0d5fa7 upstream.
+
+When remounting with debug_want_extra_isize, we were not performing the
+same checks that we do during a normal mount. That allowed us to set a
+value for s_want_extra_isize that reached outside the s_inode_size.
+
+Fixes: e2b911c53584 ("ext4: clean up feature test macros with predicate functions")
+Reported-by: syzbot+f584efa0ac7213c226b7@syzkaller.appspotmail.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Barret Rhoden <brho@google.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[bwh: Backported to 4.9: The debug_want_extra_isize mount option is not
+ supported]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/super.c | 56 +++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 23 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3342,6 +3342,36 @@ int ext4_calculate_overhead(struct super
+ return 0;
+ }
+
++static void ext4_clamp_want_extra_isize(struct super_block *sb)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_super_block *es = sbi->s_es;
++
++ /* determine the minimum size of new large inodes, if present */
++ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
++ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++ EXT4_GOOD_OLD_INODE_SIZE;
++ if (ext4_has_feature_extra_isize(sb)) {
++ if (sbi->s_want_extra_isize <
++ le16_to_cpu(es->s_want_extra_isize))
++ sbi->s_want_extra_isize =
++ le16_to_cpu(es->s_want_extra_isize);
++ if (sbi->s_want_extra_isize <
++ le16_to_cpu(es->s_min_extra_isize))
++ sbi->s_want_extra_isize =
++ le16_to_cpu(es->s_min_extra_isize);
++ }
++ }
++ /* Check if enough inode space is available */
++ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
++ sbi->s_inode_size) {
++ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++ EXT4_GOOD_OLD_INODE_SIZE;
++ ext4_msg(sb, KERN_INFO,
++ "required extra inode space not available");
++ }
++}
++
+ static void ext4_set_resv_clusters(struct super_block *sb)
+ {
+ ext4_fsblk_t resv_clusters;
+@@ -4156,29 +4186,7 @@ no_journal:
+ if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+ sb->s_flags |= MS_RDONLY;
+
+- /* determine the minimum size of new large inodes, if present */
+- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+- EXT4_GOOD_OLD_INODE_SIZE;
+- if (ext4_has_feature_extra_isize(sb)) {
+- if (sbi->s_want_extra_isize <
+- le16_to_cpu(es->s_want_extra_isize))
+- sbi->s_want_extra_isize =
+- le16_to_cpu(es->s_want_extra_isize);
+- if (sbi->s_want_extra_isize <
+- le16_to_cpu(es->s_min_extra_isize))
+- sbi->s_want_extra_isize =
+- le16_to_cpu(es->s_min_extra_isize);
+- }
+- }
+- /* Check if enough inode space is available */
+- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+- sbi->s_inode_size) {
+- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+- EXT4_GOOD_OLD_INODE_SIZE;
+- ext4_msg(sb, KERN_INFO, "required extra inode space not"
+- "available");
+- }
++ ext4_clamp_want_extra_isize(sb);
+
+ ext4_set_resv_clusters(sb);
+
+@@ -4959,6 +4967,8 @@ static int ext4_remount(struct super_blo
+ goto restore_opts;
+ }
+
++ ext4_clamp_want_extra_isize(sb);
++
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+ ext4_msg(sb, KERN_ERR, "changing journal_checksum "
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Wed, 11 Sep 2019 16:03:05 +0300
+Subject: mac80211: Do not send Layer 2 Update frame before authorization
+
+From: Jouni Malinen <jouni@codeaurora.org>
+
+commit 3e493173b7841259a08c5c8e5cbe90adb349da7e upstream.
+
+The Layer 2 Update frame is used to update bridges when a station roams
+to another AP even if that STA does not transmit any frames after the
+reassociation. This behavior was described in IEEE Std 802.11F-2003 as
+something that would happen based on MLME-ASSOCIATE.indication, i.e.,
+before completing 4-way handshake. However, this IEEE trial-use
+recommended practice document was published before RSN (IEEE Std
+802.11i-2004) and as such, did not consider RSN use cases. Furthermore,
+IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been
+maintained and should not be used anymore.
+
+Sending out the Layer 2 Update frame immediately after association is
+fine for open networks (and also when using SAE, FT protocol, or FILS
+authentication when the station is actually authenticated by the time
+association completes). However, it is not appropriate for cases where
+RSN is used with PSK or EAP authentication since the station is actually
+fully authenticated only once the 4-way handshake completes after
+authentication and attackers might be able to use the unauthenticated
+triggering of Layer 2 Update frame transmission to disrupt bridge
+behavior.
+
+Fix this by postponing transmission of the Layer 2 Update frame from
+station entry addition to the point when the station entry is marked
+authorized. Similarly, send out the VLAN binding update only if the STA
+entry has already been authorized.
+
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+Reviewed-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 4.9: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/cfg.c | 11 +++--------
+ net/mac80211/sta_info.c | 4 ++++
+ 2 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1357,7 +1357,6 @@ static int ieee80211_add_station(struct
+ struct sta_info *sta;
+ struct ieee80211_sub_if_data *sdata;
+ int err;
+- int layer2_update;
+
+ if (params->vlan) {
+ sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
+@@ -1401,18 +1400,12 @@ static int ieee80211_add_station(struct
+ test_sta_flag(sta, WLAN_STA_ASSOC))
+ rate_control_rate_init(sta);
+
+- layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+- sdata->vif.type == NL80211_IFTYPE_AP;
+-
+ err = sta_info_insert_rcu(sta);
+ if (err) {
+ rcu_read_unlock();
+ return err;
+ }
+
+- if (layer2_update)
+- cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
+-
+ rcu_read_unlock();
+
+ return 0;
+@@ -1521,7 +1514,9 @@ static int ieee80211_change_station(stru
+ atomic_inc(&sta->sdata->bss->num_mcast_sta);
+ }
+
+- cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
++ if (sta->sta_state == IEEE80211_STA_AUTHORIZED)
++ cfg80211_send_layer2_update(sta->sdata->dev,
++ sta->sta.addr);
+ }
+
+ err = sta_apply_parameters(local, sta, params);
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1896,6 +1896,10 @@ int sta_info_move_state(struct sta_info
+ ieee80211_check_fast_xmit(sta);
+ ieee80211_check_fast_rx(sta);
+ }
++ if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
++ sta->sdata->vif.type == NL80211_IFTYPE_AP)
++ cfg80211_send_layer2_update(sta->sdata->dev,
++ sta->sta.addr);
+ break;
+ default:
+ break;
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Vandana BN <bnvandana@gmail.com>
+Date: Wed, 22 May 2019 04:34:15 -0400
+Subject: media: usb:zr364xx:Fix KASAN:null-ptr-deref Read in zr364xx_vidioc_querycap
+
+From: Vandana BN <bnvandana@gmail.com>
+
+commit 5d2e73a5f80a5b5aff3caf1ec6d39b5b3f54b26e upstream.
+
+SyzKaller hit the null pointer deref while reading from uninitialized
+udev->product in zr364xx_vidioc_querycap().
+
+==================================================================
+BUG: KASAN: null-ptr-deref in read_word_at_a_time+0xe/0x20
+include/linux/compiler.h:274
+Read of size 1 at addr 0000000000000000 by task v4l_id/5287
+
+CPU: 1 PID: 5287 Comm: v4l_id Not tainted 5.1.0-rc3-319004-g43151d6 #6
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0xe8/0x16e lib/dump_stack.c:113
+ kasan_report.cold+0x5/0x3c mm/kasan/report.c:321
+ read_word_at_a_time+0xe/0x20 include/linux/compiler.h:274
+ strscpy+0x8a/0x280 lib/string.c:207
+ zr364xx_vidioc_querycap+0xb5/0x210 drivers/media/usb/zr364xx/zr364xx.c:706
+ v4l_querycap+0x12b/0x340 drivers/media/v4l2-core/v4l2-ioctl.c:1062
+ __video_do_ioctl+0x5bb/0xb40 drivers/media/v4l2-core/v4l2-ioctl.c:2874
+ video_usercopy+0x44e/0xf00 drivers/media/v4l2-core/v4l2-ioctl.c:3056
+ v4l2_ioctl+0x14e/0x1a0 drivers/media/v4l2-core/v4l2-dev.c:364
+ vfs_ioctl fs/ioctl.c:46 [inline]
+ file_ioctl fs/ioctl.c:509 [inline]
+ do_vfs_ioctl+0xced/0x12f0 fs/ioctl.c:696
+ ksys_ioctl+0xa0/0xc0 fs/ioctl.c:713
+ __do_sys_ioctl fs/ioctl.c:720 [inline]
+ __se_sys_ioctl fs/ioctl.c:718 [inline]
+ __x64_sys_ioctl+0x74/0xb0 fs/ioctl.c:718
+ do_syscall_64+0xcf/0x4f0 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7f3b56d8b347
+Code: 90 90 90 48 8b 05 f1 fa 2a 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff
+ff c3 90 90 90 90 90 90 90 90 90 90 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff
+ff 73 01 c3 48 8b 0d c1 fa 2a 00 31 d2 48 29 c2 64
+RSP: 002b:00007ffe005d5d68 EFLAGS: 00000202 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f3b56d8b347
+RDX: 00007ffe005d5d70 RSI: 0000000080685600 RDI: 0000000000000003
+RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000202 R12: 0000000000400884
+R13: 00007ffe005d5ec0 R14: 0000000000000000 R15: 0000000000000000
+==================================================================
+
+For this device, udev->product is not initialized and accessing it
+causes a NULL pointer deref.
+
+The fix is to check for NULL before strscpy() and copy an empty string
+if product is NULL.
+
+Reported-by: syzbot+66010012fd4c531a1a96@syzkaller.appspotmail.com
+Signed-off-by: Vandana BN <bnvandana@gmail.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+[bwh: Backported to 4.9: This function uses strlcpy() instead of strscpy()]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/zr364xx/zr364xx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -711,7 +711,8 @@ static int zr364xx_vidioc_querycap(struc
+ struct zr364xx_camera *cam = video_drvdata(file);
+
+ strlcpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
+- strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
++ if (cam->udev->product)
++ strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
+ strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
+ sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Sanjay Konduri <sanjay.konduri@redpinesignals.com>
+Date: Tue, 15 May 2018 14:34:30 +0530
+Subject: rsi: add fix for crash during assertions
+
+From: Sanjay Konduri <sanjay.konduri@redpinesignals.com>
+
+commit abd39c6ded9db53aa44c2540092bdd5fb6590fa8 upstream.
+
+A crash was observed in some scenarios after an assertion had
+occurred. This happens because the hw structure is freed and is then
+accessed in functions where a NULL check is already present. Avoid
+the crash by setting hw to NULL after freeing it.
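+
+The fix is the usual dangling-pointer guard (as in the hunk below):
+
+	ieee80211_free_hw(hw);
+	adapter->hw = NULL;	/* existing NULL checks now catch this */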
+
+Signed-off-by: Sanjay Konduri <sanjay.konduri@redpinesignals.com>
+Signed-off-by: Sushant Kumar Mishra <sushant.mishra@redpinesignals.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -199,6 +199,7 @@ void rsi_mac80211_detach(struct rsi_hw *
+ ieee80211_stop_queues(hw);
+ ieee80211_unregister_hw(hw);
+ ieee80211_free_hw(hw);
++ adapter->hw = NULL;
+ }
+
+ rsi_remove_dbgfs(adapter);
hid-hidraw-uhid-always-report-epollout.patch
ethtool-reduce-stack-usage-with-clang.patch
fs-select-avoid-clang-stack-usage-warning.patch
+rsi-add-fix-for-crash-during-assertions.patch
+arm64-mm-bug-on-unsupported-manipulations-of-live-kernel-mappings.patch
+arm64-don-t-open-code-page-table-entry-creation.patch
+arm64-mm-change-page-table-pointer-name-in-p_set_huge.patch
+arm64-enforce-bbm-for-huge-io-vmap-mappings.patch
+arm64-make-sure-permission-updates-happen-for-pmd-pud.patch
+cfg80211-mac80211-make-ieee80211_send_layer2_update-a-public-function.patch
+mac80211-do-not-send-layer-2-update-frame-before-authorization.patch
+media-usb-zr364xx-fix-kasan-null-ptr-deref-read-in-zr364xx_vidioc_querycap.patch
+wimax-i2400-fix-memory-leak.patch
+wimax-i2400-fix-memory-leak-in-i2400m_op_rfkill_sw_toggle.patch
+ext4-fix-use-after-free-race-with-debug_want_extra_isize.patch
+ext4-add-more-paranoia-checking-in-ext4_expand_extra_isize-handling.patch
+dccp-fix-memleak-in-__feat_register_sp.patch
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 25 Oct 2019 23:53:30 -0500
+Subject: wimax: i2400: Fix memory leak in i2400m_op_rfkill_sw_toggle
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit 6f3ef5c25cc762687a7341c18cbea5af54461407 upstream.
+
+In the implementation of i2400m_op_rfkill_sw_toggle() the allocated
+buffer for cmd should be released before returning. The
+documentation for i2400m_msg_to_dev() says that when it returns, the
+buffer can be reused, meaning cmd should be released in either case.
+Move kfree(cmd) before the return so it is reached by all execution
+paths.
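+
+After the move, the tail of the function looks like this (sketch; the
+trace call's arguments are elided):
+
+	error_msg_to_dev:
+	error_alloc:
+		d_fnend(...);
+		kfree(cmd);	/* reached from every path */
+		return result;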
+
+Fixes: 2507e6ab7a9a ("wimax: i2400: fix memory leak")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wimax/i2400m/op-rfkill.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wimax/i2400m/op-rfkill.c
++++ b/drivers/net/wimax/i2400m/op-rfkill.c
+@@ -142,12 +142,12 @@ int i2400m_op_rfkill_sw_toggle(struct wi
+ "%d\n", result);
+ result = 0;
+ error_cmd:
+- kfree(cmd);
+ kfree_skb(ack_skb);
+ error_msg_to_dev:
+ error_alloc:
+ d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
+ wimax_dev, state, result);
++ kfree(cmd);
+ return result;
+ }
+
--- /dev/null
+From foo@baz Wed 15 Jan 2020 03:59:37 PM CET
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Tue, 10 Sep 2019 18:01:40 -0500
+Subject: wimax: i2400: fix memory leak
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit 2507e6ab7a9a440773be476141a255934468c5ef upstream.
+
+In i2400m_op_rfkill_sw_toggle() the cmd buffer should be released along
+with the skb response.
+
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wimax/i2400m/op-rfkill.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wimax/i2400m/op-rfkill.c
++++ b/drivers/net/wimax/i2400m/op-rfkill.c
+@@ -142,6 +142,7 @@ int i2400m_op_rfkill_sw_toggle(struct wi
+ "%d\n", result);
+ result = 0;
+ error_cmd:
++ kfree(cmd);
+ kfree_skb(ack_skb);
+ error_msg_to_dev:
+ error_alloc: