--- /dev/null
+From ad16d77be2a0198010976085a26d065ab148f5e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jul 2023 10:49:20 +0100
+Subject: btrfs: check if the transaction was aborted at
+ btrfs_wait_for_commit()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit bf7ecbe9875061bf3fce1883e3b26b77f847d1e8 ]
+
+At btrfs_wait_for_commit() we wait for a transaction to finish and then
+always return 0 (success) without checking if it was aborted, in which
+case the transaction didn't happen due to some critical error. Fix this
+by checking if the transaction was aborted.
+
+Fixes: 462045928bda ("Btrfs: add START_SYNC, WAIT_SYNC ioctls")
+CC: stable@vger.kernel.org # 4.19+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/transaction.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index ff372f1226a3b..abd67f984fbcf 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -891,6 +891,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
+ }
+
+ wait_for_commit(cur_trans);
++ ret = cur_trans->aborted;
+ btrfs_put_transaction(cur_trans);
+ out:
+ return ret;
+--
+2.40.1
+
--- /dev/null
+From c4c158bbd540ef7875678ef84fe26bdf08b25d90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jun 2023 09:26:20 +0200
+Subject: irq-bcm6345-l1: Do not assume a fixed block to cpu mapping
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 55ad24857341c36616ecc1d9580af5626c226cf1 ]
+
+The irq to block mapping is fixed, and interrupts from the first block
+will always be routed to the first parent IRQ. But the parent interrupts
+themselves can be routed to any available CPU.
+
+This is used by the bootloader to map the first parent interrupt to the
+boot CPU, regardless of whether the boot CPU is the first one or the
+second one.
+
+When booting from the second CPU, the assumption that the first block's
+IRQ is mapped to the first CPU breaks, and the system hangs because
+interrupts do not get routed correctly.
+
+Fix this by passing the appropriate bcm6434_l1_cpu to the interrupt
+handler instead of the chip itself, so the handler always has the right
+block.
+
+Fixes: c7c42ec2baa1 ("irqchips/bmips: Add bcm6345-l1 interrupt controller")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20230629072620.62527-1-jonas.gorski@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-bcm6345-l1.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
+index 1bd0621c4ce2a..4827a11832478 100644
+--- a/drivers/irqchip/irq-bcm6345-l1.c
++++ b/drivers/irqchip/irq-bcm6345-l1.c
+@@ -82,6 +82,7 @@ struct bcm6345_l1_chip {
+ };
+
+ struct bcm6345_l1_cpu {
++ struct bcm6345_l1_chip *intc;
+ void __iomem *map_base;
+ unsigned int parent_irq;
+ u32 enable_cache[];
+@@ -115,17 +116,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+
+ static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+ {
+- struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+- struct bcm6345_l1_cpu *cpu;
++ struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
++ struct bcm6345_l1_chip *intc = cpu->intc;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+-#ifdef CONFIG_SMP
+- cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+-#else
+- cpu = intc->cpus[0];
+-#endif
+-
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+@@ -257,6 +252,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
+ if (!cpu)
+ return -ENOMEM;
+
++ cpu->intc = intc;
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+@@ -272,7 +268,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
+ return -EINVAL;
+ }
+ irq_set_chained_handler_and_data(cpu->parent_irq,
+- bcm6345_l1_irq_handle, intc);
++ bcm6345_l1_irq_handle, cpu);
+
+ return 0;
+ }
+--
+2.40.1
+
--- /dev/null
+From 274a7fb27086cd007a8c995fa48db87087a55072 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 17 Jun 2023 08:32:42 +0100
+Subject: irqchip/gic-v4.1: Properly lock VPEs when doing a directLPI
+ invalidation
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 926846a703cbf5d0635cc06e67d34b228746554b ]
+
+We normally rely on the irq_to_cpuid_[un]lock() primitives to make
+sure nothing will change col->idx while performing a LPI invalidation.
+
+However, these primitives do not cover VPE doorbells, and we have
+some open-coded locking for that. Unfortunately, this locking is
+pretty bogus.
+
+Instead, extend the above primitives to cover VPE doorbells and
+convert the whole thing to it.
+
+Fixes: f3a059219bc7 ("irqchip/gic-v4.1: Ensure mutual exclusion between vPE affinity change and RD access")
+Reported-by: Kunkun Jiang <jiangkunkun@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: Zenghui Yu <yuzenghui@huawei.com>
+Cc: wanghaibin.wang@huawei.com
+Tested-by: Kunkun Jiang <jiangkunkun@huawei.com>
+Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
+Link: https://lore.kernel.org/r/20230617073242.3199746-1-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 75 ++++++++++++++++++++------------
+ 1 file changed, 46 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 5ec091c64d47f..f1fa98e5ea13f 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -267,13 +267,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+ }
+
++static struct irq_chip its_vpe_irq_chip;
++
+ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+ {
+- struct its_vlpi_map *map = get_vlpi_map(d);
++ struct its_vpe *vpe = NULL;
+ int cpu;
+
+- if (map) {
+- cpu = vpe_to_cpuid_lock(map->vpe, flags);
++ if (d->chip == &its_vpe_irq_chip) {
++ vpe = irq_data_get_irq_chip_data(d);
++ } else {
++ struct its_vlpi_map *map = get_vlpi_map(d);
++ if (map)
++ vpe = map->vpe;
++ }
++
++ if (vpe) {
++ cpu = vpe_to_cpuid_lock(vpe, flags);
+ } else {
+ /* Physical LPIs are already locked via the irq_desc lock */
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+@@ -287,10 +297,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+
+ static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+ {
+- struct its_vlpi_map *map = get_vlpi_map(d);
++ struct its_vpe *vpe = NULL;
++
++ if (d->chip == &its_vpe_irq_chip) {
++ vpe = irq_data_get_irq_chip_data(d);
++ } else {
++ struct its_vlpi_map *map = get_vlpi_map(d);
++ if (map)
++ vpe = map->vpe;
++ }
+
+- if (map)
+- vpe_to_cpuid_unlock(map->vpe, flags);
++ if (vpe)
++ vpe_to_cpuid_unlock(vpe, flags);
+ }
+
+ static struct its_collection *valid_col(struct its_collection *col)
+@@ -1422,14 +1440,29 @@ static void wait_for_syncr(void __iomem *rdbase)
+ cpu_relax();
+ }
+
+-static void direct_lpi_inv(struct irq_data *d)
++static void __direct_lpi_inv(struct irq_data *d, u64 val)
+ {
+- struct its_vlpi_map *map = get_vlpi_map(d);
+ void __iomem *rdbase;
+ unsigned long flags;
+- u64 val;
+ int cpu;
+
++ /* Target the redistributor this LPI is currently routed to */
++ cpu = irq_to_cpuid_lock(d, &flags);
++ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
++
++ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
++ gic_write_lpir(val, rdbase + GICR_INVLPIR);
++ wait_for_syncr(rdbase);
++
++ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
++ irq_to_cpuid_unlock(d, flags);
++}
++
++static void direct_lpi_inv(struct irq_data *d)
++{
++ struct its_vlpi_map *map = get_vlpi_map(d);
++ u64 val;
++
+ if (map) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+@@ -1442,15 +1475,7 @@ static void direct_lpi_inv(struct irq_data *d)
+ val = d->hwirq;
+ }
+
+- /* Target the redistributor this LPI is currently routed to */
+- cpu = irq_to_cpuid_lock(d, &flags);
+- raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+- rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+- gic_write_lpir(val, rdbase + GICR_INVLPIR);
+-
+- wait_for_syncr(rdbase);
+- raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+- irq_to_cpuid_unlock(d, flags);
++ __direct_lpi_inv(d, val);
+ }
+
+ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+@@ -3916,18 +3941,10 @@ static void its_vpe_send_inv(struct irq_data *d)
+ {
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+- if (gic_rdists->has_direct_lpi) {
+- void __iomem *rdbase;
+-
+- /* Target the redistributor this VPE is currently known on */
+- raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+- rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+- gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
+- wait_for_syncr(rdbase);
+- raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+- } else {
++ if (gic_rdists->has_direct_lpi)
++ __direct_lpi_inv(d, d->parent_data->hwirq);
++ else
+ its_vpe_send_cmd(vpe, its_send_inv);
+- }
+ }
+
+ static void its_vpe_mask_irq(struct irq_data *d)
+--
+2.40.1
+
--- /dev/null
+From 9e34e4ee64d135591ddaa46f3ca2390cd0666c7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jul 2021 09:33:02 -0700
+Subject: KVM: nVMX: Do not clear CR3 load/store exiting bits if L1 wants 'em
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 470750b3425513b9f63f176e564e63e0e7998afc ]
+
+Keep CR3 load/store exiting enable as needed when running L2 in order to
+honor L1's desires. This fixes a largely theoretical bug where L1 could
+intercept CR3 but not CR0.PG and end up not getting the desired CR3 exits
+when L2 enables paging. In other words, the existing !is_paging() check
+inadvertently handles the normal case for L2 where vmx_set_cr0() is
+called during VM-Enter, which is guaranteed to run with paging enabled,
+and thus will never clear the bits.
+
+Removing the !is_paging() check will also allow future consolidation and
+cleanup of the related code. From a performance perspective, this is
+all a nop, as the VMCS controls shadow will optimize away the VMWRITE
+when the controls are in the desired state.
+
+Add a comment explaining why CR3 is intercepted, with a big disclaimer
+about not querying the old CR3. Because vmx_set_cr0() is used for flows
+that are not directly tied to MOV CR3, e.g. vCPU RESET/INIT and nested
+VM-Enter, it's possible that is_paging() is not synchronized with CR3
+load/store exiting. This is actually guaranteed in the current code, as
+KVM starts with CR3 interception disabled. Obviously that can be fixed,
+but there's no good reason to play whack-a-mole, and it tends to end
+poorly, e.g. descriptor table exiting for UMIP emulation attempted to be
+precise in the past and ended up botching the interception toggling.
+
+Fixes: fe3ef05c7572 ("KVM: nVMX: Prepare vmcs02 from vmcs01 and vmcs12")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210713163324.627647-25-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c4abd7352023 ("KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 46 +++++++++++++++++++++++++++++++++---------
+ 1 file changed, 37 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index b9abe08c9d590..ca51df0df3f94 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3063,10 +3063,14 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ }
+
++#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
++ CPU_BASED_CR3_STORE_EXITING)
++
+ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long hw_cr0;
++ u32 tmp;
+
+ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
+ if (is_unrestricted_guest(vcpu))
+@@ -3093,18 +3097,42 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ #endif
+
+ if (enable_ept && !is_unrestricted_guest(vcpu)) {
++ /*
++ * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
++ * the below code _enables_ CR3 exiting, vmx_cache_reg() will
++ * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
++ * KVM's CR3 is installed.
++ */
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+ vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
++
++ /*
++ * When running with EPT but not unrestricted guest, KVM must
++ * intercept CR3 accesses when paging is _disabled_. This is
++ * necessary because restricted guests can't actually run with
++ * paging disabled, and so KVM stuffs its own CR3 in order to
++ * run the guest when identity mapped page tables.
++ *
++ * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
++ * update, it may be stale with respect to CR3 interception,
++ * e.g. after nested VM-Enter.
++ *
++ * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
++ * stores to forward them to L1, even if KVM does not need to
++ * intercept them to preserve its identity mapped page tables.
++ */
+ if (!(cr0 & X86_CR0_PG)) {
+- /* From paging/starting to nonpaging */
+- exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+- CPU_BASED_CR3_STORE_EXITING);
+- vcpu->arch.cr0 = cr0;
+- vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+- } else if (!is_paging(vcpu)) {
+- /* From nonpaging to paging */
+- exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+- CPU_BASED_CR3_STORE_EXITING);
++ exec_controls_setbit(vmx, CR3_EXITING_BITS);
++ } else if (!is_guest_mode(vcpu)) {
++ exec_controls_clearbit(vmx, CR3_EXITING_BITS);
++ } else {
++ tmp = exec_controls_get(vmx);
++ tmp &= ~CR3_EXITING_BITS;
++ tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
++ exec_controls_set(vmx, tmp);
++ }
++
++ if (!is_paging(vcpu) != !(cr0 & X86_CR0_PG)) {
+ vcpu->arch.cr0 = cr0;
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
+--
+2.40.1
+
--- /dev/null
+From 7f78eab3ff8018f2ebcc5bf4d1e5492cd001ee10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 13:30:36 -0700
+Subject: KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit c4abd7352023aa96114915a0bb2b88016a425cda ]
+
+Stuff CR0 and/or CR4 to be compliant with a restricted guest if and only
+if KVM itself is not configured to utilize unrestricted guests, i.e. don't
+stuff CR0/CR4 for a restricted L2 that is running as the guest of an
+unrestricted L1. Any attempt to VM-Enter a restricted guest with invalid
+CR0/CR4 values should fail, i.e. in a nested scenario, KVM (as L0) should
+never observe a restricted L2 with incompatible CR0/CR4, since nested
+VM-Enter from L1 should have failed.
+
+And if KVM does observe an active, restricted L2 with incompatible state,
+e.g. due to a KVM bug, fudging CR0/CR4 instead of letting VM-Enter fail
+does more harm than good, as KVM will often neglect to undo the side
+effects, e.g. won't clear rmode.vm86_active on nested VM-Exit, and thus
+the damage can easily spill over to L1. On the other hand, letting
+VM-Enter fail due to bad guest state is more likely to contain the damage
+to L2 as KVM relies on hardware to perform most guest state consistency
+checks, i.e. KVM needs to be able to reflect a failed nested VM-Enter into
+L1 irrespective of (un)restricted guest behavior.
+
+Cc: Jim Mattson <jmattson@google.com>
+Cc: stable@vger.kernel.org
+Fixes: bddd82d19e2e ("KVM: nVMX: KVM needs to unset "unrestricted guest" VM-execution control in vmcs02 if vmcs12 doesn't set it")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230613203037.1968489-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index ca51df0df3f94..2445c61038954 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1519,6 +1519,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long old_rflags;
+
++ /*
++ * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
++ * is an unrestricted guest in order to mark L2 as needing emulation
++ * if L1 runs L2 as a restricted guest.
++ */
+ if (is_unrestricted_guest(vcpu)) {
+ kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
+ vmx->rflags = rflags;
+@@ -3073,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ u32 tmp;
+
+ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
+- if (is_unrestricted_guest(vcpu))
++ if (enable_unrestricted_guest)
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else {
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
+@@ -3096,7 +3101,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ }
+ #endif
+
+- if (enable_ept && !is_unrestricted_guest(vcpu)) {
++ if (enable_ept && !enable_unrestricted_guest) {
+ /*
+ * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
+ * the below code _enables_ CR3 exiting, vmx_cache_reg() will
+@@ -3231,7 +3236,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ unsigned long hw_cr4;
+
+ hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
+- if (is_unrestricted_guest(vcpu))
++ if (enable_unrestricted_guest)
+ hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else if (vmx->rmode.vm86_active)
+ hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
+@@ -3251,7 +3256,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ vcpu->arch.cr4 = cr4;
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
+
+- if (!is_unrestricted_guest(vcpu)) {
++ if (!enable_unrestricted_guest) {
+ if (enable_ept) {
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+--
+2.40.1
+
--- /dev/null
+From d1ad78eeee302acf3df2ecccb2c54cfe77feb9a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jul 2021 09:33:01 -0700
+Subject: KVM: VMX: Fold ept_update_paging_mode_cr0() back into vmx_set_cr0()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit c834fd7fc1308a0e0429d203a6c3af528cd902fa ]
+
+Move the CR0/CR3/CR4 shenanigans for EPT without unrestricted guest back
+into vmx_set_cr0(). This will allow a future patch to eliminate the
+rather gross stuffing of vcpu->arch.cr0 in the paging transition cases
+by snapshotting the old CR0.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210713163324.627647-24-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c4abd7352023 ("KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 40 +++++++++++++++++-----------------------
+ 1 file changed, 17 insertions(+), 23 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 574acfa98fa9b..b9abe08c9d590 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3063,27 +3063,6 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ }
+
+-static void ept_update_paging_mode_cr0(unsigned long cr0, struct kvm_vcpu *vcpu)
+-{
+- struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+- if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+- vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
+- if (!(cr0 & X86_CR0_PG)) {
+- /* From paging/starting to nonpaging */
+- exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+- CPU_BASED_CR3_STORE_EXITING);
+- vcpu->arch.cr0 = cr0;
+- vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+- } else if (!is_paging(vcpu)) {
+- /* From nonpaging to paging */
+- exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+- CPU_BASED_CR3_STORE_EXITING);
+- vcpu->arch.cr0 = cr0;
+- vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+- }
+-}
+-
+ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -3113,8 +3092,23 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ }
+ #endif
+
+- if (enable_ept && !is_unrestricted_guest(vcpu))
+- ept_update_paging_mode_cr0(cr0, vcpu);
++ if (enable_ept && !is_unrestricted_guest(vcpu)) {
++ if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
++ vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
++ if (!(cr0 & X86_CR0_PG)) {
++ /* From paging/starting to nonpaging */
++ exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
++ CPU_BASED_CR3_STORE_EXITING);
++ vcpu->arch.cr0 = cr0;
++ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
++ } else if (!is_paging(vcpu)) {
++ /* From nonpaging to paging */
++ exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
++ CPU_BASED_CR3_STORE_EXITING);
++ vcpu->arch.cr0 = cr0;
++ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
++ }
++ }
+
+ vmcs_writel(CR0_READ_SHADOW, cr0);
+ vmcs_writel(GUEST_CR0, hw_cr0);
+--
+2.40.1
+
--- /dev/null
+From 2faba2d4314b6a745f1c103aef2342cce630c813 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jul 2021 09:32:59 -0700
+Subject: KVM: VMX: Invert handling of CR0.WP for EPT without unrestricted
+ guest
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ee5a5584cba316bc90bc2fad0c6d10b71f1791cb ]
+
+Opt-in to forcing CR0.WP=1 for shadow paging, and stop lying about WP
+being "always on" for unrestricted guest. In addition to making KVM a
+wee bit more honest, this paves the way for additional cleanup.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210713163324.627647-22-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c4abd7352023 ("KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 9aedc7b06da7a..574acfa98fa9b 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -135,8 +135,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+ #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
+ #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
+ #define KVM_VM_CR0_ALWAYS_ON \
+- (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
+- X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
++ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+
+ #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
+ #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+@@ -3064,9 +3063,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ }
+
+-static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
+- unsigned long cr0,
+- struct kvm_vcpu *vcpu)
++static void ept_update_paging_mode_cr0(unsigned long cr0, struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+@@ -3085,9 +3082,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
+ vcpu->arch.cr0 = cr0;
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
+-
+- if (!(cr0 & X86_CR0_WP))
+- *hw_cr0 &= ~X86_CR0_WP;
+ }
+
+ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+@@ -3100,6 +3094,8 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else {
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
++ if (!enable_ept)
++ hw_cr0 |= X86_CR0_WP;
+
+ if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
+ enter_pmode(vcpu);
+@@ -3118,7 +3114,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ #endif
+
+ if (enable_ept && !is_unrestricted_guest(vcpu))
+- ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
++ ept_update_paging_mode_cr0(cr0, vcpu);
+
+ vmcs_writel(CR0_READ_SHADOW, cr0);
+ vmcs_writel(GUEST_CR0, hw_cr0);
+--
+2.40.1
+
file-always-lock-position-for-fmode_atomic_pos.patch
nfsd-remove-incorrect-check-in-nfsd4_validate_stateid.patch
tpm_tis-explicitly-check-for-error-code.patch
+irq-bcm6345-l1-do-not-assume-a-fixed-block-to-cpu-ma.patch
+irqchip-gic-v4.1-properly-lock-vpes-when-doing-a-dir.patch
+kvm-vmx-invert-handling-of-cr0.wp-for-ept-without-un.patch
+kvm-vmx-fold-ept_update_paging_mode_cr0-back-into-vm.patch
+kvm-nvmx-do-not-clear-cr3-load-store-exiting-bits-if.patch
+kvm-vmx-don-t-fudge-cr0-and-cr4-for-restricted-l2-gu.patch
+staging-rtl8712-use-constants-from-linux-ieee80211.h.patch
+staging-r8712-fix-memory-leak-in-_r8712_init_xmit_pr.patch
+btrfs-check-if-the-transaction-was-aborted-at-btrfs_.patch
--- /dev/null
+From d7a69d0ab5607c9d939e7eae9820cc18e01daefd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jul 2023 12:54:17 -0500
+Subject: staging: r8712: Fix memory leak in _r8712_init_xmit_priv()
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+[ Upstream commit ac83631230f77dda94154ed0ebfd368fc81c70a3 ]
+
+In the above mentioned routine, memory is allocated in several places.
+If the first succeeds and a later one fails, the routine will leak memory.
+This patch fixes commit 2865d42c78a9 ("staging: r8712u: Add the new driver
+to the mainline kernel"). A potential memory leak in
+r8712_xmit_resource_alloc() is also addressed.
+
+Fixes: 2865d42c78a9 ("staging: r8712u: Add the new driver to the mainline kernel")
+Reported-by: syzbot+cf71097ffb6755df8251@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/x/log.txt?x=11ac3fa0a80000
+Cc: stable@vger.kernel.org
+Cc: Nam Cao <namcaov@gmail.com>
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Reviewed-by: Nam Cao <namcaov@gmail.com>
+Link: https://lore.kernel.org/r/20230714175417.18578-1-Larry.Finger@lwfinger.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/rtl8712/rtl871x_xmit.c | 43 ++++++++++++++++++++------
+ drivers/staging/rtl8712/xmit_linux.c | 6 ++++
+ 2 files changed, 40 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
+index 15491859aedae..eb6493047aaf6 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.c
++++ b/drivers/staging/rtl8712/rtl871x_xmit.c
+@@ -22,6 +22,7 @@
+ #include "osdep_intf.h"
+ #include "usb_ops.h"
+
++#include <linux/usb.h>
+ #include <linux/ieee80211.h>
+
+ static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8};
+@@ -56,6 +57,7 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ sint i;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_frame *pxframe;
++ int j;
+
+ memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
+ spin_lock_init(&pxmitpriv->lock);
+@@ -118,11 +120,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ _init_queue(&pxmitpriv->pending_xmitbuf_queue);
+ pxmitpriv->pallocated_xmitbuf =
+ kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC);
+- if (!pxmitpriv->pallocated_xmitbuf) {
+- kfree(pxmitpriv->pallocated_frame_buf);
+- pxmitpriv->pallocated_frame_buf = NULL;
+- return -ENOMEM;
+- }
++ if (!pxmitpriv->pallocated_xmitbuf)
++ goto clean_up_frame_buf;
+ pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
+ ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+@@ -130,13 +129,17 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ INIT_LIST_HEAD(&pxmitbuf->list);
+ pxmitbuf->pallocated_buf =
+ kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC);
+- if (!pxmitbuf->pallocated_buf)
+- return -ENOMEM;
++ if (!pxmitbuf->pallocated_buf) {
++ j = 0;
++ goto clean_up_alloc_buf;
++ }
+ pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
+ ((addr_t) (pxmitbuf->pallocated_buf) &
+ (XMITBUF_ALIGN_SZ - 1));
+- if (r8712_xmit_resource_alloc(padapter, pxmitbuf))
+- return -ENOMEM;
++ if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) {
++ j = 1;
++ goto clean_up_alloc_buf;
++ }
+ list_add_tail(&pxmitbuf->list,
+ &(pxmitpriv->free_xmitbuf_queue.queue));
+ pxmitbuf++;
+@@ -147,6 +150,28 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh);
+ return 0;
++
++clean_up_alloc_buf:
++ if (j) {
++ /* failure happened in r8712_xmit_resource_alloc()
++ * delete extra pxmitbuf->pallocated_buf
++ */
++ kfree(pxmitbuf->pallocated_buf);
++ }
++ for (j = 0; j < i; j++) {
++ int k;
++
++ pxmitbuf--; /* reset pointer */
++ kfree(pxmitbuf->pallocated_buf);
++ for (k = 0; k < 8; k++) /* delete xmit urb's */
++ usb_free_urb(pxmitbuf->pxmit_urb[k]);
++ }
++ kfree(pxmitpriv->pallocated_xmitbuf);
++ pxmitpriv->pallocated_xmitbuf = NULL;
++clean_up_frame_buf:
++ kfree(pxmitpriv->pallocated_frame_buf);
++ pxmitpriv->pallocated_frame_buf = NULL;
++ return -ENOMEM;
+ }
+
+ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
+diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
+index 1f67d86c606f6..9050e51aa4079 100644
+--- a/drivers/staging/rtl8712/xmit_linux.c
++++ b/drivers/staging/rtl8712/xmit_linux.c
+@@ -119,6 +119,12 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter,
+ for (i = 0; i < 8; i++) {
+ pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxmitbuf->pxmit_urb[i]) {
++ int k;
++
++ for (k = i - 1; k >= 0; k--) {
++ /* handle allocation errors part way through loop */
++ usb_free_urb(pxmitbuf->pxmit_urb[k]);
++ }
+ netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
+ return -ENOMEM;
+ }
+--
+2.40.1
+
--- /dev/null
+From 7008fb2472165590ea8a696c62343a0399a1fc69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Apr 2021 22:41:32 +0200
+Subject: staging: rtl8712: Use constants from <linux/ieee80211.h>
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit f179515da9780c4cd37bee76c3cbb6f7364451d6 ]
+
+Some constants defined in wifi.h are already defined in <linux/ieee80211.h>
+with some other (but similar) names.
+Be consistent and use the ones from <linux/ieee80211.h>.
+
+The conversions made are:
+_SSID_IE_ --> WLAN_EID_SSID
+_SUPPORTEDRATES_IE_ --> WLAN_EID_SUPP_RATES
+_DSSET_IE_ --> WLAN_EID_DS_PARAMS
+_IBSS_PARA_IE_ --> WLAN_EID_IBSS_PARAMS
+_ERPINFO_IE_ --> WLAN_EID_ERP_INFO
+_EXT_SUPPORTEDRATES_IE_ --> WLAN_EID_EXT_SUPP_RATES
+
+_HT_CAPABILITY_IE_ --> WLAN_EID_HT_CAPABILITY
+_HT_EXTRA_INFO_IE_ --> WLAN_EID_HT_OPERATION (not used)
+_HT_ADD_INFO_IE_ --> WLAN_EID_HT_OPERATION
+
+_VENDOR_SPECIFIC_IE_ --> WLAN_EID_VENDOR_SPECIFIC
+
+_RESERVED47_ --> (not used)
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/fe35fb45323adc3a30f31b7280cec7700fd325d8.1617741313.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: ac83631230f7 ("staging: r8712: Fix memory leak in _r8712_init_xmit_priv()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/rtl8712/ieee80211.c | 12 ++++++------
+ drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 8 ++++----
+ drivers/staging/rtl8712/rtl871x_mlme.c | 10 +++++-----
+ drivers/staging/rtl8712/rtl871x_xmit.c | 3 ++-
+ drivers/staging/rtl8712/wifi.h | 15 ---------------
+ 5 files changed, 17 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
+index b4a099169c7c8..8075ed2ba61ea 100644
+--- a/drivers/staging/rtl8712/ieee80211.c
++++ b/drivers/staging/rtl8712/ieee80211.c
+@@ -181,25 +181,25 @@ int r8712_generate_ie(struct registry_priv *registrypriv)
+ sz += 2;
+ ie += 2;
+ /*SSID*/
+- ie = r8712_set_ie(ie, _SSID_IE_, dev_network->Ssid.SsidLength,
++ ie = r8712_set_ie(ie, WLAN_EID_SSID, dev_network->Ssid.SsidLength,
+ dev_network->Ssid.Ssid, &sz);
+ /*supported rates*/
+ set_supported_rate(dev_network->rates, registrypriv->wireless_mode);
+ rate_len = r8712_get_rateset_len(dev_network->rates);
+ if (rate_len > 8) {
+- ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8,
++ ie = r8712_set_ie(ie, WLAN_EID_SUPP_RATES, 8,
+ dev_network->rates, &sz);
+- ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8),
++ ie = r8712_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8),
+ (dev_network->rates + 8), &sz);
+ } else {
+- ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_,
++ ie = r8712_set_ie(ie, WLAN_EID_SUPP_RATES,
+ rate_len, dev_network->rates, &sz);
+ }
+ /*DS parameter set*/
+- ie = r8712_set_ie(ie, _DSSET_IE_, 1,
++ ie = r8712_set_ie(ie, WLAN_EID_DS_PARAMS, 1,
+ (u8 *)&dev_network->Configuration.DSConfig, &sz);
+ /*IBSS Parameter Set*/
+- ie = r8712_set_ie(ie, _IBSS_PARA_IE_, 2,
++ ie = r8712_set_ie(ie, WLAN_EID_IBSS_PARAMS, 2,
+ (u8 *)&dev_network->Configuration.ATIMWindow, &sz);
+ return sz;
+ }
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+index 2a661b04cd255..15c6ac518c167 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+@@ -236,7 +236,7 @@ static char *translate_scan(struct _adapter *padapter,
+ start = iwe_stream_add_point(info, start, stop, &iwe,
+ pnetwork->network.Ssid.Ssid);
+ /* parsing HT_CAP_IE */
+- p = r8712_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_,
++ p = r8712_get_ie(&pnetwork->network.IEs[12], WLAN_EID_HT_CAPABILITY,
+ &ht_ielen, pnetwork->network.IELength - 12);
+ if (p && ht_ielen > 0)
+ ht_cap = true;
+@@ -567,7 +567,7 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
+ while (cnt < ielen) {
+ eid = buf[cnt];
+
+- if ((eid == _VENDOR_SPECIFIC_IE_) &&
++ if ((eid == WLAN_EID_VENDOR_SPECIFIC) &&
+ (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
+ netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE\n");
+ padapter->securitypriv.wps_ie_len =
+@@ -609,7 +609,7 @@ static int r8711_wx_get_name(struct net_device *dev,
+ if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE) ==
+ true) {
+ /* parsing HT_CAP_IE */
+- p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_,
++ p = r8712_get_ie(&pcur_bss->IEs[12], WLAN_EID_HT_CAPABILITY,
+ &ht_ielen, pcur_bss->IELength - 12);
+ if (p && ht_ielen > 0)
+ ht_cap = true;
+@@ -1403,7 +1403,7 @@ static int r8711_wx_get_rate(struct net_device *dev,
+ i = 0;
+ if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE))
+ return -ENOLINK;
+- p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen,
++ p = r8712_get_ie(&pcur_bss->IEs[12], WLAN_EID_HT_CAPABILITY, &ht_ielen,
+ pcur_bss->IELength - 12);
+ if (p && ht_ielen > 0) {
+ ht_cap = true;
+diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
+index 6074383ec0b50..250cb0c4ed083 100644
+--- a/drivers/staging/rtl8712/rtl871x_mlme.c
++++ b/drivers/staging/rtl8712/rtl871x_mlme.c
+@@ -1649,11 +1649,11 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ phtpriv->ht_option = 0;
+- p = r8712_get_ie(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12);
++ p = r8712_get_ie(in_ie + 12, WLAN_EID_HT_CAPABILITY, &ielen, in_len - 12);
+ if (p && (ielen > 0)) {
+ if (pqospriv->qos_option == 0) {
+ out_len = *pout_len;
+- r8712_set_ie(out_ie + out_len, _VENDOR_SPECIFIC_IE_,
++ r8712_set_ie(out_ie + out_len, WLAN_EID_VENDOR_SPECIFIC,
+ _WMM_IE_Length_, WMM_IE, pout_len);
+ pqospriv->qos_option = 1;
+ }
+@@ -1667,7 +1667,7 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ ht_capie.ampdu_params_info = (IEEE80211_HT_AMPDU_PARM_FACTOR &
+ 0x03) | (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00);
+- r8712_set_ie(out_ie + out_len, _HT_CAPABILITY_IE_,
++ r8712_set_ie(out_ie + out_len, WLAN_EID_HT_CAPABILITY,
+ sizeof(struct rtl_ieee80211_ht_cap),
+ (unsigned char *)&ht_capie, pout_len);
+ phtpriv->ht_option = 1;
+@@ -1698,7 +1698,7 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
+ /*check Max Rx A-MPDU Size*/
+ len = 0;
+ p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
+- _HT_CAPABILITY_IE_,
++ WLAN_EID_HT_CAPABILITY,
+ &len, ie_len -
+ sizeof(struct NDIS_802_11_FIXED_IEs));
+ if (p && len > 0) {
+@@ -1733,7 +1733,7 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
+ }
+ len = 0;
+ p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
+- _HT_ADD_INFO_IE_, &len,
++ WLAN_EID_HT_OPERATION, &len,
+ ie_len - sizeof(struct NDIS_802_11_FIXED_IEs));
+ }
+
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
+index fd99782a400a0..15491859aedae 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.c
++++ b/drivers/staging/rtl8712/rtl871x_xmit.c
+@@ -22,6 +22,7 @@
+ #include "osdep_intf.h"
+ #include "usb_ops.h"
+
++#include <linux/ieee80211.h>
+
+ static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8};
+ static const u8 RFC1042_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0x00};
+@@ -709,7 +710,7 @@ void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len)
+ break;
+ case AUTO_VCS:
+ default:
+- perp = r8712_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
++ perp = r8712_get_ie(ie, WLAN_EID_ERP_INFO, &erp_len, ie_len);
+ if (!perp) {
+ pxmitpriv->vcs = NONE_VCS;
+ } else {
+diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
+index 601d4ff607bc8..9bb310b245899 100644
+--- a/drivers/staging/rtl8712/wifi.h
++++ b/drivers/staging/rtl8712/wifi.h
+@@ -374,21 +374,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
+
+ #define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
+
+-#define _SSID_IE_ 0
+-#define _SUPPORTEDRATES_IE_ 1
+-#define _DSSET_IE_ 3
+-#define _IBSS_PARA_IE_ 6
+-#define _ERPINFO_IE_ 42
+-#define _EXT_SUPPORTEDRATES_IE_ 50
+-
+-#define _HT_CAPABILITY_IE_ 45
+-#define _HT_EXTRA_INFO_IE_ 61
+-#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
+-
+-#define _VENDOR_SPECIFIC_IE_ 221
+-
+-#define _RESERVED47_ 47
+-
+ /* ---------------------------------------------------------------------------
+ * Below is the fixed elements...
+ * ---------------------------------------------------------------------------
+--
+2.40.1
+