--- /dev/null
+From 995f54ea962e03ec08b8bc6a4fe11a32b420edd3 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Thu, 8 Jul 2021 19:51:46 +0200
+Subject: drm/cma-helper: Release non-coherent memory with dma_free_noncoherent()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 995f54ea962e03ec08b8bc6a4fe11a32b420edd3 upstream.
+
+The GEM CMA helpers allocate non-coherent (i.e., cached) backing storage
+with dma_alloc_noncoherent(), but release it with dma_free_wc(). Fix this
+with a call to dma_free_noncoherent(). Writecombining storage is still
+released with dma_free_wc().
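+
+For illustration only, the pairing the fix restores, as a minimal sketch
+with made-up helper names (buf_alloc()/buf_free() are not part of the
+driver):
+
+  static void *buf_alloc(struct device *dev, size_t size, dma_addr_t *dma,
+                         bool noncoherent)
+  {
+          if (noncoherent)        /* cached backing storage */
+                  return dma_alloc_noncoherent(dev, size, dma,
+                                               DMA_TO_DEVICE, GFP_KERNEL);
+          return dma_alloc_wc(dev, size, dma, GFP_KERNEL);
+  }
+
+  static void buf_free(struct device *dev, size_t size, void *vaddr,
+                       dma_addr_t dma, bool noncoherent)
+  {
+          if (noncoherent)
+                  dma_free_noncoherent(dev, size, vaddr, dma,
+                                       DMA_TO_DEVICE);
+          else
+                  dma_free_wc(dev, size, vaddr, dma);
+  }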
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: cf8ccbc72d61 ("drm: Add support for GEM buffers backed by non-coherent memory")
+Acked-by: Paul Cercueil <paul@crapouillou.net>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.14+
+Link: https://patchwork.freedesktop.org/patch/msgid/20210708175146.10618-1-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_gem_cma_helper.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem_cma_helper.c
++++ b/drivers/gpu/drm/drm_gem_cma_helper.c
+@@ -210,8 +210,13 @@ void drm_gem_cma_free_object(struct drm_
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
+ drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ } else if (cma_obj->vaddr) {
+- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+- cma_obj->vaddr, cma_obj->paddr);
++ if (cma_obj->map_noncoherent)
++ dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
++ cma_obj->vaddr, cma_obj->paddr,
++ DMA_TO_DEVICE);
++ else
++ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
++ cma_obj->vaddr, cma_obj->paddr);
+ }
+
+ drm_gem_object_release(gem_obj);
--- /dev/null
+From af957eebfcc17433ee83ab85b1195a933ab5049c Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Mon, 15 Nov 2021 15:18:36 +0200
+Subject: KVM: nVMX: don't use vcpu->arch.efer when checking host state on nested state load
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit af957eebfcc17433ee83ab85b1195a933ab5049c upstream.
+
+When loading nested state, don't check vcpu->arch.efer to get the L1
+host's 64-bit vs. 32-bit state, and don't check it for consistency with
+respect to VM_EXIT_HOST_ADDR_SPACE_SIZE, as the register state in the
+vCPU may be stale when KVM_SET_NESTED_STATE is called---and
+architecturally it does not exist. When restoring L2 state in KVM, the
+CPU is placed in non-root mode, where the nested VMX code has no
+snapshot of the L1 host state: VMX (conditionally) loads the host state
+fields on VM-exit, but they need not correspond to the state before
+entry. A simple case occurs in KVM itself, where the host RIP field
+points to vmx_vmexit rather than to the instruction following
+vmlaunch/vmresume.
+
+However, for the particular case of L1 being in 32- or 64-bit mode
+on entry, the exit controls can be treated instead as the source of
+truth regarding the state of L1 on entry, and can be used to check
+that vmcs12.VM_EXIT_HOST_ADDR_SPACE_SIZE matches vmcs12.HOST_EFER if
+vmcs12.VM_EXIT_LOAD_IA32_EFER is set. The consistency check on CPU
+EFER vs. vmcs12.VM_EXIT_HOST_ADDR_SPACE_SIZE, instead, happens only
+on VM-Enter. That's because, again, there's conceptually no "current"
+L1 EFER to check on KVM_SET_NESTED_STATE.
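+
+Roughly, the check performed on the KVM_SET_NESTED_STATE path reduces to
+the following sketch (simplified, not the literal upstream hunks):
+
+  /* Derive L1's host address-space size from the exit controls ... */
+  ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
+
+  /*
+   * ... and cross-check it against HOST_EFER only when that field is
+   * actually loaded on VM-exit; vcpu->arch.efer is not consulted.
+   */
+  if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) &&
+      ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA))
+          return -EINVAL;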
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20211115131837.195527-2-mlevitsk@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2854,6 +2854,17 @@ static int nested_vmx_check_controls(str
+ return 0;
+ }
+
++static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
++ struct vmcs12 *vmcs12)
++{
++#ifdef CONFIG_X86_64
++ if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
++ !!(vcpu->arch.efer & EFER_LMA)))
++ return -EINVAL;
++#endif
++ return 0;
++}
++
+ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+ {
+@@ -2878,18 +2889,16 @@ static int nested_vmx_check_host_state(s
+ return -EINVAL;
+
+ #ifdef CONFIG_X86_64
+- ia32e = !!(vcpu->arch.efer & EFER_LMA);
++ ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
+ #else
+ ia32e = false;
+ #endif
+
+ if (ia32e) {
+- if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
+- CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
++ if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
+ return -EINVAL;
+ } else {
+- if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
+- CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
++ if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
+ CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
+ CC((vmcs12->host_rip) >> 32))
+ return -EINVAL;
+@@ -3559,6 +3568,9 @@ static int nested_vmx_run(struct kvm_vcp
+ if (nested_vmx_check_controls(vcpu, vmcs12))
+ return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
++ if (nested_vmx_check_address_space_size(vcpu, vmcs12))
++ return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
++
+ if (nested_vmx_check_host_state(vcpu, vmcs12))
+ return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+
--- /dev/null
+From 79b11142763791bdead8b6460052cbdde8e08e2f Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 9 Nov 2021 21:50:56 +0000
+Subject: KVM: SEV: Disallow COPY_ENC_CONTEXT_FROM if target has created vCPUs
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 79b11142763791bdead8b6460052cbdde8e08e2f upstream.
+
+Reject COPY_ENC_CONTEXT_FROM if the destination VM has created vCPUs.
+KVM relies on SEV activation to occur before vCPUs are created, e.g. to
+set VMCB flags and intercepts correctly.
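+
+For context, a sketch of the ordering userspace must follow (fd names
+are placeholders, error handling omitted):
+
+  int dst_vm = ioctl(kvm_fd, KVM_CREATE_VM, 0);
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+          .args = { src_vm_fd },          /* SEV-enabled source VM */
+  };
+
+  /*
+   * Must happen before any KVM_CREATE_VCPU on dst_vm; with this patch
+   * the ioctl fails with -EINVAL once a vCPU has been created.
+   */
+  ioctl(dst_vm, KVM_ENABLE_CAP, &cap);
+  ioctl(dst_vm, KVM_CREATE_VCPU, 0);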
+
+Fixes: 54526d1fd593 ("KVM: x86: Support KVM VMs sharing SEV context")
+Cc: stable@vger.kernel.org
+Cc: Peter Gonda <pgonda@google.com>
+Cc: Marc Orr <marcorr@google.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: Nathan Tempelman <natet@google.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20211109215101.2211373-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1787,7 +1787,12 @@ int svm_vm_copy_asid_from(struct kvm *kv
+ mutex_unlock(&source_kvm->lock);
+ mutex_lock(&kvm->lock);
+
+- if (sev_guest(kvm)) {
++ /*
++ * Disallow out-of-band SEV/SEV-ES init if the target is already an
++ * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
++ * created after SEV/SEV-ES initialization, e.g. to init intercepts.
++ */
++ if (sev_guest(kvm) || kvm->created_vcpus) {
+ ret = -EINVAL;
+ goto e_mirror_unlock;
+ }
--- /dev/null
+From 9119570039481d56350af1c636f040fb300b8cf3 Mon Sep 17 00:00:00 2001
+From: Meng Li <meng.li@windriver.com>
+Date: Mon, 15 Nov 2021 15:04:23 +0800
+Subject: net: stmmac: socfpga: add runtime suspend/resume callback for stratix10 platform
+
+From: Meng Li <meng.li@windriver.com>
+
+commit 9119570039481d56350af1c636f040fb300b8cf3 upstream.
+
+Upstream commit 5ec55823438e ("net: stmmac: add clocks management
+for gmac driver") improved clock management in the stmmac driver.
+As a result, the runtime suspend/resume callbacks must also be
+implemented in the dwmac-socfpga driver, because it does not use
+the common stmmac_pltfr_pm_ops instance. Otherwise, the clocks are
+not disabled when the system enters suspend.
+
+Fixes: 5ec55823438e ("net: stmmac: add clocks management for gmac driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Meng Li <Meng.Li@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 24 ++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct d
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
+-static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+- socfpga_dwmac_resume);
++static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
++{
++ struct net_device *ndev = dev_get_drvdata(dev);
++ struct stmmac_priv *priv = netdev_priv(ndev);
++
++ stmmac_bus_clks_config(priv, false);
++
++ return 0;
++}
++
++static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
++{
++ struct net_device *ndev = dev_get_drvdata(dev);
++ struct stmmac_priv *priv = netdev_priv(ndev);
++
++ return stmmac_bus_clks_config(priv, true);
++}
++
++static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
++ SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
++};
+
+ static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
+ .set_phy_mode = socfpga_gen5_set_phy_mode,
--- /dev/null
+From 5d5e4522a7f404d1a96fd6c703989d32a9c9568d Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Sun, 7 Nov 2021 14:51:16 +1000
+Subject: printk: restore flushing of NMI buffers on remote CPUs after NMI backtraces
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 5d5e4522a7f404d1a96fd6c703989d32a9c9568d upstream.
+
+printk from NMI context relies on irq work being raised on the local CPU
+to print to the console. This can be a problem if the NMI was raised by
+a lockup detector to print the lockup stack and regs, because the CPU
+may not enable irqs (because it is locked up).
+
+Introduce printk_trigger_flush(), which can be called from another CPU
+to try to get those messages to the console, and call it where
+printk_safe_flush() was previously called.
+
+Fixes: 93d102f094be ("printk: remove safe buffers")
+Cc: stable@vger.kernel.org # 5.15
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20211107045116.1754411-1-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/watchdog.c | 6 ++++++
+ include/linux/printk.h | 4 ++++
+ kernel/printk/printk.c | 5 +++++
+ lib/nmi_backtrace.c | 6 ++++++
+ 4 files changed, 21 insertions(+)
+
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -187,6 +187,12 @@ static void watchdog_smp_panic(int cpu,
+ if (sysctl_hardlockup_all_cpu_backtrace)
+ trigger_allbutself_cpu_backtrace();
+
++ /*
++ * Force flush any remote buffers that might be stuck in IRQ context
++ * and therefore could not run their irq_work.
++ */
++ printk_trigger_flush();
++
+ if (hardlockup_panic)
+ nmi_panic(NULL, "Hard LOCKUP");
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -198,6 +198,7 @@ void dump_stack_print_info(const char *l
+ void show_regs_print_info(const char *log_lvl);
+ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+ extern asmlinkage void dump_stack(void) __cold;
++void printk_trigger_flush(void);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -274,6 +275,9 @@ static inline void dump_stack_lvl(const
+ static inline void dump_stack(void)
+ {
+ }
++static inline void printk_trigger_flush(void)
++{
++}
+ #endif
+
+ #ifdef CONFIG_SMP
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3252,6 +3252,11 @@ void defer_console_output(void)
+ preempt_enable();
+ }
+
++void printk_trigger_flush(void)
++{
++ defer_console_output();
++}
++
+ int vprintk_deferred(const char *fmt, va_list args)
+ {
+ int r;
+--- a/lib/nmi_backtrace.c
++++ b/lib/nmi_backtrace.c
+@@ -75,6 +75,12 @@ void nmi_trigger_cpumask_backtrace(const
+ touch_softlockup_watchdog();
+ }
+
++ /*
++ * Force flush any remote buffers that might be stuck in IRQ context
++ * and therefore could not run their irq_work.
++ */
++ printk_trigger_flush();
++
+ clear_bit_unlock(0, &backtrace_flag);
+ put_cpu();
+ }
s390-dump-fix-copying-to-user-space-of-swapped-kdump-oldmem.patch
block-check-admin-before-nice-for-ioprio_class_rt.patch
fbdev-prevent-probing-generic-drivers-if-a-fb-is-already-registered.patch
+kvm-sev-disallow-copy_enc_context_from-if-target-has-created-vcpus.patch
+kvm-nvmx-don-t-use-vcpu-arch.efer-when-checking-host-state-on-nested-state-load.patch
+drm-cma-helper-release-non-coherent-memory-with-dma_free_noncoherent.patch
+printk-restore-flushing-of-nmi-buffers-on-remote-cpus-after-nmi-backtraces.patch
+udf-fix-crash-after-seekdir.patch
+spi-fix-use-after-free-of-the-add_lock-mutex.patch
+net-stmmac-socfpga-add-runtime-suspend-resume-callback-for-stratix10-platform.patch
--- /dev/null
+From 6c53b45c71b4920b5e62f0ea8079a1da382b9434 Mon Sep 17 00:00:00 2001
+From: Michael Walle <michael@walle.cc>
+Date: Thu, 11 Nov 2021 09:37:13 +0100
+Subject: spi: fix use-after-free of the add_lock mutex
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Walle <michael@walle.cc>
+
+commit 6c53b45c71b4920b5e62f0ea8079a1da382b9434 upstream.
+
+Commit 6098475d4cb4 ("spi: Fix deadlock when adding SPI controllers on
+SPI buses") introduced a per-controller mutex. But mutex_unlock() of
+said lock is called after the controller is already freed:
+
+ spi_unregister_controller(ctlr)
+ -> put_device(&ctlr->dev)
+ -> spi_controller_release(dev)
+ -> mutex_unlock(&ctrl->add_lock)
+
+Move the put_device() after the mutex_unlock().
+
+Fixes: 6098475d4cb4 ("spi: Fix deadlock when adding SPI controllers on SPI buses")
+Signed-off-by: Michael Walle <michael@walle.cc>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Reviewed-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v5.15
+Link: https://lore.kernel.org/r/20211111083713.3335171-1-michael@walle.cc
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -3020,12 +3020,6 @@ void spi_unregister_controller(struct sp
+
+ device_del(&ctlr->dev);
+
+- /* Release the last reference on the controller if its driver
+- * has not yet been converted to devm_spi_alloc_master/slave().
+- */
+- if (!ctlr->devm_allocated)
+- put_device(&ctlr->dev);
+-
+ /* free bus id */
+ mutex_lock(&board_lock);
+ if (found == ctlr)
+@@ -3034,6 +3028,12 @@ void spi_unregister_controller(struct sp
+
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&ctlr->add_lock);
++
++ /* Release the last reference on the controller if its driver
++ * has not yet been converted to devm_spi_alloc_master/slave().
++ */
++ if (!ctlr->devm_allocated)
++ put_device(&ctlr->dev);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
--- /dev/null
+From a48fc69fe6588b48d878d69de223b91a386a7cb4 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 4 Nov 2021 15:22:35 +0100
+Subject: udf: Fix crash after seekdir
+
+From: Jan Kara <jack@suse.cz>
+
+commit a48fc69fe6588b48d878d69de223b91a386a7cb4 upstream.
+
+udf_readdir() didn't validate the directory position it should start
+reading from. Thus when the user calls lseek(2) on a directory file
+descriptor, udf_readdir() can be tricked into reading from a position in
+the middle of a directory entry, which then upsets the directory parsing
+code, resulting in errors or even possible kernel crashes. Similarly,
+when the directory is modified between two readdir calls, the directory
+position need not be valid anymore.
+
+Add code to validate the current offset in the directory. This is
+actually rather expensive for UDF, as we need to read from the beginning
+of the directory and parse all directory entries. This is because in UDF
+a directory is just a stream of data containing directory entries, and
+since file names are fully under the user's control, we cannot depend on
+detecting magic numbers and checksums in the header of a directory
+entry, as a malicious attacker could fake them. We skip this step if we
+detect that nothing has changed since the last readdir call.
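+
+The "did anything change" test relies on the directory inode's iversion
+counter; a condensed sketch of the pattern (not the literal hunks):
+
+  /* whenever a directory entry is written (udf_write_fi()) */
+  inode_inc_iversion(dir);
+
+  /* in udf_readdir() */
+  if (!inode_eq_iversion(dir, file->f_version)) {
+          /* position may be bogus: rescan from 0, resume emitting at it */
+          emit_pos = nf_pos;
+          nf_pos = 0;
+  }
+  ...
+  file->f_version = inode_query_iversion(dir);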
+
+Reported-by: Nathan Wilson <nate@chickenbrittle.com>
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/dir.c | 32 ++++++++++++++++++++++++++++++--
+ fs/udf/namei.c | 3 +++
+ fs/udf/super.c | 2 ++
+ 3 files changed, 35 insertions(+), 2 deletions(-)
+
+--- a/fs/udf/dir.c
++++ b/fs/udf/dir.c
+@@ -31,6 +31,7 @@
+ #include <linux/mm.h>
+ #include <linux/slab.h>
+ #include <linux/bio.h>
++#include <linux/iversion.h>
+
+ #include "udf_i.h"
+ #include "udf_sb.h"
+@@ -43,7 +44,7 @@ static int udf_readdir(struct file *file
+ struct fileIdentDesc *fi = NULL;
+ struct fileIdentDesc cfi;
+ udf_pblk_t block, iblock;
+- loff_t nf_pos;
++ loff_t nf_pos, emit_pos = 0;
+ int flen;
+ unsigned char *fname = NULL, *copy_name = NULL;
+ unsigned char *nameptr;
+@@ -57,6 +58,7 @@ static int udf_readdir(struct file *file
+ int i, num, ret = 0;
+ struct extent_position epos = { NULL, 0, {0, 0} };
+ struct super_block *sb = dir->i_sb;
++ bool pos_valid = false;
+
+ if (ctx->pos == 0) {
+ if (!dir_emit_dot(file, ctx))
+@@ -67,6 +69,21 @@ static int udf_readdir(struct file *file
+ if (nf_pos >= size)
+ goto out;
+
++ /*
++ * Something changed since last readdir (either lseek was called or dir
++ * changed)? We need to verify the position correctly points at the
++ * beginning of some dir entry so that the directory parsing code does
++ * not get confused. Since UDF does not have any reliable way of
++ * identifying beginning of dir entry (names are under user control),
++ * we need to scan the directory from the beginning.
++ */
++ if (!inode_eq_iversion(dir, file->f_version)) {
++ emit_pos = nf_pos;
++ nf_pos = 0;
++ } else {
++ pos_valid = true;
++ }
++
+ fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+ if (!fname) {
+ ret = -ENOMEM;
+@@ -122,13 +139,21 @@ static int udf_readdir(struct file *file
+
+ while (nf_pos < size) {
+ struct kernel_lb_addr tloc;
++ loff_t cur_pos = nf_pos;
+
+- ctx->pos = (nf_pos >> 2) + 1;
++ /* Update file position only if we got past the current one */
++ if (nf_pos >= emit_pos) {
++ ctx->pos = (nf_pos >> 2) + 1;
++ pos_valid = true;
++ }
+
+ fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
+ &elen, &offset);
+ if (!fi)
+ goto out;
++ /* Still not at offset where user asked us to read from? */
++ if (cur_pos < emit_pos)
++ continue;
+
+ liu = le16_to_cpu(cfi.lengthOfImpUse);
+ lfi = cfi.lengthFileIdent;
+@@ -186,8 +211,11 @@ static int udf_readdir(struct file *file
+ } /* end while */
+
+ ctx->pos = (nf_pos >> 2) + 1;
++ pos_valid = true;
+
+ out:
++ if (pos_valid)
++ file->f_version = inode_query_iversion(dir);
+ if (fibh.sbh != fibh.ebh)
+ brelse(fibh.ebh);
+ brelse(fibh.sbh);
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -30,6 +30,7 @@
+ #include <linux/sched.h>
+ #include <linux/crc-itu-t.h>
+ #include <linux/exportfs.h>
++#include <linux/iversion.h>
+
+ static inline int udf_match(int len1, const unsigned char *name1, int len2,
+ const unsigned char *name2)
+@@ -134,6 +135,8 @@ int udf_write_fi(struct inode *inode, st
+ mark_buffer_dirty_inode(fibh->ebh, inode);
+ mark_buffer_dirty_inode(fibh->sbh, inode);
+ }
++ inode_inc_iversion(inode);
++
+ return 0;
+ }
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -57,6 +57,7 @@
+ #include <linux/crc-itu-t.h>
+ #include <linux/log2.h>
+ #include <asm/byteorder.h>
++#include <linux/iversion.h>
+
+ #include "udf_sb.h"
+ #include "udf_i.h"
+@@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(str
+ init_rwsem(&ei->i_data_sem);
+ ei->cached_extent.lstart = -1;
+ spin_lock_init(&ei->i_extent_cache_lock);
++ inode_set_iversion(&ei->vfs_inode, 1);
+
+ return &ei->vfs_inode;
+ }