--- /dev/null
+From 57b3006492a4c11b2d4a772b5b2905d544a32037 Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Date: Thu, 22 Aug 2019 11:32:00 +0300
+Subject: ACPI / LPSS: Save/restore LPSS private registers also on Lynxpoint
+
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+
+commit 57b3006492a4c11b2d4a772b5b2905d544a32037 upstream.
+
+My assumption in commit b53548f9d9e4 ("spi: pxa2xx: Remove LPSS private
+register restoring during resume") that Intel Lynxpoint and compatible
+chipsets may not need LPSS private register saving and restoring over
+a suspend/resume cycle turned out to be false on Intel Broadwell.
+
+Curtis Malainey sent a patch bringing the above change back and
+reported that LPSS SPI Chip Select control was lost over a
+suspend/resume cycle on a Broadwell machine.
+
+Instead of reverting the above commit, let's add LPSS private register
+saving/restoring also for all LPSS SPI, I2C and UART controllers on
+Lynxpoint and compatible chipsets to make sure the context is not lost
+in case nothing else preserves it, e.g. firmware, or if LPSS is always
+on.
+
+Fixes: b53548f9d9e4 ("spi: pxa2xx: Remove LPSS private register restoring during resume")
+Reported-by: Curtis Malainey <cujomalainey@chromium.org>
+Tested-by: Curtis Malainey <cujomalainey@chromium.org>
+Cc: 5.0+ <stable@vger.kernel.org> # 5.0+
+Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpi_lpss.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_pr
+ }
+
+ static const struct lpss_device_desc lpt_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
++ | LPSS_SAVE_CTX,
+ .prv_offset = 0x800,
+ };
+
+ static const struct lpss_device_desc lpt_i2c_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
+ .prv_offset = 0x800,
+ };
+
+@@ -236,7 +237,8 @@ static struct property_entry uart_proper
+ };
+
+ static const struct lpss_device_desc lpt_uart_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
++ | LPSS_SAVE_CTX,
+ .clk_con_id = "baudclk",
+ .prv_offset = 0x800,
+ .setup = lpss_uart_setup,
--- /dev/null
+From c1d3ad84eae35414b6b334790048406bd6301b12 Mon Sep 17 00:00:00 2001
+From: Denis Kenzior <denkenz@gmail.com>
+Date: Wed, 28 Aug 2019 16:11:10 -0500
+Subject: cfg80211: Purge frame registrations on iftype change
+
+From: Denis Kenzior <denkenz@gmail.com>
+
+commit c1d3ad84eae35414b6b334790048406bd6301b12 upstream.
+
+Currently, frame registrations are not purged, even when changing the
+interface type. This can lead to weird situations where frames that
+are possibly not allowed on a given interface type remain registered
+because the type switch happened after registration.
+
+The kernel currently relies on userspace apps to actually purge the
+registrations themselves; this is not something that the kernel
+should rely on.
+
+Add a call to cfg80211_mlme_purge_registrations() to forcefully remove
+any registrations left over prior to switching the iftype.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Denis Kenzior <denkenz@gmail.com>
+Link: https://lore.kernel.org/r/20190828211110.15005-1-denkenz@gmail.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/util.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -960,6 +960,7 @@ int cfg80211_change_iface(struct cfg8021
+ }
+
+ cfg80211_process_rdev_events(rdev);
++ cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
+ }
+
+ err = rdev_change_virtual_intf(rdev, dev, ntype, params);
--- /dev/null
+From 63d37fb4ce5ae7bf1e58f906d1bf25f036fe79b2 Mon Sep 17 00:00:00 2001
+From: Murphy Zhou <jencce.kernel@gmail.com>
+Date: Sat, 21 Sep 2019 19:26:00 +0800
+Subject: CIFS: fix max ea value size
+
+From: Murphy Zhou <jencce.kernel@gmail.com>
+
+commit 63d37fb4ce5ae7bf1e58f906d1bf25f036fe79b2 upstream.
+
+It should not be larger than the slab max buf size. If the user
+specifies a larger size, it passes this check and goes straight
+to SMB2_set_info_init, performing an insecure memcpy.
+
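+For reference, "this check" is the size guard at the top of the xattr
+set path in fs/cifs/xattr.c; a simplified sketch, not the full
+function:
+
+	if (size > MAX_EA_VALUE_SIZE)
+		return -EOPNOTSUPP;
+
+With the limit tied to CIFSMaxBufSize, a value too large for the slab
+buffer is now rejected here instead of reaching SMB2_set_info_init.
+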
+Signed-off-by: Murphy Zhou <jencce.kernel@gmail.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/xattr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/xattr.c
++++ b/fs/cifs/xattr.c
+@@ -31,7 +31,7 @@
+ #include "cifs_fs_sb.h"
+ #include "cifs_unicode.h"
+
+-#define MAX_EA_VALUE_SIZE 65535
++#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
+ #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
+ #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
+ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
--- /dev/null
+From a016e2794fc3a245a91946038dd8f34d65e53cc3 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Thu, 26 Sep 2019 12:31:20 -0700
+Subject: CIFS: Fix oplock handling for SMB 2.1+ protocols
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit a016e2794fc3a245a91946038dd8f34d65e53cc3 upstream.
+
+There may be situations when a server negotiates SMB 2.1
+protocol version or higher but responds to a CREATE request
+with an oplock rather than a lease.
+
+Currently the client doesn't handle such a case correctly:
+when another CREATE comes in, the server sends an oplock
+break for the initial CREATE, and the client doesn't send
+an ack back due to a wrong caching level being set (READ
+instead of RWH). A missing oplock break ack makes the
+server wait until the break times out, which dramatically
+increases the latency of the second CREATE.
+
+Fix this by properly detecting oplocks when using SMB 2.1
+protocol version and higher.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2ops.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3254,6 +3254,11 @@ smb21_set_oplock_level(struct cifsInodeI
+ if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+ return;
+
++ /* Check if the server granted an oplock rather than a lease */
++ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++ return smb2_set_oplock_level(cinode, oplock, epoch,
++ purge_cache);
++
+ if (oplock & SMB2_LEASE_READ_CACHING_HE) {
+ new_oplock |= CIFS_CACHE_READ_FLG;
+ strcat(message, "R");
--- /dev/null
+From 8619e5bdeee8b2c685d686281f2d2a6017c4bc15 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Mon, 26 Aug 2019 22:13:25 +0900
+Subject: /dev/mem: Bail out upon SIGKILL.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit 8619e5bdeee8b2c685d686281f2d2a6017c4bc15 upstream.
+
+syzbot found that a thread can stall for minutes inside read_mem() or
+write_mem() after that thread was killed by SIGKILL [1]. Reading from
+iomem areas of /dev/mem can be slow, depending on the hardware.
+While reading 2GB in one read() is legal, delaying termination of a
+killed thread for minutes is bad. Thus, allow reading/writing /dev/mem
+and /dev/kmem to be preemptible and killable.
+
+ [ 1335.912419][T20577] read_mem: sz=4096 count=2134565632
+ [ 1335.943194][T20577] read_mem: sz=4096 count=2134561536
+ [ 1335.978280][T20577] read_mem: sz=4096 count=2134557440
+ [ 1336.011147][T20577] read_mem: sz=4096 count=2134553344
+ [ 1336.041897][T20577] read_mem: sz=4096 count=2134549248
+
+Theoretically, reading/writing /dev/mem and /dev/kmem can become
+"interruptible". But this patch chose "killable". Future patch will make
+them "interruptible" so that we can revert to "killable" if some program
+regressed.
+
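+For reference, the difference between the two modes comes down to which
+signal-check helper should_stop_iteration() uses; a sketch, not part of
+this patch:
+
+	return fatal_signal_pending(current);	/* "killable": only SIGKILL */
+	return signal_pending(current);		/* "interruptible": any signal */
+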
+[1] https://syzkaller.appspot.com/bug?id=a0e3436829698d5824231251fad9d8e998f94f5e
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: stable <stable@vger.kernel.org>
+Reported-by: syzbot <syzbot+8ab2d0f39fb79fe6ca40@syzkaller.appspotmail.com>
+Link: https://lore.kernel.org/r/1566825205-10703-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/mem.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_add
+ }
+ #endif
+
++static inline bool should_stop_iteration(void)
++{
++ if (need_resched())
++ cond_resched();
++ return fatal_signal_pending(current);
++}
++
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *fil
+ p += sz;
+ count -= sz;
+ read += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ kfree(bounce);
+
+@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *fi
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *fi
+ read += sz;
+ low_count -= sz;
+ count -= sz;
++ if (should_stop_iteration()) {
++ count = 0;
++ break;
++ }
+ }
+ }
+
+@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *fi
+ buf += sz;
+ read += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
+@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned lo
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *f
+ buf += sz;
+ virtr += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
--- /dev/null
+From c1e8220bd316d8ae8e524df39534b8a412a45d5e Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Fri, 23 Aug 2019 22:38:00 -0400
+Subject: ext4: fix punch hole for inline_data file systems
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit c1e8220bd316d8ae8e524df39534b8a412a45d5e upstream.
+
+If a program attempts to punch a hole on an inline data file, we need
+to convert it to a normal file first.
+
+This was detected by running ext4/032 with the adv configuration.
+Simple reproducer:
+
+mke2fs -Fq -t ext4 -O inline_data /dev/vdc
+mount /vdc
+echo "" > /vdc/testfile
+xfs_io -c 'truncate 33554432' /vdc/testfile
+xfs_io -c 'fpunch 0 1048576' /vdc/testfile
+umount /vdc
+e2fsck -fy /dev/vdc
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4288,6 +4288,15 @@ int ext4_punch_hole(struct inode *inode,
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++ if (ext4_has_inline_data(inode)) {
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ ret = ext4_convert_inline_data(inode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
++ if (ret)
++ return ret;
++ }
++
+ /*
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
--- /dev/null
+From e3d550c2c4f2f3dba469bc3c4b83d9332b4e99e1 Mon Sep 17 00:00:00 2001
+From: Rakesh Pandit <rakesh@tuxera.com>
+Date: Thu, 22 Aug 2019 22:53:46 -0400
+Subject: ext4: fix warning inside ext4_convert_unwritten_extents_endio
+
+From: Rakesh Pandit <rakesh@tuxera.com>
+
+commit e3d550c2c4f2f3dba469bc3c4b83d9332b4e99e1 upstream.
+
+Really enable the warning when CONFIG_EXT4_DEBUG is set and fix the
+missing first argument. This was introduced in commit ff95ec22cd7f
+("ext4: add warning to ext4_convert_unwritten_extents_endio"), and
+splitting extents inside endio would trigger it.
+
+Fixes: ff95ec22cd7f ("ext4: add warning to ext4_convert_unwritten_extents_endio")
+Signed-off-by: Rakesh Pandit <rakesh@tuxera.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3813,8 +3813,8 @@ static int ext4_convert_unwritten_extent
+ * illegal.
+ */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+-#ifdef EXT4_DEBUG
+- ext4_warning("Inode (%ld) finished: extent logical block %llu,"
++#ifdef CONFIG_EXT4_DEBUG
++ ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
+ " len %u; IO logical block %llu, len %u",
+ inode->i_ino, (unsigned long long)ee_block, ee_len,
+ (unsigned long long)map->m_lblk, map->m_len);
--- /dev/null
+From cf1ea0592dbf109e7e7935b7d5b1a47a1ba04174 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 29 Aug 2019 09:04:11 -0700
+Subject: fs: Export generic_fadvise()
+
+From: Jan Kara <jack@suse.cz>
+
+commit cf1ea0592dbf109e7e7935b7d5b1a47a1ba04174 upstream.
+
+Filesystems will need to call this function from their fadvise handlers.
+
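+A filesystem ->fadvise() handler can then wrap it along these lines (a
+minimal sketch with a placeholder foo_fadvise(); the xfs patch later in
+this series does exactly this around its iolock):
+
+	static int foo_fadvise(struct file *file, loff_t start, loff_t end,
+			       int advice)
+	{
+		/* take filesystem-specific locks here, then delegate */
+		return generic_fadvise(file, start, end, advice);
+	}
+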
+CC: stable@vger.kernel.org
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/fs.h | 2 ++
+ mm/fadvise.c | 4 ++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3544,6 +3544,8 @@ extern void inode_nohighmem(struct inode
+ /* mm/fadvise.c */
+ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
++extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
++ int advice);
+
+ #if defined(CONFIG_IO_URING)
+ extern struct sock *io_uring_get_socket(struct file *file);
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -27,8 +27,7 @@
+ * deactivate the pages and clear PG_Referenced.
+ */
+
+-static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+- int advice)
++int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+ {
+ struct inode *inode;
+ struct address_space *mapping;
+@@ -178,6 +177,7 @@ static int generic_fadvise(struct file *
+ }
+ return 0;
+ }
++EXPORT_SYMBOL(generic_fadvise);
+
+ int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+ {
--- /dev/null
+From 78887832e76541f77169a24ac238fccb51059b63 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Tue, 17 Sep 2019 11:54:50 +0200
+Subject: hwrng: core - don't wait on add_early_randomness()
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+commit 78887832e76541f77169a24ac238fccb51059b63 upstream.
+
+add_early_randomness() is called by hwrng_register() when the
+hardware is added. If this hardware and its module are present
+at boot, and if no data is available, the boot hangs until data
+becomes available and cannot be interrupted.
+
+For instance, in the case of virtio-rng, the host may not be able to
+provide enough entropy for all the guests.
+
+There are two easy ways to reproduce the problem, but both rely on
+misconfiguration of the hypervisor or the egd daemon:
+
+- the virtio-rng device is configured to connect to the host's egd
+daemon, but the daemon is not connected when the virtio-rng driver
+asks for data;
+
+- the virtio-rng device is configured to connect to the host's egd
+daemon, but the daemon doesn't provide data.
+
+The guest kernel will hang at boot until the virtio-rng driver provides
+enough data.
+
+To avoid that, call rng_get_data() in non-blocking mode (wait=0)
+from add_early_randomness().
+
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Fixes: d9e797261933 ("hwrng: add randomness to system from rng...")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/hw_random/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -67,7 +67,7 @@ static void add_early_randomness(struct
+ size_t size = min_t(size_t, 16, rng_buffer_size());
+
+ mutex_lock(&reading_mutex);
+- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
++ bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0)
+ add_device_randomness(rng_buffer, bytes_read);
--- /dev/null
+From a71e2ac1f32097fbb2beab098687a7a95c84543e Mon Sep 17 00:00:00 2001
+From: Chris Brandt <chris.brandt@renesas.com>
+Date: Thu, 26 Sep 2019 07:19:09 -0500
+Subject: i2c: riic: Clear NACK in tend isr
+
+From: Chris Brandt <chris.brandt@renesas.com>
+
+commit a71e2ac1f32097fbb2beab098687a7a95c84543e upstream.
+
+The NACKF flag should be cleared in the INTRIICNAKI interrupt
+processing, as described in the HW manual.
+
+This issue shows up quickly when PREEMPT_RT is applied and a device is
+probed that is not plugged in (like a touchscreen controller). The result
+is endless interrupts that halt system boot.
+
+Fixes: 310c18a41450 ("i2c: riic: add driver")
+Cc: stable@vger.kernel.org
+Reported-by: Chien Nguyen <chien.nguyen.eb@rvc.renesas.com>
+Signed-off-by: Chris Brandt <chris.brandt@renesas.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-riic.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -202,6 +202,7 @@ static irqreturn_t riic_tend_isr(int irq
+ if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
+ /* We got a NACKIE */
+ readb(riic->base + RIIC_ICDRR); /* dummy read */
++ riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
+ riic->err = -ENXIO;
+ } else if (riic->bytes_left) {
+ return IRQ_NONE;
--- /dev/null
+From 383035211c79d4d98481a09ad429b31c7dbf22bd Mon Sep 17 00:00:00 2001
+From: Tony Camuso <tcamuso@redhat.com>
+Date: Thu, 22 Aug 2019 08:24:53 -0400
+Subject: ipmi: move message error checking to avoid deadlock
+
+From: Tony Camuso <tcamuso@redhat.com>
+
+commit 383035211c79d4d98481a09ad429b31c7dbf22bd upstream.
+
+V1->V2: in handle_one_rcv_msg, if data_size > 2, set requeue to zero and
+ goto out instead of calling ipmi_free_msg.
+ Kosuke Tatsukawa <tatsu@ab.jp.nec.com>
+
+In the source stack trace below, function set_need_watch tries to
+take out the same si_lock that was taken earlier by ipmi_thread.
+
+ipmi_thread() [drivers/char/ipmi/ipmi_si_intf.c:995]
+ smi_event_handler() [drivers/char/ipmi/ipmi_si_intf.c:765]
+ handle_transaction_done() [drivers/char/ipmi/ipmi_si_intf.c:555]
+ deliver_recv_msg() [drivers/char/ipmi/ipmi_si_intf.c:283]
+ ipmi_smi_msg_received() [drivers/char/ipmi/ipmi_msghandler.c:4503]
+ intf_err_seq() [drivers/char/ipmi/ipmi_msghandler.c:1149]
+ smi_remove_watch() [drivers/char/ipmi/ipmi_msghandler.c:999]
+ set_need_watch() [drivers/char/ipmi/ipmi_si_intf.c:1066]
+
+Upstream commit e1891cffd4c4896a899337a243273f0e23c028df adds code to
+ipmi_smi_msg_received() to call smi_remove_watch() via intf_err_seq(),
+and this seems to be causing the deadlock.
+
+commit e1891cffd4c4896a899337a243273f0e23c028df
+Author: Corey Minyard <cminyard@mvista.com>
+Date: Wed Oct 24 15:17:04 2018 -0500
+ ipmi: Make the smi watcher be disabled immediately when not needed
+
+The fix is to put all messages in the queue and move the message
+checking code out of ipmi_smi_msg_received() and into
+handle_one_recv_msg(), so that the checking happens after ipmi_thread
+has released its locks.
+
+Additionally, Kosuke Tatsukawa <tatsu@ab.jp.nec.com> reported that
+handle_new_recv_msgs() calls ipmi_free_msg() when handle_one_recv_msg()
+returns zero, so the call to ipmi_free_msg() in handle_one_recv_msg()
+introduced another panic when "ipmitool sensor list" was run in a
+loop. He submitted this part of the patch.
+
++free_msg:
++ requeue = 0;
++ goto out;
+
+Reported-by: Osamu Samukawa <osa-samukawa@tg.jp.nec.com>
+Characterized-by: Kosuke Tatsukawa <tatsu@ab.jp.nec.com>
+Signed-off-by: Tony Camuso <tcamuso@redhat.com>
+Fixes: e1891cffd4c4 ("ipmi: Make the smi watcher be disabled immediately when not needed")
+Cc: stable@vger.kernel.org # 5.1
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 114 ++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 57 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4215,7 +4215,53 @@ static int handle_one_recv_msg(struct ip
+ int chan;
+
+ ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+- if (msg->rsp_size < 2) {
++
++ if ((msg->data_size >= 2)
++ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
++ && (msg->data[1] == IPMI_SEND_MSG_CMD)
++ && (msg->user_data == NULL)) {
++
++ if (intf->in_shutdown)
++ goto free_msg;
++
++ /*
++ * This is the local response to a command send, start
++ * the timer for these. The user_data will not be
++ * NULL if this is a response send, and we will let
++ * response sends just go through.
++ */
++
++ /*
++ * Check for errors, if we get certain errors (ones
++ * that mean basically we can try again later), we
++ * ignore them and start the timer. Otherwise we
++ * report the error immediately.
++ */
++ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
++ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
++ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
++ && (msg->rsp[2] != IPMI_BUS_ERR)
++ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
++ int ch = msg->rsp[3] & 0xf;
++ struct ipmi_channel *chans;
++
++ /* Got an error sending the message, handle it. */
++
++ chans = READ_ONCE(intf->channel_list)->c;
++ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
++ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
++ ipmi_inc_stat(intf, sent_lan_command_errs);
++ else
++ ipmi_inc_stat(intf, sent_ipmb_command_errs);
++ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
++ } else
++ /* The message was sent, start the timer. */
++ intf_start_seq_timer(intf, msg->msgid);
++free_msg:
++ requeue = 0;
++ goto out;
++
++ } else if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+@@ -4472,62 +4518,16 @@ void ipmi_smi_msg_received(struct ipmi_s
+ unsigned long flags = 0; /* keep us warning-free. */
+ int run_to_completion = intf->run_to_completion;
+
+- if ((msg->data_size >= 2)
+- && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+- && (msg->data[1] == IPMI_SEND_MSG_CMD)
+- && (msg->user_data == NULL)) {
+-
+- if (intf->in_shutdown)
+- goto free_msg;
+-
+- /*
+- * This is the local response to a command send, start
+- * the timer for these. The user_data will not be
+- * NULL if this is a response send, and we will let
+- * response sends just go through.
+- */
+-
+- /*
+- * Check for errors, if we get certain errors (ones
+- * that mean basically we can try again later), we
+- * ignore them and start the timer. Otherwise we
+- * report the error immediately.
+- */
+- if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+- && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+- && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+- && (msg->rsp[2] != IPMI_BUS_ERR)
+- && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+- int ch = msg->rsp[3] & 0xf;
+- struct ipmi_channel *chans;
+-
+- /* Got an error sending the message, handle it. */
+-
+- chans = READ_ONCE(intf->channel_list)->c;
+- if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+- || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+- ipmi_inc_stat(intf, sent_lan_command_errs);
+- else
+- ipmi_inc_stat(intf, sent_ipmb_command_errs);
+- intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+- } else
+- /* The message was sent, start the timer. */
+- intf_start_seq_timer(intf, msg->msgid);
+-
+-free_msg:
+- ipmi_free_smi_msg(msg);
+- } else {
+- /*
+- * To preserve message order, we keep a queue and deliver from
+- * a tasklet.
+- */
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+- list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+- flags);
+- }
++ /*
++ * To preserve message order, we keep a queue and deliver from
++ * a tasklet.
++ */
++ if (!run_to_completion)
++ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
++ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
++ if (!run_to_completion)
++ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
++ flags);
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
--- /dev/null
+From 9d4b45d6af442237560d0bb5502a012baa5234b7 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Tue, 20 Aug 2019 10:21:09 +1000
+Subject: md: don't report active array_state until after revalidate_disk() completes.
+
+From: NeilBrown <neilb@suse.com>
+
+commit 9d4b45d6af442237560d0bb5502a012baa5234b7 upstream.
+
+Until revalidate_disk() has completed, the size of a new md array will
+appear to be zero.
+So we shouldn't report, through array_state, that the array is active
+until that time.
+udev rules check array_state to see if the array is ready. As soon as
+it appears to be ready, fsck can be run. If it finds the size to be
+zero, it will fail.
+
+So add a new flag to provide an interlock between do_md_run() and
+array_state_show(). This flag is set while do_md_run() is active and
+it prevents array_state_show() from reporting that the array is
+active.
+
+Before do_md_run() is called, ->pers will be NULL, so the array is
+definitely not active.
+After do_md_run() is called, revalidate_disk() will have run and the
+array will be completely ready.
+
+We also move various sysfs_notify*() calls out of md_run() into
+do_md_run() after MD_NOT_READY is cleared. This ensures the
+information is ready before the notification is sent.
+
+Prior to v4.12, array_state_show() was called with the
+mddev->reconfig_mutex held, which provided exclusion with do_md_run().
+
+Note that MD_NOT_READY is cleared twice. This is deliberate, to cover
+both success and error paths with minimal noise.
+
+Fixes: b7b17c9b67e5 ("md: remove mddev_lock() from md_attr_show()")
+Cc: stable@vger.kernel.org (v4.12++)
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 11 +++++++----
+ drivers/md/md.h | 3 +++
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4105,7 +4105,7 @@ array_state_show(struct mddev *mddev, ch
+ {
+ enum array_state st = inactive;
+
+- if (mddev->pers)
++ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
+ switch(mddev->ro) {
+ case 1:
+ st = readonly;
+@@ -5660,9 +5660,6 @@ int md_run(struct mddev *mddev)
+ md_update_sb(mddev, 0);
+
+ md_new_event(mddev);
+- sysfs_notify_dirent_safe(mddev->sysfs_state);
+- sysfs_notify_dirent_safe(mddev->sysfs_action);
+- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ return 0;
+
+ abort:
+@@ -5676,6 +5673,7 @@ static int do_md_run(struct mddev *mddev
+ {
+ int err;
+
++ set_bit(MD_NOT_READY, &mddev->flags);
+ err = md_run(mddev);
+ if (err)
+ goto out;
+@@ -5696,9 +5694,14 @@ static int do_md_run(struct mddev *mddev
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ mddev->changed = 1;
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
++ sysfs_notify_dirent_safe(mddev->sysfs_state);
++ sysfs_notify_dirent_safe(mddev->sysfs_action);
++ sysfs_notify(&mddev->kobj, NULL, "degraded");
+ out:
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ return err;
+ }
+
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -236,6 +236,9 @@ enum mddev_flags {
+ MD_UPDATING_SB, /* md_check_recovery is updating the metadata
+ * without explicitly holding reconfig_mutex.
+ */
++ MD_NOT_READY, /* do_md_run() is active, so 'array_state'
++ * must not report that array is ready yet
++ */
+ };
+
+ enum mddev_sb_flags {
--- /dev/null
+From 480523feae581ab714ba6610388a3b4619a2f695 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Tue, 20 Aug 2019 10:21:09 +1000
+Subject: md: only call set_in_sync() when it is expected to succeed.
+
+From: NeilBrown <neilb@suse.com>
+
+commit 480523feae581ab714ba6610388a3b4619a2f695 upstream.
+
+Since commit 4ad23a976413 ("MD: use per-cpu counter for
+writes_pending"), set_in_sync() is substantially more expensive: it
+can wait for a full RCU grace period which can be 10s of milliseconds.
+
+So we should only call it when the cost is justified.
+
+md_check_recovery() currently calls set_in_sync() every time it finds
+anything to do (on non-external active arrays). For an array
+performing resync or recovery, this will happen quite often.
+Each call introduces a delay to the md thread, which can noticeably
+affect IO submission latency.
+
+In md_check_recovery() we only need to call set_in_sync() if
+'safemode' was non-zero at entry, meaning that there has been no
+recent IO. So we save this "safemode was non-zero" state, and only
+call set_in_sync() if it was non-zero.
+
+This measurably reduces mean and maximum IO submission latency during
+resync/recovery.
+
+Reported-and-tested-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Fixes: 4ad23a976413 ("MD: use per-cpu counter for writes_pending")
+Cc: stable@vger.kernel.org (v4.12+)
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8811,6 +8811,7 @@ void md_check_recovery(struct mddev *mdd
+
+ if (mddev_trylock(mddev)) {
+ int spares = 0;
++ bool try_set_sync = mddev->safemode != 0;
+
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+@@ -8856,7 +8857,7 @@ void md_check_recovery(struct mddev *mdd
+ }
+ }
+
+- if (!mddev->external && !mddev->in_sync) {
++ if (try_set_sync && !mddev->external && !mddev->in_sync) {
+ spin_lock(&mddev->lock);
+ set_in_sync(mddev);
+ spin_unlock(&mddev->lock);
--- /dev/null
+From 143f6e733b73051cd22dcb80951c6c929da413ce Mon Sep 17 00:00:00 2001
+From: Xiao Ni <xni@redhat.com>
+Date: Mon, 8 Jul 2019 10:14:32 +0800
+Subject: md/raid6: Set R5_ReadError when there is read failure on parity disk
+
+From: Xiao Ni <xni@redhat.com>
+
+commit 143f6e733b73051cd22dcb80951c6c929da413ce upstream.
+
+7471fb77ce4d ("md/raid6: Fix anomily when recovering a single device in
+RAID6.") avoids rereading P when it can be computed from other members.
+However, this misses the chance to re-write the right data to P. This
+patch sets R5_ReadError if the re-read fails.
+
+Also, when the re-read is skipped, we also miss the chance to reset
+rdev->read_errors to 0, which can fail the disk when there are many
+read errors on the P member disk (while the other disks have no read
+errors).
+
+V2: upper-layer read requests don't read parity/Q data, so there is no
+need to consider such a situation.
+
+Reported-by: kbuild test robot <lkp@intel.com>
+
+Fixes: 7471fb77ce4d ("md/raid6: Fix anomily when recovering a single device in RAID6.")
+Cc: <stable@vger.kernel.org> #4.4+
+Signed-off-by: Xiao Ni <xni@redhat.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2559,7 +2559,9 @@ static void raid5_end_read_request(struc
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
+ if (retry)
+- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
++ if (sh->qd_idx >= 0 && sh->pd_idx == i)
++ set_bit(R5_ReadError, &sh->dev[i].flags);
++ else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ } else
--- /dev/null
+From 692fe62433d4ca47605b39f7c416efd6679ba694 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 29 Aug 2019 09:04:11 -0700
+Subject: mm: Handle MADV_WILLNEED through vfs_fadvise()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 692fe62433d4ca47605b39f7c416efd6679ba694 upstream.
+
+Currently, handling of the MADV_WILLNEED hint calls directly into the
+readahead code. Handle it by calling vfs_fadvise() instead, so that the
+filesystem can use its ->fadvise() callback to acquire necessary locks
+or otherwise prepare for the request.
+
+Suggested-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Boaz Harrosh <boazh@netapp.com>
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/madvise.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -14,6 +14,7 @@
+ #include <linux/userfaultfd_k.h>
+ #include <linux/hugetlb.h>
+ #include <linux/falloc.h>
++#include <linux/fadvise.h>
+ #include <linux/sched.h>
+ #include <linux/ksm.h>
+ #include <linux/fs.h>
+@@ -275,6 +276,7 @@ static long madvise_willneed(struct vm_a
+ unsigned long start, unsigned long end)
+ {
+ struct file *file = vma->vm_file;
++ loff_t offset;
+
+ *prev = vma;
+ #ifdef CONFIG_SWAP
+@@ -298,12 +300,20 @@ static long madvise_willneed(struct vm_a
+ return 0;
+ }
+
+- start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+- if (end > vma->vm_end)
+- end = vma->vm_end;
+- end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+-
+- force_page_cache_readahead(file->f_mapping, file, start, end - start);
++ /*
++ * Filesystem's fadvise may need to take various locks. We need to
++ * explicitly grab a reference because the vma (and hence the
++ * vma's reference to the file) can go away as soon as we drop
++ * mmap_sem.
++ */
++ *prev = NULL; /* tell sys_madvise we drop mmap_sem */
++ get_file(file);
++	up_read(&current->mm->mmap_sem);
++ offset = (loff_t)(start - vma->vm_start)
++ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
++ vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
++ fput(file);
++	down_read(&current->mm->mmap_sem);
+ return 0;
+ }
+
--- /dev/null
+From b410f4eb01a1950ed73ae40859d0978b1a924380 Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@st.com>
+Date: Tue, 9 Jul 2019 11:41:45 +0200
+Subject: mtd: rawnand: stm32_fmc2: avoid warnings when building with W=1 option
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christophe Kerello <christophe.kerello@st.com>
+
+commit b410f4eb01a1950ed73ae40859d0978b1a924380 upstream.
+
+This patch fixes warnings detected when building with W=1.
+
+Type of warning detected:
+drivers/mtd/nand/raw/stm32_fmc2_nand.c: In function ‘stm32_fmc2_calc_timings’:
+drivers/mtd/nand/raw/stm32_fmc2_nand.c:1417:23: warning: comparison is
+always false due to limited range of data type [-Wtype-limits]
+ else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+
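+The comparison is dead code because timing fields such as tims->twait
+are u8 while FMC2_PMEM_PATT_TIMING_MASK is 0xff, so the test can never
+be true. The fix does the range handling on a wide intermediate before
+assigning, as distilled below from the hunks that follow:
+
+	/* before: tims->twait is u8, so this test is always false */
+	else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+		tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
+
+	/* after: clamp the unsigned long intermediate, then assign */
+	timing = DIV_ROUND_UP(twait, hclkp);
+	tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+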
+Signed-off-by: Christophe Kerello <christophe.kerello@st.com>
+Cc: stable@vger.kernel.org
+Fixes: 2cd457f328c1 ("mtd: rawnand: stm32_fmc2: add STM32 FMC2 NAND flash controller driver")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/raw/stm32_fmc2_nand.c | 88 ++++++++++-----------------------
+ 1 file changed, 28 insertions(+), 60 deletions(-)
+
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -1424,21 +1424,16 @@ static void stm32_fmc2_calc_timings(stru
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(fmc2->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+- int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att;
++ unsigned long timing, tar, tclr, thiz, twait;
++ unsigned long tset_mem, tset_att, thold_mem, thold_att;
+
+- tar = hclkp;
+- if (tar < sdrt->tAR_min)
+- tar = sdrt->tAR_min;
+- tims->tar = DIV_ROUND_UP(tar, hclkp) - 1;
+- if (tims->tar > FMC2_PCR_TIMING_MASK)
+- tims->tar = FMC2_PCR_TIMING_MASK;
+-
+- tclr = hclkp;
+- if (tclr < sdrt->tCLR_min)
+- tclr = sdrt->tCLR_min;
+- tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1;
+- if (tims->tclr > FMC2_PCR_TIMING_MASK)
+- tims->tclr = FMC2_PCR_TIMING_MASK;
++ tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
++ timing = DIV_ROUND_UP(tar, hclkp) - 1;
++ tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
++
++ tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
++ timing = DIV_ROUND_UP(tclr, hclkp) - 1;
++ tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+@@ -1448,18 +1443,11 @@ static void stm32_fmc2_calc_timings(stru
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+- twait = hclkp;
+- if (twait < sdrt->tRP_min)
+- twait = sdrt->tRP_min;
+- if (twait < sdrt->tWP_min)
+- twait = sdrt->tWP_min;
+- if (twait < sdrt->tREA_max + FMC2_TIO)
+- twait = sdrt->tREA_max + FMC2_TIO;
+- tims->twait = DIV_ROUND_UP(twait, hclkp);
+- if (tims->twait == 0)
+- tims->twait = 1;
+- else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
++ twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
++ twait = max_t(unsigned long, twait, sdrt->tWP_min);
++ twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
++ timing = DIV_ROUND_UP(twait, hclkp);
++ tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+@@ -1474,20 +1462,15 @@ static void stm32_fmc2_calc_timings(stru
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+- tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp);
+- if (tims->tset_mem == 0)
+- tims->tset_mem = 1;
+- else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(tset_mem, hclkp);
++ tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+- thold_mem = hclkp;
+- if (thold_mem < sdrt->tCH_min)
+- thold_mem = sdrt->tCH_min;
++ thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+@@ -1497,11 +1480,8 @@ static void stm32_fmc2_calc_timings(stru
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+- tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp);
+- if (tims->thold_mem == 0)
+- tims->thold_mem = 1;
+- else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(thold_mem, hclkp);
++ tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+@@ -1523,11 +1503,8 @@ static void stm32_fmc2_calc_timings(stru
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+- tims->tset_att = DIV_ROUND_UP(tset_att, hclkp);
+- if (tims->tset_att == 0)
+- tims->tset_att = 1;
+- else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(tset_att, hclkp);
++ tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_ATT > tALH
+@@ -1542,17 +1519,11 @@ static void stm32_fmc2_calc_timings(stru
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+- thold_att = hclkp;
+- if (thold_att < sdrt->tALH_min)
+- thold_att = sdrt->tALH_min;
+- if (thold_att < sdrt->tCH_min)
+- thold_att = sdrt->tCH_min;
+- if (thold_att < sdrt->tCLH_min)
+- thold_att = sdrt->tCLH_min;
+- if (thold_att < sdrt->tCOH_min)
+- thold_att = sdrt->tCOH_min;
+- if (thold_att < sdrt->tDH_min)
+- thold_att = sdrt->tDH_min;
++ thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+@@ -1571,11 +1542,8 @@ static void stm32_fmc2_calc_timings(stru
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+- tims->thold_att = DIV_ROUND_UP(thold_att, hclkp);
+- if (tims->thold_att == 0)
+- tims->thold_att = 1;
+- else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(thold_att, hclkp);
++ tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+ }
+
+ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
--- /dev/null
+From 6565c182094f69e4ffdece337d395eb7ec760efc Mon Sep 17 00:00:00 2001
+From: Chao Yu <chao@kernel.org>
+Date: Wed, 11 Sep 2019 17:36:50 +0800
+Subject: quota: fix wrong condition in is_quota_modification()
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit 6565c182094f69e4ffdece337d395eb7ec760efc upstream.
+
+Quoted from
+commit 3da40c7b0898 ("ext4: only call ext4_truncate when size <= isize")
+
+" At LSF we decided that if we truncate up from isize we shouldn't trim
+ fallocated blocks that were fallocated with KEEP_SIZE and are past the
+ new i_size. This patch fixes ext4 to do this. "
+
+And generic/092 of fstests has covered this case for a long time;
+however, is_quota_modification() was not adjusted to follow that rule,
+so in the sequence below we lose a quota block change:
+- fallocate blocks beyond EOF
+- remount
+- truncate(file_path, file_size)
+
+Fix it.
+
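+For context, callers use this helper to decide whether quota must be
+initialized before applying the attribute change; a rough sketch of the
+pattern in ext4_setattr():
+
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
+
+With the old condition, a truncate() to the current i_size skipped this
+path even though it can free blocks fallocated with KEEP_SIZE.
+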
+Link: https://lore.kernel.org/r/20190911093650.35329-1-yuchao0@huawei.com
+Fixes: 3da40c7b0898 ("ext4: only call ext4_truncate when size <= isize")
+CC: stable@vger.kernel.org
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/quotaops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqop
+ /* i_mutex must being held */
+ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+ {
+- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
++ return (ia->ia_valid & ATTR_SIZE) ||
+ (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
+ (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ }
btrfs-qgroup-fix-the-wrong-target-io_tree-when-freeing-reserved-data-space.patch
btrfs-qgroup-fix-reserved-data-space-leak-if-we-have-multiple-reserve-calls.patch
btrfs-fix-race-setting-up-and-completing-qgroup-rescan-workers.patch
+sunrpc-dequeue-the-request-from-the-receive-queue-while-we-re-re-encoding.patch
+sunrpc-fix-buffer-handling-of-gss-mic-without-slack.patch
+acpi-lpss-save-restore-lpss-private-registers-also-on-lynxpoint.patch
+md-raid6-set-r5_readerror-when-there-is-read-failure-on-parity-disk.patch
+md-don-t-report-active-array_state-until-after-revalidate_disk-completes.patch
+md-only-call-set_in_sync-when-it-is-expected-to-succeed.patch
+cfg80211-purge-frame-registrations-on-iftype-change.patch
+dev-mem-bail-out-upon-sigkill.patch
+fs-export-generic_fadvise.patch
+mm-handle-madv_willneed-through-vfs_fadvise.patch
+xfs-fix-stale-data-exposure-when-readahead-races-with-hole-punch.patch
+ipmi-move-message-error-checking-to-avoid-deadlock.patch
+mtd-rawnand-stm32_fmc2-avoid-warnings-when-building-with-w-1-option.patch
+ext4-fix-warning-inside-ext4_convert_unwritten_extents_endio.patch
+ext4-fix-punch-hole-for-inline_data-file-systems.patch
+quota-fix-wrong-condition-in-is_quota_modification.patch
+hwrng-core-don-t-wait-on-add_early_randomness.patch
+i2c-riic-clear-nack-in-tend-isr.patch
+cifs-fix-max-ea-value-size.patch
+cifs-fix-oplock-handling-for-smb-2.1-protocols.patch
--- /dev/null
+From cc204d01262a69218b2d0db5cdea371de85871d9 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Tue, 10 Sep 2019 13:01:35 -0400
+Subject: SUNRPC: Dequeue the request from the receive queue while we're re-encoding
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+commit cc204d01262a69218b2d0db5cdea371de85871d9 upstream.
+
+Ensure that we dequeue the request from the transport receive queue
+while we're re-encoding to prevent issues like use-after-free when
+we release the bvec.
+
+Fixes: 7536908982047 ("SUNRPC: Ensure the bvecs are reset when we re-encode...")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sunrpc/xprt.h | 1
+ net/sunrpc/clnt.c | 6 ++--
+ net/sunrpc/xprt.c | 54 +++++++++++++++++++++++++-------------------
+ 3 files changed, 35 insertions(+), 26 deletions(-)
+
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -346,6 +346,7 @@ bool xprt_prepare_transmit(struct rpc_
+ void xprt_request_enqueue_transmit(struct rpc_task *task);
+ void xprt_request_enqueue_receive(struct rpc_task *task);
+ void xprt_request_wait_receive(struct rpc_task *task);
++void xprt_request_dequeue_xprt(struct rpc_task *task);
+ bool xprt_request_need_retransmit(struct rpc_task *task);
+ void xprt_transmit(struct rpc_task *task);
+ void xprt_end_transmit(struct rpc_task *task);
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1785,6 +1785,7 @@ rpc_xdr_encode(struct rpc_task *task)
+ req->rq_rbuffer,
+ req->rq_rcvsize);
+
++ req->rq_reply_bytes_recvd = 0;
+ req->rq_snd_buf.head[0].iov_len = 0;
+ xdr_init_encode(&xdr, &req->rq_snd_buf,
+ req->rq_snd_buf.head[0].iov_base, req);
+@@ -1804,6 +1805,8 @@ call_encode(struct rpc_task *task)
+ if (!rpc_task_need_encode(task))
+ goto out;
+ dprint_status(task);
++ /* Dequeue task from the receive queue while we're encoding */
++ xprt_request_dequeue_xprt(task);
+ /* Encode here so that rpcsec_gss can use correct sequence number. */
+ rpc_xdr_encode(task);
+ /* Did the encode result in an error condition? */
+@@ -2437,9 +2440,6 @@ call_decode(struct rpc_task *task)
+ return;
+ case -EAGAIN:
+ task->tk_status = 0;
+- xdr_free_bvec(&req->rq_rcv_buf);
+- req->rq_reply_bytes_recvd = 0;
+- req->rq_rcv_buf.len = 0;
+ if (task->tk_client->cl_discrtry)
+ xprt_conditional_disconnect(req->rq_xprt,
+ req->rq_connect_cookie);
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1296,6 +1296,36 @@ xprt_request_dequeue_transmit(struct rpc
+ }
+
+ /**
++ * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
++ * @task: pointer to rpc_task
++ *
++ * Remove a task from the transmit and receive queues, and ensure that
++ * it is not pinned by the receive work item.
++ */
++void
++xprt_request_dequeue_xprt(struct rpc_task *task)
++{
++ struct rpc_rqst *req = task->tk_rqstp;
++ struct rpc_xprt *xprt = req->rq_xprt;
++
++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
++ test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
++ xprt_is_pinned_rqst(req)) {
++ spin_lock(&xprt->queue_lock);
++ xprt_request_dequeue_transmit_locked(task);
++ xprt_request_dequeue_receive_locked(task);
++ while (xprt_is_pinned_rqst(req)) {
++ set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
++ spin_unlock(&xprt->queue_lock);
++ xprt_wait_on_pinned_rqst(req);
++ spin_lock(&xprt->queue_lock);
++ clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
++ }
++ spin_unlock(&xprt->queue_lock);
++ }
++}
++
++/**
+ * xprt_request_prepare - prepare an encoded request for transport
+ * @req: pointer to rpc_rqst
+ *
+@@ -1719,28 +1749,6 @@ void xprt_retry_reserve(struct rpc_task
+ xprt_do_reserve(xprt, task);
+ }
+
+-static void
+-xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
+-{
+- struct rpc_xprt *xprt = req->rq_xprt;
+-
+- if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
+- test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
+- xprt_is_pinned_rqst(req)) {
+- spin_lock(&xprt->queue_lock);
+- xprt_request_dequeue_transmit_locked(task);
+- xprt_request_dequeue_receive_locked(task);
+- while (xprt_is_pinned_rqst(req)) {
+- set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+- spin_unlock(&xprt->queue_lock);
+- xprt_wait_on_pinned_rqst(req);
+- spin_lock(&xprt->queue_lock);
+- clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+- }
+- spin_unlock(&xprt->queue_lock);
+- }
+-}
+-
+ /**
+ * xprt_release - release an RPC request slot
+ * @task: task which is finished with the slot
+@@ -1764,7 +1772,7 @@ void xprt_release(struct rpc_task *task)
+ task->tk_ops->rpc_count_stats(task, task->tk_calldata);
+ else if (task->tk_client)
+ rpc_count_iostats(task, task->tk_client->cl_metrics);
+- xprt_request_dequeue_all(task, req);
++ xprt_request_dequeue_xprt(task);
+ spin_lock_bh(&xprt->transport_lock);
+ xprt->ops->release_xprt(xprt, task);
+ if (xprt->ops->release_request)
--- /dev/null
+From 5f1bc39979d868a0358c683864bec3fc8395440b Mon Sep 17 00:00:00 2001
+From: Benjamin Coddington <bcodding@redhat.com>
+Date: Mon, 16 Sep 2019 07:59:37 -0400
+Subject: SUNRPC: Fix buffer handling of GSS MIC without slack
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+commit 5f1bc39979d868a0358c683864bec3fc8395440b upstream.
+
+The GSS Message Integrity Check data for krb5i may lie partially in the XDR
+reply buffer's pages and tail. If so, we try to copy the entire MIC into
+free space in the tail. But as the estimations of the slack space required
+for authentication and verification have improved, there may be less free
+space in the tail to complete this copy -- see commit 2c94b8eca1a2
+("SUNRPC: Use au_rslack when computing reply buffer size"). In fact, there
+may only be room in the tail for a single copy of the MIC, and not part of
+the MIC and then another complete copy.
+
+The real world failure reported is that `ls` of a directory on NFS may
+sometimes return -EIO, which can be traced back to xdr_buf_read_netobj()
+failing to find available free space in the tail to copy the MIC.
+
+Fix this by checking for the case of the MIC crossing the boundaries of
+head, pages, and tail. If so, shift the buffer until the MIC is contained
+completely within the pages or tail. This allows the remainder of the
+function to create a sub-buffer that directly addresses the complete MIC.
+
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Cc: stable@vger.kernel.org # v5.1
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xdr.c | 27 ++++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -1237,16 +1237,29 @@ xdr_encode_word(struct xdr_buf *buf, uns
+ EXPORT_SYMBOL_GPL(xdr_encode_word);
+
+ /* If the netobj starting offset bytes from the start of xdr_buf is contained
+- * entirely in the head or the tail, set object to point to it; otherwise
+- * try to find space for it at the end of the tail, copy it there, and
+- * set obj to point to it. */
++ * entirely in the head, pages, or tail, set object to point to it; otherwise
++ * shift the buffer until it is contained entirely within the pages or tail.
++ */
+ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
+ {
+ struct xdr_buf subbuf;
++ unsigned int boundary;
+
+ if (xdr_decode_word(buf, offset, &obj->len))
+ return -EFAULT;
+- if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
++ offset += 4;
++
++ /* Is the obj partially in the head? */
++ boundary = buf->head[0].iov_len;
++ if (offset < boundary && (offset + obj->len) > boundary)
++ xdr_shift_buf(buf, boundary - offset);
++
++ /* Is the obj partially in the pages? */
++ boundary += buf->page_len;
++ if (offset < boundary && (offset + obj->len) > boundary)
++ xdr_shrink_pagelen(buf, boundary - offset);
++
++ if (xdr_buf_subsegment(buf, &subbuf, offset, obj->len))
+ return -EFAULT;
+
+ /* Is the obj contained entirely in the head? */
+@@ -1258,11 +1271,7 @@ int xdr_buf_read_netobj(struct xdr_buf *
+ if (subbuf.tail[0].iov_len == obj->len)
+ return 0;
+
+- /* use end of tail as storage for obj:
+- * (We don't copy to the beginning because then we'd have
+- * to worry about doing a potentially overlapping copy.
+- * This assumes the object is at most half the length of the
+- * tail.) */
++ /* Find a contiguous area in @buf to hold all of @obj */
+ if (obj->len > buf->buflen - buf->len)
+ return -ENOMEM;
+ if (buf->tail[0].iov_len != 0)
--- /dev/null
+From 40144e49ff84c3bd6bd091b58115257670be8803 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 29 Aug 2019 09:04:12 -0700
+Subject: xfs: Fix stale data exposure when readahead races with hole punch
+
+From: Jan Kara <jack@suse.cz>
+
+commit 40144e49ff84c3bd6bd091b58115257670be8803 upstream.
+
+Hole punching currently evicts pages from the page cache and then goes
+on to remove blocks from the inode. This happens under both
+XFS_IOLOCK_EXCL and XFS_MMAPLOCK_EXCL, which provides appropriate
+serialization with racing reads or page faults. However, there is
+currently nothing that prevents readahead triggered by fadvise() or
+madvise() from racing with the hole punch and instantiating a page
+cache page after hole punching has evicted the page cache in
+xfs_flush_unmap_range() but before it has removed blocks from the
+inode. This page cache page will be mapping a soon-to-be-freed block,
+and that can lead to returning stale data to userspace or even
+filesystem corruption.
+
+Fix the problem by protecting the handling of readahead requests with
+XFS_IOLOCK_SHARED, similarly to how we protect reads.
+
+CC: stable@vger.kernel.org
+Link: https://lore.kernel.org/linux-fsdevel/CAOQ4uxjQNmxqmtA_VbYW0Su9rKRk2zobJmahcyeaEVOFKVQ5dw@mail.gmail.com/
+Reported-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_file.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -33,6 +33,7 @@
+ #include <linux/pagevec.h>
+ #include <linux/backing-dev.h>
+ #include <linux/mman.h>
++#include <linux/fadvise.h>
+
+ static const struct vm_operations_struct xfs_file_vm_ops;
+
+@@ -939,6 +940,30 @@ out_unlock:
+ return error;
+ }
+
++STATIC int
++xfs_file_fadvise(
++ struct file *file,
++ loff_t start,
++ loff_t end,
++ int advice)
++{
++ struct xfs_inode *ip = XFS_I(file_inode(file));
++ int ret;
++ int lockflags = 0;
++
++ /*
++ * Operations creating pages in page cache need protection from hole
++ * punching and similar ops
++ */
++ if (advice == POSIX_FADV_WILLNEED) {
++ lockflags = XFS_IOLOCK_SHARED;
++ xfs_ilock(ip, lockflags);
++ }
++ ret = generic_fadvise(file, start, end, advice);
++ if (lockflags)
++ xfs_iunlock(ip, lockflags);
++ return ret;
++}
+
+ STATIC loff_t
+ xfs_file_remap_range(
+@@ -1235,6 +1260,7 @@ const struct file_operations xfs_file_op
+ .fsync = xfs_file_fsync,
+ .get_unmapped_area = thp_get_unmapped_area,
+ .fallocate = xfs_file_fallocate,
++ .fadvise = xfs_file_fadvise,
+ .remap_file_range = xfs_file_remap_range,
+ };
+