--- /dev/null
+From 91740fc8242b4f260cfa4d4536d8551804777fae Mon Sep 17 00:00:00 2001
+From: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+Date: Tue, 26 Feb 2019 11:34:13 +0900
+Subject: ARM: imx6q: cpuidle: fix bug that CPU might not wake up at expected time
+
+From: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+
+commit 91740fc8242b4f260cfa4d4536d8551804777fae upstream.
+
+In the current cpuidle implementation for i.MX6q, the CPU that sets
+'WAIT_UNCLOCKED' and the CPU that returns to 'WAIT_CLOCKED' are always
+the same. While the CPU that set 'WAIT_UNCLOCKED' is in the "WAIT"
+idle state, if the other CPU wakes up and then enters the "WFI" idle
+state instead of "WAIT", that CPU cannot wake up at the expected time:
+in the "WFI" case the CPU must be woken up by its local timer
+interrupt, but while 'WAIT_UNCLOCKED' is set the local timer is
+stopped once all CPUs execute the "wfi" instruction, so the local
+timer interrupt never fires. In this situation, this CPU can only be
+woken up by an IRQ other than the local timer (e.g. the broadcast
+timer).
+
+So, this fix changes things so that whichever CPU wakes up first
+returns the SoC to 'WAIT_CLOCKED', instead of requiring it to be the
+same CPU that set 'WAIT_UNCLOCKED'.
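+
+As an illustration only (not part of the kernel change), the new
+scheme can be sketched in plain user-space C, with a pthread mutex
+standing in for the spinlock and made-up names for the modes:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  #define NR_CPUS 4
+
+  enum lpm_mode { WAIT_CLOCKED, WAIT_UNCLOCKED };
+
+  static int num_idle_cpus;
+  static enum lpm_mode mode = WAIT_CLOCKED;
+  static pthread_mutex_t cpuidle_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void *enter_wait(void *unused)
+  {
+          (void)unused;
+
+          /* Last CPU to go idle switches the "SoC" to WAIT_UNCLOCKED. */
+          pthread_mutex_lock(&cpuidle_lock);
+          if (++num_idle_cpus == NR_CPUS)
+                  mode = WAIT_UNCLOCKED;
+          pthread_mutex_unlock(&cpuidle_lock);
+
+          /* cpu_do_idle() would run here. */
+
+          /* First CPU to wake restores WAIT_CLOCKED, whichever CPU it is. */
+          pthread_mutex_lock(&cpuidle_lock);
+          if (num_idle_cpus-- == NR_CPUS)
+                  mode = WAIT_CLOCKED;
+          pthread_mutex_unlock(&cpuidle_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t t[NR_CPUS];
+          int i;
+
+          for (i = 0; i < NR_CPUS; i++)
+                  pthread_create(&t[i], NULL, enter_wait, NULL);
+          for (i = 0; i < NR_CPUS; i++)
+                  pthread_join(t[i], NULL);
+          printf("final mode: %s\n",
+                 mode == WAIT_CLOCKED ? "WAIT_CLOCKED" : "WAIT_UNCLOCKED");
+          return 0;
+  }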
+
+Signed-off-by: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+Fixes: e5f9dec8ff5f ("ARM: imx6q: support WAIT mode using cpuidle")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-imx/cpuidle-imx6q.c | 27 ++++++++++-----------------
+ 1 file changed, 10 insertions(+), 17 deletions(-)
+
+--- a/arch/arm/mach-imx/cpuidle-imx6q.c
++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
+@@ -14,30 +14,23 @@
+ #include "cpuidle.h"
+ #include "hardware.h"
+
+-static atomic_t master = ATOMIC_INIT(0);
+-static DEFINE_SPINLOCK(master_lock);
++static int num_idle_cpus = 0;
++static DEFINE_SPINLOCK(cpuidle_lock);
+
+ static int imx6q_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+ {
+- if (atomic_inc_return(&master) == num_online_cpus()) {
+- /*
+- * With this lock, we prevent other cpu to exit and enter
+- * this function again and become the master.
+- */
+- if (!spin_trylock(&master_lock))
+- goto idle;
++ spin_lock(&cpuidle_lock);
++ if (++num_idle_cpus == num_online_cpus())
+ imx6_set_lpm(WAIT_UNCLOCKED);
+- cpu_do_idle();
+- imx6_set_lpm(WAIT_CLOCKED);
+- spin_unlock(&master_lock);
+- goto done;
+- }
++ spin_unlock(&cpuidle_lock);
+
+-idle:
+ cpu_do_idle();
+-done:
+- atomic_dec(&master);
++
++ spin_lock(&cpuidle_lock);
++ if (num_idle_cpus-- == num_online_cpus())
++ imx6_set_lpm(WAIT_CLOCKED);
++ spin_unlock(&cpuidle_lock);
+
+ return index;
+ }
--- /dev/null
+From bf504110bc8aa05df48b0e5f0aa84bfb81e0574b Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 4 Mar 2019 14:06:12 +0000
+Subject: Btrfs: fix incorrect file size after shrinking truncate and fsync
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit bf504110bc8aa05df48b0e5f0aa84bfb81e0574b upstream.
+
+If we do a shrinking truncate against an inode which is already present
+in the respective log tree and then rename it, as part of logging the new
+name we end up logging an inode item that reflects the old size of the
+file (the one which we previously logged) and not the new smaller size.
+The decision to preserve the size previously logged was added by commit
+1a4bcf470c886b ("Btrfs: fix fsync data loss after adding hard link to
+inode") in order to avoid data loss after replaying the log. However that
+decision is only needed for the case where the logged inode size is
+smaller than the current size of the inode, as explained in that
+commit's change log. If the current size of the inode is smaller than
+the previously logged size, we know a shrinking truncate happened and
+therefore need to use that smaller size.
+
+Example to trigger the problem:
+
+ $ mkfs.btrfs -f /dev/sdb
+ $ mount /dev/sdb /mnt
+
+ $ xfs_io -f -c "pwrite -S 0xab 0 8000" /mnt/foo
+ $ xfs_io -c "fsync" /mnt/foo
+ $ xfs_io -c "truncate 3000" /mnt/foo
+
+ $ mv /mnt/foo /mnt/bar
+ $ xfs_io -c "fsync" /mnt/bar
+
+ <power failure>
+
+ $ mount /dev/sdb /mnt
+ $ od -t x1 -A d /mnt/bar
+ 0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
+ *
+ 0008000
+
+Once we rename the file, we log its name (and inode item), and because
+the inode was already logged before in the current transaction, we log it
+with a size of 8000 bytes because that is the size we previously logged
+(with the first fsync). As part of the rename, besides logging the inode,
+we do also sync the log, which is done since commit d4682ba03ef618
+("Btrfs: sync log after logging new name"), so the next fsync against our
+inode is effectively a no-op, since no new changes happened since the
+rename operation. Even if we did not sync the log during the rename
+operation, the same problem (file size of 8000 bytes instead of 3000
+bytes) would be visible after replaying the log if the log ended up
+getting synced to disk through some other means, such as by fsyncing
+some other modified file. In the example above, the fsync after the
+rename operation is there just because not every filesystem guarantees
+logging/journalling the inode (and syncing the log/journal) during the
+rename operation; for example, it is needed for f2fs, but not for ext4
+and xfs.
+
+Fix this scenario by, when logging a new name (which is triggered by
+rename and link operations), using the current size of the inode instead
+of the previously logged inode size.
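+
+In other words, the size recorded when logging the new name becomes
+the minimum of the previously logged size and the current in-memory
+i_size. A rough, illustrative-only helper (not actual btrfs code, the
+name is made up):
+
+  #include <stdint.h>
+
+  /*
+   * A shrinking truncate lowers i_size below the previously logged
+   * size, so the smaller i_size wins; an expanding write followed by
+   * a new name keeps the smaller previously logged size, which is the
+   * data loss case fixed by commit 1a4bcf470c886b.
+   */
+  static uint64_t size_to_log(uint64_t logged_size, uint64_t i_size)
+  {
+          return i_size < logged_size ? i_size : logged_size;
+  }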
+
+A test case for fstests follows soon.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=202695
+CC: stable@vger.kernel.org # 4.4+
+Reported-by: Seulbae Kim <seulbae@gatech.edu>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4233,6 +4233,19 @@ static int logged_inode_size(struct btrf
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ *size_ret = btrfs_inode_size(path->nodes[0], item);
++ /*
++ * If the in-memory inode's i_size is smaller than the inode
++ * size stored in the btree, return the inode's i_size, so
++ * that we get a correct inode size after replaying the log
++ * when before a power failure we had a shrinking truncate
++ * followed by addition of a new name (rename / new hard link).
++ * Otherwise return the inode size from the btree, to avoid
++ * data loss when replaying a log due to previously doing a
++ * write that expands the inode's size and logging a new name
++ * immediately after.
++ */
++ if (*size_ret > inode->vfs_inode.i_size)
++ *size_ret = inode->vfs_inode.i_size;
+ }
+
+ btrfs_release_path(path);
--- /dev/null
+From 3897b6f0a859288c22fb793fad11ec2327e60fcd Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Thu, 14 Mar 2019 08:56:28 +0100
+Subject: btrfs: raid56: properly unmap parity page in finish_parity_scrub()
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+commit 3897b6f0a859288c22fb793fad11ec2327e60fcd upstream.
+
+Parity page is incorrectly unmapped in finish_parity_scrub(), triggering
+a reference counter bug on i386, i.e.:
+
+ [ 157.662401] kernel BUG at mm/highmem.c:349!
+ [ 157.666725] invalid opcode: 0000 [#1] SMP PTI
+
+The reason is that kunmap(p_page) was completely left out, so we never
+did an unmap for the p_page and the loop unmapping the rbio page was
+iterating over the wrong number of stripes: unmapping should be done
+with nr_data instead of rbio->real_stripes.
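+
+Schematically (an illustrative fragment only, details of the real
+function and the raid6 q_page handling left out), the unmap side has
+to mirror the map side: the nr_data data pages plus the separately
+allocated p_page, nothing more and nothing less:
+
+  for (stripe = 0; stripe < nr_data; stripe++)      /* map data pages */
+          pointers[stripe] = kmap(page_in_rbio(rbio, stripe, pagenr, 0));
+  pointers[nr_data] = kmap(p_page);                 /* map parity page */
+
+  /* ... generate and compare parity ... */
+
+  for (stripe = 0; stripe < nr_data; stripe++)      /* unmap data pages */
+          kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+  kunmap(p_page);                                   /* this was missing */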
+
+Test case to reproduce the bug:
+
+ - create a raid5 btrfs filesystem:
+ # mkfs.btrfs -m raid5 -d raid5 /dev/sdb /dev/sdc /dev/sdd /dev/sde
+
+ - mount it:
+ # mount /dev/sdb /mnt
+
+ - run btrfs scrub in a loop:
+ # while :; do btrfs scrub start -BR /mnt; done
+
+BugLink: https://bugs.launchpad.net/bugs/1812845
+Fixes: 5a6ac9eacb49 ("Btrfs, raid56: support parity scrub on raid56")
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/raid56.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2420,8 +2420,9 @@ static noinline void finish_parity_scrub
+ bitmap_clear(rbio->dbitmap, pagenr, 1);
+ kunmap(p);
+
+- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
++ for (stripe = 0; stripe < nr_data; stripe++)
+ kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
++ kunmap(p_page);
+ }
+
+ __free_page(p_page);
--- /dev/null
+From 2cc8334270e281815c3850c3adea363c51f21e0d Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 6 Mar 2019 17:13:04 -0500
+Subject: btrfs: remove WARN_ON in log_dir_items
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 2cc8334270e281815c3850c3adea363c51f21e0d upstream.
+
+When Filipe added the recursive directory logging stuff in
+2f2ff0ee5e430 ("Btrfs: fix metadata inconsistencies after directory
+fsync") he specifically didn't take the directory i_mutex for the
+children directories that we need to log because of lockdep. This is
+generally fine, but can lead to this WARN_ON() tripping if we happen to
+run delayed deletions in between our first search and our second search
+of dir_item/dir_indexes for this directory. We expect this to happen,
+so the WARN_ON() isn't necessary. Drop the WARN_ON() and add a comment
+so we know why this case can happen.
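+
+Schematically (illustrative only, using no function names beyond those
+in the patch), the race the new comment describes looks like this:
+
+  task logging the directory                delayed-items worker
+  --------------------------                --------------------
+  first search finds the dir index key
+  btrfs_release_path(path)
+                                            runs a delayed deletion and
+                                            removes that dir index key
+  btrfs_search_slot() returns non-zero  ->  just bail out, no WARN_ON()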
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3321,9 +3321,16 @@ static noinline int log_dir_items(struct
+ }
+ btrfs_release_path(path);
+
+- /* find the first key from this transaction again */
++ /*
++ * Find the first key from this transaction again. See the note for
++ * log_new_dir_dentries, if we're logging a directory recursively we
++ * won't be holding its i_mutex, which means we can modify the directory
++ * while we're logging it. If we remove an entry between our first
++ * search and this search we'll not find the key again and can just
++ * bail.
++ */
+ ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+- if (WARN_ON(ret != 0))
++ if (ret != 0)
+ goto done;
+
+ /*
vxlan-don-t-call-gro_cells_destroy-before-device-is-unregistered.patch
sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
mac8390-fix-mmio-access-size-probe.patch
+btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch
+btrfs-remove-warn_on-in-log_dir_items.patch
+btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch
+arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch