git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 29 Mar 2019 16:46:06 +0000 (17:46 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 29 Mar 2019 16:46:06 +0000 (17:46 +0100)
added patches:
arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch
btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch
btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch
btrfs-remove-warn_on-in-log_dir_items.patch
net-dsa-qca8k-remove-leftover-phy-accessors.patch
nfsv4.1-don-t-free-interrupted-slot-on-open.patch
powerpc-bpf-fix-generation-of-load-store-dw-instructions.patch

queue-4.9/arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch [new file with mode: 0644]
queue-4.9/btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch [new file with mode: 0644]
queue-4.9/btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch [new file with mode: 0644]
queue-4.9/btrfs-remove-warn_on-in-log_dir_items.patch [new file with mode: 0644]
queue-4.9/net-dsa-qca8k-remove-leftover-phy-accessors.patch [new file with mode: 0644]
queue-4.9/nfsv4.1-don-t-free-interrupted-slot-on-open.patch [new file with mode: 0644]
queue-4.9/powerpc-bpf-fix-generation-of-load-store-dw-instructions.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch b/queue-4.9/arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch
new file mode 100644 (file)
index 0000000..1d535ca
--- /dev/null
@@ -0,0 +1,76 @@
+From 91740fc8242b4f260cfa4d4536d8551804777fae Mon Sep 17 00:00:00 2001
+From: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+Date: Tue, 26 Feb 2019 11:34:13 +0900
+Subject: ARM: imx6q: cpuidle: fix bug that CPU might not wake up at expected time
+
+From: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+
+commit 91740fc8242b4f260cfa4d4536d8551804777fae upstream.
+
+In the current cpuidle implementation for i.MX6q, the CPU that sets
+'WAIT_UNCLOCKED' and the CPU that returns to 'WAIT_CLOCKED' are always
+the same. While the CPU that set 'WAIT_UNCLOCKED' is in the "WAIT"
+idle state, if the other CPU wakes up and then enters the "WFI" idle
+state instead of "WAIT", the first CPU cannot wake up when its timer
+expires. In the "WFI" case the CPU must be woken up by its local timer
+interrupt, but while 'WAIT_UNCLOCKED' is set the local timer is
+stopped once all CPUs execute the "wfi" instruction, so the local
+timer interrupt never fires. In this situation, the CPU can only be
+woken up by some other IRQ (e.g. the broadcast timer).
+
+So, fix this by having whichever CPU exits idle first return the SoC
+to 'WAIT_CLOCKED'.
+
+Signed-off-by: Kohji Okuno <okuno.kohji@jp.panasonic.com>
+Fixes: e5f9dec8ff5f ("ARM: imx6q: support WAIT mode using cpuidle")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-imx/cpuidle-imx6q.c |   27 ++++++++++-----------------
+ 1 file changed, 10 insertions(+), 17 deletions(-)
+
+--- a/arch/arm/mach-imx/cpuidle-imx6q.c
++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
+@@ -16,30 +16,23 @@
+ #include "cpuidle.h"
+ #include "hardware.h"
+-static atomic_t master = ATOMIC_INIT(0);
+-static DEFINE_SPINLOCK(master_lock);
++static int num_idle_cpus = 0;
++static DEFINE_SPINLOCK(cpuidle_lock);
+ static int imx6q_enter_wait(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv, int index)
+ {
+-      if (atomic_inc_return(&master) == num_online_cpus()) {
+-              /*
+-               * With this lock, we prevent other cpu to exit and enter
+-               * this function again and become the master.
+-               */
+-              if (!spin_trylock(&master_lock))
+-                      goto idle;
++      spin_lock(&cpuidle_lock);
++      if (++num_idle_cpus == num_online_cpus())
+               imx6_set_lpm(WAIT_UNCLOCKED);
+-              cpu_do_idle();
+-              imx6_set_lpm(WAIT_CLOCKED);
+-              spin_unlock(&master_lock);
+-              goto done;
+-      }
++      spin_unlock(&cpuidle_lock);
+-idle:
+       cpu_do_idle();
+-done:
+-      atomic_dec(&master);
++
++      spin_lock(&cpuidle_lock);
++      if (num_idle_cpus-- == num_online_cpus())
++              imx6_set_lpm(WAIT_CLOCKED);
++      spin_unlock(&cpuidle_lock);
+       return index;
+ }
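
For illustration, here is a minimal userspace sketch of the locking scheme the
patch above switches to: a spinlock-protected counter of idle CPUs, where the
last CPU to enter idle drops the SoC into WAIT_UNCLOCKED and the first CPU to
leave idle restores WAIT_CLOCKED. The pthread mutex, the set_lpm() stub,
usleep() standing in for cpu_do_idle(), and the fixed CPU count are assumptions
of the sketch, not part of the kernel change.

/* Hypothetical userspace analogue of the new imx6q_enter_wait() scheme;
 * set_lpm() stands in for imx6_set_lpm() and NCPUS for num_online_cpus().
 * Compile with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static pthread_mutex_t cpuidle_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_idle_cpus;

static void set_lpm(const char *mode) { printf("LPM -> %s\n", mode); }

static void *enter_wait(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&cpuidle_lock);
	if (++num_idle_cpus == NCPUS)      /* last CPU in: unclock the SoC */
		set_lpm("WAIT_UNCLOCKED");
	pthread_mutex_unlock(&cpuidle_lock);

	usleep(1000);                      /* stands in for cpu_do_idle() */

	pthread_mutex_lock(&cpuidle_lock);
	if (num_idle_cpus-- == NCPUS)      /* first CPU out: re-clock the SoC */
		set_lpm("WAIT_CLOCKED");
	pthread_mutex_unlock(&cpuidle_lock);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (int i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, enter_wait, NULL);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Unlike the old atomic_inc_return() + spin_trylock() scheme, the low-power state
is always undone by the first CPU to leave idle, so no CPU is left waiting on a
local timer that has been stopped.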
diff --git a/queue-4.9/btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch b/queue-4.9/btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch
new file mode 100644 (file)
index 0000000..affd5c3
--- /dev/null
@@ -0,0 +1,98 @@
+From bf504110bc8aa05df48b0e5f0aa84bfb81e0574b Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Mon, 4 Mar 2019 14:06:12 +0000
+Subject: Btrfs: fix incorrect file size after shrinking truncate and fsync
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit bf504110bc8aa05df48b0e5f0aa84bfb81e0574b upstream.
+
+If we do a shrinking truncate against an inode which is already present
+in the respective log tree and then rename it, as part of logging the new
+name we end up logging an inode item that reflects the old size of the
+file (the one which we previously logged) and not the new smaller size.
+The decision to preserve the size previously logged was added by commit
+1a4bcf470c886b ("Btrfs: fix fsync data loss after adding hard link to
+inode") in order to avoid data loss after replaying the log. However that
+decision is only needed for the case where the logged inode size is smaller
+than the current size of the inode, as explained in that commit's change log.
+If the current size of the inode is smaller than the previously logged
+size, we know a shrinking truncate happened and therefore need to use
+that smaller size.
+
+Example to trigger the problem:
+
+  $ mkfs.btrfs -f /dev/sdb
+  $ mount /dev/sdb /mnt
+
+  $ xfs_io -f -c "pwrite -S 0xab 0 8000" /mnt/foo
+  $ xfs_io -c "fsync" /mnt/foo
+  $ xfs_io -c "truncate 3000" /mnt/foo
+
+  $ mv /mnt/foo /mnt/bar
+  $ xfs_io -c "fsync" /mnt/bar
+
+  <power failure>
+
+  $ mount /dev/sdb /mnt
+  $ od -t x1 -A d /mnt/bar
+  0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
+  *
+  0008000
+
+Once we rename the file, we log its name (and inode item), and because
+the inode was already logged before in the current transaction, we log it
+with a size of 8000 bytes because that is the size we previously logged
+(with the first fsync). As part of the rename, besides logging the inode,
+we do also sync the log, which is done since commit d4682ba03ef618
+("Btrfs: sync log after logging new name"), so the next fsync against our
+inode is effectively a no-op, since no new changes happened since the
+rename operation. Even if we did not sync the log during the rename
+operation, the same problem (file size of 8000 bytes instead of 3000
+bytes) would be visible after replaying the log if the log ended up
+getting synced to disk through some other means, such as for example by
+fsyncing some other modified file. In the example above the fsync after
+the rename operation is there just because not every filesystem may
+guarantee logging/journalling the inode (and syncing the log/journal)
+during the rename operation; for example, it is needed for f2fs, but not
+for ext4 and xfs.
+
+Fix this scenario by, when logging a new name (which is triggered by
+rename and link operations), using the current size of the inode instead
+of the previously logged inode size.
+
+A test case for fstests follows soon.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=202695
+CC: stable@vger.kernel.org # 4.4+
+Reported-by: Seulbae Kim <seulbae@gatech.edu>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4272,6 +4272,19 @@ static int logged_inode_size(struct btrf
+               item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                     struct btrfs_inode_item);
+               *size_ret = btrfs_inode_size(path->nodes[0], item);
++              /*
++               * If the in-memory inode's i_size is smaller then the inode
++               * size stored in the btree, return the inode's i_size, so
++               * that we get a correct inode size after replaying the log
++               * when before a power failure we had a shrinking truncate
++               * followed by addition of a new name (rename / new hard link).
++               * Otherwise return the inode size from the btree, to avoid
++               * data loss when replaying a log due to previously doing a
++               * write that expands the inode's size and logging a new name
++               * immediately after.
++               */
++              if (*size_ret > inode->vfs_inode.i_size)
++                      *size_ret = inode->vfs_inode.i_size;
+       }
+       btrfs_release_path(path);
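
The net effect of the hunk above is a clamp: when logging a new name, the size
written to the log is the smaller of the previously logged size and the inode's
current i_size. Below is a standalone restatement of that rule; the helper name
and plain integer types are made up for the sketch and are not btrfs API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the logged_inode_size() change above. */
static uint64_t size_to_log_for_new_name(uint64_t previously_logged_size,
					 uint64_t current_i_size)
{
	/* A shrinking truncate happened since the inode was last logged:
	 * use the new, smaller i_size so log replay does not resurrect
	 * the old file length. */
	if (previously_logged_size > current_i_size)
		return current_i_size;

	/* Otherwise keep the previously logged size, preserving the fix
	 * from commit 1a4bcf470c88 (fsync data loss after adding a hard
	 * link). */
	return previously_logged_size;
}

int main(void)
{
	/* The reproducer above: 8000 bytes logged, then truncated to 3000. */
	printf("logged size after rename: %llu\n",
	       (unsigned long long)size_to_log_for_new_name(8000, 3000));
	return 0;
}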
diff --git a/queue-4.9/btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch b/queue-4.9/btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch
new file mode 100644 (file)
index 0000000..84bf358
--- /dev/null
@@ -0,0 +1,57 @@
+From 3897b6f0a859288c22fb793fad11ec2327e60fcd Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Thu, 14 Mar 2019 08:56:28 +0100
+Subject: btrfs: raid56: properly unmap parity page in finish_parity_scrub()
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+commit 3897b6f0a859288c22fb793fad11ec2327e60fcd upstream.
+
+Parity page is incorrectly unmapped in finish_parity_scrub(), triggering
+a reference counter bug on i386, i.e.:
+
+ [ 157.662401] kernel BUG at mm/highmem.c:349!
+ [ 157.666725] invalid opcode: 0000 [#1] SMP PTI
+
+The reason is that kunmap(p_page) was completely left out, so we never
+did an unmap for the p_page and the loop unmapping the rbio page was
+iterating over the wrong number of stripes: unmapping should be done
+with nr_data instead of rbio->real_stripes.
+
+Test case to reproduce the bug:
+
+ - create a raid5 btrfs filesystem:
+   # mkfs.btrfs -m raid5 -d raid5 /dev/sdb /dev/sdc /dev/sdd /dev/sde
+
+ - mount it:
+   # mount /dev/sdb /mnt
+
+ - run btrfs scrub in a loop:
+   # while :; do btrfs scrub start -BR /mnt; done
+
+BugLink: https://bugs.launchpad.net/bugs/1812845
+Fixes: 5a6ac9eacb49 ("Btrfs, raid56: support parity scrub on raid56")
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/raid56.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2395,8 +2395,9 @@ static noinline void finish_parity_scrub
+                       bitmap_clear(rbio->dbitmap, pagenr, 1);
+               kunmap(p);
+-              for (stripe = 0; stripe < rbio->real_stripes; stripe++)
++              for (stripe = 0; stripe < nr_data; stripe++)
+                       kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
++              kunmap(p_page);
+       }
+       __free_page(p_page);
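
The rule the fix restores is that every kmap() must be paired with a kunmap()
of the same page; a kunmap() of a page that is not mapped is what trips the BUG
in the i386 highmem code. The self-contained sketch below models the per-page
mapping counts around the loop in finish_parity_scrub(); kmap()/kunmap() are
replaced by simple counters, and the 4-data + 1-parity stripe layout is an
example rather than anything taken from the patch.

#include <stdio.h>

#define REAL_STRIPES 5                  /* example raid5: 4 data + 1 parity */
#define NR_DATA      4

static int rbio_page_map[REAL_STRIPES]; /* pages returned by page_in_rbio() */
static int p_page_map;                  /* the separately allocated p_page */

static void finish_parity_scrub(int fixed)
{
	for (int i = 0; i < REAL_STRIPES; i++)
		rbio_page_map[i] = 0;
	p_page_map = 0;

	p_page_map++;                           /* kmap(p_page) */
	for (int stripe = 0; stripe < NR_DATA; stripe++)
		rbio_page_map[stripe]++;        /* kmap(page_in_rbio(...)) */

	/* ... generate parity into p_page ... */

	int limit = fixed ? NR_DATA : REAL_STRIPES;
	for (int stripe = 0; stripe < limit; stripe++)
		rbio_page_map[stripe]--;        /* kunmap(page_in_rbio(...)) */
	if (fixed)
		p_page_map--;                   /* kunmap(p_page): the fix */

	printf("%s: parity rbio slot count = %d, p_page count = %d\n",
	       fixed ? "fixed " : "broken",
	       rbio_page_map[REAL_STRIPES - 1], p_page_map);
}

int main(void)
{
	finish_parity_scrub(0);
	finish_parity_scrub(1);
	return 0;
}

The broken variant ends with -1 on the parity slot (a spurious kunmap() of a
page that was never mapped, the condition the i386 BUG reports) and +1 on
p_page (a leaked mapping); the fixed variant balances both to zero.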
diff --git a/queue-4.9/btrfs-remove-warn_on-in-log_dir_items.patch b/queue-4.9/btrfs-remove-warn_on-in-log_dir_items.patch
new file mode 100644 (file)
index 0000000..6ff3cbc
--- /dev/null
@@ -0,0 +1,50 @@
+From 2cc8334270e281815c3850c3adea363c51f21e0d Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 6 Mar 2019 17:13:04 -0500
+Subject: btrfs: remove WARN_ON in log_dir_items
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 2cc8334270e281815c3850c3adea363c51f21e0d upstream.
+
+When Filipe added the recursive directory logging stuff in
+2f2ff0ee5e430 ("Btrfs: fix metadata inconsistencies after directory
+fsync") he specifically didn't take the directory i_mutex for the
+children directories that we need to log because of lockdep.  This is
+generally fine, but can lead to this WARN_ON() tripping if we happen to
+run delayed deletions in between our first search and our second search
+of dir_item/dir_indexes for this directory.  We expect this to happen,
+so the WARN_ON() isn't necessary.  Drop the WARN_ON() and add a comment
+so we know why this case can happen.
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3343,9 +3343,16 @@ static noinline int log_dir_items(struct
+       }
+       btrfs_release_path(path);
+-      /* find the first key from this transaction again */
++      /*
++       * Find the first key from this transaction again.  See the note for
++       * log_new_dir_dentries, if we're logging a directory recursively we
++       * won't be holding its i_mutex, which means we can modify the directory
++       * while we're logging it.  If we remove an entry between our first
++       * search and this search we'll not find the key again and can just
++       * bail.
++       */
+       ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+-      if (WARN_ON(ret != 0))
++      if (ret != 0)
+               goto done;
+       /*
diff --git a/queue-4.9/net-dsa-qca8k-remove-leftover-phy-accessors.patch b/queue-4.9/net-dsa-qca8k-remove-leftover-phy-accessors.patch
new file mode 100644 (file)
index 0000000..c75e789
--- /dev/null
@@ -0,0 +1,64 @@
+From 1eec7151ae0e134bd42e3f128066b2ff8da21393 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Fri, 22 Mar 2019 01:05:02 +0100
+Subject: net: dsa: qca8k: remove leftover phy accessors
+
+From: Christian Lamparter <chunkeey@gmail.com>
+
+commit 1eec7151ae0e134bd42e3f128066b2ff8da21393 upstream.
+
+This belated patch implements Andrew Lunn's request of
+"remove the phy_read() and phy_write() functions."
+<https://lore.kernel.org/patchwork/comment/902734/>
+
+While seemingly harmless, this causes the switch's user
+port PHYs to get registered twice. This is because the
+DSA subsystem will create a slave mdio-bus not knowing
+that the qca8k_phy_(read|write) accessors operate on
+the external mdio-bus. So the same "bus" gets effectively
+duplicated.
+
+Cc: stable@vger.kernel.org
+Fixes: 6b93fb46480a ("net-next: dsa: add new driver for qca8xxx family")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/dsa/qca8k.c |   18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -630,22 +630,6 @@ qca8k_adjust_link(struct dsa_switch *ds,
+       qca8k_port_set_status(priv, port, 1);
+ }
+-static int
+-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
+-{
+-      struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+-
+-      return mdiobus_read(priv->bus, phy, regnum);
+-}
+-
+-static int
+-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
+-{
+-      struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+-
+-      return mdiobus_write(priv->bus, phy, regnum, val);
+-}
+-
+ static void
+ qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+ {
+@@ -961,8 +945,6 @@ static struct dsa_switch_ops qca8k_switc
+       .setup                  = qca8k_setup,
+       .adjust_link            = qca8k_adjust_link,
+       .get_strings            = qca8k_get_strings,
+-      .phy_read               = qca8k_phy_read,
+-      .phy_write              = qca8k_phy_write,
+       .get_ethtool_stats      = qca8k_get_ethtool_stats,
+       .get_sset_count         = qca8k_get_sset_count,
+       .get_eee                = qca8k_get_eee,
diff --git a/queue-4.9/nfsv4.1-don-t-free-interrupted-slot-on-open.patch b/queue-4.9/nfsv4.1-don-t-free-interrupted-slot-on-open.patch
new file mode 100644 (file)
index 0000000..3cff151
--- /dev/null
@@ -0,0 +1,34 @@
+From 0cb98abb5bd13b9a636bde603d952d722688b428 Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <kolga@netapp.com>
+Date: Tue, 19 Mar 2019 12:12:13 -0400
+Subject: NFSv4.1 don't free interrupted slot on open
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+commit 0cb98abb5bd13b9a636bde603d952d722688b428 upstream.
+
+Allow the async rpc task to finish and update the open state if needed,
+then free the slot. Otherwise, the async rpc task is unable to decode the reply.
+
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Fixes: ae55e59da0e4 ("pnfs: Don't release the sequence slot...")
+Cc: stable@vger.kernel.org # v4.18+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2748,7 +2748,8 @@ static int _nfs4_open_and_get_state(stru
+                       nfs4_schedule_stateid_recovery(server, state);
+       }
+ out:
+-      nfs4_sequence_free_slot(&opendata->o_res.seq_res);
++      if (!opendata->cancelled)
++              nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+       return ret;
+ }
diff --git a/queue-4.9/powerpc-bpf-fix-generation-of-load-store-dw-instructions.patch b/queue-4.9/powerpc-bpf-fix-generation-of-load-store-dw-instructions.patch
new file mode 100644 (file)
index 0000000..7d2bc59
--- /dev/null
@@ -0,0 +1,200 @@
+From 86be36f6502c52ddb4b85938145324fd07332da1 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Fri, 15 Mar 2019 20:21:19 +0530
+Subject: powerpc: bpf: Fix generation of load/store DW instructions
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 86be36f6502c52ddb4b85938145324fd07332da1 upstream.
+
+Yauheni Kaliuta pointed out that PTR_TO_STACK store/load verifier test
+was failing on powerpc64 BE, and rightfully indicated that the PPC_LD()
+macro is not masking away the last two bits of the offset per the ISA,
+resulting in the generation of 'lwa' instruction instead of the intended
+'ld' instruction.
+
+Segher also pointed out that we can't simply mask away the last two bits
+as that will result in loading/storing from/to a memory location that
+was not intended.
+
+This patch addresses this by using ldx/stdx if the offset is not
+word-aligned. We load the offset into a temporary register (TMP_REG_2)
+and use that as the index register in a subsequent ldx/stdx. We fix
+PPC_LD() macro to mask off the last two bits, but enhance PPC_BPF_LL()
+and PPC_BPF_STL() to factor in the offset value and generate the proper
+instruction sequence. We also convert all existing users of PPC_LD() and
+PPC_STD() to use these macros. All existing uses of these macros have
+been audited to ensure that TMP_REG_2 can be clobbered.
+
+Fixes: 156d0e290e96 ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
+Cc: stable@vger.kernel.org # v4.9+
+
+Reported-by: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/ppc-opcode.h |    2 ++
+ arch/powerpc/net/bpf_jit.h            |   17 +++++------------
+ arch/powerpc/net/bpf_jit32.h          |    4 ++++
+ arch/powerpc/net/bpf_jit64.h          |   20 ++++++++++++++++++++
+ arch/powerpc/net/bpf_jit_comp64.c     |   12 ++++++------
+ 5 files changed, 37 insertions(+), 18 deletions(-)
+
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -225,6 +225,7 @@
+ /* Misc instructions for BPF compiler */
+ #define PPC_INST_LBZ                  0x88000000
+ #define PPC_INST_LD                   0xe8000000
++#define PPC_INST_LDX                  0x7c00002a
+ #define PPC_INST_LHZ                  0xa0000000
+ #define PPC_INST_LWZ                  0x80000000
+ #define PPC_INST_LHBRX                        0x7c00062c
+@@ -232,6 +233,7 @@
+ #define PPC_INST_STB                  0x98000000
+ #define PPC_INST_STH                  0xb0000000
+ #define PPC_INST_STD                  0xf8000000
++#define PPC_INST_STDX                 0x7c00012a
+ #define PPC_INST_STDU                 0xf8000001
+ #define PPC_INST_STW                  0x90000000
+ #define PPC_INST_STWU                 0x94000000
+--- a/arch/powerpc/net/bpf_jit.h
++++ b/arch/powerpc/net/bpf_jit.h
+@@ -51,6 +51,8 @@
+ #define PPC_LIS(r, i)         PPC_ADDIS(r, 0, i)
+ #define PPC_STD(r, base, i)   EMIT(PPC_INST_STD | ___PPC_RS(r) |            \
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
++#define PPC_STDX(r, base, b)  EMIT(PPC_INST_STDX | ___PPC_RS(r) |           \
++                                   ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_STDU(r, base, i)  EMIT(PPC_INST_STDU | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+ #define PPC_STW(r, base, i)   EMIT(PPC_INST_STW | ___PPC_RS(r) |            \
+@@ -65,7 +67,9 @@
+ #define PPC_LBZ(r, base, i)   EMIT(PPC_INST_LBZ | ___PPC_RT(r) |            \
+                                    ___PPC_RA(base) | IMM_L(i))
+ #define PPC_LD(r, base, i)    EMIT(PPC_INST_LD | ___PPC_RT(r) |             \
+-                                   ___PPC_RA(base) | IMM_L(i))
++                                   ___PPC_RA(base) | ((i) & 0xfffc))
++#define PPC_LDX(r, base, b)   EMIT(PPC_INST_LDX | ___PPC_RT(r) |            \
++                                   ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_LWZ(r, base, i)   EMIT(PPC_INST_LWZ | ___PPC_RT(r) |            \
+                                    ___PPC_RA(base) | IMM_L(i))
+ #define PPC_LHZ(r, base, i)   EMIT(PPC_INST_LHZ | ___PPC_RT(r) |            \
+@@ -85,17 +89,6 @@
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_BPF_STDCX(s, a, b)        EMIT(PPC_INST_STDCX | ___PPC_RS(s) |          \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+-
+-#ifdef CONFIG_PPC64
+-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
+-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
+-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+-#else
+-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+-#endif
+-
+ #define PPC_CMPWI(a, i)               EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
+ #define PPC_CMPDI(a, i)               EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
+ #define PPC_CMPW(a, b)                EMIT(PPC_INST_CMPW | ___PPC_RA(a) |           \
+--- a/arch/powerpc/net/bpf_jit32.h
++++ b/arch/powerpc/net/bpf_jit32.h
+@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
+ #define PPC_NTOHS_OFFS(r, base, i)    PPC_LHZ_OFFS(r, base, i)
+ #endif
++#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
++#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
++#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
++
+ #define SEEN_DATAREF 0x10000 /* might call external helpers */
+ #define SEEN_XREG    0x20000 /* X reg is used */
+ #define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
+--- a/arch/powerpc/net/bpf_jit64.h
++++ b/arch/powerpc/net/bpf_jit64.h
+@@ -86,6 +86,26 @@ DECLARE_LOAD_FUNC(sk_load_byte);
+                       (imm >= SKF_LL_OFF ? func##_negative_offset : func) :   \
+                       func##_positive_offset)
++/*
++ * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
++ * so ensure that it isn't in use already.
++ */
++#define PPC_BPF_LL(r, base, i) do {                                         \
++                              if ((i) % 4) {                                \
++                                      PPC_LI(b2p[TMP_REG_2], (i));          \
++                                      PPC_LDX(r, base, b2p[TMP_REG_2]);     \
++                              } else                                        \
++                                      PPC_LD(r, base, i);                   \
++                              } while(0)
++#define PPC_BPF_STL(r, base, i) do {                                        \
++                              if ((i) % 4) {                                \
++                                      PPC_LI(b2p[TMP_REG_2], (i));          \
++                                      PPC_STDX(r, base, b2p[TMP_REG_2]);    \
++                              } else                                        \
++                                      PPC_STD(r, base, i);                  \
++                              } while(0)
++#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
++
+ #define SEEN_FUNC     0x1000 /* might call external helpers */
+ #define SEEN_STACK    0x2000 /* uses BPF stack */
+ #define SEEN_SKB      0x4000 /* uses sk_buff */
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *
+        * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+        *   goto out;
+        */
+-      PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
++      PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+       PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
+       PPC_BCC(COND_GT, out);
+@@ -278,7 +278,7 @@ static void bpf_jit_emit_tail_call(u32 *
+       /* prog = array->ptrs[index]; */
+       PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
+       PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
+-      PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
++      PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+       /*
+        * if (prog == NULL)
+@@ -288,7 +288,7 @@ static void bpf_jit_emit_tail_call(u32 *
+       PPC_BCC(COND_EQ, out);
+       /* goto *(prog->bpf_func + prologue_size); */
+-      PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
++      PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+ #ifdef PPC64_ELF_ABI_v1
+       /* skip past the function descriptor */
+       PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
+@@ -620,7 +620,7 @@ bpf_alu32_trunc:
+                                * the instructions generated will remain the
+                                * same across all passes
+                                */
+-                              PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
++                              PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
+                               PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
+                               PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
+                               break;
+@@ -676,7 +676,7 @@ emit_clear:
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               src_reg = b2p[TMP_REG_1];
+                       }
+-                      PPC_STD(src_reg, dst_reg, off);
++                      PPC_BPF_STL(src_reg, dst_reg, off);
+                       break;
+               /*
+@@ -723,7 +723,7 @@ emit_clear:
+                       break;
+               /* dst = *(u64 *)(ul) (src + off) */
+               case BPF_LDX | BPF_MEM | BPF_DW:
+-                      PPC_LD(dst_reg, src_reg, off);
++                      PPC_BPF_LL(dst_reg, src_reg, off);
+                       break;
+               /*
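
To see why the unmasked offset changes the instruction: ld, ldu and lwa are all
DS-form loads sharing primary opcode 58, with the two lowest bits of the
displacement field acting as the extended opcode (ld = 0, ldu = 1, lwa = 2).
The sketch below encodes PPC_INST_LD with an arbitrary non-word-aligned offset
using the old and the new displacement masks; the opcode value comes from the
hunk above, the example offset is made up, and the register fields are left at
zero since only the displacement/extended-opcode interaction matters here.

#include <stdio.h>
#include <stdint.h>

#define PPC_INST_LD   0xe8000000u
#define IMM_L(i)      ((uintptr_t)(i) & 0xffff)   /* old PPC_LD() behaviour */
#define IMM_DS(i)     ((uintptr_t)(i) & 0xfffc)   /* fixed PPC_LD() mask    */

static const char *ds_xo_name(uint32_t insn)
{
	switch (insn & 0x3) {
	case 0:  return "ld";
	case 1:  return "ldu";
	case 2:  return "lwa";
	default: return "reserved";
	}
}

int main(void)
{
	int off = 110;                      /* example offset, not word-aligned */

	uint32_t old_insn = PPC_INST_LD | IMM_L(off);
	uint32_t new_insn = PPC_INST_LD | IMM_DS(off);

	/* old: the low bits leak into the extended opcode, so a sign-extending
	 * 32-bit 'lwa' is emitted instead of the intended 64-bit 'ld'. */
	printf("offset %d, old mask: 0x%08x -> %s\n",
	       off, (unsigned)old_insn, ds_xo_name(old_insn));

	/* new: the bits are masked off, but the displacement is now 108
	 * instead of 110, which is why the JIT instead loads the offset into
	 * TMP_REG_2 and emits ldx/stdx for non-word-aligned offsets. */
	printf("offset %d, new mask: 0x%08x -> %s (displacement %u)\n",
	       off, (unsigned)new_insn, ds_xo_name(new_insn),
	       (unsigned)(new_insn & 0xfffc));
	return 0;
}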
diff --git a/queue-4.9/series b/queue-4.9/series
index ad6791f7d1081bdcde125abd09c152a0ce539c94..a2727a7d23fd9f7faf6937c7f3131e4813866a7f 100644 (file)
--- a/queue-4.9/series
@@ -14,3 +14,10 @@ sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
 mac8390-fix-mmio-access-size-probe.patch
 tun-properly-test-for-iff_up.patch
 tun-add-a-missing-rcu_read_unlock-in-error-path.patch
+btrfs-fix-incorrect-file-size-after-shrinking-truncate-and-fsync.patch
+btrfs-remove-warn_on-in-log_dir_items.patch
+btrfs-raid56-properly-unmap-parity-page-in-finish_parity_scrub.patch
+arm-imx6q-cpuidle-fix-bug-that-cpu-might-not-wake-up-at-expected-time.patch
+powerpc-bpf-fix-generation-of-load-store-dw-instructions.patch
+nfsv4.1-don-t-free-interrupted-slot-on-open.patch
+net-dsa-qca8k-remove-leftover-phy-accessors.patch