--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Tue, 17 Jan 2017 22:07:19 -0500
+Subject: bnxt_en: Fix "uninitialized variable" bug in TPA code path.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+
+[ Upstream commit 719ca8111402aa6157bd83a3c966d184db0d8956 ]
+
+In the TPA GRO code path, initialize the tcp_opt_len variable to 0 so
+that it will be correct for packets without TCP timestamps. The bug
+caused the SKB fields to be incorrectly set up for packets without
+TCP timestamps, leading to these packets being rejected by the stack.
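+
+For illustration, a minimal sketch of the pattern (mirroring the hunk
+below; the surrounding driver code is elided):
+
+  struct tcphdr *th;
+  int len, nw_off, tcp_opt_len = 0;      /* 0 means "no TCP options" */
+
+  if (tcp_ts)
+          tcp_opt_len = 12;              /* timestamp option adds 12 bytes */
+
+  /* Without the "= 0" initializer, every later use of tcp_opt_len
+   * (header length, payload offset) reads an indeterminate value for
+   * packets that carry no timestamp option.
+   */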
+
+Reported-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+Acked-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1097,7 +1097,7 @@ static struct sk_buff *bnxt_gro_func_573
+ {
+ #ifdef CONFIG_INET
+ struct tcphdr *th;
+- int len, nw_off, tcp_opt_len;
++ int len, nw_off, tcp_opt_len = 0;
+
+ if (tcp_ts)
+ tcp_opt_len = 12;
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 18 Jan 2017 15:14:17 +0100
+Subject: bpf: don't trigger OOM killer under pressure with map alloc
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+
+[ Upstream commit d407bd25a204bd66b7346dde24bd3d37ef0e0b05 ]
+
+This patch adds two helpers, bpf_map_area_alloc() and bpf_map_area_free(),
+that are to be used for map allocations. Using kmalloc() for very large
+allocations can cause excessive work within the page allocator, so i) fall
+back earlier to vmalloc() when the attempt is considered costly anyway,
+and even more importantly ii) don't trigger OOM killer with any of the
+allocators.
+
+Since this is based on a user space request, for example, when creating
+maps with element pre-allocation, we really want such requests to fail
+instead of killing other user space processes.
+
+Also, don't spam the kernel log with warnings should any of the allocations
+fail under pressure. Given that, we can make backend selection in
+bpf_map_area_alloc() generic, and convert all maps over to use this API
+for spots with potentially large allocation requests.
+
+Note, replacing the one kmalloc_array() is fine as overflow checks happen
+earlier in htab_map_alloc(), since it must also protect the multiplication
+for vmalloc() should kmalloc_array() fail.
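+
+As a usage sketch, mirroring the converted hash table map below: with
+4 KiB pages and PAGE_ALLOC_COSTLY_ORDER of 3, requests up to 32 KiB still
+try kmalloc() first, while larger requests (or a failed small attempt) go
+straight to __vmalloc().
+
+  htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+                                     sizeof(struct bucket));
+  if (!htab->buckets)
+          goto free_htab;                /* fails cleanly, no OOM killer */
+  ...
+  bpf_map_area_free(htab->buckets);      /* kvfree() picks kfree()/vfree() */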
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h | 2 ++
+ kernel/bpf/arraymap.c | 18 +++++++-----------
+ kernel/bpf/hashtab.c | 22 +++++++++-------------
+ kernel/bpf/stackmap.c | 20 ++++++++------------
+ kernel/bpf/syscall.c | 26 ++++++++++++++++++++++++++
+ 5 files changed, 52 insertions(+), 36 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -243,6 +243,8 @@ struct bpf_map *bpf_map_inc(struct bpf_m
+ void bpf_map_put_with_uref(struct bpf_map *map);
+ void bpf_map_put(struct bpf_map *map);
+ int bpf_map_precharge_memlock(u32 pages);
++void *bpf_map_area_alloc(size_t size);
++void bpf_map_area_free(void *base);
+
+ extern int sysctl_unprivileged_bpf_disabled;
+
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -11,7 +11,6 @@
+ */
+ #include <linux/bpf.h>
+ #include <linux/err.h>
+-#include <linux/vmalloc.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
+ #include <linux/filter.h>
+@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(u
+ if (array_size >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+
+-
+ /* allocate all map elements and zero-initialize them */
+- array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
+- if (!array) {
+- array = vzalloc(array_size);
+- if (!array)
+- return ERR_PTR(-ENOMEM);
+- }
++ array = bpf_map_area_alloc(array_size);
++ if (!array)
++ return ERR_PTR(-ENOMEM);
+
+ /* copy mandatory map attributes */
+ array->map.map_type = attr->map_type;
+@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(u
+
+ if (array_size >= U32_MAX - PAGE_SIZE ||
+ elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+- kvfree(array);
++ bpf_map_area_free(array);
+ return ERR_PTR(-ENOMEM);
+ }
+ out:
+@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_ma
+ if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+ bpf_array_free_percpu(array);
+
+- kvfree(array);
++ bpf_map_area_free(array);
+ }
+
+ static const struct bpf_map_ops array_ops = {
+@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf
+ /* make sure it's empty */
+ for (i = 0; i < array->map.max_entries; i++)
+ BUG_ON(array->ptrs[i] != NULL);
+- kvfree(array);
++
++ bpf_map_area_free(array);
+ }
+
+ static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -13,7 +13,6 @@
+ #include <linux/bpf.h>
+ #include <linux/jhash.h>
+ #include <linux/filter.h>
+-#include <linux/vmalloc.h>
+ #include "percpu_freelist.h"
+
+ struct bucket {
+@@ -84,14 +83,15 @@ static void htab_free_elems(struct bpf_h
+ free_percpu(pptr);
+ }
+ free_elems:
+- vfree(htab->elems);
++ bpf_map_area_free(htab->elems);
+ }
+
+ static int prealloc_elems_and_freelist(struct bpf_htab *htab)
+ {
+ int err = -ENOMEM, i;
+
+- htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
++ htab->elems = bpf_map_area_alloc(htab->elem_size *
++ htab->map.max_entries);
+ if (!htab->elems)
+ return -ENOMEM;
+
+@@ -227,14 +227,10 @@ static struct bpf_map *htab_map_alloc(un
+ goto free_htab;
+
+ err = -ENOMEM;
+- htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
+- GFP_USER | __GFP_NOWARN);
+-
+- if (!htab->buckets) {
+- htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
+- if (!htab->buckets)
+- goto free_htab;
+- }
++ htab->buckets = bpf_map_area_alloc(htab->n_buckets *
++ sizeof(struct bucket));
++ if (!htab->buckets)
++ goto free_htab;
+
+ for (i = 0; i < htab->n_buckets; i++) {
+ INIT_HLIST_HEAD(&htab->buckets[i].head);
+@@ -258,7 +254,7 @@ static struct bpf_map *htab_map_alloc(un
+ free_extra_elems:
+ free_percpu(htab->extra_elems);
+ free_buckets:
+- kvfree(htab->buckets);
++ bpf_map_area_free(htab->buckets);
+ free_htab:
+ kfree(htab);
+ return ERR_PTR(err);
+@@ -715,7 +711,7 @@ static void htab_map_free(struct bpf_map
+ pcpu_freelist_destroy(&htab->freelist);
+ }
+ free_percpu(htab->extra_elems);
+- kvfree(htab->buckets);
++ bpf_map_area_free(htab->buckets);
+ kfree(htab);
+ }
+
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -7,7 +7,6 @@
+ #include <linux/bpf.h>
+ #include <linux/jhash.h>
+ #include <linux/filter.h>
+-#include <linux/vmalloc.h>
+ #include <linux/stacktrace.h>
+ #include <linux/perf_event.h>
+ #include "percpu_freelist.h"
+@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(s
+ u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
+ int err;
+
+- smap->elems = vzalloc(elem_size * smap->map.max_entries);
++ smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
+ if (!smap->elems)
+ return -ENOMEM;
+
+@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(s
+ return 0;
+
+ free_elems:
+- vfree(smap->elems);
++ bpf_map_area_free(smap->elems);
+ return err;
+ }
+
+@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(u
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-E2BIG);
+
+- smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
+- if (!smap) {
+- smap = vzalloc(cost);
+- if (!smap)
+- return ERR_PTR(-ENOMEM);
+- }
++ smap = bpf_map_area_alloc(cost);
++ if (!smap)
++ return ERR_PTR(-ENOMEM);
+
+ err = -E2BIG;
+ cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(u
+ put_buffers:
+ put_callchain_buffers();
+ free_smap:
+- kvfree(smap);
++ bpf_map_area_free(smap);
+ return ERR_PTR(err);
+ }
+
+@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_ma
+ /* wait for bpf programs to complete before freeing stack map */
+ synchronize_rcu();
+
+- vfree(smap->elems);
++ bpf_map_area_free(smap->elems);
+ pcpu_freelist_destroy(&smap->freelist);
+- kvfree(smap);
++ bpf_map_area_free(smap);
+ put_callchain_buffers();
+ }
+
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -12,6 +12,8 @@
+ #include <linux/bpf.h>
+ #include <linux/syscalls.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mmzone.h>
+ #include <linux/anon_inodes.h>
+ #include <linux/file.h>
+ #include <linux/license.h>
+@@ -48,6 +50,30 @@ void bpf_register_map_type(struct bpf_ma
+ list_add(&tl->list_node, &bpf_map_types);
+ }
+
++void *bpf_map_area_alloc(size_t size)
++{
++ /* We definitely need __GFP_NORETRY, so OOM killer doesn't
++ * trigger under memory pressure as we really just want to
++ * fail instead.
++ */
++ const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
++ void *area;
++
++ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
++ area = kmalloc(size, GFP_USER | flags);
++ if (area != NULL)
++ return area;
++ }
++
++ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
++ PAGE_KERNEL);
++}
++
++void bpf_map_area_free(void *area)
++{
++ kvfree(area);
++}
++
+ int bpf_map_precharge_memlock(u32 pages)
+ {
+ struct user_struct *user = get_current_user();
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Date: Fri, 23 Dec 2016 15:00:18 +0530
+Subject: Btrfs: Fix deadlock between direct IO and fast fsync
+
+From: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+
+
+[ Upstream commit 97dcdea076ecef41ea4aaa23d4397c2f622e4265 ]
+
+The following deadlock is seen when executing generic/113 test,
+
+ ---------------------------------------------------------+----------------------------------------------------
+  Direct I/O task                                           Fast fsync task
+ ---------------------------------------------------------+----------------------------------------------------
+  btrfs_direct_IO
+   __blockdev_direct_IO
+    do_blockdev_direct_IO
+     do_direct_IO
+      btrfs_get_blocks_direct
+       while (blocks need to be written)
+        get_more_blocks (first iteration)
+         btrfs_get_blocks_direct
+          btrfs_create_dio_extent
+            down_read(&BTRFS_I(inode)->dio_sem)
+            Create and add extent map and ordered extent
+            up_read(&BTRFS_I(inode)->dio_sem)
+                                                            btrfs_sync_file
+                                                             btrfs_log_dentry_safe
+                                                              btrfs_log_inode_parent
+                                                               btrfs_log_inode
+                                                                btrfs_log_changed_extents
+                                                                  down_write(&BTRFS_I(inode)->dio_sem)
+                                                                   Collect new extent maps and ordered extents
+                                                                   wait for ordered extent completion
+        get_more_blocks (second iteration)
+         btrfs_get_blocks_direct
+          btrfs_create_dio_extent
+            down_read(&BTRFS_I(inode)->dio_sem)
+ --------------------------------------------------------------------------------------------------------------
+
+In the above sequence, the Btrfs direct I/O code path has not yet started
+submitting bios for the file range covered by the initial ordered
+extent. Meanwhile, the fast fsync task obtains the write semaphore and
+waits for I/O on the ordered extent to complete. However, the
+Direct I/O task is now blocked on obtaining the read semaphore.
+
+To resolve the deadlock, this commit modifies the Direct I/O code path
+to obtain the read semaphore before invoking
+__blockdev_direct_IO(). The semaphore is then given up after
+__blockdev_direct_IO() returns. This allows the Direct I/O code to
+complete I/O on all the ordered extents it creates.
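+
+Schematically, the new lock scope looks like this (a simplified sketch of
+the hunks below, error handling elided):
+
+  /* btrfs_direct_IO(), write path */
+  down_read(&BTRFS_I(inode)->dio_sem);
+  ret = __blockdev_direct_IO(...);       /* may create several ordered extents */
+  up_read(&BTRFS_I(inode)->dio_sem);
+
+  /* btrfs_create_dio_extent() no longer takes dio_sem per extent, so the
+   * second get_more_blocks() iteration cannot block against a fsync task
+   * that already holds the semaphore for writing.
+   */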
+
+Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7235,7 +7235,6 @@ static struct extent_map *btrfs_create_d
+ struct extent_map *em = NULL;
+ int ret;
+
+- down_read(&BTRFS_I(inode)->dio_sem);
+ if (type != BTRFS_ORDERED_NOCOW) {
+ em = create_pinned_em(inode, start, len, orig_start,
+ block_start, block_len, orig_block_len,
+@@ -7254,7 +7253,6 @@ static struct extent_map *btrfs_create_d
+ em = ERR_PTR(ret);
+ }
+ out:
+- up_read(&BTRFS_I(inode)->dio_sem);
+
+ return em;
+ }
+@@ -8707,6 +8705,7 @@ static ssize_t btrfs_direct_IO(struct ki
+ dio_data.unsubmitted_oe_range_start = (u64)offset;
+ dio_data.unsubmitted_oe_range_end = (u64)offset;
+ current->journal_info = &dio_data;
++ down_read(&BTRFS_I(inode)->dio_sem);
+ } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags)) {
+ inode_dio_end(inode);
+@@ -8719,6 +8718,7 @@ static ssize_t btrfs_direct_IO(struct ki
+ iter, btrfs_get_blocks_direct, NULL,
+ btrfs_submit_direct, flags);
+ if (iov_iter_rw(iter) == WRITE) {
++ up_read(&BTRFS_I(inode)->dio_sem);
+ current->journal_info = NULL;
+ if (ret < 0 && ret != -EIOCBQUEUED) {
+ if (dio_data.reserve)
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Thu, 1 Dec 2016 13:43:31 -0800
+Subject: Btrfs: fix truncate down when no_holes feature is enabled
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+
+[ Upstream commit 91298eec05cd8d4e828cf7ee5d4a6334f70cf69a ]
+
+For such a file mapping,
+
+[0-4k][hole][8k-12k]
+
+In NO_HOLES mode, we don't have the [hole] extent any more.
+Commit c1aa45759e90 ("Btrfs: fix shrinking truncate when the no_holes
+feature is enabled") fixed disk isize not being updated in NO_HOLES mode
+when data is not flushed.
+
+However, even if data has been flushed, we can still end up with a wrong
+disk isize, since disk isize was set to the 'start' of the last evicted
+extent. For the mapping above, truncating down to 6k leaves disk isize at
+8k instead of 6k; the fix sets last_size to the new size in NO_HOLES mode
+so the on-disk isize ends up correct.
+
+Reviewed-by: Chris Mason <clm@fb.com>
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4480,8 +4480,19 @@ search_again:
+ if (found_type > min_type) {
+ del_item = 1;
+ } else {
+- if (item_end < new_size)
++ if (item_end < new_size) {
++ /*
++ * With NO_HOLES mode, for the following mapping
++ *
++ * [0-4k][hole][8k-12k]
++ *
++ * if truncating isize down to 6k, it ends up
++ * isize being 8k.
++ */
++ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
++ last_size = new_size;
+ break;
++ }
+ if (found_key.offset >= new_size)
+ del_item = 1;
+ else
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 18 Jan 2017 19:44:42 -0800
+Subject: gianfar: Do not reuse pages from emergency reserve
+
+From: Eric Dumazet <edumazet@google.com>
+
+
+[ Upstream commit 69fed99baac186013840ced3524562841296034f ]
+
+A driver using dev_alloc_page() must not reuse a page that had to be
+allocated from the emergency memory reserve.
+
+Otherwise all packets using this page will be immediately dropped, except
+for the very specific sockets having the SOCK_MEMALLOC bit set.
+
+This issue might be hard to debug, because only a fraction of the RX
+ring buffer would suffer from drops.
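+
+A minimal sketch of the reuse test after the fix (this is essentially the
+changed line in gfar_add_rx_frag() below):
+
+  /* a page from the emergency (pfmemalloc) reserve must go back to the
+   * allocator instead of being recycled into the RX ring, otherwise its
+   * packets are dropped for ordinary (non-SOCK_MEMALLOC) sockets
+   */
+  if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
+          return false;          /* don't reuse; a fresh page is allocated */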
+
+Fixes: 75354148ce69 ("gianfar: Add paged allocation and Rx S/G")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Claudiu Manoil <claudiu.manoil@freescale.com>
+Acked-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/gianfar.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -2951,7 +2951,7 @@ static bool gfar_add_rx_frag(struct gfar
+ }
+
+ /* try reuse page */
+- if (unlikely(page_count(page) != 1))
++ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
+ return false;
+
+ /* change offset to the other half */
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Eran Ben Elisha <eranbe@mellanox.com>
+Date: Tue, 17 Jan 2017 19:19:17 +0200
+Subject: net: ethtool: Initialize buffer when querying device channel settings
+
+From: Eran Ben Elisha <eranbe@mellanox.com>
+
+
+[ Upstream commit 31a86d137219373c3222ca5f4f912e9a4d8065bb ]
+
+The ethtool channels response struct was uninitialized when querying device
+channel boundary settings. As a result, fields not reported by the driver
+hold garbage. This may cause unsupported parameters to be sent to the driver.
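+
+A sketch of the fixed pattern (as in the hunk below), using a designated
+initializer so every field the driver's get_channels() does not touch
+reads as zero:
+
+  struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
+
+  dev->ethtool_ops->get_channels(dev, &max);
+  /* any max_* field the driver left untouched is now 0, so the later
+   * bounds checks cannot pass on stack garbage
+   */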
+
+Fixes: 8bf368620486 ('ethtool: ensure channel counts are within bounds ...')
+Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+CC: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1704,7 +1704,7 @@ static noinline_for_stack int ethtool_ge
+ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
+ void __user *useraddr)
+ {
+- struct ethtool_channels channels, max;
++ struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
+ u32 max_rx_in_use = 0;
+
+ if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Wed, 18 Jan 2017 14:29:21 +0100
+Subject: objtool: Fix IRET's opcode
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+
+[ Upstream commit b5b46c4740aed1538544f0fa849c5b76c7823469 ]
+
+The IRET opcode is 0xcf according to the Intel manual and also to objdump of my
+vmlinux:
+
+ 1ea8: 48 cf iretq
+
+Fix the opcode in arch_decode_instruction().
+
+The previous value (0xc5) seems to correspond to LDS.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20170118132921.19319-1-jslaby@suse.cz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/arch/x86/decode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *
+ *type = INSN_RETURN;
+ break;
+
+- case 0xc5: /* iret */
+ case 0xca: /* retf */
+ case 0xcb: /* retf */
++ case 0xcf: /* iret */
+ *type = INSN_CONTEXT_SWITCH;
+ break;
+
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Fri, 6 Jan 2017 10:39:49 +1100
+Subject: powerpc/eeh: Enable IO path on permanent error
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+
+[ Upstream commit 387bbc974f6adf91aa635090f73434ed10edd915 ]
+
+On a permanent error we give up recovery, simply shut down the affected
+devices and remove them. If the devices can't be put into a quiet state,
+they spew more traffic that is likely to cause another unexpected EEH
+error. This was observed on a "p8dtu2u" machine:
+
+ 0002:00:00.0 PCI bridge: IBM Device 03dc
+ 0002:01:00.0 Ethernet controller: Intel Corporation \
+ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02)
+ 0002:01:00.1 Ethernet controller: Intel Corporation \
+ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02)
+ 0002:01:00.2 Ethernet controller: Intel Corporation \
+ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02)
+ 0002:01:00.3 Ethernet controller: Intel Corporation \
+ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02)
+
+On the P8 PowerNV platform, the IO path is frozen when shutting down the
+devices, meaning the memory registers are inaccessible. That is why
+the devices can't be put into a quiet state before removing them.
+This fixes the issue by enabling the IO path prior to putting the devices
+into a quiet state.
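+
+In eeh_slot_error_detail() this amounts to the following (a condensed view
+of the hunk below):
+
+  if (!(pe->type & EEH_PE_PHB)) {
+          /* a PE hit by a permanent error is about to be removed; its
+           * drivers need a working IO path to quiesce the hardware
+           */
+          if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+              severity == EEH_LOG_PERM)
+                  eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+          ...
+  }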
+
+Reported-by: Pridhiviraj Paidipeddi <ppaidipe@linux.vnet.ibm.com>
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Acked-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/eeh.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe
+ *
+ * For pHyp, we have to enable IO for log retrieval. Otherwise,
+ * 0xFF's is always returned from PCI config space.
++ *
++ * When the @severity is EEH_LOG_PERM, the PE is going to be
++ * removed. Prior to that, the drivers for devices included in
++ * the PE will be closed. The drivers rely on working IO path
++ * to bring the devices to quiet state. Otherwise, PCI traffic
++ * from those devices after they are removed is likely to cause
++ * another unexpected EEH error.
+ */
+ if (!(pe->type & EEH_PE_PHB)) {
+- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
++ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
++ severity == EEH_LOG_PERM)
+ eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+
+ /*
net-phy-dp83848-add-dp83620-phy-support.patch
perf-x86-intel-handle-exclusive-threadid-correctly-on-cpu-hotplug.patch
net-korina-fix-napi-versus-resources-freeing.patch
+powerpc-eeh-enable-io-path-on-permanent-error.patch
+net-ethtool-initialize-buffer-when-querying-device-channel-settings.patch
+xen-netback-fix-memory-leaks-on-xenbus-disconnect.patch
+xen-netback-protect-resource-cleaning-on-xenbus-disconnect.patch
+bnxt_en-fix-uninitialized-variable-bug-in-tpa-code-path.patch
+bpf-don-t-trigger-oom-killer-under-pressure-with-map-alloc.patch
+objtool-fix-iret-s-opcode.patch
+gianfar-do-not-reuse-pages-from-emergency-reserve.patch
+btrfs-fix-deadlock-between-direct-io-and-fast-fsync.patch
+btrfs-fix-truncate-down-when-no_holes-feature-is-enabled.patch
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+Date: Tue, 17 Jan 2017 20:49:37 +0000
+Subject: xen-netback: fix memory leaks on XenBus disconnect
+
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+
+
+[ Upstream commit 9a6cdf52b85ea5fb21d2bb31e4a7bc61b79923a7 ]
+
+Eliminate memory leaks introduced several years ago by cleaning up the
+queue resources that are allocated on the XenBus connection event, namely
+the queue structure array and the pages used for the IO rings.
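+
+In sketch form, backend_disconnect() now tears down what the connect path
+allocated (mirroring the first hunk below):
+
+  for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+          xenvif_deinit_queue(&be->vif->queues[queue_index]);   /* per-queue resources */
+
+  vfree(be->vif->queues);        /* the queue structure array itself */
+  be->vif->num_queues = 0;
+  be->vif->queues = NULL;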
+
+Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/xenbus.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -493,11 +493,20 @@ static int backend_create_xenvif(struct
+ static void backend_disconnect(struct backend_info *be)
+ {
+ if (be->vif) {
++ unsigned int queue_index;
++
+ xen_unregister_watchers(be->vif);
+ #ifdef CONFIG_DEBUG_FS
+ xenvif_debugfs_delif(be->vif);
+ #endif /* CONFIG_DEBUG_FS */
+ xenvif_disconnect_data(be->vif);
++ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
++ xenvif_deinit_queue(&be->vif->queues[queue_index]);
++
++ vfree(be->vif->queues);
++ be->vif->num_queues = 0;
++ be->vif->queues = NULL;
++
+ xenvif_disconnect_ctrl(be->vif);
+ }
+ }
+@@ -1040,6 +1049,8 @@ static void connect(struct backend_info
+ err:
+ if (be->vif->num_queues > 0)
+ xenvif_disconnect_data(be->vif); /* Clean up existing queues */
++ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
++ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+ vfree(be->vif->queues);
+ be->vif->queues = NULL;
+ be->vif->num_queues = 0;
--- /dev/null
+From foo@baz Mon Jul 3 11:54:13 CEST 2017
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+Date: Tue, 17 Jan 2017 20:49:38 +0000
+Subject: xen-netback: protect resource cleaning on XenBus disconnect
+
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+
+
+[ Upstream commit f16f1df65f1cf139ff9e9f84661e6573d6bb27fc ]
+
+vif->lock is used to protect statistics gathering agents from using the
+queue structure during cleaning.
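+
+Schematically, both sides now look like this (condensed from the hunks
+below; error paths and the remaining counters omitted):
+
+  /* statistics gathering, xenvif_get_stats() */
+  spin_lock(&vif->lock);
+  if (vif->queues == NULL)
+          goto out;
+  for (index = 0; index < vif->num_queues; ++index)
+          rx_bytes += vif->queues[index].stats.rx_bytes;   /* etc. */
+  out:
+  spin_unlock(&vif->lock);
+
+  /* queue cleanup, backend_disconnect() */
+  spin_lock(&be->vif->lock);
+  vfree(be->vif->queues);
+  be->vif->num_queues = 0;
+  be->vif->queues = NULL;
+  spin_unlock(&be->vif->lock);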
+
+Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/interface.c | 6 ++++--
+ drivers/net/xen-netback/xenbus.c | 2 ++
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_g
+ {
+ struct xenvif *vif = netdev_priv(dev);
+ struct xenvif_queue *queue = NULL;
+- unsigned int num_queues = vif->num_queues;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long tx_bytes = 0;
+ unsigned long tx_packets = 0;
+ unsigned int index;
+
++ spin_lock(&vif->lock);
+ if (vif->queues == NULL)
+ goto out;
+
+ /* Aggregate tx and rx stats from each queue */
+- for (index = 0; index < num_queues; ++index) {
++ for (index = 0; index < vif->num_queues; ++index) {
+ queue = &vif->queues[index];
+ rx_bytes += queue->stats.rx_bytes;
+ rx_packets += queue->stats.rx_packets;
+@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_g
+ }
+
+ out:
++ spin_unlock(&vif->lock);
++
+ vif->dev->stats.rx_bytes = rx_bytes;
+ vif->dev->stats.rx_packets = rx_packets;
+ vif->dev->stats.tx_bytes = tx_bytes;
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -503,9 +503,11 @@ static void backend_disconnect(struct ba
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
++ spin_lock(&be->vif->lock);
+ vfree(be->vif->queues);
+ be->vif->num_queues = 0;
+ be->vif->queues = NULL;
++ spin_unlock(&be->vif->lock);
+
+ xenvif_disconnect_ctrl(be->vif);
+ }