--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: "Toke Høiland-Jørgensen" <toke@toke.dk>
+Date: Mon, 2 Jul 2018 22:52:20 +0200
+Subject: gen_stats: Fix netlink stats dumping in the presence of padding
+
+From: "Toke Høiland-Jørgensen" <toke@toke.dk>
+
+[ Upstream commit d5a672ac9f48f81b20b1cad1d9ed7bbf4e418d4c ]
+
+The gen_stats facility will add a header for the toplevel nlattr of type
+TCA_STATS2 that contains all stats added by qdisc callbacks. A reference
+to this header is stored in the gnet_dump struct, and when all the
+per-qdisc callbacks have finished adding their stats, the length of the
+containing header will be adjusted to the right value.
+
+However, on architectures that need padding (i.e., that don't set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), the padding nlattr is added
+before the stats, which means that the stored pointer will point to the
+padding, and so when the header is fixed up, the result is just a very
+big padding nlattr. Because most qdiscs also supply the legacy TCA_STATS
+struct, this problem has been mostly invisible, but we exposed it with
+the netlink attribute-based statistics in CAKE.
+
+Fix the issue by fixing up the stored pointer if it points to a padding
+nlattr.
+
+Tested-by: Pete Heist <pete@heistp.net>
+Tested-by: Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
+Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/gen_stats.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_b
+ d->lock = lock;
+ spin_lock_bh(lock);
+ }
+- if (d->tail)
+- return gnet_stats_copy(d, type, NULL, 0, padattr);
++ if (d->tail) {
++ int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
++
++ /* The initial attribute added in gnet_stats_copy() may be
++ * preceded by a padding attribute, in which case d->tail will
++ * end up pointing at the padding instead of the real attribute.
++ * Fix this so gnet_stats_finish_copy() adjusts the length of
++ * the right attribute.
++ */
++ if (ret == 0 && d->tail->nla_type == padattr)
++ d->tail = (struct nlattr *)((char *)d->tail +
++ NLA_ALIGN(d->tail->nla_len));
++ return ret;
++ }
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Tue, 17 Jul 2018 17:11:13 +0000
+Subject: hv_netvsc: Fix napi reschedule while receive completion is busy
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+[ Upstream commit 6b81b193b83e87da1ea13217d684b54fccf8ee8a ]
+
+If out ring is full temporarily and receive completion cannot go out,
+we may still need to reschedule napi if certain conditions are met.
+Otherwise the napi poll might be stopped forever, and cause network
+disconnect.
+
+Fixes: 7426b1a51803 ("netvsc: optimize receive completions")
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/netvsc.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -1291,6 +1291,7 @@ int netvsc_poll(struct napi_struct *napi
+ struct hv_device *device = netvsc_channel_to_device(channel);
+ struct net_device *ndev = hv_get_drvdata(device);
+ int work_done = 0;
++ int ret;
+
+ /* If starting a new interval */
+ if (!nvchan->desc)
+@@ -1302,16 +1303,18 @@ int netvsc_poll(struct napi_struct *napi
+ nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
+ }
+
+- /* If send of pending receive completions suceeded
+- * and did not exhaust NAPI budget this time
+- * and not doing busy poll
++ /* Send any pending receive completions */
++ ret = send_recv_completions(ndev, net_device, nvchan);
++
++ /* If it did not exhaust NAPI budget this time
++ * and not doing busy poll
+ * then re-enable host interrupts
+- * and reschedule if ring is not empty.
++ * and reschedule if ring is not empty
++ * or sending receive completion failed.
+ */
+- if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
+- work_done < budget &&
++ if (work_done < budget &&
+ napi_complete_done(napi, work_done) &&
+- hv_end_read(&channel->inbound) &&
++ (ret || hv_end_read(&channel->inbound)) &&
+ napi_schedule_prep(napi)) {
+ hv_begin_read(&channel->inbound);
+ __napi_schedule(napi);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Tyler Hicks <tyhicks@canonical.com>
+Date: Thu, 5 Jul 2018 18:49:23 +0000
+Subject: ipv4: Return EINVAL when ping_group_range sysctl doesn't map to user ns
+
+From: Tyler Hicks <tyhicks@canonical.com>
+
+[ Upstream commit 70ba5b6db96ff7324b8cfc87e0d0383cf59c9677 ]
+
+The low and high values of the net.ipv4.ping_group_range sysctl were
+being silently forced to the default disabled state when a write to the
+sysctl contained GIDs that didn't map to the associated user namespace.
+Confusingly, the sysctl's write operation would return success and then
+a subsequent read of the sysctl would indicate that the low and high
+values are the overflowgid.
+
+This patch changes the behavior by clearly returning an error when the
+sysctl write operation receives a GID range that doesn't map to the
+associated user namespace. In such a situation, the previous value of
+the sysctl is preserved and that range will be returned in a subsequent
+read of the sysctl.
+
+Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/sysctl_net_ipv4.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -187,8 +187,9 @@ static int ipv4_ping_group_range(struct
+ if (write && ret == 0) {
+ low = make_kgid(user_ns, urange[0]);
+ high = make_kgid(user_ns, urange[1]);
+- if (!gid_valid(low) || !gid_valid(high) ||
+- (urange[1] < urange[0]) || gid_lt(high, low)) {
++ if (!gid_valid(low) || !gid_valid(high))
++ return -EINVAL;
++ if (urange[1] < urange[0] || gid_lt(high, low)) {
+ low = make_kgid(&init_user_ns, 1);
+ high = make_kgid(&init_user_ns, 0);
+ }
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 17 Jul 2018 17:12:39 +0100
+Subject: ipv6: fix useless rol32 call on hash
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit 169dc027fb02492ea37a0575db6a658cf922b854 ]
+
+The rol32 call is currently rotating hash but the rol'd value is
+being discarded. I believe the current code is incorrect and hash
+should be assigned the rotated value returned from rol32.
+
+Thanks to David Lebrun for spotting this.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ipv6.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -829,7 +829,7 @@ static inline __be32 ip6_make_flowlabel(
+ * to minimize possbility that any useful information to an
+ * attacker is leaked. Only lower 20 bits are relevant.
+ */
+- rol32(hash, 16);
++ hash = rol32(hash, 16);
+
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 18 Jul 2018 10:48:56 +0200
+Subject: ipv6: ila: select CONFIG_DST_CACHE
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 83ed7d1fe2d2d4a11b30660dec20168bb473d9c1 ]
+
+My randconfig builds came across an old missing dependency for ILA:
+
+ERROR: "dst_cache_set_ip6" [net/ipv6/ila/ila.ko] undefined!
+ERROR: "dst_cache_get" [net/ipv6/ila/ila.ko] undefined!
+ERROR: "dst_cache_init" [net/ipv6/ila/ila.ko] undefined!
+ERROR: "dst_cache_destroy" [net/ipv6/ila/ila.ko] undefined!
+
+We almost never run into this by accident because randconfig builds
+end up selecting DST_CACHE from some other tunnel protocol, and this
+one appears to be the only one missing the explicit 'select'.
+
+>From all I can tell, this problem first appeared in linux-4.9
+when dst_cache support got added to ILA.
+
+Fixes: 79ff2fc31e0f ("ila: Cache a route to translated address")
+Cc: Tom Herbert <tom@herbertland.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -108,6 +108,7 @@ config IPV6_MIP6
+ config IPV6_ILA
+ tristate "IPv6: Identifier Locator Addressing (ILA)"
+ depends on NETFILTER
++ select DST_CACHE
+ select LWTUNNEL
+ ---help---
+ Support for IPv6 Identifier Locator Addressing (ILA).
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Fri, 13 Jul 2018 17:21:42 +0200
+Subject: ipv6: make DAD fail with enhanced DAD when nonce length differs
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit e66515999b627368892ccc9b3a13a506f2ea1357 ]
+
+Commit adc176c54722 ("ipv6 addrconf: Implemented enhanced DAD (RFC7527)")
+added enhanced DAD with a nonce length of 6 bytes. However, RFC7527
+doesn't specify the length of the nonce, other than being 6 + 8*k bytes,
+with integer k >= 0 (RFC3971 5.3.2). The current implementation simply
+assumes that the nonce will always be 6 bytes, but other systems are
+free to choose different sizes.
+
+If another system sends a nonce of different length but with the same 6
+bytes prefix, it shouldn't be considered as the same nonce. Thus, check
+that the length of the received nonce is the same as the length we sent.
+
+Ugly scapy test script running on veth0:
+
+def loop():
+ pkt=sniff(iface="veth0", filter="icmp6", count=1)
+ pkt = pkt[0]
+ b = bytearray(pkt[Raw].load)
+ b[1] += 1
+ b += b'\xde\xad\xbe\xef\xde\xad\xbe\xef'
+ pkt[Raw].load = bytes(b)
+ pkt[IPv6].plen += 8
+ # fixup checksum after modifying the payload
+ pkt[IPv6].payload.cksum -= 0x3b44
+ if pkt[IPv6].payload.cksum < 0:
+ pkt[IPv6].payload.cksum += 0xffff
+ sendp(pkt, iface="veth0")
+
+This should result in DAD failure for any address added to veth0's peer,
+but is currently ignored.
+
+Fixes: adc176c54722 ("ipv6 addrconf: Implemented enhanced DAD (RFC7527)")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ndisc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff
+ return;
+ }
+ }
+- if (ndopts.nd_opts_nonce)
++ if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
+ memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
+
+ inc = ipv6_addr_is_multicast(daddr);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Davidlohr Bueso <dave@stgolabs.net>
+Date: Mon, 16 Jul 2018 13:26:13 -0700
+Subject: lib/rhashtable: consider param->min_size when setting initial table size
+
+From: Davidlohr Bueso <dave@stgolabs.net>
+
+[ Upstream commit 107d01f5ba10f4162c38109496607eb197059064 ]
+
+rhashtable_init() currently does not take into account the user-passed
+min_size parameter unless param->nelem_hint is set as well. As such,
+the default size (number of buckets) will always be HASH_DEFAULT_SIZE
+even if the smallest allowed size is larger than that. Remediate this
+by unconditionally calling into rounded_hashtable_size() and handling
+things accordingly.
+
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/rhashtable.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -923,8 +923,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+
+ static size_t rounded_hashtable_size(const struct rhashtable_params *params)
+ {
+- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+- (unsigned long)params->min_size);
++ size_t retsize;
++
++ if (params->nelem_hint)
++ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
++ (unsigned long)params->min_size);
++ else
++ retsize = max(HASH_DEFAULT_SIZE,
++ (unsigned long)params->min_size);
++
++ return retsize;
+ }
+
+ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+@@ -981,8 +989,6 @@ int rhashtable_init(struct rhashtable *h
+ struct bucket_table *tbl;
+ size_t size;
+
+- size = HASH_DEFAULT_SIZE;
+-
+ if ((!params->key_len && !params->obj_hashfn) ||
+ (params->obj_hashfn && !params->obj_cmpfn))
+ return -EINVAL;
+@@ -1009,8 +1015,7 @@ int rhashtable_init(struct rhashtable *h
+
+ ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+
+- if (params->nelem_hint)
+- size = rounded_hashtable_size(&ht->p);
++ size = rounded_hashtable_size(&ht->p);
+
+ if (params->locks_mul)
+ ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Igor Russkikh <igor.russkikh@aquantia.com>
+Date: Thu, 5 Jul 2018 17:01:09 +0300
+Subject: net: aquantia: vlan unicast address list correct handling
+
+From: Igor Russkikh <igor.russkikh@aquantia.com>
+
+[ Upstream commit 94b3b542303f3055c326df74ef144a8a790d7d7f ]
+
+Setting up macvlan/macvtap networks over atlantic NIC results
+in no traffic over these networks because ndo_set_rx_mode did
+not list UC MACs as registered in the unicast filter.
+
+Here we fix that taking into account maximum number of UC
+filters supported by hardware. If more than MAX addresses were
+registered, we just enable promisc and/or allmulti to pass
+the traffic in.
+
+We also remove MULTICAST_ADDRESS_MAX constant from aq_cfg since
+that's not a configurable parameter at all.
+
+Fixes: b21f502 ("net:ethernet:aquantia: Fix for multicast filter handling.")
+Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
+ drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 4 -
+ drivers/net/ethernet/aquantia/atlantic/aq_main.c | 11 ---
+ drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 47 ++++++++------
+ drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
+ drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
+ drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4 -
+ 7 files changed, 36 insertions(+), 36 deletions(-)
+
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+@@ -63,8 +63,6 @@
+
+ #define AQ_CFG_NAPI_WEIGHT 64U
+
+-#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
+-
+ /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+ #define AQ_NIC_FC_OFF 0U
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+@@ -98,6 +98,8 @@ struct aq_stats_s {
+ #define AQ_HW_MEDIA_TYPE_TP 1U
+ #define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
++#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
++
+ struct aq_hw_s {
+ atomic_t flags;
+ u8 rbl_enabled:1;
+@@ -177,7 +179,7 @@ struct aq_hw_ops {
+ unsigned int packet_filter);
+
+ int (*hw_multicast_list_set)(struct aq_hw_s *self,
+- u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
++ u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count);
+
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -135,17 +135,10 @@ err_exit:
+ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+ {
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+- int err = 0;
+
+- err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
+- if (err < 0)
+- return;
++ aq_nic_set_packet_filter(aq_nic, ndev->flags);
+
+- if (netdev_mc_count(ndev)) {
+- err = aq_nic_set_multicast_list(aq_nic, ndev);
+- if (err < 0)
+- return;
+- }
++ aq_nic_set_multicast_list(aq_nic, ndev);
+ }
+
+ static const struct net_device_ops aq_ndev_ops = {
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -563,34 +563,41 @@ err_exit:
+
+ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+ {
++ unsigned int packet_filter = self->packet_filter;
+ struct netdev_hw_addr *ha = NULL;
+ unsigned int i = 0U;
+
+- self->mc_list.count = 0U;
+-
+- netdev_for_each_mc_addr(ha, ndev) {
+- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+- ++self->mc_list.count;
++ self->mc_list.count = 0;
++ if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
++ packet_filter |= IFF_PROMISC;
++ } else {
++ netdev_for_each_uc_addr(ha, ndev) {
++ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+- break;
++ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
++ break;
++ }
+ }
+
+- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+- /* Number of filters is too big: atlantic does not support this.
+- * Force all multi filter to support this.
+- * With this we disable all UC filters and setup "all pass"
+- * multicast mask
+- */
+- self->packet_filter |= IFF_ALLMULTI;
+- self->aq_nic_cfg.mc_list_count = 0;
+- return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+- self->packet_filter);
++ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
++ packet_filter |= IFF_ALLMULTI;
+ } else {
+- return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+- self->mc_list.ar,
+- self->mc_list.count);
++ netdev_for_each_mc_addr(ha, ndev) {
++ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++
++ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
++ break;
++ }
++ }
++
++ if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
++ packet_filter |= IFF_MULTICAST;
++ self->mc_list.count = i;
++ self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
++ self->mc_list.ar,
++ self->mc_list.count);
+ }
++ return aq_nic_set_packet_filter(self, packet_filter);
+ }
+
+ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+@@ -75,7 +75,7 @@ struct aq_nic_s {
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
++ u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+
+ struct pci_dev *pdev;
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_se
+
+ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+- [AQ_CFG_MULTICAST_ADDRESS_MAX]
++ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+ {
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_se
+
+ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+- [AQ_CFG_MULTICAST_ADDRESS_MAX]
++ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+ {
+@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_s
+
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+- HW_ATL_B0_MAC_MIN + i);
++ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Lorenzo Colitti <lorenzo@google.com>
+Date: Sat, 7 Jul 2018 16:31:40 +0900
+Subject: net: diag: Don't double-free TCP_NEW_SYN_RECV sockets in tcp_abort
+
+From: Lorenzo Colitti <lorenzo@google.com>
+
+[ Upstream commit acc2cf4e37174646a24cba42fa53c668b2338d4e ]
+
+When tcp_diag_destroy closes a TCP_NEW_SYN_RECV socket, it first
+frees it by calling inet_csk_reqsk_queue_drop_and_put in
+tcp_abort, and then frees it again by calling sock_gen_put.
+
+Since tcp_abort only has one caller, and all the other codepaths
+in tcp_abort don't free the socket, just remove the free in that
+function.
+
+Cc: David Ahern <dsa@cumulusnetworks.com>
+Tested: passes Android sock_diag_test.py, which exercises this codepath
+Fixes: d7226c7a4dd1 ("net: diag: Fix refcnt leak in error path destroying socket")
+Signed-off-by: Lorenzo Colitti <lorenzo@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsa@cumulusnetworks.com>
+Tested-by: David Ahern <dsa@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3524,8 +3524,7 @@ int tcp_abort(struct sock *sk, int err)
+ struct request_sock *req = inet_reqsk(sk);
+
+ local_bh_disable();
+- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+- req);
++ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+ local_bh_enable();
+ return 0;
+ }
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Wed, 11 Jul 2018 14:39:42 +0200
+Subject: net: Don't copy pfmemalloc flag in __copy_skb_header()
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit 8b7008620b8452728cadead460a36f64ed78c460 ]
+
+The pfmemalloc flag indicates that the skb was allocated from
+the PFMEMALLOC reserves, and the flag is currently copied on skb
+copy and clone.
+
+However, an skb copied from an skb flagged with pfmemalloc
+wasn't necessarily allocated from PFMEMALLOC reserves, and on
+the other hand an skb allocated that way might be copied from an
+skb that wasn't.
+
+So we should not copy the flag on skb copy, and rather decide
+whether to allow an skb to be associated with sockets unrelated
+to page reclaim depending only on how it was allocated.
+
+Move the pfmemalloc flag before headers_start[0] using an
+existing 1-bit hole, so that __copy_skb_header() doesn't copy
+it.
+
+When cloning, we'll now take care of this flag explicitly,
+contravening to the warning comment of __skb_clone().
+
+While at it, restore the newline usage introduced by commit
+b19372273164 ("net: reorganize sk_buff for faster
+__copy_skb_header()") to visually separate bytes used in
+bitfields after headers_start[0], that was gone after commit
+a9e419dc7be6 ("netfilter: merge ctinfo into nfct pointer storage
+area"), and describe the pfmemalloc flag in the kernel-doc
+structure comment.
+
+This doesn't change the size of sk_buff or cacheline boundaries,
+but consolidates the 15 bits hole before tc_index into a 2 bytes
+hole before csum, that could now be filled more easily.
+
+Reported-by: Patrick Talbert <ptalbert@redhat.com>
+Fixes: c93bdd0e03e8 ("netvm: allow skb allocation to use PFMEMALLOC reserves")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 10 +++++-----
+ net/core/skbuff.c | 2 ++
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -628,6 +628,7 @@ typedef unsigned char *sk_buff_data_t;
+ * @hash: the packet hash
+ * @queue_mapping: Queue mapping for multiqueue devices
+ * @xmit_more: More SKBs are pending for this queue
++ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @ndisc_nodetype: router type (from link layer)
+ * @ooo_okay: allow the mapping of a socket to a queue to be changed
+ * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
+@@ -733,7 +734,7 @@ struct sk_buff {
+ peeked:1,
+ head_frag:1,
+ xmit_more:1,
+- __unused:1; /* one bit hole */
++ pfmemalloc:1;
+
+ /* fields enclosed in headers_start/headers_end are copied
+ * using a single memcpy() in __copy_skb_header()
+@@ -752,31 +753,30 @@ struct sk_buff {
+
+ __u8 __pkt_type_offset[0];
+ __u8 pkt_type:3;
+- __u8 pfmemalloc:1;
+ __u8 ignore_df:1;
+-
+ __u8 nf_trace:1;
+ __u8 ip_summed:2;
+ __u8 ooo_okay:1;
++
+ __u8 l4_hash:1;
+ __u8 sw_hash:1;
+ __u8 wifi_acked_valid:1;
+ __u8 wifi_acked:1;
+-
+ __u8 no_fcs:1;
+ /* Indicates the inner headers are valid in the skbuff. */
+ __u8 encapsulation:1;
+ __u8 encap_hdr_csum:1;
+ __u8 csum_valid:1;
++
+ __u8 csum_complete_sw:1;
+ __u8 csum_level:2;
+ __u8 csum_not_inet:1;
+-
+ __u8 dst_pending_confirm:1;
+ #ifdef CONFIG_IPV6_NDISC_NODETYPE
+ __u8 ndisc_nodetype:2;
+ #endif
+ __u8 ipvs_property:1;
++
+ __u8 inner_protocol_type:1;
+ __u8 remcsum_offload:1;
+ #ifdef CONFIG_NET_SWITCHDEV
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -858,6 +858,8 @@ static struct sk_buff *__skb_clone(struc
+ n->cloned = 1;
+ n->nohdr = 0;
+ n->peeked = 0;
++ if (skb->pfmemalloc)
++ n->pfmemalloc = 1;
+ n->destructor = NULL;
+ C(tail);
+ C(end);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Prashant Bhole <bhole_prashant_q7@lab.ntt.co.jp>
+Date: Fri, 13 Jul 2018 14:40:50 +0900
+Subject: net: ip6_gre: get ipv6hdr after skb_cow_head()
+
+From: Prashant Bhole <bhole_prashant_q7@lab.ntt.co.jp>
+
+[ Upstream commit b7ed879425be371905d856410d19e9a42a62bcf3 ]
+
+A KASAN:use-after-free bug was found related to ip6-erspan
+while running selftests/net/ip6_gre_headroom.sh
+
+It happens because of following sequence:
+- ipv6hdr pointer is obtained from skb
+- skb_cow_head() is called, skb->head memory is reallocated
+- old data is accessed using ipv6hdr pointer
+
+skb_cow_head() call was added in e41c7c68ea77 ("ip6erspan: make sure
+enough headroom at xmit."), but looking at the history there was a
+chance of similar bug because gre_handle_offloads() and pskb_trim()
+can also reallocate skb->head memory. Fixes tag points to commit
+which introduced possibility of this bug.
+
+This patch moves ipv6hdr pointer assignment after skb_cow_head() call.
+
+Fixes: 5a963eb61b7c ("ip6_gre: Add ERSPAN native tunnel support")
+Signed-off-by: Prashant Bhole <bhole_prashant_q7@lab.ntt.co.jp>
+Reviewed-by: Greg Rose <gvrose8192@gmail.com>
+Acked-by: William Tu <u9012063@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_gre.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -927,7 +927,6 @@ tx_err:
+ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+ {
+- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device_stats *stats;
+@@ -998,6 +997,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ goto tx_err;
+ }
+ } else {
++ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: David Ahern <dsahern@gmail.com>
+Date: Sat, 7 Jul 2018 16:15:26 -0700
+Subject: net/ipv4: Set oif in fib_compute_spec_dst
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit e7372197e15856ec4ee66b668020a662994db103 ]
+
+Xin reported that icmp replies may not use the address on the device the
+echo request is received if the destination address is broadcast. Instead
+a route lookup is done without considering VRF context. Fix by setting
+oif in flow struct to the master device if it is enslaved. That directs
+the lookup to the VRF table. If the device is not enslaved, oif is still
+0 so no affect.
+
+Fixes: cd2fbe1b6b51 ("net: Use VRF device index for lookups on RX")
+Reported-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/fib_frontend.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_bu
+ if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+ struct flowi4 fl4 = {
+ .flowi4_iif = LOOPBACK_IFINDEX,
++ .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
+ .daddr = ip_hdr(skb)->saddr,
+ .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .flowi4_scope = scope,
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: David Ahern <dsahern@gmail.com>
+Date: Sun, 15 Jul 2018 09:35:19 -0700
+Subject: net/ipv6: Do not allow device only routes via the multipath API
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit b5d2d75e079a918be686957b1a8d2f6c5cc95a0a ]
+
+Eric reported that reverting the patch that fixed and simplified IPv6
+multipath routes means reverting back to invalid userspace notifications.
+eg.,
+$ ip -6 route add 2001:db8:1::/64 nexthop dev eth0 nexthop dev eth1
+
+only generates a single notification:
+2001:db8:1::/64 dev eth0 metric 1024 pref medium
+
+While working on a fix for this problem I found another case that is just
+broken completely - a multipath route with a gateway followed by device
+followed by gateway:
+ $ ip -6 ro add 2001:db8:103::/64
+ nexthop via 2001:db8:1::64
+ nexthop dev dummy2
+ nexthop via 2001:db8:3::64
+
+In this case the device only route is dropped completely - no notification
+to userspace but no addition to the FIB either:
+
+$ ip -6 ro ls
+2001:db8:1::/64 dev dummy1 proto kernel metric 256 pref medium
+2001:db8:2::/64 dev dummy2 proto kernel metric 256 pref medium
+2001:db8:3::/64 dev dummy3 proto kernel metric 256 pref medium
+2001:db8:103::/64 metric 1024
+ nexthop via 2001:db8:1::64 dev dummy1 weight 1
+ nexthop via 2001:db8:3::64 dev dummy3 weight 1 pref medium
+fe80::/64 dev dummy1 proto kernel metric 256 pref medium
+fe80::/64 dev dummy2 proto kernel metric 256 pref medium
+fe80::/64 dev dummy3 proto kernel metric 256 pref medium
+
+Really, IPv6 multipath is just FUBAR'ed beyond repair when it comes to
+device only routes, so do not allow it all.
+
+This change will break any scripts relying on the mpath api for insert,
+but I don't see any other way to handle the permutations. Besides, since
+the routes are added to the FIB as standalone (non-multipath) routes the
+kernel is not doing what the user requested, so it might as well tell the
+user that.
+
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4274,6 +4274,13 @@ static int ip6_route_multipath_add(struc
+ err_nh = nh;
+ goto add_errout;
+ }
++ if (!rt6_qualify_for_ecmp(rt)) {
++ err = -EINVAL;
++ NL_SET_ERR_MSG(extack,
++ "Device only routes can not be added for IPv6 using the multipath API.");
++ dst_release_immediate(&rt->dst);
++ goto cleanup;
++ }
+
+ /* Because each route is added like a single route we remove
+ * these flags after the first nexthop: if there is a collision,
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Saeed Mahameed <saeedm@mellanox.com>
+Date: Sun, 15 Jul 2018 13:54:39 +0300
+Subject: net/mlx4_en: Don't reuse RX page when XDP is set
+
+From: Saeed Mahameed <saeedm@mellanox.com>
+
+[ Upstream commit 432e629e56432064761be63bcd5e263c0920430d ]
+
+When a new rx packet arrives, the rx path will decide whether to reuse
+the remainder of the page or not according to one of the below conditions:
+1. frag_info->frag_stride == PAGE_SIZE / 2
+2. frags->page_offset + frag_info->frag_size > PAGE_SIZE;
+
+The first condition is not met when XDP is set.
+For XDP, page_offset is always set to priv->rx_headroom which is
+XDP_PACKET_HEADROOM and frag_info->frag_size is around mtu size + some
+padding, still the 2nd release condition will hold since
+XDP_PACKET_HEADROOM + 1536 < PAGE_SIZE, as a result the page will not
+be released and will be _wrongly_ reused for next free rx descriptor.
+
+In XDP there is an assumption to have a page per packet and reuse can
+break such assumption and might cause packet data corruptions.
+
+Fix this by adding an extra condition (!priv->rx_headroom) to the 2nd
+case to avoid page reuse when XDP is set, since rx_headroom is set to 0
+for non XDP setup and set to XDP_PACKET_HEADROOM for XDP setup.
+
+No additional cache line is required for the new condition.
+
+Fixes: 34db548bfb95 ("mlx4: add page recycling in receive path")
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Suggested-by: Martin KaFai Lau <kafai@fb.com>
+CC: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(stru
+ {
+ const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+ unsigned int truesize = 0;
++ bool release = true;
+ int nr, frag_size;
+ struct page *page;
+ dma_addr_t dma;
+- bool release;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0;; frags++) {
+@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(stru
+ release = page_count(page) != 1 ||
+ page_is_pfmemalloc(page) ||
+ page_to_nid(page) != numa_mem_id();
+- } else {
++ } else if (!priv->rx_headroom) {
++ /* rx_headroom for non XDP setup is always 0.
++ * When XDP is set, the above condition will
++ * guarantee page is always released.
++ */
+ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
+
+ frags->page_offset += sz_align;
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Tue, 3 Jul 2018 22:34:54 +0200
+Subject: net: phy: fix flag masking in __set_phy_supported
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit df8ed346d4a806a6eef2db5924285e839604b3f9 ]
+
+Currently also the pause flags are removed from phydev->supported because
+they're not included in PHY_DEFAULT_FEATURES. I don't think this is
+intended, especially when considering that this function can be called
+via phy_set_max_speed() anywhere in a driver. Change the masking to mask
+out only the values we're going to change. In addition remove the
+misleading comment, job of this small function is just to adjust the
+supported and advertised speeds.
+
+Fixes: f3a6bd393c2c ("phylib: Add phy_set_max_speed helper")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy_device.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1720,11 +1720,8 @@ EXPORT_SYMBOL(genphy_loopback);
+
+ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+ {
+- /* The default values for phydev->supported are provided by the PHY
+- * driver "features" member, we want to reset to sane defaults first
+- * before supporting higher speeds.
+- */
+- phydev->supported &= PHY_DEFAULT_FEATURES;
++ phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
++ PHY_10BT_FEATURES);
+
+ switch (max_speed) {
+ default:
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 11 Jul 2018 02:47:58 -0700
+Subject: net: systemport: Fix CRC forwarding check for SYSTEMPORT Lite
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 9e3bff923913729d76d87f0015848ee7b8ff7083 ]
+
+SYSTEMPORT Lite reversed the logic compared to SYSTEMPORT, the
+GIB_FCS_STRIP bit is set when the Ethernet FCS is stripped, and that bit
+is not set by default. Fix the logic such that we properly check whether
+that bit is set or not and we don't forward an extra 4 bytes to the
+network stack.
+
+Fixes: 44a4524c54af ("net: systemport: Add support for SYSTEMPORT Lite")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bcmsysport.c | 4 ++--
+ drivers/net/ethernet/broadcom/bcmsysport.h | 3 ++-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_d
+ if (!priv->is_lite)
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ else
+- priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+- GIB_FCS_STRIP);
++ priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
++ GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
+
+ phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+--- a/drivers/net/ethernet/broadcom/bcmsysport.h
++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
+@@ -278,7 +278,8 @@ struct bcm_rsb {
+ #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
+ #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
+ #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
+-#define GIB_FCS_STRIP (1 << 6)
++#define GIB_FCS_STRIP_SHIFT 6
++#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
+ #define GIB_LCL_LOOP_EN (1 << 7)
+ #define GIB_LCL_LOOP_TXEN (1 << 8)
+ #define GIB_RMT_LOOP_EN (1 << 9)
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Alexander Couzens <lynxis@fe80.eu>
+Date: Tue, 17 Jul 2018 13:17:09 +0200
+Subject: net: usb: asix: replace mii_nway_restart in resume path
+
+From: Alexander Couzens <lynxis@fe80.eu>
+
+[ Upstream commit 5c968f48021a9b3faa61ac2543cfab32461c0e05 ]
+
+mii_nway_restart is not pm aware which results in a rtnl deadlock.
+Implement mii_nway_restart manually by setting BMCR_ANRESTART if
+BMCR_ANENABLE is set.
+
+To reproduce:
+* plug an asix based usb network interface
+* wait until the device enters PM (~5 sec)
+* `ip link set eth1 up` will never return
+
+Fixes: d9fe64e51114 ("net: asix: Add in_pm parameter")
+Signed-off-by: Alexander Couzens <lynxis@fe80.eu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/asix_devices.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct u
+ priv->presvd_phy_advertise);
+
+ /* Restore BMCR */
++ if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
++ priv->presvd_phy_bmcr |= BMCR_ANRESTART;
++
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
+ priv->presvd_phy_bmcr);
+
+- mii_nway_restart(&dev->mii);
+ priv->presvd_phy_advertise = 0;
+ priv->presvd_phy_bmcr = 0;
+ }
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Tue, 17 Jul 2018 20:17:33 -0500
+Subject: ptp: fix missing break in switch
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit 9ba8376ce1e2cbf4ce44f7e4bee1d0648e10d594 ]
+
+It seems that a *break* is missing in order to avoid falling through
+to the default case. Otherwise, checking *chan* makes no sense.
+
+Fixes: 72df7a7244c0 ("ptp: Allow reassigning calibration pin function")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Acked-by: Richard Cochran <richardcochran@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ptp/ptp_chardev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *pt
+ case PTP_PF_PHYSYNC:
+ if (chan != 0)
+ return -EINVAL;
++ break;
+ default:
+ return -EINVAL;
+ }
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Matevz Vucnik <vucnikm@gmail.com>
+Date: Wed, 4 Jul 2018 18:12:48 +0200
+Subject: qmi_wwan: add support for Quectel EG91
+
+From: Matevz Vucnik <vucnikm@gmail.com>
+
+[ Upstream commit 38cd58ed9c4e389799b507bcffe02a7a7a180b33 ]
+
+This adds the USB id of LTE modem Quectel EG91. It requires the
+same quirk as other Quectel modems to make it work.
+
+Signed-off-by: Matevz Vucnik <vucnikm@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1253,6 +1253,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
++ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
+
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Sun, 8 Jul 2018 11:55:51 +0900
+Subject: rhashtable: add restart routine in rhashtable_free_and_destroy()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 0026129c8629265bfe5079c1e017fa8543796d9f ]
+
+rhashtable_free_and_destroy() cancels re-hash deferred work
+then walks and destroys elements. At this moment, some elements can
+still be in future_tbl. Those elements are not destroyed.
+
+test case:
+nft_rhash_destroy() calls rhashtable_free_and_destroy() to destroy
+all elements of sets before destroying sets and chains.
+But rhashtable_free_and_destroy() doesn't destroy elements of future_tbl.
+so that splat occurred.
+
+test script:
+ %cat test.nft
+ table ip aa {
+ map map1 {
+ type ipv4_addr : verdict;
+ elements = {
+ 0 : jump a0,
+ 1 : jump a0,
+ 2 : jump a0,
+ 3 : jump a0,
+ 4 : jump a0,
+ 5 : jump a0,
+ 6 : jump a0,
+ 7 : jump a0,
+ 8 : jump a0,
+ 9 : jump a0,
+ }
+ }
+ chain a0 {
+ }
+ }
+ flush ruleset
+ table ip aa {
+ map map1 {
+ type ipv4_addr : verdict;
+ elements = {
+ 0 : jump a0,
+ 1 : jump a0,
+ 2 : jump a0,
+ 3 : jump a0,
+ 4 : jump a0,
+ 5 : jump a0,
+ 6 : jump a0,
+ 7 : jump a0,
+ 8 : jump a0,
+ 9 : jump a0,
+ }
+ }
+ chain a0 {
+ }
+ }
+ flush ruleset
+
+ %while :; do nft -f test.nft; done
+
+Splat looks like:
+[ 200.795603] kernel BUG at net/netfilter/nf_tables_api.c:1363!
+[ 200.806944] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
+[ 200.812253] CPU: 1 PID: 1582 Comm: nft Not tainted 4.17.0+ #24
+[ 200.820297] Hardware name: To be filled by O.E.M. To be filled by O.E.M./Aptio CRB, BIOS 5.6.5 07/08/2015
+[ 200.830309] RIP: 0010:nf_tables_chain_destroy.isra.34+0x62/0x240 [nf_tables]
+[ 200.838317] Code: 43 50 85 c0 74 26 48 8b 45 00 48 8b 4d 08 ba 54 05 00 00 48 c7 c6 60 6d 29 c0 48 c7 c7 c0 65 29 c0 4c 8b 40 08 e8 58 e5 fd f8 <0f> 0b 48 89 da 48 b8 00 00 00 00 00 fc ff
+[ 200.860366] RSP: 0000:ffff880118dbf4d0 EFLAGS: 00010282
+[ 200.866354] RAX: 0000000000000061 RBX: ffff88010cdeaf08 RCX: 0000000000000000
+[ 200.874355] RDX: 0000000000000061 RSI: 0000000000000008 RDI: ffffed00231b7e90
+[ 200.882361] RBP: ffff880118dbf4e8 R08: ffffed002373bcfb R09: ffffed002373bcfa
+[ 200.890354] R10: 0000000000000000 R11: ffffed002373bcfb R12: dead000000000200
+[ 200.898356] R13: dead000000000100 R14: ffffffffbb62af38 R15: dffffc0000000000
+[ 200.906354] FS: 00007fefc31fd700(0000) GS:ffff88011b800000(0000) knlGS:0000000000000000
+[ 200.915533] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 200.922355] CR2: 0000557f1c8e9128 CR3: 0000000106880000 CR4: 00000000001006e0
+[ 200.930353] Call Trace:
+[ 200.932351] ? nf_tables_commit+0x26f6/0x2c60 [nf_tables]
+[ 200.939525] ? nf_tables_setelem_notify.constprop.49+0x1a0/0x1a0 [nf_tables]
+[ 200.947525] ? nf_tables_delchain+0x6e0/0x6e0 [nf_tables]
+[ 200.952383] ? nft_add_set_elem+0x1700/0x1700 [nf_tables]
+[ 200.959532] ? nla_parse+0xab/0x230
+[ 200.963529] ? nfnetlink_rcv_batch+0xd06/0x10d0 [nfnetlink]
+[ 200.968384] ? nfnetlink_net_init+0x130/0x130 [nfnetlink]
+[ 200.975525] ? debug_show_all_locks+0x290/0x290
+[ 200.980363] ? debug_show_all_locks+0x290/0x290
+[ 200.986356] ? sched_clock_cpu+0x132/0x170
+[ 200.990352] ? find_held_lock+0x39/0x1b0
+[ 200.994355] ? sched_clock_local+0x10d/0x130
+[ 200.999531] ? memset+0x1f/0x40
+
+V2:
+ - free all tables requested by Herbert Xu
+
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/rhashtable.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -1107,13 +1107,14 @@ void rhashtable_free_and_destroy(struct
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
+ {
+- struct bucket_table *tbl;
++ struct bucket_table *tbl, *next_tbl;
+ unsigned int i;
+
+ cancel_work_sync(&ht->run_work);
+
+ mutex_lock(&ht->mutex);
+ tbl = rht_dereference(ht->tbl, ht);
++restart:
+ if (free_fn) {
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+@@ -1130,7 +1131,12 @@ void rhashtable_free_and_destroy(struct
+ }
+ }
+
++ next_tbl = rht_dereference(tbl->future_tbl, ht);
+ bucket_table_free(tbl);
++ if (next_tbl) {
++ tbl = next_tbl;
++ goto restart;
++ }
+ mutex_unlock(&ht->mutex);
+ }
+ EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Tue, 10 Jul 2018 14:22:27 -0700
+Subject: sch_fq_codel: zero q->flows_cnt when fq_codel_init fails
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 83fe6b8709f65bc505b10235bd82ece12c4c5099 ]
+
+When fq_codel_init fails, qdisc_create_dflt will cleanup by using
+qdisc_destroy. This function calls the ->reset() op prior to calling the
+->destroy() op.
+
+Unfortunately, during the failure flow for sch_fq_codel, the ->flows
+parameter is not initialized, so the fq_codel_reset function will null
+pointer dereference.
+
+ kernel: BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
+ kernel: IP: fq_codel_reset+0x58/0xd0 [sch_fq_codel]
+ kernel: PGD 0 P4D 0
+ kernel: Oops: 0000 [#1] SMP PTI
+ kernel: Modules linked in: i40iw i40e(OE) xt_CHECKSUM iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack tun bridge stp llc devlink ebtable_filter ebtables ip6table_filter ip6_tables rpcrdma ib_isert iscsi_target_mod sunrpc ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm intel_rapl sb_edac x86_pkg_temp_thermal intel_powerclamp coretemp kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel intel_cstate iTCO_wdt iTCO_vendor_support intel_uncore ib_core intel_rapl_perf mei_me mei joydev i2c_i801 lpc_ich ioatdma shpchp wmi sch_fq_codel xfs libcrc32c mgag200 ixgbe drm_kms_helper isci ttm firewire_ohci
+ kernel: mdio drm igb libsas crc32c_intel firewire_core ptp pps_core scsi_transport_sas crc_itu_t dca i2c_algo_bit ipmi_si ipmi_devintf ipmi_msghandler [last unloaded: i40e]
+ kernel: CPU: 10 PID: 4219 Comm: ip Tainted: G OE 4.16.13custom-fq-codel-test+ #3
+ kernel: Hardware name: Intel Corporation S2600CO/S2600CO, BIOS SE5C600.86B.02.05.0004.051120151007 05/11/2015
+ kernel: RIP: 0010:fq_codel_reset+0x58/0xd0 [sch_fq_codel]
+ kernel: RSP: 0018:ffffbfbf4c1fb620 EFLAGS: 00010246
+ kernel: RAX: 0000000000000400 RBX: 0000000000000000 RCX: 00000000000005b9
+ kernel: RDX: 0000000000000000 RSI: ffff9d03264a60c0 RDI: ffff9cfd17b31c00
+ kernel: RBP: 0000000000000001 R08: 00000000000260c0 R09: ffffffffb679c3e9
+ kernel: R10: fffff1dab06a0e80 R11: ffff9cfd163af800 R12: ffff9cfd17b31c00
+ kernel: R13: 0000000000000001 R14: ffff9cfd153de600 R15: 0000000000000001
+ kernel: FS: 00007fdec2f92800(0000) GS:ffff9d0326480000(0000) knlGS:0000000000000000
+ kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ kernel: CR2: 0000000000000008 CR3: 0000000c1956a006 CR4: 00000000000606e0
+ kernel: Call Trace:
+ kernel: qdisc_destroy+0x56/0x140
+ kernel: qdisc_create_dflt+0x8b/0xb0
+ kernel: mq_init+0xc1/0xf0
+ kernel: qdisc_create_dflt+0x5a/0xb0
+ kernel: dev_activate+0x205/0x230
+ kernel: __dev_open+0xf5/0x160
+ kernel: __dev_change_flags+0x1a3/0x210
+ kernel: dev_change_flags+0x21/0x60
+ kernel: do_setlink+0x660/0xdf0
+ kernel: ? down_trylock+0x25/0x30
+ kernel: ? xfs_buf_trylock+0x1a/0xd0 [xfs]
+ kernel: ? rtnl_newlink+0x816/0x990
+ kernel: ? _xfs_buf_find+0x327/0x580 [xfs]
+ kernel: ? _cond_resched+0x15/0x30
+ kernel: ? kmem_cache_alloc+0x20/0x1b0
+ kernel: ? rtnetlink_rcv_msg+0x200/0x2f0
+ kernel: ? rtnl_calcit.isra.30+0x100/0x100
+ kernel: ? netlink_rcv_skb+0x4c/0x120
+ kernel: ? netlink_unicast+0x19e/0x260
+ kernel: ? netlink_sendmsg+0x1ff/0x3c0
+ kernel: ? sock_sendmsg+0x36/0x40
+ kernel: ? ___sys_sendmsg+0x295/0x2f0
+ kernel: ? ebitmap_cmp+0x6d/0x90
+ kernel: ? dev_get_by_name_rcu+0x73/0x90
+ kernel: ? skb_dequeue+0x52/0x60
+ kernel: ? __inode_wait_for_writeback+0x7f/0xf0
+ kernel: ? bit_waitqueue+0x30/0x30
+ kernel: ? fsnotify_grab_connector+0x3c/0x60
+ kernel: ? __sys_sendmsg+0x51/0x90
+ kernel: ? do_syscall_64+0x74/0x180
+ kernel: ? entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+ kernel: Code: 00 00 48 89 87 00 02 00 00 8b 87 a0 01 00 00 85 c0 0f 84 84 00 00 00 31 ed 48 63 dd 83 c5 01 48 c1 e3 06 49 03 9c 24 90 01 00 00 <48> 8b 73 08 48 8b 3b e8 6c 9a 4f f6 48 8d 43 10 48 c7 03 00 00
+ kernel: RIP: fq_codel_reset+0x58/0xd0 [sch_fq_codel] RSP: ffffbfbf4c1fb620
+ kernel: CR2: 0000000000000008
+ kernel: ---[ end trace e81a62bede66274e ]---
+
+This is caused because flows_cnt is non-zero, but flows hasn't been
+initialized. fq_codel_init has left the private data in a partially
+initialized state.
+
+To fix this, reset flows_cnt to 0 when we fail to initialize.
+Additionally, to make the state more consistent, also cleanup the flows
+pointer when the allocation of backlogs fails.
+
+This fixes the NULL pointer dereference, since both the for-loop and
+memset in fq_codel_reset will be no-ops when flow_cnt is zero.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_fq_codel.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -479,23 +479,27 @@ static int fq_codel_init(struct Qdisc *s
+ q->cparams.mtu = psched_mtu(qdisc_dev(sch));
+
+ if (opt) {
+- int err = fq_codel_change(sch, opt, extack);
++ err = fq_codel_change(sch, opt, extack);
+ if (err)
+- return err;
++ goto init_failure;
+ }
+
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
+ if (err)
+- return err;
++ goto init_failure;
+
+ if (!q->flows) {
+ q->flows = kvzalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow), GFP_KERNEL);
+- if (!q->flows)
+- return -ENOMEM;
++ if (!q->flows) {
++ err = -ENOMEM;
++ goto init_failure;
++ }
+ q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
+- if (!q->backlogs)
+- return -ENOMEM;
++ if (!q->backlogs) {
++ err = -ENOMEM;
++ goto alloc_failure;
++ }
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+@@ -508,6 +512,13 @@ static int fq_codel_init(struct Qdisc *s
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
++
++alloc_failure:
++ kvfree(q->flows);
++ q->flows = NULL;
++init_failure:
++ q->flows_cnt = 0;
++ return err;
+ }
+
+ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 3 Jul 2018 16:30:47 +0800
+Subject: sctp: fix the issue that pathmtu may be set lower than MINSEGMENT
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit a65925475571953da12a9bc2082aec29d4e2c0e7 ]
+
+After commit b6c5734db070 ("sctp: fix the handling of ICMP Frag Needed
+for too small MTUs"), sctp_transport_update_pmtu would refetch pathmtu
+from the dst and set it to transport's pathmtu without any check.
+
+The new pathmtu may be lower than MINSEGMENT if the dst is obsolete and
+updated by .get_dst() in sctp_transport_update_pmtu. In this case, it
+could have a smaller MTU as well, and thus we should validate it
+against MINSEGMENT instead.
+
+Syzbot reported a warning in sctp_mtu_payload caused by this.
+
+This patch refetches the pathmtu by calling sctp_dst_mtu where it does
+the check against MINSEGMENT.
+
+v1->v2:
+ - refetch the pathmtu by calling sctp_dst_mtu instead as Marcelo's
+ suggestion.
+
+Fixes: b6c5734db070 ("sctp: fix the handling of ICMP Frag Needed for too small MTUs")
+Reported-by: syzbot+f0d9d7cba052f9344b03@syzkaller.appspotmail.com
+Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/transport.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -273,7 +273,7 @@ bool sctp_transport_update_pmtu(struct s
+
+ if (dst) {
+ /* Re-fetch, as under layers may have a higher minimum size */
+- pmtu = SCTP_TRUNC4(dst_mtu(dst));
++ pmtu = sctp_dst_mtu(dst);
+ change = t->pathmtu != pmtu;
+ }
+ t->pathmtu = pmtu;
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Date: Thu, 26 Apr 2018 16:58:57 -0300
+Subject: sctp: introduce sctp_dst_mtu
+
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+
+[ Upstream commit 6ff0f871c20ec1769a481edca86f23c76b2b06d3 ]
+
+Which makes sure that the MTU respects the minimum value of
+SCTP_DEFAULT_MINSEGMENT and that it is correctly aligned.
+
+Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/sctp.h | 9 +++++++--
+ net/sctp/associola.c | 6 ++----
+ net/sctp/transport.c | 6 +++---
+ 3 files changed, 12 insertions(+), 9 deletions(-)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -609,10 +609,15 @@ static inline struct dst_entry *sctp_tra
+ return t->dst;
+ }
+
++static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
++{
++ return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
++ SCTP_DEFAULT_MINSEGMENT));
++}
++
+ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+ {
+- __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+- SCTP_DEFAULT_MINSEGMENT);
++ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1446,11 +1446,9 @@ void sctp_assoc_sync_pmtu(struct sctp_as
+ return;
+
+ /* Get the lowest pmtu of all the transports. */
+- list_for_each_entry(t, &asoc->peer.transport_addr_list,
+- transports) {
++ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+ if (t->pmtu_pending && t->dst) {
+- sctp_transport_update_pmtu(
+- t, SCTP_TRUNC4(dst_mtu(t->dst)));
++ sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+ t->pmtu_pending = 0;
+ }
+ if (!pmtu || (t->pathmtu < pmtu))
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -242,9 +242,9 @@ void sctp_transport_pmtu(struct sctp_tra
+ &transport->fl, sk);
+ }
+
+- if (transport->dst) {
+- transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
+- } else
++ if (transport->dst)
++ transport->pathmtu = sctp_dst_mtu(transport->dst);
++ else
+ transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ }
+
drm-nouveau-remove-bogus-crtc-check-in-pmops_runtime_idle.patch
drm-nouveau-use-drm_connector_list_iter_-for-iterating-connectors.patch
drm-nouveau-avoid-looping-through-fake-mst-connectors.patch
+gen_stats-fix-netlink-stats-dumping-in-the-presence-of-padding.patch
+ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch
+ipv6-fix-useless-rol32-call-on-hash.patch
+ipv6-ila-select-config_dst_cache.patch
+lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch
+net-diag-don-t-double-free-tcp_new_syn_recv-sockets-in-tcp_abort.patch
+net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch
+skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch
+net-ipv4-set-oif-in-fib_compute_spec_dst.patch
+net-ipv6-do-not-allow-device-only-routes-via-the-multipath-api.patch
+net-phy-fix-flag-masking-in-__set_phy_supported.patch
+ptp-fix-missing-break-in-switch.patch
+qmi_wwan-add-support-for-quectel-eg91.patch
+rhashtable-add-restart-routine-in-rhashtable_free_and_destroy.patch
+sch_fq_codel-zero-q-flows_cnt-when-fq_codel_init-fails.patch
+tg3-add-higher-cpu-clock-for-5762.patch
+net-ip6_gre-get-ipv6hdr-after-skb_cow_head.patch
+sctp-introduce-sctp_dst_mtu.patch
+sctp-fix-the-issue-that-pathmtu-may-be-set-lower-than-minsegment.patch
+hv_netvsc-fix-napi-reschedule-while-receive-completion-is-busy.patch
+net-aquantia-vlan-unicast-address-list-correct-handling.patch
+net-mlx4_en-don-t-reuse-rx-page-when-xdp-is-set.patch
+net-systemport-fix-crc-forwarding-check-for-systemport-lite.patch
+ipv6-make-dad-fail-with-enhanced-dad-when-nonce-length-differs.patch
+net-usb-asix-replace-mii_nway_restart-in-resume-path.patch
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Fri, 13 Jul 2018 13:21:07 +0200
+Subject: skbuff: Unconditionally copy pfmemalloc in __skb_clone()
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit e78bfb0751d4e312699106ba7efbed2bab1a53ca ]
+
+Commit 8b7008620b84 ("net: Don't copy pfmemalloc flag in
+__copy_skb_header()") introduced a different handling for the
+pfmemalloc flag in copy and clone paths.
+
+In __skb_clone(), now, the flag is set only if it was set in the
+original skb, but not cleared if it wasn't. This is wrong and
+might lead to socket buffers being flagged with pfmemalloc even
+if the skb data wasn't allocated from pfmemalloc reserves. Copy
+the flag instead of ORing it.
+
+Reported-by: Sabrina Dubroca <sd@queasysnail.net>
+Fixes: 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_header()")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Tested-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -858,8 +858,7 @@ static struct sk_buff *__skb_clone(struc
+ n->cloned = 1;
+ n->nohdr = 0;
+ n->peeked = 0;
+- if (skb->pfmemalloc)
+- n->pfmemalloc = 1;
++ C(pfmemalloc);
+ n->destructor = NULL;
+ C(tail);
+ C(end);
--- /dev/null
+From foo@baz Mon Jul 23 07:30:59 CEST 2018
+From: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+Date: Mon, 16 Jul 2018 11:13:32 +0530
+Subject: tg3: Add higher cpu clock for 5762.
+
+From: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+
+[ Upstream commit 3a498606bb04af603a46ebde8296040b2de350d1 ]
+
+This patch has fix for TX timeout while running bi-directional
+traffic with 100 Mbps using 5762.
+
+Signed-off-by: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+Signed-off-by: Siva Reddy Kallam <siva.kallam@broadcom.com>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -9289,6 +9289,15 @@ static int tg3_chip_reset(struct tg3 *tp
+
+ tg3_restore_clk(tp);
+
++ /* Increase the core clock speed to fix tx timeout issue for 5762
++ * with 100Mbps link speed.
++ */
++ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
++ TG3_CPMU_MAC_ORIDE_ENABLE);
++ }
++
+ /* Reprobe ASF enable state. */
+ tg3_flag_clear(tp, ENABLE_ASF);
+ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |