git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 23 Jul 2018 07:18:20 +0000 (09:18 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 23 Jul 2018 07:18:20 +0000 (09:18 +0200)
added patches:
ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch
ipv6-fix-useless-rol32-call-on-hash.patch
lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch
net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch
net-ipv4-set-oif-in-fib_compute_spec_dst.patch
net-phy-fix-flag-masking-in-__set_phy_supported.patch
ptp-fix-missing-break-in-switch.patch
skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch
tg3-add-higher-cpu-clock-for-5762.patch

queue-4.4/ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch [new file with mode: 0644]
queue-4.4/ipv6-fix-useless-rol32-call-on-hash.patch [new file with mode: 0644]
queue-4.4/lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch [new file with mode: 0644]
queue-4.4/net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch [new file with mode: 0644]
queue-4.4/net-ipv4-set-oif-in-fib_compute_spec_dst.patch [new file with mode: 0644]
queue-4.4/net-phy-fix-flag-masking-in-__set_phy_supported.patch [new file with mode: 0644]
queue-4.4/ptp-fix-missing-break-in-switch.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch [new file with mode: 0644]
queue-4.4/tg3-add-higher-cpu-clock-for-5762.patch [new file with mode: 0644]

diff --git a/queue-4.4/ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch b/queue-4.4/ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch
new file mode 100644 (file)
index 0000000..f124c07
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Tyler Hicks <tyhicks@canonical.com>
+Date: Thu, 5 Jul 2018 18:49:23 +0000
+Subject: ipv4: Return EINVAL when ping_group_range sysctl doesn't map to user ns
+
+From: Tyler Hicks <tyhicks@canonical.com>
+
+[ Upstream commit 70ba5b6db96ff7324b8cfc87e0d0383cf59c9677 ]
+
+The low and high values of the net.ipv4.ping_group_range sysctl were
+being silently forced to the default disabled state when a write to the
+sysctl contained GIDs that didn't map to the associated user namespace.
+Confusingly, the sysctl's write operation would return success and then
+a subsequent read of the sysctl would indicate that the low and high
+values are the overflowgid.
+
+This patch changes the behavior by clearly returning an error when the
+sysctl write operation receives a GID range that doesn't map to the
+associated user namespace. In such a situation, the previous value of
+the sysctl is preserved and that range will be returned in a subsequent
+read of the sysctl.
+
+Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/sysctl_net_ipv4.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -141,8 +141,9 @@ static int ipv4_ping_group_range(struct
+       if (write && ret == 0) {
+               low = make_kgid(user_ns, urange[0]);
+               high = make_kgid(user_ns, urange[1]);
+-              if (!gid_valid(low) || !gid_valid(high) ||
+-                  (urange[1] < urange[0]) || gid_lt(high, low)) {
++              if (!gid_valid(low) || !gid_valid(high))
++                      return -EINVAL;
++              if (urange[1] < urange[0] || gid_lt(high, low)) {
+                       low = make_kgid(&init_user_ns, 1);
+                       high = make_kgid(&init_user_ns, 0);
+               }
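
A quick way to see the new behaviour from userspace is to write an unmappable GID range to the sysctl from inside a user namespace and check for EINVAL. The sketch below is illustrative only and is not part of the patch; it assumes the GIDs written are not mapped in the current user namespace and that the caller is allowed to write the sysctl.

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          int fd = open("/proc/sys/net/ipv4/ping_group_range", O_WRONLY);

          if (fd < 0)
                  return 1;
          /* Assumes GIDs 100000-100100 are not mapped in this user ns. */
          if (write(fd, "100000 100100", 13) < 0 && errno == EINVAL)
                  printf("rejected with EINVAL, previous range preserved\n");
          close(fd);
          return 0;
  }
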
diff --git a/queue-4.4/ipv6-fix-useless-rol32-call-on-hash.patch b/queue-4.4/ipv6-fix-useless-rol32-call-on-hash.patch
new file mode 100644 (file)
index 0000000..943a576
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 17 Jul 2018 17:12:39 +0100
+Subject: ipv6: fix useless rol32 call on hash
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit 169dc027fb02492ea37a0575db6a658cf922b854 ]
+
+The rol32 call is currently rotating hash but the rol'd value is
+being discarded. I believe the current code is incorrect and hash
+should be assigned the rotated value returned from rol32.
+
+Thanks to David Lebrun for spotting this.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ipv6.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -762,7 +762,7 @@ static inline __be32 ip6_make_flowlabel(
+        * to minimize possbility that any useful information to an
+        * attacker is leaked. Only lower 20 bits are relevant.
+        */
+-      rol32(hash, 16);
++      hash = rol32(hash, 16);
+       flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
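
Since rol32() is a pure function, calling it without using the return value is a no-op, which is the whole bug. A minimal userspace sketch (rol32() reimplemented here purely for illustration):

  #include <stdint.h>
  #include <stdio.h>

  /* Userspace stand-in for the kernel's rol32() helper. */
  static uint32_t rol32(uint32_t word, unsigned int shift)
  {
          return (word << shift) | (word >> (32 - shift));
  }

  int main(void)
  {
          uint32_t hash = 0x12345678;

          rol32(hash, 16);            /* old code: result discarded */
          printf("%08x\n", hash);     /* still 12345678 */

          hash = rol32(hash, 16);     /* fixed code: rotation applied */
          printf("%08x\n", hash);     /* 56781234 */
          return 0;
  }
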
diff --git a/queue-4.4/lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch b/queue-4.4/lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch
new file mode 100644 (file)
index 0000000..94ee654
--- /dev/null
@@ -0,0 +1,64 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Davidlohr Bueso <dave@stgolabs.net>
+Date: Mon, 16 Jul 2018 13:26:13 -0700
+Subject: lib/rhashtable: consider param->min_size when setting initial table size
+
+From: Davidlohr Bueso <dave@stgolabs.net>
+
+[ Upstream commit 107d01f5ba10f4162c38109496607eb197059064 ]
+
+rhashtable_init() currently does not take into account the user-passed
+min_size parameter unless param->nelem_hint is set as well. As such,
+the default size (number of buckets) will always be HASH_DEFAULT_SIZE
+even if the smallest allowed size is larger than that. Remediate this
+by unconditionally calling into rounded_hashtable_size() and handling
+things accordingly.
+
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/rhashtable.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -670,8 +670,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+ static size_t rounded_hashtable_size(const struct rhashtable_params *params)
+ {
+-      return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+-                 (unsigned long)params->min_size);
++      size_t retsize;
++
++      if (params->nelem_hint)
++              retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
++                            (unsigned long)params->min_size);
++      else
++              retsize = max(HASH_DEFAULT_SIZE,
++                            (unsigned long)params->min_size);
++
++      return retsize;
+ }
+ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+@@ -728,8 +736,6 @@ int rhashtable_init(struct rhashtable *h
+       struct bucket_table *tbl;
+       size_t size;
+-      size = HASH_DEFAULT_SIZE;
+-
+       if ((!params->key_len && !params->obj_hashfn) ||
+           (params->obj_hashfn && !params->obj_cmpfn))
+               return -EINVAL;
+@@ -756,8 +762,7 @@ int rhashtable_init(struct rhashtable *h
+       ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+-      if (params->nelem_hint)
+-              size = rounded_hashtable_size(&ht->p);
++      size = rounded_hashtable_size(&ht->p);
+       /* The maximum (not average) chain length grows with the
+        * size of the hash table, at a rate of (log N)/(log log N).
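
The sizing rule after the change can be mirrored in a few lines of userspace C. This is only a model of the arithmetic (HASH_DEFAULT_SIZE is 64 in this tree; the helpers below are simplified stand-ins for the kernel ones):

  #include <stdio.h>

  #define HASH_DEFAULT_SIZE 64UL

  /* Simplified stand-in for the kernel's roundup_pow_of_two(). */
  static unsigned long roundup_pow_of_two(unsigned long n)
  {
          unsigned long r = 1;

          while (r < n)
                  r <<= 1;
          return r;
  }

  static unsigned long max_ul(unsigned long a, unsigned long b)
  {
          return a > b ? a : b;
  }

  static unsigned long rounded_hashtable_size(unsigned long nelem_hint,
                                              unsigned long min_size)
  {
          if (nelem_hint)
                  return max_ul(roundup_pow_of_two(nelem_hint * 4 / 3),
                                min_size);
          return max_ul(HASH_DEFAULT_SIZE, min_size);
  }

  int main(void)
  {
          /* The old code ignored min_size without a nelem_hint and always
           * started at HASH_DEFAULT_SIZE; now min_size is honoured. */
          printf("%lu\n", rounded_hashtable_size(0, 256));  /* 256 */
          printf("%lu\n", rounded_hashtable_size(0, 16));   /* 64  */
          return 0;
  }
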
diff --git a/queue-4.4/net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch b/queue-4.4/net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch
new file mode 100644 (file)
index 0000000..25649c2
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Mon Jul 23 08:24:46 CEST 2018
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Wed, 11 Jul 2018 14:39:42 +0200
+Subject: net: Don't copy pfmemalloc flag in __copy_skb_header()
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit 8b7008620b8452728cadead460a36f64ed78c460 ]
+
+The pfmemalloc flag indicates that the skb was allocated from
+the PFMEMALLOC reserves, and the flag is currently copied on skb
+copy and clone.
+
+However, an skb copied from an skb flagged with pfmemalloc
+wasn't necessarily allocated from PFMEMALLOC reserves, and on
+the other hand an skb allocated that way might be copied from an
+skb that wasn't.
+
+So we should not copy the flag on skb copy, and rather decide
+whether to allow an skb to be associated with sockets unrelated
+to page reclaim depending only on how it was allocated.
+
+Move the pfmemalloc flag before headers_start[0] using an
+existing 1-bit hole, so that __copy_skb_header() doesn't copy
+it.
+
+When cloning, we'll now take care of this flag explicitly,
+contravening the warning comment of __skb_clone().
+
+While at it, restore the newline usage introduced by commit
+b19372273164 ("net: reorganize sk_buff for faster
+__copy_skb_header()") to visually separate bytes used in
+bitfields after headers_start[0], that was gone after commit
+a9e419dc7be6 ("netfilter: merge ctinfo into nfct pointer storage
+area"), and describe the pfmemalloc flag in the kernel-doc
+structure comment.
+
+This doesn't change the size of sk_buff or cacheline boundaries,
+but consolidates the 15-bit hole before tc_index into a 2-byte
+hole before csum, which could now be filled more easily.
+
+Reported-by: Patrick Talbert <ptalbert@redhat.com>
+Fixes: c93bdd0e03e8 ("netvm: allow skb allocation to use PFMEMALLOC reserves")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h |   12 ++++++------
+ net/core/skbuff.c      |    2 ++
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -514,6 +514,7 @@ static inline bool skb_mstamp_after(cons
+  *    @hash: the packet hash
+  *    @queue_mapping: Queue mapping for multiqueue devices
+  *    @xmit_more: More SKBs are pending for this queue
++ *    @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+  *    @ndisc_nodetype: router type (from link layer)
+  *    @ooo_okay: allow the mapping of a socket to a queue to be changed
+  *    @l4_hash: indicate hash is a canonical 4-tuple hash over transport
+@@ -594,8 +595,8 @@ struct sk_buff {
+                               fclone:2,
+                               peeked:1,
+                               head_frag:1,
+-                              xmit_more:1;
+-      /* one bit hole */
++                              xmit_more:1,
++                              pfmemalloc:1;
+       kmemcheck_bitfield_end(flags1);
+       /* fields enclosed in headers_start/headers_end are copied
+@@ -615,19 +616,18 @@ struct sk_buff {
+       __u8                    __pkt_type_offset[0];
+       __u8                    pkt_type:3;
+-      __u8                    pfmemalloc:1;
+       __u8                    ignore_df:1;
+       __u8                    nfctinfo:3;
+-
+       __u8                    nf_trace:1;
++
+       __u8                    ip_summed:2;
+       __u8                    ooo_okay:1;
+       __u8                    l4_hash:1;
+       __u8                    sw_hash:1;
+       __u8                    wifi_acked_valid:1;
+       __u8                    wifi_acked:1;
+-
+       __u8                    no_fcs:1;
++
+       /* Indicates the inner headers are valid in the skbuff. */
+       __u8                    encapsulation:1;
+       __u8                    encap_hdr_csum:1;
+@@ -635,11 +635,11 @@ struct sk_buff {
+       __u8                    csum_complete_sw:1;
+       __u8                    csum_level:2;
+       __u8                    csum_bad:1;
+-
+ #ifdef CONFIG_IPV6_NDISC_NODETYPE
+       __u8                    ndisc_nodetype:2;
+ #endif
+       __u8                    ipvs_property:1;
++
+       __u8                    inner_protocol_type:1;
+       __u8                    remcsum_offload:1;
+       /* 3 or 5 bit hole */
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -828,6 +828,8 @@ static struct sk_buff *__skb_clone(struc
+       n->cloned = 1;
+       n->nohdr = 0;
+       n->peeked = 0;
++      if (skb->pfmemalloc)
++              n->pfmemalloc = 1;
+       n->destructor = NULL;
+       C(tail);
+       C(end);
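
Moving the bit works because __copy_skb_header() copies only the byte range between headers_start[0] and headers_end[0]; anything placed before that window is never touched by the memcpy. A toy model of the idiom (field names are made up, not the real sk_buff layout; zero-length arrays are a GNU C extension, as used by sk_buff itself):

  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  struct toy_skb {
          unsigned char pfmemalloc:1;   /* before the window: not copied */
          char          headers_start[0];
          int           pkt_type;
          int           hash;
          char          headers_end[0];
  };

  /* Mirrors the __copy_skb_header() idiom: copy only the
   * headers_start..headers_end range, leaving earlier fields alone. */
  static void toy_copy_header(struct toy_skb *dst, const struct toy_skb *src)
  {
          memcpy(&dst->headers_start, &src->headers_start,
                 offsetof(struct toy_skb, headers_end) -
                 offsetof(struct toy_skb, headers_start));
  }

  int main(void)
  {
          struct toy_skb src = { .pfmemalloc = 1, .pkt_type = 3, .hash = 42 };
          struct toy_skb dst = { 0 };

          toy_copy_header(&dst, &src);
          printf("%u %d %d\n", dst.pfmemalloc, dst.pkt_type, dst.hash);
          /* prints "0 3 42": the flag stays outside the copied window */
          return 0;
  }
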
diff --git a/queue-4.4/net-ipv4-set-oif-in-fib_compute_spec_dst.patch b/queue-4.4/net-ipv4-set-oif-in-fib_compute_spec_dst.patch
new file mode 100644 (file)
index 0000000..34fab21
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: David Ahern <dsahern@gmail.com>
+Date: Sat, 7 Jul 2018 16:15:26 -0700
+Subject: net/ipv4: Set oif in fib_compute_spec_dst
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit e7372197e15856ec4ee66b668020a662994db103 ]
+
+Xin reported that icmp replies may not use the address on the device on
+which the echo request was received if the destination address is
+broadcast. Instead, a route lookup is done without considering the VRF
+context. Fix this by setting oif in the flow struct to the master device
+if the receiving device is enslaved; that directs the lookup to the VRF
+table. If the device is not enslaved, oif is still 0, so there is no
+effect.
+
+Fixes: cd2fbe1b6b51 ("net: Use VRF device index for lookups on RX")
+Reported-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/fib_frontend.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -297,6 +297,7 @@ __be32 fib_compute_spec_dst(struct sk_bu
+       if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+               struct flowi4 fl4 = {
+                       .flowi4_iif = LOOPBACK_IFINDEX,
++                      .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
+                       .daddr = ip_hdr(skb)->saddr,
+                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .flowi4_scope = scope,
diff --git a/queue-4.4/net-phy-fix-flag-masking-in-__set_phy_supported.patch b/queue-4.4/net-phy-fix-flag-masking-in-__set_phy_supported.patch
new file mode 100644 (file)
index 0000000..fb93b06
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Tue, 3 Jul 2018 22:34:54 +0200
+Subject: net: phy: fix flag masking in __set_phy_supported
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit df8ed346d4a806a6eef2db5924285e839604b3f9 ]
+
+Currently the pause flags are also removed from phydev->supported because
+they're not included in PHY_DEFAULT_FEATURES. I don't think this is
+intended, especially considering that this function can be called via
+phy_set_max_speed() anywhere in a driver. Change the masking to mask out
+only the values we're going to change. In addition, remove the misleading
+comment; the job of this small function is just to adjust the supported
+and advertised speeds.
+
+Fixes: f3a6bd393c2c ("phylib: Add phy_set_max_speed helper")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy_device.c |    7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1265,11 +1265,8 @@ static int gen10g_resume(struct phy_devi
+ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+ {
+-      /* The default values for phydev->supported are provided by the PHY
+-       * driver "features" member, we want to reset to sane defaults first
+-       * before supporting higher speeds.
+-       */
+-      phydev->supported &= PHY_DEFAULT_FEATURES;
++      phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
++                             PHY_10BT_FEATURES);
+       switch (max_speed) {
+       default:
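
The difference between the two masking strategies is easy to see with made-up feature bits (the values below are purely illustrative, not the kernel's real feature masks):

  #include <stdio.h>

  /* Made-up stand-ins for the kernel feature masks. */
  #define F_10BT     0x01
  #define F_100BT    0x02
  #define F_1000BT   0x04
  #define F_PAUSE    0x08
  #define F_AUTONEG  0x10
  #define DEFAULT_FEATURES (F_10BT | F_100BT | F_AUTONEG)

  int main(void)
  {
          unsigned int supported = F_10BT | F_100BT | F_1000BT |
                                   F_PAUSE | F_AUTONEG;

          /* Old masking: keeps only DEFAULT_FEATURES, silently
           * dropping the pause bit. */
          unsigned int old_mask = supported & DEFAULT_FEATURES;

          /* New masking: clears only the speed bits, pause survives. */
          unsigned int new_mask = supported &
                                  ~(F_10BT | F_100BT | F_1000BT);

          printf("old=%#x new=%#x\n", old_mask, new_mask);
          /* old=0x13 (pause lost), new=0x18 (pause kept) */
          return 0;
  }
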
diff --git a/queue-4.4/ptp-fix-missing-break-in-switch.patch b/queue-4.4/ptp-fix-missing-break-in-switch.patch
new file mode 100644 (file)
index 0000000..49773ac
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Tue, 17 Jul 2018 20:17:33 -0500
+Subject: ptp: fix missing break in switch
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit 9ba8376ce1e2cbf4ce44f7e4bee1d0648e10d594 ]
+
+It seems that a *break* is missing in order to avoid falling through
+to the default case. Otherwise, checking *chan* makes no sense.
+
+Fixes: 72df7a7244c0 ("ptp: Allow reassigning calibration pin function")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Acked-by: Richard Cochran <richardcochran@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ptp/ptp_chardev.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -88,6 +88,7 @@ int ptp_set_pinfunc(struct ptp_clock *pt
+       case PTP_PF_PHYSYNC:
+               if (chan != 0)
+                       return -EINVAL;
++              break;
+       default:
+               return -EINVAL;
+       }
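
A stripped-down model of the switch shows why the break matters: without it, even a valid channel falls through to the default case and is rejected. The function and case values below are illustrative, not the real PTP code:

  #include <stdio.h>

  static int toy_set_pinfunc(int func, unsigned int chan)
  {
          switch (func) {
          case 1:                 /* stand-in for PTP_PF_PHYSYNC */
                  if (chan != 0)
                          return -1;
                  break;          /* the statement the patch adds */
          default:
                  return -1;
          }
          return 0;
  }

  int main(void)
  {
          printf("%d\n", toy_set_pinfunc(1, 0));  /* 0 with the break,
                                                     -1 without it */
          return 0;
  }
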
diff --git a/queue-4.4/series b/queue-4.4/series
index 5784bd3991d041d73fc736950eb25113e904016f..dd397a3695998f5e4182a5a35ca32dbfb259741d 100644 (file)
--- a/queue-4.4/series
@@ -5,3 +5,12 @@ alsa-rawmidi-change-resized-buffers-atomically.patch
 arc-fix-config_swap.patch
 arc-mm-allow-mprotect-to-make-stack-mappings-executable.patch
 mm-memcg-fix-use-after-free-in-mem_cgroup_iter.patch
+ipv4-return-einval-when-ping_group_range-sysctl-doesn-t-map-to-user-ns.patch
+ipv6-fix-useless-rol32-call-on-hash.patch
+lib-rhashtable-consider-param-min_size-when-setting-initial-table-size.patch
+net-ipv4-set-oif-in-fib_compute_spec_dst.patch
+net-phy-fix-flag-masking-in-__set_phy_supported.patch
+ptp-fix-missing-break-in-switch.patch
+tg3-add-higher-cpu-clock-for-5762.patch
+net-don-t-copy-pfmemalloc-flag-in-__copy_skb_header.patch
+skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch
diff --git a/queue-4.4/skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch b/queue-4.4/skbuff-unconditionally-copy-pfmemalloc-in-__skb_clone.patch
new file mode 100644 (file)
index 0000000..1e6bdb9
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Fri, 13 Jul 2018 13:21:07 +0200
+Subject: skbuff: Unconditionally copy pfmemalloc in __skb_clone()
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit e78bfb0751d4e312699106ba7efbed2bab1a53ca ]
+
+Commit 8b7008620b84 ("net: Don't copy pfmemalloc flag in
+__copy_skb_header()") introduced a different handling for the
+pfmemalloc flag in copy and clone paths.
+
+In __skb_clone(), now, the flag is set only if it was set in the
+original skb, but not cleared if it wasn't. This is wrong and
+might lead to socket buffers being flagged with pfmemalloc even
+if the skb data wasn't allocated from pfmemalloc reserves. Copy
+the flag instead of ORing it.
+
+Reported-by: Sabrina Dubroca <sd@queasysnail.net>
+Fixes: 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_header()")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Tested-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -828,8 +828,7 @@ static struct sk_buff *__skb_clone(struc
+       n->cloned = 1;
+       n->nohdr = 0;
+       n->peeked = 0;
+-      if (skb->pfmemalloc)
+-              n->pfmemalloc = 1;
++      C(pfmemalloc);
+       n->destructor = NULL;
+       C(tail);
+       C(end);
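
The distinction matters because the clone's header is not zeroed on allocation, so a conditional set can only ever turn the bit on, while a plain copy also clears a stale bit left over from a previous use. A toy illustration in plain C:

  #include <stdio.h>

  struct toy { unsigned int pfmemalloc:1; };

  int main(void)
  {
          struct toy skb = { .pfmemalloc = 0 };
          struct toy n   = { .pfmemalloc = 1 };  /* stale bit from reuse */

          /* Previous code: can set the bit but never clears it. */
          if (skb.pfmemalloc)
                  n.pfmemalloc = 1;
          printf("conditional: %u\n", n.pfmemalloc);  /* still 1 */

          /* Fixed code: unconditional copy, stale bit goes away. */
          n.pfmemalloc = skb.pfmemalloc;
          printf("copy:        %u\n", n.pfmemalloc);  /* 0 */
          return 0;
  }
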
diff --git a/queue-4.4/tg3-add-higher-cpu-clock-for-5762.patch b/queue-4.4/tg3-add-higher-cpu-clock-for-5762.patch
new file mode 100644 (file)
index 0000000..8fc8a9f
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Mon Jul 23 08:34:58 CEST 2018
+From: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+Date: Mon, 16 Jul 2018 11:13:32 +0530
+Subject: tg3: Add higher cpu clock for 5762.
+
+From: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+
+[ Upstream commit 3a498606bb04af603a46ebde8296040b2de350d1 ]
+
+This patch fixes a TX timeout seen while running bi-directional
+traffic at 100 Mbps on the 5762.
+
+Signed-off-by: Sanjeev Bansal <sanjeevb.bansal@broadcom.com>
+Signed-off-by: Siva Reddy Kallam <siva.kallam@broadcom.com>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -9278,6 +9278,15 @@ static int tg3_chip_reset(struct tg3 *tp
+       tg3_restore_clk(tp);
++      /* Increase the core clock speed to fix tx timeout issue for 5762
++       * with 100Mbps link speed.
++       */
++      if (tg3_asic_rev(tp) == ASIC_REV_5762) {
++              val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
++              tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
++                   TG3_CPMU_MAC_ORIDE_ENABLE);
++      }
++
+       /* Reprobe ASF enable state.  */
+       tg3_flag_clear(tp, ENABLE_ASF);
+       tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |