--- /dev/null
+From ea9364bbadf11f0c55802cf11387d74f524cee84 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 10 Nov 2020 18:26:37 +0100
+Subject: cpufreq: Add strict_target to struct cpufreq_policy
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit ea9364bbadf11f0c55802cf11387d74f524cee84 upstream.
+
+Add a new field to be set when the CPUFREQ_GOV_STRICT_TARGET flag is
+set for the current governor to struct cpufreq_policy, so that the
+drivers needing to check CPUFREQ_GOV_STRICT_TARGET do not have to
+access the governor object during every frequency transition.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 2 ++
+ include/linux/cpufreq.h | 6 ++++++
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2259,6 +2259,8 @@ static int cpufreq_init_governor(struct
+ }
+ }
+
++ policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
++
+ return 0;
+ }
+
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -110,6 +110,12 @@ struct cpufreq_policy {
+ bool fast_switch_enabled;
+
+ /*
++ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
++ * governor.
++ */
++ bool strict_target;
++
++ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
--- /dev/null
+From fcb3a1ab79904d54499db77017793ccca665eb7e Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 10 Nov 2020 18:27:40 +0100
+Subject: cpufreq: intel_pstate: Take CPUFREQ_GOV_STRICT_TARGET into account
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit fcb3a1ab79904d54499db77017793ccca665eb7e upstream.
+
+Make intel_pstate take the new CPUFREQ_GOV_STRICT_TARGET governor
+flag into account when it operates in the passive mode with HWP
+enabled, so as to fix the "powersave" governor behavior in that
+case (currently, HWP is allowed to scale the performance all the
+way up to the policy max limit when the "powersave" governor is
+used, but it should be constrained to the policy min limit then).
+
+Fixes: f6ebbcf08f37 ("cpufreq: intel_pstate: Implement passive mode with HWP enabled")
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: 9a2a9ebc0a75 cpufreq: Introduce governor flags
+Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: 218f66870181 cpufreq: Introduce CPUFREQ_GOV_STRICT_TARGET
+Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: ea9364bbadf1 cpufreq: Add strict_target to struct cpufreq_policy
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2509,7 +2509,7 @@ static void intel_cpufreq_trace(struct c
+ }
+
+ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+- bool fast_switch)
++ bool strict, bool fast_switch)
+ {
+ u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+
+@@ -2521,7 +2521,7 @@ static void intel_cpufreq_adjust_hwp(str
+ * field in it, so opportunistically update the max too if needed.
+ */
+ value &= ~HWP_MAX_PERF(~0L);
+- value |= HWP_MAX_PERF(cpu->max_perf_ratio);
++ value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+
+ if (value == prev)
+ return;
+@@ -2544,14 +2544,16 @@ static void intel_cpufreq_adjust_perf_ct
+ pstate_funcs.get_val(cpu, target_pstate));
+ }
+
+-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+- bool fast_switch)
++static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
++ int target_pstate, bool fast_switch)
+ {
++ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int old_pstate = cpu->pstate.current_pstate;
+
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ if (hwp_active) {
+- intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
++ intel_cpufreq_adjust_hwp(cpu, target_pstate,
++ policy->strict_target, fast_switch);
+ cpu->pstate.current_pstate = target_pstate;
+ } else if (target_pstate != old_pstate) {
+ intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+@@ -2591,7 +2593,7 @@ static int intel_cpufreq_target(struct c
+ break;
+ }
+
+- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
++ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+
+ freqs.new = target_pstate * cpu->pstate.scaling;
+
+@@ -2610,7 +2612,7 @@ static unsigned int intel_cpufreq_fast_s
+
+ target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+
+- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
++ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+
+ return target_pstate * cpu->pstate.scaling;
+ }
--- /dev/null
+From 218f66870181bec7aaa6e3c72f346039c590c3c2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 10 Nov 2020 18:26:10 +0100
+Subject: cpufreq: Introduce CPUFREQ_GOV_STRICT_TARGET
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 218f66870181bec7aaa6e3c72f346039c590c3c2 upstream.
+
+Introduce a new governor flag, CPUFREQ_GOV_STRICT_TARGET, for the
+governors that want the target frequency to be set exactly to the
+given value without leaving any room for adjustments on the hardware
+side and set this flag for the powersave and performance governors.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq_performance.c | 1 +
+ drivers/cpufreq/cpufreq_powersave.c | 1 +
+ include/linux/cpufreq.h | 3 +++
+ 3 files changed, 5 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq_performance.c
++++ b/drivers/cpufreq/cpufreq_performance.c
+@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limi
+ static struct cpufreq_governor cpufreq_gov_performance = {
+ .name = "performance",
+ .owner = THIS_MODULE,
++ .flags = CPUFREQ_GOV_STRICT_TARGET,
+ .limits = cpufreq_gov_performance_limits,
+ };
+
+--- a/drivers/cpufreq/cpufreq_powersave.c
++++ b/drivers/cpufreq/cpufreq_powersave.c
+@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_g
+ .name = "powersave",
+ .limits = cpufreq_gov_powersave_limits,
+ .owner = THIS_MODULE,
++ .flags = CPUFREQ_GOV_STRICT_TARGET,
+ };
+
+ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -575,6 +575,9 @@ struct cpufreq_governor {
+ /* For governors which change frequency dynamically by themselves */
+ #define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
++/* For governors wanting the target frequency to be set exactly */
++#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
++
+
+ /* Pass a target to the cpufreq driver */
+ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
--- /dev/null
+From 9a2a9ebc0a758d887ee06e067e9f7f0b36ff7574 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 10 Nov 2020 18:25:57 +0100
+Subject: cpufreq: Introduce governor flags
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 9a2a9ebc0a758d887ee06e067e9f7f0b36ff7574 upstream.
+
+A new cpufreq governor flag will be added subsequently, so replace
+the bool dynamic_switching field in struct cpufreq_governor with a
+flags field and introduce CPUFREQ_GOV_DYNAMIC_SWITCHING to set for
+the "dynamic switching" governors instead of it.
+
+No intentional functional impact.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 2 +-
+ drivers/cpufreq/cpufreq_governor.h | 2 +-
+ include/linux/cpufreq.h | 9 +++++++--
+ kernel/sched/cpufreq_schedutil.c | 2 +-
+ 4 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2233,7 +2233,7 @@ static int cpufreq_init_governor(struct
+ return -EINVAL;
+
+ /* Platform doesn't want dynamic frequency switching ? */
+- if (policy->governor->dynamic_switching &&
++ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
+ cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
+ struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+--- a/drivers/cpufreq/cpufreq_governor.h
++++ b/drivers/cpufreq/cpufreq_governor.h
+@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct
+ #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
+ { \
+ .name = _name_, \
+- .dynamic_switching = true, \
++ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \
+ .owner = THIS_MODULE, \
+ .init = cpufreq_dbs_governor_init, \
+ .exit = cpufreq_dbs_governor_exit, \
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -565,12 +565,17 @@ struct cpufreq_governor {
+ char *buf);
+ int (*store_setspeed) (struct cpufreq_policy *policy,
+ unsigned int freq);
+- /* For governors which change frequency dynamically by themselves */
+- bool dynamic_switching;
+ struct list_head governor_list;
+ struct module *owner;
++ u8 flags;
+ };
+
++/* Governor flags */
++
++/* For governors which change frequency dynamically by themselves */
++#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
++
++
+ /* Pass a target to the cpufreq driver */
+ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq);
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -896,7 +896,7 @@ static void sugov_limits(struct cpufreq_
+ struct cpufreq_governor schedutil_gov = {
+ .name = "schedutil",
+ .owner = THIS_MODULE,
+- .dynamic_switching = true,
++ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
+ .init = sugov_init,
+ .exit = sugov_exit,
+ .start = sugov_start,
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Parav Pandit <parav@nvidia.com>
+Date: Wed, 11 Nov 2020 05:47:44 +0200
+Subject: devlink: Avoid overwriting port attributes of registered port
+
+From: Parav Pandit <parav@nvidia.com>
+
+[ Upstream commit 9f73bd1c2c4c304b238051fc92b3f807326f0a89 ]
+
+Cited commit in fixes tag overwrites the port attributes for the
+registered port.
+
+Avoid such error by checking registered flag before setting attributes.
+
+Fixes: 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set parameters with a struct")
+Signed-off-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://lore.kernel.org/r/20201111034744.35554-1-parav@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/devlink.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -7675,8 +7675,6 @@ static int __devlink_port_attrs_set(stru
+ {
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+
+- if (WARN_ON(devlink_port->registered))
+- return -EEXIST;
+ devlink_port->attrs_set = true;
+ attrs->flavour = flavour;
+ if (attrs->switch_id.id_len) {
+@@ -7700,6 +7698,8 @@ void devlink_port_attrs_set(struct devli
+ {
+ int ret;
+
++ if (WARN_ON(devlink_port->registered))
++ return;
+ devlink_port->attrs = *attrs;
+ ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
+ if (ret)
+@@ -7719,6 +7719,8 @@ void devlink_port_attrs_pci_pf_set(struc
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int ret;
+
++ if (WARN_ON(devlink_port->registered))
++ return;
+ ret = __devlink_port_attrs_set(devlink_port,
+ DEVLINK_PORT_FLAVOUR_PCI_PF);
+ if (ret)
+@@ -7741,6 +7743,8 @@ void devlink_port_attrs_pci_vf_set(struc
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int ret;
+
++ if (WARN_ON(devlink_port->registered))
++ return;
+ ret = __devlink_port_attrs_set(devlink_port,
+ DEVLINK_PORT_FLAVOUR_PCI_VF);
+ if (ret)
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Alexander Lobakin <alobakin@pm.me>
+Date: Sun, 8 Nov 2020 00:46:15 +0000
+Subject: ethtool: netlink: add missing netdev_features_change() call
+
+From: Alexander Lobakin <alobakin@pm.me>
+
+[ Upstream commit 413691384a37fe27f43460226c4160e33140e638 ]
+
+After updating userspace Ethtool from 5.7 to 5.9, I noticed that
+NETDEV_FEAT_CHANGE is no more raised when changing netdev features
+through Ethtool.
+That's because the old Ethtool ioctl interface always calls
+netdev_features_change() at the end of user request processing to
+inform the kernel that our netdevice has some features changed, but
+the new Netlink interface does not. Instead, it just notifies itself
+with ETHTOOL_MSG_FEATURES_NTF.
+Replace this ethtool_notify() call with netdev_features_change(), so
+the kernel will be aware of any features changes, just like in case
+with the ioctl interface. This does not omit Ethtool notifications,
+as Ethtool itself listens to NETDEV_FEAT_CHANGE and drops
+ETHTOOL_MSG_FEATURES_NTF on it
+(net/ethtool/netlink.c:ethnl_netdev_event()).
+
+>From v1 [1]:
+- dropped extra new line as advised by Jakub;
+- no functional changes.
+
+[1] https://lore.kernel.org/netdev/AlZXQ2o5uuTVHCfNGOiGgJ8vJ3KgO5YIWAnQjH0cDE@cp3-web-009.plabs.ch
+
+Fixes: 0980bfcd6954 ("ethtool: set netdev features with FEATURES_SET request")
+Signed-off-by: Alexander Lobakin <alobakin@pm.me>
+Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
+Link: https://lore.kernel.org/r/ahA2YWXYICz5rbUSQqNG4roJ8OlJzzYQX7PTiG80@cp4-web-028.plabs.ch
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ethtool/features.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ethtool/features.c
++++ b/net/ethtool/features.c
+@@ -296,7 +296,7 @@ int ethnl_set_features(struct sk_buff *s
+ active_diff_mask, compact);
+ }
+ if (mod)
+- ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
++ netdev_features_change(dev);
+
+ out_rtnl:
+ rtnl_unlock();
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Oliver Herms <oliver.peter.herms@gmail.com>
+Date: Tue, 3 Nov 2020 11:41:33 +0100
+Subject: IPv6: Set SIT tunnel hard_header_len to zero
+
+From: Oliver Herms <oliver.peter.herms@gmail.com>
+
+[ Upstream commit 8ef9ba4d666614497a057d09b0a6eafc1e34eadf ]
+
+Due to the legacy usage of hard_header_len for SIT tunnels while
+already using infrastructure from net/ipv4/ip_tunnel.c the
+calculation of the path MTU in tnl_update_pmtu is incorrect.
+This leads to unnecessary creation of MTU exceptions for any
+flow going over a SIT tunnel.
+
+As SIT tunnels do not have a header themselves other than their
+transport (L3, L2) headers we're leaving hard_header_len set to zero
+as tnl_update_pmtu is already taking care of the transport headers
+sizes.
+
+This will also help avoiding unnecessary IPv6 GC runs and spinlock
+contention seen when using SIT tunnels and for more than
+net.ipv6.route.gc_thresh flows.
+
+Fixes: c54419321455 ("GRE: Refactor GRE tunneling code.")
+Signed-off-by: Oliver Herms <oliver.peter.herms@gmail.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/20201103104133.GA1573211@tws
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/sit.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1128,7 +1128,6 @@ static void ipip6_tunnel_bind_dev(struct
+ if (tdev && !netif_is_l3_master(tdev)) {
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+
+- dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
+ dev->mtu = tdev->mtu - t_hlen;
+ if (dev->mtu < IPV6_MIN_MTU)
+ dev->mtu = IPV6_MIN_MTU;
+@@ -1426,7 +1425,6 @@ static void ipip6_tunnel_setup(struct ne
+ dev->priv_destructor = ipip6_dev_free;
+
+ dev->type = ARPHRD_SIT;
+- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = IP6_MAX_MTU - t_hlen;
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sun, 8 Nov 2020 19:49:59 +0100
+Subject: mptcp: provide rmem[0] limit
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 989ef49bdf100cc772b3a8737089df36b1ab1e30 ]
+
+The mptcp proto struct currently does not provide the
+required limit for forward memory scheduling. Under
+pressure sk_rmem_schedule() will unconditionally try
+to use such field and will oops.
+
+Address the issue inheriting the tcp limit, as we already
+do for the wmem one.
+
+Fixes: 9c3f94e1681b ("mptcp: add missing memory scheduling in the rx path")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Link: https://lore.kernel.org/r/37af798bd46f402fb7c79f57ebbdd00614f5d7fa.1604861097.git.pabeni@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2122,6 +2122,7 @@ static struct proto mptcp_prot = {
+ .memory_pressure = &tcp_memory_pressure,
+ .stream_memory_free = mptcp_memory_free,
+ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
++ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
+ .sysctl_mem = sysctl_tcp_mem,
+ .obj_size = sizeof(struct mptcp_sock),
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Ursula Braun <ubraun@linux.ibm.com>
+Date: Mon, 9 Nov 2020 08:57:05 +0100
+Subject: net/af_iucv: fix null pointer dereference on shutdown
+
+From: Ursula Braun <ubraun@linux.ibm.com>
+
+[ Upstream commit 4031eeafa71eaf22ae40a15606a134ae86345daf ]
+
+syzbot reported the following KASAN finding:
+
+BUG: KASAN: nullptr-dereference in iucv_send_ctrl+0x390/0x3f0 net/iucv/af_iucv.c:385
+Read of size 2 at addr 000000000000021e by task syz-executor907/519
+
+CPU: 0 PID: 519 Comm: syz-executor907 Not tainted 5.9.0-syzkaller-07043-gbcf9877ad213 #0
+Hardware name: IBM 3906 M04 701 (KVM/Linux)
+Call Trace:
+ [<00000000c576af60>] unwind_start arch/s390/include/asm/unwind.h:65 [inline]
+ [<00000000c576af60>] show_stack+0x180/0x228 arch/s390/kernel/dumpstack.c:135
+ [<00000000c9dcd1f8>] __dump_stack lib/dump_stack.c:77 [inline]
+ [<00000000c9dcd1f8>] dump_stack+0x268/0x2f0 lib/dump_stack.c:118
+ [<00000000c5fed016>] print_address_description.constprop.0+0x5e/0x218 mm/kasan/report.c:383
+ [<00000000c5fec82a>] __kasan_report mm/kasan/report.c:517 [inline]
+ [<00000000c5fec82a>] kasan_report+0x11a/0x168 mm/kasan/report.c:534
+ [<00000000c98b5b60>] iucv_send_ctrl+0x390/0x3f0 net/iucv/af_iucv.c:385
+ [<00000000c98b6262>] iucv_sock_shutdown+0x44a/0x4c0 net/iucv/af_iucv.c:1457
+ [<00000000c89d3a54>] __sys_shutdown+0x12c/0x1c8 net/socket.c:2204
+ [<00000000c89d3b70>] __do_sys_shutdown net/socket.c:2212 [inline]
+ [<00000000c89d3b70>] __s390x_sys_shutdown+0x38/0x48 net/socket.c:2210
+ [<00000000c9e36eac>] system_call+0xe0/0x28c arch/s390/kernel/entry.S:415
+
+There is nothing to shutdown if a connection has never been established.
+Besides that iucv->hs_dev is not yet initialized if a socket is in
+IUCV_OPEN state and iucv->path is not yet initialized if socket is in
+IUCV_BOUND state.
+So, just skip the shutdown calls for a socket in these states.
+
+Fixes: eac3731bd04c ("[S390]: Add AF_IUCV socket support")
+Fixes: 82492a355fac ("af_iucv: add shutdown for HS transport")
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
+[jwi: correct one Fixes tag]
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/iucv/af_iucv.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1434,7 +1434,8 @@ static int iucv_sock_shutdown(struct soc
+ break;
+ }
+
+- if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
++ if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
++ sk->sk_state == IUCV_CONNECTED) {
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ txmsg.class = 0;
+ txmsg.tag = 0;
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Alexander Lobakin <alobakin@pm.me>
+Date: Wed, 11 Nov 2020 20:45:38 +0000
+Subject: net: udp: fix IP header access and skb lookup on Fast/frag0 UDP GRO
+
+From: Alexander Lobakin <alobakin@pm.me>
+
+[ Upstream commit 55e729889bb07d68ab071660ce3f5e7a7872ebe8 ]
+
+udp{4,6}_lib_lookup_skb() use ip{,v6}_hdr() to get IP header of the
+packet. While it's probably OK for non-frag0 paths, this helpers
+will also point to junk on Fast/frag0 GRO when all headers are
+located in frags. As a result, sk/skb lookup may fail or give wrong
+results. To support both GRO modes, skb_gro_network_header() might
+be used. To not modify original functions, add private versions of
+udp{4,6}_lib_lookup_skb() only to perform correct sk lookups on GRO.
+
+Present since the introduction of "application-level" UDP GRO
+in 4.7-rc1.
+
+Misc: replace totally unneeded ternaries with plain ifs.
+
+Fixes: a6024562ffd7 ("udp: Add GRO functions to UDP socket")
+Suggested-by: Willem de Bruijn <willemb@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Alexander Lobakin <alobakin@pm.me>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp_offload.c | 17 +++++++++++++++--
+ net/ipv6/udp_offload.c | 17 +++++++++++++++--
+ 2 files changed, 30 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -500,12 +500,22 @@ out:
+ }
+ EXPORT_SYMBOL(udp_gro_receive);
+
++static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
++ __be16 dport)
++{
++ const struct iphdr *iph = skb_gro_network_header(skb);
++
++ return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
++ iph->daddr, dport, inet_iif(skb),
++ inet_sdif(skb), &udp_table, NULL);
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+ struct udphdr *uh = udp_gro_udphdr(skb);
++ struct sock *sk = NULL;
+ struct sk_buff *pp;
+- struct sock *sk;
+
+ if (unlikely(!uh))
+ goto flush;
+@@ -523,7 +533,10 @@ struct sk_buff *udp4_gro_receive(struct
+ skip:
+ NAPI_GRO_CB(skb)->is_ipv6 = 0;
+ rcu_read_lock();
+- sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
++
++ if (static_branch_unlikely(&udp_encap_needed_key))
++ sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
++
+ pp = udp_gro_receive(head, skb, uh, sk);
+ rcu_read_unlock();
+ return pp;
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -111,12 +111,22 @@ out:
+ return segs;
+ }
+
++static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
++ __be16 dport)
++{
++ const struct ipv6hdr *iph = skb_gro_network_header(skb);
++
++ return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
++ &iph->daddr, dport, inet6_iif(skb),
++ inet6_sdif(skb), &udp_table, NULL);
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+ struct udphdr *uh = udp_gro_udphdr(skb);
++ struct sock *sk = NULL;
+ struct sk_buff *pp;
+- struct sock *sk;
+
+ if (unlikely(!uh))
+ goto flush;
+@@ -135,7 +145,10 @@ struct sk_buff *udp6_gro_receive(struct
+ skip:
+ NAPI_GRO_CB(skb)->is_ipv6 = 1;
+ rcu_read_lock();
+- sk = static_branch_unlikely(&udpv6_encap_needed_key) ? udp6_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
++
++ if (static_branch_unlikely(&udpv6_encap_needed_key))
++ sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
++
+ pp = udp_gro_receive(head, skb, uh, sk);
+ rcu_read_unlock();
+ return pp;
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Alexander Lobakin <alobakin@pm.me>
+Date: Wed, 11 Nov 2020 20:45:25 +0000
+Subject: net: udp: fix UDP header access on Fast/frag0 UDP GRO
+
+From: Alexander Lobakin <alobakin@pm.me>
+
+[ Upstream commit 4b1a86281cc1d0de46df3ad2cb8c1f86ac07681c ]
+
+UDP GRO uses udp_hdr(skb) in its .gro_receive() callback. While it's
+probably OK for non-frag0 paths (when all headers or even the entire
+frame are already in skb head), this inline points to junk when
+using Fast GRO (napi_gro_frags() or napi_gro_receive() with only
+Ethernet header in skb head and all the rest in the frags) and breaks
+GRO packet compilation and the packet flow itself.
+To support both modes, skb_gro_header_fast() + skb_gro_header_slow()
+are typically used. UDP even has an inline helper that makes use of
+them, udp_gro_udphdr(). Use that instead of troublemaking udp_hdr()
+to get rid of the out-of-order delivers.
+
+Present since the introduction of plain UDP GRO in 5.0-rc1.
+
+Fixes: e20cf8d3f1f7 ("udp: implement GRO for plain UDP sockets.")
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Alexander Lobakin <alobakin@pm.me>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp_offload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -366,7 +366,7 @@ out:
+ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ struct sk_buff *skb)
+ {
+- struct udphdr *uh = udp_hdr(skb);
++ struct udphdr *uh = udp_gro_udphdr(skb);
+ struct sk_buff *pp = NULL;
+ struct udphdr *uh2;
+ struct sk_buff *p;
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Mao Wenan <wenan.mao@linux.alibaba.com>
+Date: Tue, 10 Nov 2020 08:16:31 +0800
+Subject: net: Update window_clamp if SOCK_RCVBUF is set
+
+From: Mao Wenan <wenan.mao@linux.alibaba.com>
+
+[ Upstream commit 909172a149749242990a6e64cb55d55460d4e417 ]
+
+When net.ipv4.tcp_syncookies=1 and syn flood is happened,
+cookie_v4_check or cookie_v6_check tries to redo what
+tcp_v4_send_synack or tcp_v6_send_synack did,
+rsk_window_clamp will be changed if SOCK_RCVBUF is set,
+which will make rcv_wscale is different, the client
+still operates with initial window scale and can overshot
+granted window, the client use the initial scale but local
+server use new scale to advertise window value, and session
+work abnormally.
+
+Fixes: e88c64f0a425 ("tcp: allow effective reduction of TCP's rcv-buffer via setsockopt")
+Signed-off-by: Mao Wenan <wenan.mao@linux.alibaba.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/1604967391-123737-1-git-send-email-wenan.mao@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/syncookies.c | 9 +++++++--
+ net/ipv6/syncookies.c | 10 ++++++++--
+ 2 files changed, 15 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -331,7 +331,7 @@ struct sock *cookie_v4_check(struct sock
+ __u32 cookie = ntohl(th->ack_seq) - 1;
+ struct sock *ret = sk;
+ struct request_sock *req;
+- int mss;
++ int full_space, mss;
+ struct rtable *rt;
+ __u8 rcv_wscale;
+ struct flowi4 fl4;
+@@ -427,8 +427,13 @@ struct sock *cookie_v4_check(struct sock
+
+ /* Try to redo what tcp_v4_send_synack did. */
+ req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
++ /* limit the window selection if the user enforce a smaller rx buffer */
++ full_space = tcp_full_space(sk);
++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++ req->rsk_window_clamp = full_space;
+
+- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++ tcp_select_initial_window(sk, full_space, req->mss,
+ &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(&rt->dst, RTAX_INITRWND));
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -136,7 +136,7 @@ struct sock *cookie_v6_check(struct sock
+ __u32 cookie = ntohl(th->ack_seq) - 1;
+ struct sock *ret = sk;
+ struct request_sock *req;
+- int mss;
++ int full_space, mss;
+ struct dst_entry *dst;
+ __u8 rcv_wscale;
+ u32 tsoff = 0;
+@@ -241,7 +241,13 @@ struct sock *cookie_v6_check(struct sock
+ }
+
+ req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++ /* limit the window selection if the user enforce a smaller rx buffer */
++ full_space = tcp_full_space(sk);
++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++ req->rsk_window_clamp = full_space;
++
++ tcp_select_initial_window(sk, full_space, req->mss,
+ &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Martin Schiller <ms@dev.tdt.de>
+Date: Mon, 9 Nov 2020 07:54:49 +0100
+Subject: net/x25: Fix null-ptr-deref in x25_connect
+
+From: Martin Schiller <ms@dev.tdt.de>
+
+[ Upstream commit 361182308766a265b6c521879b34302617a8c209 ]
+
+This fixes a regression for blocking connects introduced by commit
+4becb7ee5b3d ("net/x25: Fix x25_neigh refcnt leak when x25 disconnect").
+
+The x25->neighbour is already set to "NULL" by x25_disconnect() now,
+while a blocking connect is waiting in
+x25_wait_for_connection_establishment(). Therefore x25->neighbour must
+not be accessed here again and x25->state is also already set to
+X25_STATE_0 by x25_disconnect().
+
+Fixes: 4becb7ee5b3d ("net/x25: Fix x25_neigh refcnt leak when x25 disconnect")
+Signed-off-by: Martin Schiller <ms@dev.tdt.de>
+Reviewed-by: Xie He <xie.he.0141@gmail.com>
+Link: https://lore.kernel.org/r/20201109065449.9014-1-ms@dev.tdt.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/x25/af_x25.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -825,7 +825,7 @@ static int x25_connect(struct socket *so
+ sock->state = SS_CONNECTED;
+ rc = 0;
+ out_put_neigh:
+- if (rc) {
++ if (rc && x25->neighbour) {
+ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
nfsv4.2-fix-failure-to-unregister-shrinker.patch
pinctrl-amd-use-higher-precision-for-512-rtcclk.patch
pinctrl-amd-fix-incorrect-way-to-disable-debounce-filter.patch
+swiotlb-fix-x86-don-t-panic-if-can-not-alloc-buffer-for-swiotlb.patch
+cpufreq-introduce-governor-flags.patch
+cpufreq-introduce-cpufreq_gov_strict_target.patch
+cpufreq-add-strict_target-to-struct-cpufreq_policy.patch
+cpufreq-intel_pstate-take-cpufreq_gov_strict_target-into-account.patch
+ethtool-netlink-add-missing-netdev_features_change-call.patch
+ipv6-set-sit-tunnel-hard_header_len-to-zero.patch
+net-af_iucv-fix-null-pointer-dereference-on-shutdown.patch
+net-udp-fix-ip-header-access-and-skb-lookup-on-fast-frag0-udp-gro.patch
+net-udp-fix-udp-header-access-on-fast-frag0-udp-gro.patch
+net-update-window_clamp-if-sock_rcvbuf-is-set.patch
+net-x25-fix-null-ptr-deref-in-x25_connect.patch
+tipc-fix-memory-leak-in-tipc_topsrv_start.patch
+devlink-avoid-overwriting-port-attributes-of-registered-port.patch
+limit.patch
+tunnels-fix-off-by-one-in-lower-mtu-bounds-for-icmp-icmpv6-replies.patch
--- /dev/null
+From e9696d259d0fb5d239e8c28ca41089838ea76d13 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Date: Mon, 26 Oct 2020 17:02:14 -0700
+Subject: swiotlb: fix "x86: Don't panic if can not alloc buffer for swiotlb"
+
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+
+commit e9696d259d0fb5d239e8c28ca41089838ea76d13 upstream.
+
+kernel/dma/swiotlb.c:swiotlb_init gets called first and tries to
+allocate a buffer for the swiotlb. It does so by calling
+
+ memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
+
+If the allocation must fail, no_iotlb_memory is set.
+
+Later during initialization swiotlb-xen comes in
+(drivers/xen/swiotlb-xen.c:xen_swiotlb_init) and given that io_tlb_start
+is != 0, it thinks the memory is ready to use when actually it is not.
+
+When the swiotlb is actually needed, swiotlb_tbl_map_single gets called
+and since no_iotlb_memory is set the kernel panics.
+
+Instead, if swiotlb-xen.c:xen_swiotlb_init knew the swiotlb hadn't been
+initialized, it would do the initialization itself, which might still
+succeed.
+
+Fix the panic by setting io_tlb_start to 0 on swiotlb initialization
+failure, and also by setting no_iotlb_memory to false on swiotlb
+initialization success.
+
+Fixes: ac2cbab21f31 ("x86: Don't panic if can not alloc buffer for swiotlb")
+
+Reported-by: Elliott Mitchell <ehem+xen@m5p.com>
+Tested-by: Elliott Mitchell <ehem+xen@m5p.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/dma/swiotlb.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -231,6 +231,7 @@ int __init swiotlb_init_with_tbl(char *t
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
++ no_iotlb_memory = false;
+
+ if (verbose)
+ swiotlb_print_info();
+@@ -262,9 +263,11 @@ swiotlb_init(int verbose)
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+ return;
+
+- if (io_tlb_start)
++ if (io_tlb_start) {
+ memblock_free_early(io_tlb_start,
+ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
++ io_tlb_start = 0;
++ }
+ pr_warn("Cannot allocate buffer");
+ no_iotlb_memory = true;
+ }
+@@ -362,6 +365,7 @@ swiotlb_late_init_with_tbl(char *tlb, un
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
++ no_iotlb_memory = false;
+
+ swiotlb_print_info();
+
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Wang Hai <wanghai38@huawei.com>
+Date: Mon, 9 Nov 2020 22:09:13 +0800
+Subject: tipc: fix memory leak in tipc_topsrv_start()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit fa6882c63621821f73cc806f291208e1c6ea6187 ]
+
+kmemleak reports a memory leak as follows:
+
+unreferenced object 0xffff88810a596800 (size 512):
+ comm "ip", pid 21558, jiffies 4297568990 (age 112.120s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 ad 4e ad de ff ff ff ff 00 00 00 00 .....N..........
+ ff ff ff ff ff ff ff ff 00 83 60 b0 ff ff ff ff ..........`.....
+ backtrace:
+ [<0000000022bbe21f>] tipc_topsrv_init_net+0x1f3/0xa70
+ [<00000000fe15ddf7>] ops_init+0xa8/0x3c0
+ [<00000000138af6f2>] setup_net+0x2de/0x7e0
+ [<000000008c6807a3>] copy_net_ns+0x27d/0x530
+ [<000000006b21adbd>] create_new_namespaces+0x382/0xa30
+ [<00000000bb169746>] unshare_nsproxy_namespaces+0xa1/0x1d0
+ [<00000000fe2e42bc>] ksys_unshare+0x39c/0x780
+ [<0000000009ba3b19>] __x64_sys_unshare+0x2d/0x40
+ [<00000000614ad866>] do_syscall_64+0x56/0xa0
+ [<00000000a1b5ca3c>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+'srv' is malloced in tipc_topsrv_start() but not freed before
+leaving in the error handling cases. We need to free it.
+
+Fixes: 5c45ab24ac77 ("tipc: make struct tipc_server private for server.c")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Link: https://lore.kernel.org/r/20201109140913.47370-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/topsrv.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -665,12 +665,18 @@ static int tipc_topsrv_start(struct net
+
+ ret = tipc_topsrv_work_start(srv);
+ if (ret < 0)
+- return ret;
++ goto err_start;
+
+ ret = tipc_topsrv_create_listener(srv);
+ if (ret < 0)
+- tipc_topsrv_work_stop(srv);
++ goto err_create;
+
++ return 0;
++
++err_create:
++ tipc_topsrv_work_stop(srv);
++err_start:
++ kfree(srv);
+ return ret;
+ }
+
--- /dev/null
+From foo@baz Mon Nov 16 07:48:29 PM CET 2020
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Fri, 6 Nov 2020 17:59:52 +0100
+Subject: tunnels: Fix off-by-one in lower MTU bounds for ICMP/ICMPv6 replies
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit 77a2d673d5c9d1d359b5652ff75043273c5dea28 ]
+
+Jianlin reports that a bridged IPv6 VXLAN endpoint, carrying IPv6
+packets over a link with a PMTU estimation of exactly 1350 bytes,
+won't trigger ICMPv6 Packet Too Big replies when the encapsulated
+datagrams exceed said PMTU value. VXLAN over IPv6 adds 70 bytes of
+overhead, so an ICMPv6 reply indicating 1280 bytes as inner MTU
+would be legitimate and expected.
+
+This comes from an off-by-one error I introduced in checks added
+as part of commit 4cb47a8644cc ("tunnels: PMTU discovery support
+for directly bridged IP packets"), whose purpose was to prevent
+sending ICMPv6 Packet Too Big messages with an MTU lower than the
+smallest permissible IPv6 link MTU, i.e. 1280 bytes.
+
+In iptunnel_pmtud_check_icmpv6(), avoid triggering a reply only if
+the advertised MTU would be less than, and not equal to, 1280 bytes.
+
+Also fix the analogous comparison for IPv4, that is, skip the ICMP
+reply only if the resulting MTU is strictly less than 576 bytes.
+
+This becomes apparent while running the net/pmtu.sh bridged VXLAN
+or GENEVE selftests with adjusted lower-link MTU values. Using
+e.g. GENEVE, setting ll_mtu to the values reported below, in the
+test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() test
+function, we can see failures on the following tests:
+
+ test | ll_mtu
+ -------------------------------|--------
+ pmtu_ipv4_br_geneve4_exception | 626
+ pmtu_ipv6_br_geneve4_exception | 1330
+ pmtu_ipv6_br_geneve6_exception | 1350
+
+owing to the different tunneling overheads implied by the
+corresponding configurations.
+
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Link: https://lore.kernel.org/r/4f5fc2f33bfdf8409549fafd4f952b008bf04d63.1604681709.git.sbrivio@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_tunnel_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -263,7 +263,7 @@ static int iptunnel_pmtud_check_icmp(str
+ const struct icmphdr *icmph = icmp_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+- if (mtu <= 576 || iph->frag_off != htons(IP_DF))
++ if (mtu < 576 || iph->frag_off != htons(IP_DF))
+ return 0;
+
+ if (ipv4_is_lbcast(iph->daddr) || ipv4_is_multicast(iph->daddr) ||
+@@ -359,7 +359,7 @@ static int iptunnel_pmtud_check_icmpv6(s
+ __be16 frag_off;
+ int offset;
+
+- if (mtu <= IPV6_MIN_MTU)
++ if (mtu < IPV6_MIN_MTU)
+ return 0;
+
+ if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST ||