--- /dev/null
+From 37bd9e803daea816f2dc2c8f6dc264097eb3ebd2 Mon Sep 17 00:00:00 2001
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Date: Tue, 21 Jul 2020 22:24:24 -0700
+Subject: Input: ati_remote2 - add missing newlines when printing module parameters
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+commit 37bd9e803daea816f2dc2c8f6dc264097eb3ebd2 upstream.
+
+When I cat some module parameters by sysfs, it displays as follows. It's
+better to add a newline for easy reading.
+
+root@syzkaller:~# cat /sys/module/ati_remote2/parameters/mode_mask
+0x1froot@syzkaller:~# cat /sys/module/ati_remote2/parameters/channel_mask
+0xffffroot@syzkaller:~#
+
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Link: https://lore.kernel.org/r/20200720092148.9320-1-wangxiongfeng2@huawei.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/misc/ati_remote2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/input/misc/ati_remote2.c
++++ b/drivers/input/misc/ati_remote2.c
+@@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(
+ {
+ pr_debug("%s()\n", __func__);
+
+- return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
++ return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg);
+ }
+
+ static int ati_remote2_set_mode_mask(const char *val,
+@@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(cha
+ {
+ pr_debug("%s()\n", __func__);
+
+- return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
++ return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg);
+ }
+
+ static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
--- /dev/null
+From a7809ff90ce6c48598d3c4ab54eb599bec1e9c42 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Date: Sat, 26 Sep 2020 22:26:25 +0530
+Subject: net: qrtr: ns: Protect radix_tree_deref_slot() using rcu read locks
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+commit a7809ff90ce6c48598d3c4ab54eb599bec1e9c42 upstream.
+
+The rcu read locks are needed to avoid potential race condition while
+dereferencing radix tree from multiple threads. The issue was identified
+by syzbot. Below is the crash report:
+
+=============================
+WARNING: suspicious RCU usage
+5.7.0-syzkaller #0 Not tainted
+-----------------------------
+include/linux/radix-tree.h:176 suspicious rcu_dereference_check() usage!
+
+other info that might help us debug this:
+
+rcu_scheduler_active = 2, debug_locks = 1
+2 locks held by kworker/u4:1/21:
+ #0: ffff88821b097938 ((wq_completion)qrtr_ns_handler){+.+.}-{0:0}, at: spin_unlock_irq include/linux/spinlock.h:403 [inline]
+ #0: ffff88821b097938 ((wq_completion)qrtr_ns_handler){+.+.}-{0:0}, at: process_one_work+0x6df/0xfd0 kernel/workqueue.c:2241
+ #1: ffffc90000dd7d80 ((work_completion)(&qrtr_ns.work)){+.+.}-{0:0}, at: process_one_work+0x71e/0xfd0 kernel/workqueue.c:2243
+
+stack backtrace:
+CPU: 0 PID: 21 Comm: kworker/u4:1 Not tainted 5.7.0-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: qrtr_ns_handler qrtr_ns_worker
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x1e9/0x30e lib/dump_stack.c:118
+ radix_tree_deref_slot include/linux/radix-tree.h:176 [inline]
+ ctrl_cmd_new_lookup net/qrtr/ns.c:558 [inline]
+ qrtr_ns_worker+0x2aff/0x4500 net/qrtr/ns.c:674
+ process_one_work+0x76e/0xfd0 kernel/workqueue.c:2268
+ worker_thread+0xa7f/0x1450 kernel/workqueue.c:2414
+ kthread+0x353/0x380 kernel/kthread.c:268
+
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Reported-and-tested-by: syzbot+0f84f6eed90503da72fc@syzkaller.appspotmail.com
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/qrtr/ns.c | 34 +++++++++++++++++++++++++---------
+ 1 file changed, 25 insertions(+), 9 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -193,12 +193,13 @@ static int announce_servers(struct socka
+ struct qrtr_server *srv;
+ struct qrtr_node *node;
+ void __rcu **slot;
+- int ret;
++ int ret = 0;
+
+ node = node_get(qrtr_ns.local_node);
+ if (!node)
+ return 0;
+
++ rcu_read_lock();
+ /* Announce the list of servers registered in this node */
+ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+@@ -206,11 +207,14 @@ static int announce_servers(struct socka
+ ret = service_announce_new(sq, srv);
+ if (ret < 0) {
+ pr_err("failed to announce new service\n");
+- return ret;
++ goto err_out;
+ }
+ }
+
+- return 0;
++err_out:
++ rcu_read_unlock();
++
++ return ret;
+ }
+
+ static struct qrtr_server *server_add(unsigned int service,
+@@ -335,7 +339,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+ struct qrtr_node *node;
+ void __rcu **slot;
+ struct kvec iv;
+- int ret;
++ int ret = 0;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+@@ -344,11 +348,13 @@ static int ctrl_cmd_bye(struct sockaddr_
+ if (!node)
+ return 0;
+
++ rcu_read_lock();
+ /* Advertise removal of this client to all servers of remote node */
+ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+ server_del(node, srv->port);
+ }
++ rcu_read_unlock();
+
+ /* Advertise the removal of this client to all local servers */
+ local_node = node_get(qrtr_ns.local_node);
+@@ -359,6 +365,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
+ pkt.client.node = cpu_to_le32(from->sq_node);
+
++ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+
+@@ -372,11 +379,14 @@ static int ctrl_cmd_bye(struct sockaddr_
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0) {
+ pr_err("failed to send bye cmd\n");
+- return ret;
++ goto err_out;
+ }
+ }
+
+- return 0;
++err_out:
++ rcu_read_unlock();
++
++ return ret;
+ }
+
+ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
+@@ -394,7 +404,7 @@ static int ctrl_cmd_del_client(struct so
+ struct list_head *li;
+ void __rcu **slot;
+ struct kvec iv;
+- int ret;
++ int ret = 0;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+@@ -434,6 +444,7 @@ static int ctrl_cmd_del_client(struct so
+ pkt.client.node = cpu_to_le32(node_id);
+ pkt.client.port = cpu_to_le32(port);
+
++ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+
+@@ -447,11 +458,14 @@ static int ctrl_cmd_del_client(struct so
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0) {
+ pr_err("failed to send del client cmd\n");
+- return ret;
++ goto err_out;
+ }
+ }
+
+- return 0;
++err_out:
++ rcu_read_unlock();
++
++ return ret;
+ }
+
+ static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
+@@ -554,6 +568,7 @@ static int ctrl_cmd_new_lookup(struct so
+ filter.service = service;
+ filter.instance = instance;
+
++ rcu_read_lock();
+ radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) {
+ node = radix_tree_deref_slot(node_slot);
+
+@@ -568,6 +583,7 @@ static int ctrl_cmd_new_lookup(struct so
+ lookup_notify(from, srv, true);
+ }
+ }
++ rcu_read_unlock();
+
+ /* Empty notification, to indicate end of listing */
+ lookup_notify(from, NULL, true);
--- /dev/null
+From f45a4248ea4cc13ed50618ff066849f9587226b2 Mon Sep 17 00:00:00 2001
+From: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+Date: Mon, 5 Oct 2020 18:59:58 +0530
+Subject: net: usb: rtl8150: set random MAC address when set_ethernet_addr() fails
+
+From: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+
+commit f45a4248ea4cc13ed50618ff066849f9587226b2 upstream.
+
+When get_registers() fails in set_ethernet_addr(), the uninitialized
+value of node_id gets copied over as the address.
+So, check the return value of get_registers().
+
+If get_registers() executed successfully (i.e., it returns
+sizeof(node_id)), copy over the MAC address using ether_addr_copy()
+(instead of using memcpy()).
+
+Else, if get_registers() failed instead, a randomly generated MAC
+address is set as the MAC address instead.
+
+Reported-by: syzbot+abbc768b560c84d92fd3@syzkaller.appspotmail.com
+Tested-by: syzbot+abbc768b560c84d92fd3@syzkaller.appspotmail.com
+Acked-by: Petko Manolov <petkan@nucleusys.com>
+Signed-off-by: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/usb/rtl8150.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -274,12 +274,20 @@ static int write_mii_word(rtl8150_t * de
+ return 1;
+ }
+
+-static inline void set_ethernet_addr(rtl8150_t * dev)
++static void set_ethernet_addr(rtl8150_t *dev)
+ {
+- u8 node_id[6];
++ u8 node_id[ETH_ALEN];
++ int ret;
+
+- get_registers(dev, IDR, sizeof(node_id), node_id);
+- memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id));
++ ret = get_registers(dev, IDR, sizeof(node_id), node_id);
++
++ if (ret == sizeof(node_id)) {
++ ether_addr_copy(dev->netdev->dev_addr, node_id);
++ } else {
++ eth_hw_addr_random(dev->netdev);
++ netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
++ dev->netdev->dev_addr);
++ }
+ }
+
+ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
--- /dev/null
+From 0fedc63fadf0404a729e73a35349481c8009c02f Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Tue, 22 Sep 2020 20:56:24 -0700
+Subject: net_sched: commit action insertions together
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 0fedc63fadf0404a729e73a35349481c8009c02f upstream.
+
+syzbot is able to trigger a failure case inside the loop in
+tcf_action_init(), and when this happens we clean up with
+tcf_action_destroy(). But, as these actions are already inserted
+into the global IDR, other parallel process could free them
+before tcf_action_destroy(), then we will trigger a use-after-free.
+
+Fix this by deferring the insertions even later, after the loop,
+and committing all the insertions in a separate loop, so we will
+never fail in the middle of the insertions any more.
+
+One side effect is that the window between allocation and final
+insertion becomes larger, now it is more likely that the loop in
+tcf_del_walker() sees the placeholder -EBUSY pointer. So we have
+to check for error pointer in tcf_del_walker().
+
+Reported-and-tested-by: syzbot+2287853d392e4b42374a@syzkaller.appspotmail.com
+Fixes: 0190c1d452a9 ("net: sched: atomically check-allocate action")
+Cc: Vlad Buslov <vladbu@mellanox.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sched/act_api.c | 32 +++++++++++++++++++++++---------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -307,6 +307,8 @@ static int tcf_del_walker(struct tcf_idr
+
+ mutex_lock(&idrinfo->lock);
+ idr_for_each_entry_ul(idr, p, tmp, id) {
++ if (IS_ERR(p))
++ continue;
+ ret = tcf_idr_release_unsafe(p);
+ if (ret == ACT_P_DELETED) {
+ module_put(ops->owner);
+@@ -891,14 +893,24 @@ static const struct nla_policy tcf_actio
+ [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
+ };
+
+-static void tcf_idr_insert(struct tc_action *a)
++static void tcf_idr_insert_many(struct tc_action *actions[])
+ {
+- struct tcf_idrinfo *idrinfo = a->idrinfo;
++ int i;
+
+- mutex_lock(&idrinfo->lock);
+- /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+- WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
+- mutex_unlock(&idrinfo->lock);
++ for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
++ struct tc_action *a = actions[i];
++ struct tcf_idrinfo *idrinfo;
++
++ if (!a)
++ continue;
++ idrinfo = a->idrinfo;
++ mutex_lock(&idrinfo->lock);
++ /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
++ * it is just created, otherwise this is just a nop.
++ */
++ idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
++ mutex_unlock(&idrinfo->lock);
++ }
+ }
+
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+@@ -995,9 +1007,6 @@ struct tc_action *tcf_action_init_1(stru
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (err == ACT_P_CREATED)
+- tcf_idr_insert(a);
+-
+ if (!name && tb[TCA_ACT_COOKIE])
+ tcf_set_action_cookie(&a->act_cookie, cookie);
+
+@@ -1053,6 +1062,11 @@ int tcf_action_init(struct net *net, str
+ actions[i - 1] = act;
+ }
+
++ /* We have to commit them all together, because if any error happened in
++ * between, we could not handle the failure gracefully.
++ */
++ tcf_idr_insert_many(actions);
++
+ *attr_size = tcf_action_full_attrs_size(sz);
+ return i - 1;
+
--- /dev/null
+From e49d8c22f1261c43a986a7fdbf677ac309682a07 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Tue, 22 Sep 2020 20:56:23 -0700
+Subject: net_sched: defer tcf_idr_insert() in tcf_action_init_1()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit e49d8c22f1261c43a986a7fdbf677ac309682a07 upstream.
+
+All TC actions call tcf_idr_insert() for new action at the end
+of their ->init(), so we can actually move it to a central place
+in tcf_action_init_1().
+
+And once the action is inserted into the global IDR, other parallel
+process could free it immediately as its refcnt is still 1, so we can
+not fail after this, we need to move it after the goto action
+validation to avoid handling the failure case after insertion.
+
+This is found during code review, is not directly triggered by syzbot.
+And this prepares for the next patch.
+
+Cc: Vlad Buslov <vladbu@mellanox.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/act_api.h | 2 --
+ net/sched/act_api.c | 38 ++++++++++++++++++++------------------
+ net/sched/act_bpf.c | 4 +---
+ net/sched/act_connmark.c | 1 -
+ net/sched/act_csum.c | 3 ---
+ net/sched/act_ct.c | 2 --
+ net/sched/act_ctinfo.c | 3 ---
+ net/sched/act_gact.c | 2 --
+ net/sched/act_gate.c | 3 ---
+ net/sched/act_ife.c | 3 ---
+ net/sched/act_ipt.c | 2 --
+ net/sched/act_mirred.c | 2 --
+ net/sched/act_mpls.c | 2 --
+ net/sched/act_nat.c | 3 ---
+ net/sched/act_pedit.c | 2 --
+ net/sched/act_police.c | 2 --
+ net/sched/act_sample.c | 2 --
+ net/sched/act_simple.c | 2 --
+ net/sched/act_skbedit.c | 2 --
+ net/sched/act_skbmod.c | 2 --
+ net/sched/act_tunnel_key.c | 3 ---
+ net/sched/act_vlan.c | 2 --
+ 22 files changed, 21 insertions(+), 66 deletions(-)
+
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -166,8 +166,6 @@ int tcf_idr_create_from_flags(struct tc_
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
+-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
+-
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -467,17 +467,6 @@ int tcf_idr_create_from_flags(struct tc_
+ }
+ EXPORT_SYMBOL(tcf_idr_create_from_flags);
+
+-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
+-{
+- struct tcf_idrinfo *idrinfo = tn->idrinfo;
+-
+- mutex_lock(&idrinfo->lock);
+- /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+- WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
+- mutex_unlock(&idrinfo->lock);
+-}
+-EXPORT_SYMBOL(tcf_idr_insert);
+-
+ /* Cleanup idr index that was allocated but not initialized. */
+
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
+@@ -902,6 +891,16 @@ static const struct nla_policy tcf_actio
+ [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
+ };
+
++static void tcf_idr_insert(struct tc_action *a)
++{
++ struct tcf_idrinfo *idrinfo = a->idrinfo;
++
++ mutex_lock(&idrinfo->lock);
++ /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
++ WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
++ mutex_unlock(&idrinfo->lock);
++}
++
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind,
+@@ -989,6 +988,16 @@ struct tc_action *tcf_action_init_1(stru
+ if (err < 0)
+ goto err_mod;
+
++ if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
++ !rcu_access_pointer(a->goto_chain)) {
++ tcf_action_destroy_1(a, bind);
++ NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (err == ACT_P_CREATED)
++ tcf_idr_insert(a);
++
+ if (!name && tb[TCA_ACT_COOKIE])
+ tcf_set_action_cookie(&a->act_cookie, cookie);
+
+@@ -1002,13 +1011,6 @@ struct tc_action *tcf_action_init_1(stru
+ if (err != ACT_P_CREATED)
+ module_put(a_o->owner);
+
+- if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+- !rcu_access_pointer(a->goto_chain)) {
+- tcf_action_destroy_1(a, bind);
+- NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
+- return ERR_PTR(-EINVAL);
+- }
+-
+ return a;
+
+ err_mod:
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -365,9 +365,7 @@ static int tcf_bpf_init(struct net *net,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (res == ACT_P_CREATED) {
+- tcf_idr_insert(tn, *act);
+- } else {
++ if (res != ACT_P_CREATED) {
+ /* make sure the program being replaced is no longer executing */
+ synchronize_rcu();
+ tcf_bpf_cfg_cleanup(&old);
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -139,7 +139,6 @@ static int tcf_connmark_init(struct net
+ ci->net = net;
+ ci->zone = parm->zone;
+
+- tcf_idr_insert(tn, *a);
+ ret = ACT_P_CREATED;
+ } else if (ret > 0) {
+ ci = to_connmark(*a);
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -110,9 +110,6 @@ static int tcf_csum_init(struct net *net
+ if (params_new)
+ kfree_rcu(params_new, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1293,8 +1293,6 @@ static int tcf_ct_init(struct net *net,
+ tcf_chain_put_by_act(goto_ch);
+ if (params)
+ call_rcu(¶ms->rcu, tcf_ct_params_free);
+- if (res == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+
+ return res;
+
+--- a/net/sched/act_ctinfo.c
++++ b/net/sched/act_ctinfo.c
+@@ -269,9 +269,6 @@ static int tcf_ctinfo_init(struct net *n
+ if (cp_new)
+ kfree_rcu(cp_new, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+
+ put_chain:
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -140,8 +140,6 @@ static int tcf_gact_init(struct net *net
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ release_idr:
+ tcf_idr_release(*a, bind);
+--- a/net/sched/act_gate.c
++++ b/net/sched/act_gate.c
+@@ -437,9 +437,6 @@ static int tcf_gate_init(struct net *net
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+
+ chain_put:
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -627,9 +627,6 @@ static int tcf_ife_init(struct net *net,
+ if (p)
+ kfree_rcu(p, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -189,8 +189,6 @@ static int __tcf_ipt_init(struct net *ne
+ ipt->tcfi_t = t;
+ ipt->tcfi_hook = hook;
+ spin_unlock_bh(&ipt->tcf_lock);
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+
+ err3:
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -194,8 +194,6 @@ static int tcf_mirred_init(struct net *n
+ spin_lock(&mirred_list_lock);
+ list_add(&m->tcfm_list, &mirred_list);
+ spin_unlock(&mirred_list_lock);
+-
+- tcf_idr_insert(tn, *a);
+ }
+
+ return ret;
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -273,8 +273,6 @@ static int tcf_mpls_init(struct net *net
+ if (p)
+ kfree_rcu(p, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -93,9 +93,6 @@ static int tcf_nat_init(struct net *net,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+ release_idr:
+ tcf_idr_release(*a, bind);
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -238,8 +238,6 @@ static int tcf_pedit_init(struct net *ne
+ spin_unlock_bh(&p->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+
+ put_chain:
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -201,8 +201,6 @@ static int tcf_police_init(struct net *n
+ if (new)
+ kfree_rcu(new, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+
+ failure:
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -116,8 +116,6 @@ static int tcf_sample_init(struct net *n
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -157,8 +157,6 @@ static int tcf_simp_init(struct net *net
+ goto release_idr;
+ }
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -224,8 +224,6 @@ static int tcf_skbedit_init(struct net *
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -190,8 +190,6 @@ static int tcf_skbmod_init(struct net *n
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -536,9 +536,6 @@ static int tunnel_key_init(struct net *n
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+-
+ return ret;
+
+ put_chain:
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -229,8 +229,6 @@ static int tcf_vlan_init(struct net *net
+ if (p)
+ kfree_rcu(p, rcu);
+
+- if (ret == ACT_P_CREATED)
+- tcf_idr_insert(tn, *a);
+ return ret;
+ put_chain:
+ if (goto_ch)
net-bridge-fdb-don-t-flush-ext_learn-entries.patch
net-tls-race-causes-kernel-panic.patch
net-mlx5e-fix-driver-s-declaration-to-support-gre-offload.patch
+tty-vt-do-not-warn-when-huge-selection-requested.patch
+input-ati_remote2-add-missing-newlines-when-printing-module-parameters.patch
+net-usb-rtl8150-set-random-mac-address-when-set_ethernet_addr-fails.patch
+net-qrtr-ns-protect-radix_tree_deref_slot-using-rcu-read-locks.patch
+net_sched-defer-tcf_idr_insert-in-tcf_action_init_1.patch
+net_sched-commit-action-insertions-together.patch
--- /dev/null
+From 44c413d9a51752056d606bf6f312003ac1740fab Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Wed, 17 Jun 2020 17:04:44 +1000
+Subject: tty/vt: Do not warn when huge selection requested
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit 44c413d9a51752056d606bf6f312003ac1740fab upstream.
+
+The tty TIOCL_SETSEL ioctl allocates a memory buffer big enough for text
+selection area. The maximum allowed console size is
+VC_RESIZE_MAXCOL * VC_RESIZE_MAXROW == 32767*32767 == ~1GB and typical
+MAX_ORDER is set to allow allocations a lot less than that (circa 16MB).
+
+So it is quite possible to trigger huge allocation (and syzkaller just
+did that) which is going to fail (which is fine) with a backtrace in
+mm/page_alloc.c at WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)) and
+this may trigger panic (if panic_on_warn is enabled) and
+leak kernel addresses to dmesg.
+
+This passes __GFP_NOWARN to kmalloc_array to avoid unnecessary user-
+triggered WARN_ON. Note that the error is not ignored and
+the warning is still printed.
+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Link: https://lore.kernel.org/r/20200617070444.116704-1-aik@ozlabs.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/vt/selection.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -193,7 +193,7 @@ static int vc_selection_store_chars(stru
+ /* Allocate a new buffer before freeing the old one ... */
+ /* chars can take up to 4 bytes with unicode */
+ bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
+- GFP_KERNEL);
++ GFP_KERNEL | __GFP_NOWARN);
+ if (!bp) {
+ printk(KERN_WARNING "selection: kmalloc() failed\n");
+ clear_selection();