From foo@baz Sat Jan 17 18:13:02 PST 2015
From: Jiri Pirko <jiri@resnulli.us>
Date: Wed, 14 Jan 2015 18:15:30 +0100
Subject: team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin

From: Jiri Pirko <jiri@resnulli.us>

[ Upstream commit b0d11b42785b70e19bc6a3122eead3f7969a7589 ]

This patch fixes a race condition that may cause count_pending to be
set to -1, which results in an unwanted large burst of ARP messages
(in the case of "notify peers").

Consider the following scenario:

count_pending == 2
   CPU0                                        CPU1
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
                                          team_notify_peers
                                            atomic_add (adding 1 to count_pending)
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 0)
  schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to -1)

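For reference, the pre-patch logic that the trace walks through is the
tail of team_notify_peers_work() (simplified here; the full function,
including the rtnl_trylock() retry path, appears in the diff below):

	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	/* Once count_pending has underflowed to -1, this test can never
	 * see 0 again, so the work keeps rescheduling itself and the
	 * device keeps sending gratuitous ARP notifications. */
	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
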
Fix this race by using atomic_dec_if_positive - that will prevent
count_pending from going below 0.

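atomic_dec_if_positive() only stores the decrement when the result
stays non-negative, and returns the decremented value either way. A
sketch of the generic cmpxchg-loop implementation (the exact
definition varies by architecture and kernel version):

	static inline int atomic_dec_if_positive(atomic_t *v)
	{
		int c, old, dec;

		c = atomic_read(v);
		for (;;) {
			dec = c - 1;
			if (dec < 0)	/* would underflow: leave v untouched */
				break;
			old = atomic_cmpxchg(v, c, dec);
			if (old == c)	/* no race: dec was stored */
				break;
			c = old;	/* lost a race: retry with fresh value */
		}
		return dec;
	}

A negative return value thus means the counter was already 0, which is
the early-bail condition in the patch below; 0 means this was the last
pending run, and a positive value means more runs remain, matching the
new "if (val)" reschedule test.
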
Fixes: fc423ff00df3a1955441 ("team: add peer notification")
Fixes: 492b200efdd20b8fcfd ("team: add support for sending multicast rejoins")
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/net/team/team.c |   16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team
 static void team_notify_peers_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struc
 		schedule_delayed_work(&team->notify_peers.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+	if (val)
 		schedule_delayed_work(&team->notify_peers.dw,
 				      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struc
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struc
 		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+	if (val)
 		schedule_delayed_work(&team->mcast_rejoin.dw,
 				      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
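
As an aside, the no-underflow behaviour is easy to reproduce in
userspace. The hypothetical C11 demo below (dec_if_positive() and the
counter are illustrative, not kernel code) mimics the five decrement
attempts from the scenario above:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Userspace imitation of atomic_dec_if_positive(): decrement
	 * only if the result stays >= 0; return the decremented value. */
	static int dec_if_positive(atomic_int *v)
	{
		int c = atomic_load(v);

		for (;;) {
			int dec = c - 1;

			if (dec < 0)
				return dec;	/* already 0: nothing stored */
			/* on failure, c is reloaded with the current value */
			if (atomic_compare_exchange_weak(v, &c, dec))
				return dec;
		}
	}

	int main(void)
	{
		atomic_int count = 2;

		/* Five decrement attempts against a counter of 2, like
		 * the extra work runs in the race scenario above. */
		for (int i = 0; i < 5; i++)
			printf("dec_if_positive -> %d\n", dec_if_positive(&count));
		printf("final counter: %d\n", atomic_load(&count));	/* 0, not -3 */
		return 0;
	}

The three extra attempts all return -1 and leave the counter at 0,
whereas plain unconditional decrements would drive it to -3.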