/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

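/* Resize the MDB hash table.  Each entry carries two sets of hlist links
 * (hlist[ver]); the new table is populated on the flipped version index,
 * so RCU readers can keep walking the old table's links until br_mdb_free()
 * releases it after a grace period.  A fresh hash secret is drawn when the
 * rehash was triggered by a too-long chain (elasticity != 0).
 */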
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

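/* Build a self-generated IGMP membership query: an Ethernet header
 * addressed to 01:00:5e:00:00:01, an IPv4 header with ihl = 6 (i.e. a
 * 4-byte Router Alert option) destined to 224.0.0.1, and the IGMP query
 * itself.  A zero group address makes this a general query.
 */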
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	*igmp_type = ICMPV6_MGM_QUERY;
	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *grp;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}

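/* Look up a group in the hash chain for @hash.  As a side effect this
 * enforces the table limits: if the chain grows past hash_elasticity the
 * table is rehashed with a fresh secret, if the table is full it is
 * doubled up to hash_max, and if neither is possible snooping is
 * disabled.  Returns the entry, NULL if absent, or an ERR_PTR
 * (-EAGAIN after a successful rehash, telling the caller to retry).
 */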
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

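/* Record a listener for @group.  A host-side join (port == NULL) only
 * sets mp->mglist; a port join is kept in mp->ports, a list ordered by
 * descending port pointer value so each port appears at most once.  The
 * relevant timer is (re)armed with the membership interval either way.
 */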
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);
	/* Don't allow timer refresh if the router expired */
	if (port->multicast_router == MDB_RTR_TYPE_TEMP)
		port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

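/* Send one general query on behalf of our own querier, unless another
 * querier on the link is still considered active (other_query timer
 * pending).  Reschedules own_query with the startup interval until
 * multicast_startup_query_count queries have gone out, then with the
 * normal query interval.
 */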
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
		/* Don't allow timer refresh if disabling */
		if (port->multicast_router == MDB_RTR_TYPE_TEMP)
			port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

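/* Walk the group records of an IGMPv3 report.  Each record is bounds
 * checked with pskb_may_pull() before it is read.  Source lists are not
 * tracked: TO_IN({})/IS_IN({}) is treated as a leave, every other
 * supported record type as a plain IGMPv2-style join.
 */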
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid);
			if (err)
				break;
		}
	}

	return err;
}
#endif

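/* Querier election in the style of RFC 2236/2710: the numerically lowest
 * source address on the link wins.  The address is also accepted
 * unconditionally while no querier (own or foreign) is active yet, or
 * (IPv4) while no querier address has been recorded at all.
 */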
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

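/* A foreign querier was heard: (re)arm the other_query timer for a full
 * querier interval.  delay_time is only advanced when the timer was not
 * already running, i.e. when this querier is a newly seen one.
 */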
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

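/* Handle a leave/done message.  With fast-leave enabled on the port the
 * port group is removed immediately.  Otherwise nothing is aged out while
 * a foreign querier is active (other_query timer pending); if we are the
 * querier, a group-specific query is sent first, and the group and port
 * timers are shortened to last_member_count * last_member_interval.
 */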
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

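/* ip_mc_check_igmp() validates the packet: 0 means a well-formed IGMP
 * message (possibly delivered via skb_trimmed), -ENOMSG means valid IPv4
 * but no IGMP payload (then flooded to mrouter ports only unless
 * link-local, with a peek at PIM hellos for router discovery), and any
 * other error is counted as a parse failure.
 */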
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

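/* Defaults follow the values suggested by RFC 2236/3376: a 125 s query
 * interval, 10 s query response interval, robustness (last member and
 * startup counts) of 2, and a membership interval of
 * 2 * 125 s + 10 s = 260 s.  The bridge's own querier defaults to off.
 */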
1814 void br_multicast_init(struct net_bridge *br)
1815 {
1816 br->hash_elasticity = 4;
1817 br->hash_max = 512;
1818
1819 br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1820 br->multicast_querier = 0;
1821 br->multicast_query_use_ifaddr = 0;
1822 br->multicast_last_member_count = 2;
1823 br->multicast_startup_query_count = 2;
1824
1825 br->multicast_last_member_interval = HZ;
1826 br->multicast_query_response_interval = 10 * HZ;
1827 br->multicast_startup_query_interval = 125 * HZ / 4;
1828 br->multicast_query_interval = 125 * HZ;
1829 br->multicast_querier_interval = 255 * HZ;
1830 br->multicast_membership_interval = 260 * HZ;
1831
1832 br->ip4_other_query.delay_time = 0;
1833 br->ip4_querier.port = NULL;
1834 #if IS_ENABLED(CONFIG_IPV6)
1835 br->ip6_other_query.delay_time = 0;
1836 br->ip6_querier.port = NULL;
1837 #endif
1838 br->has_ipv6_addr = 1;
1839
1840 spin_lock_init(&br->multicast_lock);
1841 setup_timer(&br->multicast_router_timer,
1842 br_multicast_local_router_expired, 0);
1843 setup_timer(&br->ip4_other_query.timer,
1844 br_ip4_multicast_querier_expired, (unsigned long)br);
1845 setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
1846 (unsigned long)br);
1847 #if IS_ENABLED(CONFIG_IPV6)
1848 setup_timer(&br->ip6_other_query.timer,
1849 br_ip6_multicast_querier_expired, (unsigned long)br);
1850 setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
1851 (unsigned long)br);
1852 #endif
1853 }
1854
1855 static void __br_multicast_open(struct net_bridge *br,
1856 struct bridge_mcast_own_query *query)
1857 {
1858 query->startup_sent = 0;
1859
1860 if (br->multicast_disabled)
1861 return;
1862
1863 mod_timer(&query->timer, jiffies);
1864 }
1865
1866 void br_multicast_open(struct net_bridge *br)
1867 {
1868 __br_multicast_open(br, &br->ip4_own_query);
1869 #if IS_ENABLED(CONFIG_IPV6)
1870 __br_multicast_open(br, &br->ip6_own_query);
1871 #endif
1872 }
1873
1874 void br_multicast_stop(struct net_bridge *br)
1875 {
1876 del_timer_sync(&br->multicast_router_timer);
1877 del_timer_sync(&br->ip4_other_query.timer);
1878 del_timer_sync(&br->ip4_own_query.timer);
1879 #if IS_ENABLED(CONFIG_IPV6)
1880 del_timer_sync(&br->ip6_other_query.timer);
1881 del_timer_sync(&br->ip6_own_query.timer);
1882 #endif
1883 }
1884
1885 void br_multicast_dev_del(struct net_bridge *br)
1886 {
1887 struct net_bridge_mdb_htable *mdb;
1888 struct net_bridge_mdb_entry *mp;
1889 struct hlist_node *n;
1890 u32 ver;
1891 int i;
1892
1893 spin_lock_bh(&br->multicast_lock);
1894 mdb = mlock_dereference(br->mdb, br);
1895 if (!mdb)
1896 goto out;
1897
1898 br->mdb = NULL;
1899
1900 ver = mdb->ver;
1901 for (i = 0; i < mdb->max; i++) {
1902 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1903 hlist[ver]) {
1904 del_timer(&mp->timer);
1905 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1906 }
1907 }
1908
1909 if (mdb->old) {
1910 spin_unlock_bh(&br->multicast_lock);
1911 rcu_barrier_bh();
1912 spin_lock_bh(&br->multicast_lock);
1913 WARN_ON(mdb->old);
1914 }
1915
1916 mdb->old = mdb;
1917 call_rcu_bh(&mdb->rcu, br_mdb_free);
1918
1919 out:
1920 spin_unlock_bh(&br->multicast_lock);
1921
1922 free_percpu(br->mcast_stats);
1923 }
1924
1925 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1926 {
1927 int err = -EINVAL;
1928
1929 spin_lock_bh(&br->multicast_lock);
1930
1931 switch (val) {
1932 case MDB_RTR_TYPE_DISABLED:
1933 case MDB_RTR_TYPE_PERM:
1934 del_timer(&br->multicast_router_timer);
1935 /* fall through */
1936 case MDB_RTR_TYPE_TEMP_QUERY:
1937 br->multicast_router = val;
1938 err = 0;
1939 break;
1940 }
1941
1942 spin_unlock_bh(&br->multicast_lock);
1943
1944 return err;
1945 }
1946
1947 static void __del_port_router(struct net_bridge_port *p)
1948 {
1949 if (hlist_unhashed(&p->rlist))
1950 return;
1951 hlist_del_init_rcu(&p->rlist);
1952 br_rtr_notify(p->br->dev, p, RTM_DELMDB);
1953 }
1954
1955 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1956 {
1957 struct net_bridge *br = p->br;
1958 unsigned long now = jiffies;
1959 int err = -EINVAL;
1960
1961 spin_lock(&br->multicast_lock);
1962 if (p->multicast_router == val) {
1963 /* Refresh the temp router port timer */
1964 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1965 mod_timer(&p->multicast_router_timer,
1966 now + br->multicast_querier_interval);
1967 err = 0;
1968 goto unlock;
1969 }
1970 switch (val) {
1971 case MDB_RTR_TYPE_DISABLED:
1972 p->multicast_router = MDB_RTR_TYPE_DISABLED;
1973 __del_port_router(p);
1974 del_timer(&p->multicast_router_timer);
1975 break;
1976 case MDB_RTR_TYPE_TEMP_QUERY:
1977 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1978 __del_port_router(p);
1979 break;
1980 case MDB_RTR_TYPE_PERM:
1981 p->multicast_router = MDB_RTR_TYPE_PERM;
1982 del_timer(&p->multicast_router_timer);
1983 br_multicast_add_router(br, p);
1984 break;
1985 case MDB_RTR_TYPE_TEMP:
1986 p->multicast_router = MDB_RTR_TYPE_TEMP;
1987 br_multicast_mark_router(br, p);
1988 break;
1989 default:
1990 goto unlock;
1991 }
1992 err = 0;
1993 unlock:
1994 spin_unlock(&br->multicast_lock);
1995
1996 return err;
1997 }
1998
1999 static void br_multicast_start_querier(struct net_bridge *br,
2000 struct bridge_mcast_own_query *query)
2001 {
2002 struct net_bridge_port *port;
2003
2004 __br_multicast_open(br, query);
2005
2006 list_for_each_entry(port, &br->port_list, list) {
2007 if (port->state == BR_STATE_DISABLED ||
2008 port->state == BR_STATE_BLOCKING)
2009 continue;
2010
2011 if (query == &br->ip4_own_query)
2012 br_multicast_enable(&port->ip4_own_query);
2013 #if IS_ENABLED(CONFIG_IPV6)
2014 else
2015 br_multicast_enable(&port->ip6_own_query);
2016 #endif
2017 }
2018 }
2019
2020 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2021 {
2022 struct net_bridge_mdb_htable *mdb;
2023 struct net_bridge_port *port;
2024 int err = 0;
2025
2026 spin_lock_bh(&br->multicast_lock);
2027 if (br->multicast_disabled == !val)
2028 goto unlock;
2029
2030 br->multicast_disabled = !val;
2031 if (br->multicast_disabled)
2032 goto unlock;
2033
2034 if (!netif_running(br->dev))
2035 goto unlock;
2036
2037 mdb = mlock_dereference(br->mdb, br);
2038 if (mdb) {
2039 if (mdb->old) {
2040 err = -EEXIST;
2041 rollback:
2042 br->multicast_disabled = !!val;
2043 goto unlock;
2044 }
2045
2046 err = br_mdb_rehash(&br->mdb, mdb->max,
2047 br->hash_elasticity);
2048 if (err)
2049 goto rollback;
2050 }
2051
2052 br_multicast_open(br);
2053 list_for_each_entry(port, &br->port_list, list)
2054 __br_multicast_enable_port(port);
2055
2056 unlock:
2057 spin_unlock_bh(&br->multicast_lock);
2058
2059 return err;
2060 }
2061
2062 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2063 {
2064 unsigned long max_delay;
2065
2066 val = !!val;
2067
2068 spin_lock_bh(&br->multicast_lock);
2069 if (br->multicast_querier == val)
2070 goto unlock;
2071
2072 br->multicast_querier = val;
2073 if (!val)
2074 goto unlock;
2075
2076 max_delay = br->multicast_query_response_interval;
2077
2078 if (!timer_pending(&br->ip4_other_query.timer))
2079 br->ip4_other_query.delay_time = jiffies + max_delay;
2080
2081 br_multicast_start_querier(br, &br->ip4_own_query);
2082
2083 #if IS_ENABLED(CONFIG_IPV6)
2084 if (!timer_pending(&br->ip6_other_query.timer))
2085 br->ip6_other_query.delay_time = jiffies + max_delay;
2086
2087 br_multicast_start_querier(br, &br->ip6_own_query);
2088 #endif
2089
2090 unlock:
2091 spin_unlock_bh(&br->multicast_lock);
2092
2093 return 0;
2094 }
2095
2096 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
2097 {
2098 int err = -EINVAL;
2099 u32 old;
2100 struct net_bridge_mdb_htable *mdb;
2101
2102 spin_lock_bh(&br->multicast_lock);
2103 if (!is_power_of_2(val))
2104 goto unlock;
2105
2106 mdb = mlock_dereference(br->mdb, br);
2107 if (mdb && val < mdb->size)
2108 goto unlock;
2109
2110 err = 0;
2111
2112 old = br->hash_max;
2113 br->hash_max = val;
2114
2115 if (mdb) {
2116 if (mdb->old) {
2117 err = -EEXIST;
2118 rollback:
2119 br->hash_max = old;
2120 goto unlock;
2121 }
2122
2123 err = br_mdb_rehash(&br->mdb, br->hash_max,
2124 br->hash_elasticity);
2125 if (err)
2126 goto rollback;
2127 }
2128
2129 unlock:
2130 spin_unlock_bh(&br->multicast_lock);
2131
2132 return err;
2133 }
2134
2135 /**
2136 * br_multicast_list_adjacent - Returns snooped multicast addresses
2137 * @dev: The bridge port adjacent to which to retrieve addresses
2138 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2139 *
2140 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2141 * snooping feature on all bridge ports of dev's bridge device, excluding
2142 * the addresses from dev itself.
2143 *
2144 * Returns the number of items added to br_ip_list.
2145 *
2146 * Notes:
2147 * - br_ip_list must be initialized by the caller
2148 * - br_ip_list may end up containing duplicates;
2149 *   deduplication is the caller's responsibility
2150 * - the entries on br_ip_list must be freed by the caller
2151 */
2152 int br_multicast_list_adjacent(struct net_device *dev,
2153 struct list_head *br_ip_list)
2154 {
2155 struct net_bridge *br;
2156 struct net_bridge_port *port;
2157 struct net_bridge_port_group *group;
2158 struct br_ip_list *entry;
2159 int count = 0;
2160
2161 rcu_read_lock();
2162 if (!br_ip_list || !br_port_exists(dev))
2163 goto unlock;
2164
2165 port = br_port_get_rcu(dev);
2166 if (!port || !port->br)
2167 goto unlock;
2168
2169 br = port->br;
2170
2171 list_for_each_entry_rcu(port, &br->port_list, list) {
2172 if (!port->dev || port->dev == dev)
2173 continue;
2174
2175 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2176 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2177 if (!entry)
2178 goto unlock;
2179
2180 entry->addr = group->addr;
2181 list_add(&entry->list, br_ip_list);
2182 count++;
2183 }
2184 }
2185
2186 unlock:
2187 rcu_read_unlock();
2188 return count;
2189 }
2190 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
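/*
 * Hedged caller sketch (hypothetical helper, not part of this file): build
 * the list on a LIST_HEAD, then walk and free every entry as required by
 * the notes above.  struct br_ip_list is declared in <linux/if_bridge.h>.
 */
#if 0	/* illustrative only */
static int example_walk_adjacent(struct net_device *port_dev)
{
	LIST_HEAD(mc_list);
	struct br_ip_list *entry, *tmp;
	int n;

	n = br_multicast_list_adjacent(port_dev, &mc_list);

	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
		/* consume entry->addr (a struct br_ip) here */
		list_del(&entry->list);
		kfree(entry);
	}
	return n;	/* number of addresses that were added */
}
#endif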
2191
2192 /**
2193 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2194 * @dev: The bridge port providing the bridge on which to check for a querier
2195 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2196 *
2197 * Checks whether the given interface has a bridge on top and, if so, returns
2198 * true if a valid querier exists anywhere on the bridged link layer;
2199 * otherwise returns false.
2200 */
2201 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2202 {
2203 struct net_bridge *br;
2204 struct net_bridge_port *port;
2205 struct ethhdr eth;
2206 bool ret = false;
2207
2208 rcu_read_lock();
2209 if (!br_port_exists(dev))
2210 goto unlock;
2211
2212 port = br_port_get_rcu(dev);
2213 if (!port || !port->br)
2214 goto unlock;
2215
2216 br = port->br;
2217
2218 memset(&eth, 0, sizeof(eth));
2219 eth.h_proto = htons(proto);
2220
2221 ret = br_multicast_querier_exists(br, &eth);
2222
2223 unlock:
2224 rcu_read_unlock();
2225 return ret;
2226 }
2227 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
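/*
 * Hedged usage sketch: a driver stacked under a bridge can ask whether any
 * querier (this bridge's own included) is active for a given family:
 */
#if 0	/* illustrative only */
if (br_multicast_has_querier_anywhere(dev, ETH_P_IP))
	/* an IGMP querier is live somewhere on the bridged segment */;
#endif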
2228
2229 /**
2230 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2231 * @dev: The bridge port adjacent to which to check for a querier
2232 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2233 *
2234 * Checks whether the given interface has a bridge on top and, if so, returns
2235 * true if the selected querier is behind one of the other ports of this
2236 * bridge; otherwise returns false.
2237 */
2238 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2239 {
2240 struct net_bridge *br;
2241 struct net_bridge_port *port;
2242 bool ret = false;
2243
2244 rcu_read_lock();
2245 if (!br_port_exists(dev))
2246 goto unlock;
2247
2248 port = br_port_get_rcu(dev);
2249 if (!port || !port->br)
2250 goto unlock;
2251
2252 br = port->br;
2253
2254 switch (proto) {
2255 case ETH_P_IP:
2256 if (!timer_pending(&br->ip4_other_query.timer) ||
2257 rcu_dereference(br->ip4_querier.port) == port)
2258 goto unlock;
2259 break;
2260 #if IS_ENABLED(CONFIG_IPV6)
2261 case ETH_P_IPV6:
2262 if (!timer_pending(&br->ip6_other_query.timer) ||
2263 rcu_dereference(br->ip6_querier.port) == port)
2264 goto unlock;
2265 break;
2266 #endif
2267 default:
2268 goto unlock;
2269 }
2270
2271 ret = true;
2272 unlock:
2273 rcu_read_unlock();
2274 return ret;
2275 }
2276 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
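/*
 * Hedged usage sketch: in contrast to the "anywhere" variant above, this
 * one only reports a querier heard behind one of the *other* ports, e.g.:
 */
#if 0	/* illustrative only */
bool v4 = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
bool v6 = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
#endif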
2277
2278 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2279 const struct sk_buff *skb, u8 type, u8 dir)
2280 {
2281 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2282 __be16 proto = skb->protocol;
2283 unsigned int t_len;
2284
2285 u64_stats_update_begin(&pstats->syncp);
2286 switch (proto) {
2287 case htons(ETH_P_IP):
2288 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2289 switch (type) {
2290 case IGMP_HOST_MEMBERSHIP_REPORT:
2291 pstats->mstats.igmp_v1reports[dir]++;
2292 break;
2293 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2294 pstats->mstats.igmp_v2reports[dir]++;
2295 break;
2296 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2297 pstats->mstats.igmp_v3reports[dir]++;
2298 break;
2299 case IGMP_HOST_MEMBERSHIP_QUERY:
2300 if (t_len != sizeof(struct igmphdr)) {
2301 pstats->mstats.igmp_v3queries[dir]++;
2302 } else {
2303 unsigned int offset = skb_transport_offset(skb);
2304 struct igmphdr *ih, _ihdr;
2305
2306 ih = skb_header_pointer(skb, offset,
2307 sizeof(_ihdr), &_ihdr);
2308 if (!ih)
2309 break;
2310 if (!ih->code)
2311 pstats->mstats.igmp_v1queries[dir]++;
2312 else
2313 pstats->mstats.igmp_v2queries[dir]++;
2314 }
2315 break;
2316 case IGMP_HOST_LEAVE_MESSAGE:
2317 pstats->mstats.igmp_leaves[dir]++;
2318 break;
2319 }
2320 break;
2321 #if IS_ENABLED(CONFIG_IPV6)
2322 case htons(ETH_P_IPV6):
2323 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2324 sizeof(struct ipv6hdr);
2325 t_len -= skb_network_header_len(skb);
2326 switch (type) {
2327 case ICMPV6_MGM_REPORT:
2328 pstats->mstats.mld_v1reports[dir]++;
2329 break;
2330 case ICMPV6_MLD2_REPORT:
2331 pstats->mstats.mld_v2reports[dir]++;
2332 break;
2333 case ICMPV6_MGM_QUERY:
2334 if (t_len != sizeof(struct mld_msg))
2335 pstats->mstats.mld_v2queries[dir]++;
2336 else
2337 pstats->mstats.mld_v1queries[dir]++;
2338 break;
2339 case ICMPV6_MGM_REDUCTION:
2340 pstats->mstats.mld_leaves[dir]++;
2341 break;
2342 }
2343 break;
2344 #endif /* CONFIG_IPV6 */
2345 }
2346 u64_stats_update_end(&pstats->syncp);
2347 }
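/*
 * Worked example of the query disambiguation above (lengths are transport
 * payload sizes): an IGMP query of exactly sizeof(struct igmphdr) == 8
 * bytes is IGMPv1 if its code field is 0, IGMPv2 otherwise; anything
 * longer carries IGMPv3 source lists.  Likewise an MLD query of exactly
 * sizeof(struct mld_msg) == 24 bytes is MLDv1, longer ones are MLDv2.
 */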
2348
2349 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2350 const struct sk_buff *skb, u8 type, u8 dir)
2351 {
2352 struct bridge_mcast_stats __percpu *stats;
2353
2354 /* if multicast snooping is disabled, the IGMP/MLD type is never set */
2355 if (!type || !br->multicast_stats_enabled)
2356 return;
2357
2358 if (p)
2359 stats = p->mcast_stats;
2360 else
2361 stats = br->mcast_stats;
2362 if (WARN_ON(!stats))
2363 return;
2364
2365 br_mcast_stats_add(stats, skb, type, dir);
2366 }
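/*
 * Hedged call-site sketch: the bridge RX/TX paths account each snooped
 * packet roughly like this, where br_multicast_igmp_type() (assumed here,
 * from br_private.h) returns the type cached during snooping, or 0:
 */
#if 0	/* illustrative only */
br_multicast_count(br, p, skb, br_multicast_igmp_type(skb),
		   BR_MCAST_DIR_RX);
#endif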
2367
2368 int br_multicast_init_stats(struct net_bridge *br)
2369 {
2370 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2371 if (!br->mcast_stats)
2372 return -ENOMEM;
2373
2374 return 0;
2375 }
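/*
 * Note (hedged): netdev_alloc_pcpu_stats() zeroes the per-CPU counters and
 * initializes each CPU's u64_stats syncp.  The matching teardown on the
 * uninit path is simply:
 *
 *	free_percpu(br->mcast_stats);
 */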
2376
2377 static void mcast_stats_add_dir(u64 *dst, u64 *src)
2378 {
2379 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2380 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2381 }
2382
2383 void br_multicast_get_stats(const struct net_bridge *br,
2384 const struct net_bridge_port *p,
2385 struct br_mcast_stats *dest)
2386 {
2387 struct bridge_mcast_stats __percpu *stats;
2388 struct br_mcast_stats tdst;
2389 int i;
2390
2391 memset(dest, 0, sizeof(*dest));
2392 if (p)
2393 stats = p->mcast_stats;
2394 else
2395 stats = br->mcast_stats;
2396 if (WARN_ON(!stats))
2397 return;
2398
2399 memset(&tdst, 0, sizeof(tdst));
2400 for_each_possible_cpu(i) {
2401 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2402 struct br_mcast_stats temp;
2403 unsigned int start;
2404
2405 do {
2406 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2407 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2408 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2409
2410 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2411 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2412 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2413 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2414 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2415 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2416 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2417 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2418
2419 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2420 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2421 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2422 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2423 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2424 tdst.mld_parse_errors += temp.mld_parse_errors;
2425 }
2426 memcpy(dest, &tdst, sizeof(*dest));
2427 }
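/*
 * Hedged consumer sketch: p == NULL selects the bridge device's own
 * counters (those accumulated with a NULL port above); the fetch/retry
 * loop yields a consistent per-CPU snapshot against concurrent writers.
 * E.g. total IGMP queries received:
 */
#if 0	/* illustrative only */
struct br_mcast_stats dump;
u64 igmp_queries_rx;

br_multicast_get_stats(br, NULL, &dump);
igmp_queries_rx = dump.igmp_v1queries[BR_MCAST_DIR_RX] +
		  dump.igmp_v2queries[BR_MCAST_DIR_RX] +
		  dump.igmp_v3queries[BR_MCAST_DIR_RX];
#endif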