]> git.ipfire.org Git - thirdparty/linux.git/blob - net/bridge/br_multicast.c
net: bridge: multicast: add vlan state initialization and control
[thirdparty/linux.git] / net / bridge / br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge multicast support.
4 *
5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6 */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
/* rhashtable parameters for the bridge's global MDB (multicast forwarding
 * database).  Entries are struct net_bridge_mdb_entry, keyed by their full
 * struct br_ip address; since key_len covers the whole struct, lookup keys
 * must have all bytes (including padding) zeroed before the used fields are
 * filled in — hence the memset() in every key-building helper below.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
44
/* rhashtable parameters for the per-bridge S,G-port table, which maps a
 * (port, S,G address) key to its struct net_bridge_port_group.  As above,
 * key_len spans the whole key struct, so lookup keys must be fully zeroed
 * before use.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
51
/* Forward declarations for helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
/* Look up the port group entry for S,G-port key @sg_p.
 * Caller must hold the bridge's multicast_lock (enforced by lockdep).
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}
94
/* RCU-protected MDB lookup by address; per rhashtable_lookup()'s contract
 * the caller must be in an RCU read-side critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
100
/* MDB lookup by address for callers holding the bridge's multicast_lock.
 * The lock protects the returned entry's lifetime; the short RCU read-side
 * section exists only to satisfy rhashtable_lookup()'s calling convention.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
114
/* Look up the IPv4 *,G MDB entry for (@dst, @vid).
 * The memset() is required: br_ip is an rhashtable key compared over its
 * full size, so all padding/unused fields must be zeroed.  Do not replace
 * it with a designated initializer, which need not zero padding bytes.
 */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
127
#if IS_ENABLED(CONFIG_IPV6)
/* Look up the IPv6 *,G MDB entry for (@dst, @vid).  As with the IPv4
 * variant, the key must be fully zeroed via memset() before filling in
 * the used fields (rhashtable compares the whole struct br_ip).
 */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
143
/* Find the MDB entry a to-be-forwarded packet should be matched against.
 * Returns NULL when snooping is disabled or the skb was classified as an
 * IGMP/MLD packet itself.  For IGMPv3/MLDv2 an exact S,G lookup is tried
 * first, falling back to the *,G entry; non-IP protocols are matched by
 * destination MAC.  Uses the RCU lookup helper, so the caller is expected
 * to be in an RCU read-side section (fast path).
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	/* zero the whole key - rhashtable compares all of struct br_ip */
	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match, then fall back to *,G */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match, then fall back to *,G */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP traffic: match on destination MAC address */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
194
195 static bool br_port_group_equal(struct net_bridge_port_group *p,
196 struct net_bridge_port *port,
197 const unsigned char *src)
198 {
199 if (p->key.port != port)
200 return false;
201
202 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
203 return true;
204
205 return ether_addr_equal(src, p->eth_addr);
206 }
207
/* Install a kernel-managed S,G entry for @sg_ip on @pg's port, unless one
 * already exists.  Entries created here get MDB_PG_FLAGS_STAR_EXCL so they
 * can be automatically removed again when the *,G EXCLUDE state goes away;
 * entries not added by the kernel (rt_protocol != RTPROT_KERNEL) are left
 * untouched.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	/* fully zero the key - rhashtable compares the whole struct */
	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
232
/* Remove the automatically installed S,G entry for @sg_ip on @pg's port,
 * if present.  Only entries that were added by the kernel with the
 * STAR_EXCL flag are removed; user-managed entries are preserved.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	/* fully zero the key - rhashtable compares the whole struct */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
250
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* only *,G entries can have a filter mode transition */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = &pg->key.port->multicast_ctx;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of this *,G and apply the transition
	 * for each of their installed sources
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
301
/* called when adding a new S,G with host_joined == false by default */
/* If the host has joined the *,G entry @star_mp, propagate host_joined to
 * the MDB entry of the newly added S,G @sg (the host's *,G join is treated
 * as EXCLUDE {}, covering all its S,G entries).
 */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}
318
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	/* for every installed source of every port group of this *,G, look
	 * up the corresponding S,G MDB entry and mirror host_joined into it
	 */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
348
/* Drop kernel-installed STAR_EXCL port entries (and the host join) from the
 * S,G entry @sgmp once no other, "real" member ports remain on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* second pass: delete every remaining non-permanent port entry */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
382
/* When a new S,G port group @sg is created, add kernel-managed S,G entries
 * for all EXCLUDE-mode ports of the corresponding *,G entry @star_mp, so
 * traffic keeps being replicated to them.  Also propagates the host join
 * state from the *,G to the S,G (see br_multicast_sg_host_state()).
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	/* fully zero the key - rhashtable compares the whole struct */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	brmctx = &br->multicast_ctx;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = &pg->key.port->multicast_ctx;
		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
425
/* Install the S,G port group for source entry @src (if not yet installed):
 * create/find the S,G entry on @src's port, mark the source installed, and
 * hook the new S,G into all *,G EXCLUDE ports.  The entry starts blocked
 * when the source timer isn't running.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = &src->pg->key.port->multicast_ctx;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
464
/* Remove the installed S,G port group that belongs to source entry @src
 * and clear its installed flag.  User-added permanent entries are kept.
 * @fastleave marks the deleted entry so the notification reflects a
 * fast-leave removal.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* don't remove user-space managed permanent entries */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
498
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	/* fully zero the key - rhashtable compares the whole struct */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* a running source timer means the source is active: unblock */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only when the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
532
/* GC destroy callback for an MDB entry: runs from the gc workqueue after
 * the entry was unlinked.  Stops the timer before freeing via RCU.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	/* must already be unlinked and have no remaining port groups */
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}
544
/* Unlink MDB entry @mp from the hash table and list, then queue it for
 * destruction on the gc workqueue (see br_multicast_destroy_mdb_entry()).
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
555
/* Group membership timer: when an MDB entry expires, drop the host join
 * and delete the entry unless member ports remain or the timer has been
 * re-armed meanwhile.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* bail out if already deleted, bridge is down, or timer re-armed */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
574
/* GC destroy callback for a group source entry: runs from the gc
 * workqueue after the entry was unlinked; stops its timer, frees via RCU.
 */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}
585
/* Remove source entry @src from its port group: take down its installed
 * S,G forwarding state, unlink it, and queue it for gc destruction.
 * @fastleave is passed through to the S,G removal for notification.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
597
/* GC destroy callback for a port group: runs from the gc workqueue after
 * the group was unlinked and its sources removed; stops both timers
 * before freeing via RCU.
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
610
/* Delete port group @pg from MDB entry @mp: unlink it from the port list
 * (@pp points at its link), remove all its sources and EHT state, notify
 * user-space, clean up related S,G/*,G state, and queue @pg for gc.
 * If @mp is left without ports or host join, its timer is fired to let it
 * expire immediately.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entries also live in the S,G-port hash table */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		/* removing a *,G port group acts like EXCLUDE -> INCLUDE */
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
638
/* Locate @pg's link within its MDB entry's port list and delete it via
 * br_multicast_del_pg().  Warns if either the MDB entry or @pg cannot be
 * found - callers expect @pg to still be linked.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}
662
/* Port group membership timer: on expiry, fall back to INCLUDE mode and
 * drop all sources whose timers have stopped.  If no sources remain the
 * whole port group is deleted; otherwise user-space is notified of the
 * change.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	/* skip if the bridge is down, the timer was re-armed, the group is
	 * already unlinked, or it is a permanent (user-managed) entry
	 */
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}
700
701 static void br_multicast_gc(struct hlist_head *head)
702 {
703 struct net_bridge_mcast_gc *gcent;
704 struct hlist_node *tmp;
705
706 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
707 hlist_del_init(&gcent->gc_node);
708 gcent->destroy(gcent);
709 }
710 }
711
/* Build an IGMP query skb (general or group/group-and-source specific)
 * for version 2 or 3 depending on brmctx->multicast_igmp_version.
 * @ip_dst/@group: IP destination and queried group (0 for general query)
 * @with_srcs: build a group-and-source specific query (IGMPv3 only)
 * @over_lmqt: select sources whose timers expire after (true) or within
 *	(false) the last member query time
 * @sflag: IGMPv3 "suppress router-side processing" flag
 * @igmp_type: out - IGMP packet type for stats accounting
 * @need_rexmit: out - set when some selected source still has retransmits
 *	pending after this query
 * Returns the skb, or NULL if nothing should be sent (no matching sources,
 * MTU exceeded or allocation failure).
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* first pass: count the sources to include so the
			 * exact header size can be computed up-front
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + IP header + 4 bytes Router Alert option + IGMP */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed IP header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* ihl * 4 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the sources counted above and
		 * account their retransmissions
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must have selected the same sources */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
855
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (general or group/group-and-source specific) for
 * version 1 or 2 depending on brmctx->multicast_mld_version.  Parameters
 * mirror br_ip4_multicast_alloc_query() with @over_llqt in place of
 * @over_lmqt.  Returns NULL if nothing should be sent (no matching
 * sources, MTU exceeded, no usable IPv6 source address, or allocation
 * failure).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* first pass: count the sources to include so the
			 * exact header size can be computed up-front
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + IPv6 header + 8 bytes Hop-by-Hop option + MLD */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* no usable link-local source address: remember that and give up */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the sources counted above and
		 * account their retransmissions
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must have selected the same sources */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
1019
/* Protocol-dispatching query builder: picks the IGMP or MLD variant based
 * on @group->proto.  When @ip_dst is NULL the all-hosts/all-nodes
 * destination is used (224.0.0.1 resp. ff02::1).  Returns NULL for
 * unknown protocols or when the underlying builder declines.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* all-nodes link-local address ff02::1 */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
1058
/* Find or create the MDB entry for @group.  If the hash table has reached
 * hash_max, multicast snooping is disabled on the bridge and -E2BIG is
 * returned.  Returns the (possibly pre-existing) entry or an ERR_PTR.
 * GFP_ATOMIC is used since this runs in atomic context (under the
 * multicast lock / from packet processing).
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
1093
/* Source entry timer expiry.  In INCLUDE mode a timed-out source is
 * deleted (and the whole port group goes with the last source); in
 * EXCLUDE mode the entry is kept and only its forwarding state is
 * re-evaluated.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* Bail if the entry is already unlinked, the device is down or
	 * the timer has been re-armed in the meantime.
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1118
1119 struct net_bridge_group_src *
1120 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1121 {
1122 struct net_bridge_group_src *ent;
1123
1124 switch (ip->proto) {
1125 case htons(ETH_P_IP):
1126 hlist_for_each_entry(ent, &pg->src_list, node)
1127 if (ip->src.ip4 == ent->addr.src.ip4)
1128 return ent;
1129 break;
1130 #if IS_ENABLED(CONFIG_IPV6)
1131 case htons(ETH_P_IPV6):
1132 hlist_for_each_entry(ent, &pg->src_list, node)
1133 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1134 return ent;
1135 break;
1136 #endif
1137 }
1138
1139 return NULL;
1140 }
1141
/* Allocate and link a new source entry for @pg, bounded by
 * PG_SRC_ENT_LIMIT.  Returns NULL on limit, invalid source address or
 * allocation failure.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	/* Reject source addresses that can never be valid senders. */
	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1180
/* Allocate a new port group entry for @group on @port, linked in front
 * of @next.  (S,G) entries are additionally inserted into the bridge's
 * sg_port rhashtable; failure there aborts the whole operation.
 * Returns NULL on any failure.  Runs in atomic context (GFP_ATOMIC).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* Only non-(*,G), i.e. (S,G), entries live in the source-group
	 * port hash.
	 */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	/* Without an explicit host source MAC, match any source. */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
1225
/* Mark the bridge device itself as a member of @mp's group and refresh
 * the membership timer (except for L2 groups, which don't use it).
 * @notify controls whether an RTM_NEWMDB netlink event is sent.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		/* For (*,G) entries the host state may affect the
		 * forwarding of related (S,G) entries.
		 */
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	/* L2 (non-IP) groups don't run a membership timer. */
	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer,
		  jiffies + mp->br->multicast_ctx.multicast_membership_interval);
}
1242
1243 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1244 {
1245 if (!mp->host_joined)
1246 return;
1247
1248 mp->host_joined = false;
1249 if (br_multicast_is_star_g(&mp->addr))
1250 br_multicast_star_g_host_state(mp);
1251 if (notify)
1252 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1253 }
1254
/* Add a membership for @group, either for the bridge itself (host join,
 * @pmctx == NULL) or for the port in @pmctx.  Returns the port group,
 * NULL for host joins / early exits, or ERR_PTR() on failure.  Caller
 * holds br->multicast_lock.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* Walk the entry's port list looking for an existing match; the
	 * list is kept ordered by descending port pointer, so stop at the
	 * insertion point if no match is found.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* IGMPv2/MLDv1 reports carry no source lists; refresh the
	 * whole-group membership timer instead.
	 */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1310
1311 static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
1312 struct net_bridge_mcast_port *pmctx,
1313 struct br_ip *group,
1314 const unsigned char *src,
1315 u8 filter_mode,
1316 bool igmpv2_mldv1)
1317 {
1318 struct net_bridge_port_group *pg;
1319 int err;
1320
1321 spin_lock(&brmctx->br->multicast_lock);
1322 pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
1323 igmpv2_mldv1, false);
1324 /* NULL is considered valid for host joined groups */
1325 err = PTR_ERR_OR_ZERO(pg);
1326 spin_unlock(&brmctx->br->multicast_lock);
1327
1328 return err;
1329 }
1330
1331 static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
1332 struct net_bridge_mcast_port *pmctx,
1333 __be32 group,
1334 __u16 vid,
1335 const unsigned char *src,
1336 bool igmpv2)
1337 {
1338 struct br_ip br_group;
1339 u8 filter_mode;
1340
1341 if (ipv4_is_local_multicast(group))
1342 return 0;
1343
1344 memset(&br_group, 0, sizeof(br_group));
1345 br_group.dst.ip4 = group;
1346 br_group.proto = htons(ETH_P_IP);
1347 br_group.vid = vid;
1348 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1349
1350 return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1351 filter_mode, igmpv2);
1352 }
1353
1354 #if IS_ENABLED(CONFIG_IPV6)
1355 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1356 struct net_bridge_mcast_port *pmctx,
1357 const struct in6_addr *group,
1358 __u16 vid,
1359 const unsigned char *src,
1360 bool mldv1)
1361 {
1362 struct br_ip br_group;
1363 u8 filter_mode;
1364
1365 if (ipv6_addr_is_ll_all_nodes(group))
1366 return 0;
1367
1368 memset(&br_group, 0, sizeof(br_group));
1369 br_group.dst.ip6 = *group;
1370 br_group.proto = htons(ETH_P_IPV6);
1371 br_group.vid = vid;
1372 filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1373
1374 return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1375 filter_mode, mldv1);
1376 }
1377 #endif
1378
1379 static bool br_multicast_rport_del(struct hlist_node *rlist)
1380 {
1381 if (hlist_unhashed(rlist))
1382 return false;
1383
1384 hlist_del_init_rcu(rlist);
1385 return true;
1386 }
1387
/* Remove the port context from the IPv4 router list, if present. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1392
/* Remove the port context from the IPv6 router list, if present.
 * Always false when IPv6 is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1401
/* Common handler for a port's router presence timer: drop the port from
 * the router list unless its router type is statically configured
 * (disabled/permanent) or the timer was re-armed meanwhile, and notify
 * if the port was actually removed.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
1420
/* IPv4 flavour of the per-port multicast router timer callback. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1428
1429 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 flavour of the per-port multicast router timer callback. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
1437 #endif
1438
/* Propagate the bridge's own multicast-router state to offloading
 * drivers via switchdev.  The attr set is deferred (SWITCHDEV_F_DEFER);
 * callers in this file invoke it under the multicast lock.
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
1451
/* Bridge-local router timer expiry: clear the offloaded router state
 * only when the router type is dynamic and neither address family still
 * considers the bridge a router.
 * NOTE(review): the @timer argument is currently unused.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1466
/* IPv4 timer callback for the bridge's own router presence. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1474
1475 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 timer callback for the bridge's own router presence. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1483 #endif
1484
/* The foreign ("other") querier's presence timed out: take over by
 * starting our own queries, provided the bridge is up and snooping is
 * still enabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1498
/* IPv4 other-querier timeout: become the active IPv4 querier. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1506
1507 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timeout: become the active IPv6 querier. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
1515 #endif
1516
/* Record the source address of the query skb we just built as the
 * currently selected querier address for @ip's address family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1528
/* Build and send one (or two, see below) queries.  With @pmctx set the
 * query is transmitted out of the port through the LOCAL_OUT netfilter
 * hook; otherwise it is a bridge-level query looped back into our own
 * stack via netif_rx().
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* When the suppress flag is set, start with the sources whose
	 * timers are over the LMQT.
	 */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* For source-specific queries with the suppress flag set,
		 * send a second query covering the sources whose timers
		 * are under the LMQT.
		 */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1568
/* Send a periodic general query for @own_query's address family (either
 * bridge-level or port-level) and re-arm its timer, using the shorter
 * startup interval until multicast_startup_query_count queries have
 * been sent.  Stays silent while a foreign querier is active.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER))
		return;

	/* Only the destination union is cleared; proto is set below and
	 * the other fields are not used on this path.
	 */
	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* other_query stays NULL for IPv6 queries on !CONFIG_IPV6 builds */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1607
/* Per-port own-query timer expiry: send the next general query unless
 * STP has put the port into a non-forwarding state.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;

	spin_lock(&br->multicast_lock);
	if (pmctx->port->state == BR_STATE_DISABLED ||
	    pmctx->port->state == BR_STATE_BLOCKING)
		goto out;

	/* Count startup queries so br_multicast_send_query() can pick
	 * the (shorter) startup interval while any remain.
	 */
	if (query->startup_sent < br->multicast_ctx.multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(&br->multicast_ctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}
1627
/* IPv4 per-port own-query timer callback. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1635
1636 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 per-port own-query timer callback. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
1644 #endif
1645
/* Retransmission timer for a port group: resend pending group-specific
 * and group-and-source specific queries, then re-arm while anything is
 * still outstanding.  Only runs while we are the active querier.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	brmctx = &br->multicast_ctx;
	pmctx = &pg->key.port->multicast_ctx;
	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* Don't retransmit while a foreign querier is active. */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	/* Pending group-specific query (no sources, suppress flag set). */
	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* Group-and-source specific query; need_rexmit is set when some
	 * source still has retransmissions left.
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1687
1688 static int br_mc_disabled_update(struct net_device *dev, bool value,
1689 struct netlink_ext_ack *extack)
1690 {
1691 struct switchdev_attr attr = {
1692 .orig_dev = dev,
1693 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1694 .flags = SWITCHDEV_F_DEFER,
1695 .u.mc_disabled = !value,
1696 };
1697
1698 return switchdev_port_attr_set(dev, &attr, extack);
1699 }
1700
/* Initialize a per-port (or per-port-vlan, when @vlan is non-NULL)
 * multicast context: back-pointers, the default router type and the
 * router/own-query timers for both address families.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
1719
/* Synchronously stop the router presence timers of a port context.
 * NOTE(review): only the mc_router timers are flushed here; the
 * own-query timers are deleted on the disable path — confirm they
 * cannot be pending when this runs.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}
1727
/* Per-port multicast setup when a port joins the bridge: default EHT
 * hosts limit, context/timer initialization, switchdev sync of the
 * snooping state and per-CPU statistics allocation.  Returns 0 or a
 * negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	/* Drivers that don't implement the attribute are not an error. */
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1748
/* Tear down a port's multicast state when it leaves the bridge: flush
 * its remaining groups, run garbage collection outside the lock, stop
 * timers and free statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	/* Collect the GC list under the lock, free it outside of it. */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
1766
/* Restart an own-query cycle from scratch: reset the startup counter
 * and kick the timer to fire immediately.  mod_timer() is only called
 * when the pending timer could be removed (or none was mid-callback).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1775
/* (Re)start querying on a port context, provided the bridge is running
 * and snooping is enabled; ports configured as permanent mrouters are
 * put back on the router lists immediately.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}
1795
1796 void br_multicast_enable_port(struct net_bridge_port *port)
1797 {
1798 struct net_bridge *br = port->br;
1799
1800 spin_lock(&br->multicast_lock);
1801 __br_multicast_enable_port_ctx(&port->multicast_ctx);
1802 spin_unlock(&br->multicast_lock);
1803 }
1804
/* Disable a port multicast context: flush its non-permanent groups
 * (for a vlan context only those in its vid), stop all timers and drop
 * the port from both router lists, notifying if it was removed.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
1827
1828 void br_multicast_disable_port(struct net_bridge_port *port)
1829 {
1830 spin_lock(&port->br->multicast_lock);
1831 __br_multicast_disable_port_ctx(&port->multicast_ctx);
1832 spin_unlock(&port->br->multicast_lock);
1833 }
1834
1835 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1836 {
1837 struct net_bridge_group_src *ent;
1838 struct hlist_node *tmp;
1839 int deleted = 0;
1840
1841 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1842 if (ent->flags & BR_SGRP_F_DELETE) {
1843 br_multicast_del_group_src(ent, false);
1844 deleted++;
1845 }
1846
1847 return deleted;
1848 }
1849
/* Re-arm the source entry's timer, then let the forwarding handler
 * re-evaluate the entry's state.
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
1856
/* Send group-and-source specific queries for all sources of @pg marked
 * BR_SGRP_F_SEND: sources whose timers are above the LMQT are lowered
 * to it and (when we are the active querier) scheduled for LMQC
 * retransmissions; finally the rexmit timer is (re)armed.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* Schedule per-source retransmissions
				 * only when we are the querier.
				 */
				if (br_opt_get(brmctx->br,
					       BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	/* Only the active querier transmits. */
	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
1904
/* Send a group-specific query for @pg (when we are the active querier)
 * and schedule LMQC-1 retransmissions; in EXCLUDE mode also lower the
 * group timer to the LMQT so non-responding members age out quickly.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* One query goes out right away, hence LMQC - 1 left. */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
1939
/* State Msg type New state Actions
 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
 *
 * Add every reported source (creating missing entries) and refresh its
 * timer to the group membership interval.  Returns true if the port
 * group changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	/* Per-host explicit tracking bookkeeping. */
	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
1977
/* State Msg type New state Actions
 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
 * Delete (A-B)
 * Group Timer=GMI
 *
 * Mark all current sources for deletion, unmark (or create) the
 * reported ones, then delete whatever remained marked — keeping exactly
 * the intersection plus the newly reported B-A sources.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2013
/* State Msg type New state Actions
 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 * Delete (X-A)
 * Delete (Y-A)
 * Group Timer=GMI
 *
 * Keep only sources also present in the report; sources new to the
 * group (A-X-Y) start with a GMI timer.  Returns true if anything was
 * added or deleted.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2060
/* IS_EXCLUDE report handler: dispatch on the current filter mode, then
 * switch the group to EXCLUDE and refresh the group timer to GMI.
 * Returns true if the port group changed.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		/* INCLUDE -> EXCLUDE transition also updates (*,G) state. */
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2086
/* State Msg type New state Actions
 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
 * Send Q(G,A-B)
 *
 * Refresh/create the reported sources with a GMI timer; sources not in
 * the report keep BR_SGRP_F_SEND and are queried.  Returns true if the
 * port group changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2132
/* State Msg type New state Actions
 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
 * Send Q(G,X-A)
 * Send Q(G)
 *
 * Only sources with a running timer (the X set) are candidates for the
 * group-and-source specific query; a group-specific query is sent as
 * well.  Returns true if the port group changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	/* NOTE(review): to_send starts at src_ents and thus may overcount
	 * sources whose timer isn't running; harmless since only entries
	 * flagged BR_SGRP_F_SEND are actually queried.
	 */
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2184
/* Handle an IGMPv3/MLDv2 TO_INCLUDE record by dispatching on the port
 * group's current filter mode.  May delete @pg via the EHT check below,
 * in which case false is returned so callers never touch @pg again.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	/* no explicit host tracking entries are interested anymore */
	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2215
2216 /* State Msg type New state Actions
2217 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2218 * Delete (A-B)
2219 * Send Q(G,A*B)
2220 * Group Timer=GMI
2221 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* Assume every current source will be deleted (A-B); sources that
	 * appear in the report are flipped to SEND (A*B) below.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* in A*B: keep it and query it */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			/* new (B-A) source; its timer stays at 0 per the
			 * state table above
			 */
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	/* Delete (A-B) */
	__grp_src_delete_marked(pg);
	/* Send Q(G,A*B) */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2258
2259 /* State Msg type New state Actions
2260 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2261 * Delete (X-A)
2262 * Delete (Y-A)
2263 * Send Q(G,A-Y)
2264 * Group Timer=GMI
2265 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Pre-mark all current sources for deletion; reported sources are
	 * unmarked below, leaving (X-A) and (Y-A) marked for deletion.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			/* (A-X-Y) = Group Timer: new sources inherit the
			 * group's remaining time
			 */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* active reported sources form the Q(G,A-Y) set */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2311
/* Handle an IGMPv3/MLDv2 TO_EXCLUDE record: dispatch on the current filter
 * mode, then unconditionally switch the port group to EXCLUDE mode and
 * refresh the group timer to GMI.  Returns true when the group changed and
 * a notification should be sent.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		/* mode switch always changes the entry */
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	/* Group Timer = GMI */
	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2338
2339 /* State Msg type New state Actions
2340 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2341 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* clear stale SEND marks; only reported sources (A*B) get queried */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	/* Send Q(G,A*B) */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2375
2376 /* State Msg type New state Actions
2377 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2378 * Send Q(G,A-Y)
2379 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			/* (A-X-Y) = Group Timer: unknown blocked sources are
			 * added with the group's remaining time
			 */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* active sources form the Q(G,A-Y) set */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	/* Send Q(G,A-Y) */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2420
/* Handle an IGMPv3/MLDv2 BLOCK record.  May delete @pg (INCLUDE mode with
 * no sources left, or no interested EHT hosts remain); false is returned
 * in that case so callers never touch @pg again.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2452
2453 static struct net_bridge_port_group *
2454 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2455 struct net_bridge_port *p,
2456 const unsigned char *src)
2457 {
2458 struct net_bridge *br __maybe_unused = mp->br;
2459 struct net_bridge_port_group *pg;
2460
2461 for (pg = mlock_dereference(mp->ports, br);
2462 pg;
2463 pg = mlock_dereference(pg->next, br))
2464 if (br_port_group_equal(pg, p, src))
2465 return pg;
2466
2467 return NULL;
2468 }
2469
/* Process an IGMPv3 membership report: validate each group record's
 * bounds, handle join/leave (also covering v2-compat mode and reports with
 * no port context), then run the full v3 source-list state machine per
 * record under multicast_lock.  Returns 0 on success, -EINVAL on a
 * malformed packet, or the add_group error.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* bounds-check the fixed record header before reading it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* each IGMPv3 source address is 4 bytes */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type - skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* empty INCLUDE == leave; only taken directly when
			 * there's no port or we're in v2-compat mode
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* full v3 per-source processing needs a port context */
		if (!pmctx || igmpv2)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
2587
2588 #if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 report: per record, safely read grec_nsrcs via
 * skb_header_pointer, bounds-check the variable-length record, handle
 * join/leave (also covering v1-compat mode and reports with no port
 * context), then run the v2 source-list state machine under
 * multicast_lock.  Returns 0 on success, -EINVAL on a malformed packet,
 * or the add_group error.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* ensure the nsrcs field lies within the transport payload */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* header + nsrcs in6_addr entries, overflow-checked */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type - skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* empty INCLUDE == leave; only taken directly when
			 * there's no port or we're in v1-compat mode
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* full v2 per-source processing needs a port context */
		if (!pmctx || mldv1)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
2726 #endif
2727
2728 static bool br_ip4_multicast_select_querier(struct net_bridge_mcast *brmctx,
2729 struct net_bridge_port *port,
2730 __be32 saddr)
2731 {
2732 if (!timer_pending(&brmctx->ip4_own_query.timer) &&
2733 !timer_pending(&brmctx->ip4_other_query.timer))
2734 goto update;
2735
2736 if (!brmctx->ip4_querier.addr.src.ip4)
2737 goto update;
2738
2739 if (ntohl(saddr) <= ntohl(brmctx->ip4_querier.addr.src.ip4))
2740 goto update;
2741
2742 return false;
2743
2744 update:
2745 brmctx->ip4_querier.addr.src.ip4 = saddr;
2746
2747 /* update protected by general multicast_lock by caller */
2748 rcu_assign_pointer(brmctx->ip4_querier.port, port);
2749
2750 return true;
2751 }
2752
2753 #if IS_ENABLED(CONFIG_IPV6)
2754 static bool br_ip6_multicast_select_querier(struct net_bridge_mcast *brmctx,
2755 struct net_bridge_port *port,
2756 struct in6_addr *saddr)
2757 {
2758 if (!timer_pending(&brmctx->ip6_own_query.timer) &&
2759 !timer_pending(&brmctx->ip6_other_query.timer))
2760 goto update;
2761
2762 if (ipv6_addr_cmp(saddr, &brmctx->ip6_querier.addr.src.ip6) <= 0)
2763 goto update;
2764
2765 return false;
2766
2767 update:
2768 brmctx->ip6_querier.addr.src.ip6 = *saddr;
2769
2770 /* update protected by general multicast_lock by caller */
2771 rcu_assign_pointer(brmctx->ip6_querier.port, port);
2772
2773 return true;
2774 }
2775 #endif
2776
/* (Re)arm the "other querier present" timer after hearing a foreign
 * query.  delay_time is only reset when the timer wasn't already running,
 * i.e. at the start of a new other-querier-present interval.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
2787
2788 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2789 bool is_mc_router)
2790 {
2791 struct switchdev_attr attr = {
2792 .orig_dev = p->dev,
2793 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2794 .flags = SWITCHDEV_F_DEFER,
2795 .u.mrouter = is_mc_router,
2796 };
2797
2798 switchdev_port_attr_set(p->dev, &attr, NULL);
2799 }
2800
/* Map a router-list node back to its bridge port.  The hlist member the
 * node is embedded in depends on which router list it came from (IPv6
 * list uses ip6_rlist, everything else ip4_rlist).
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
2819
2820 static struct hlist_node *
2821 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
2822 struct net_bridge_port *port,
2823 struct hlist_head *mc_router_list)
2824
2825 {
2826 struct hlist_node *slot = NULL;
2827 struct net_bridge_port *p;
2828 struct hlist_node *rlist;
2829
2830 hlist_for_each(rlist, mc_router_list) {
2831 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
2832
2833 if ((unsigned long)port >= (unsigned long)p)
2834 break;
2835
2836 slot = rlist;
2837 }
2838
2839 return slot;
2840 }
2841
2842 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
2843 struct hlist_node *rnode)
2844 {
2845 #if IS_ENABLED(CONFIG_IPV6)
2846 if (rnode != &pmctx->ip6_rlist)
2847 return hlist_unhashed(&pmctx->ip6_rlist);
2848 else
2849 return hlist_unhashed(&pmctx->ip4_rlist);
2850 #else
2851 return true;
2852 #endif
2853 }
2854
2855 /* Add port to router_list
2856 * list is maintained ordered by pointer value
2857 * and locked by br->multicast_lock and RCU
2858 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	/* keep the list ordered by port pointer value */
	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx->port, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}
2885
/* Add the port to the IPv4 multicast router list; see
 * br_multicast_add_router() for ordering, locking and notification
 * details.
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}
2896
/* Add the port to the IPv6 multicast router list; see
 * br_multicast_add_router() for ordering, locking and notification
 * details.  No-op when IPv6 support is disabled.
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}
2909
/* Mark the bridge itself (!pmctx) or a port as a multicast router and
 * (re)arm the corresponding router timer.  Port marking is skipped when
 * the port's router type is pinned (disabled or permanent).
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!pmctx) {
		/* bridge-level marking, only in temp-query mode */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
2935
2936 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
2937 struct net_bridge_mcast_port *pmctx)
2938 {
2939 struct timer_list *timer = &brmctx->ip4_mc_router_timer;
2940 struct hlist_node *rlist = NULL;
2941
2942 if (pmctx) {
2943 timer = &pmctx->ip4_mc_router_timer;
2944 rlist = &pmctx->ip4_rlist;
2945 }
2946
2947 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
2948 &brmctx->ip4_mc_router_list);
2949 }
2950
2951 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
2952 struct net_bridge_mcast_port *pmctx)
2953 {
2954 #if IS_ENABLED(CONFIG_IPV6)
2955 struct timer_list *timer = &brmctx->ip6_mc_router_timer;
2956 struct hlist_node *rlist = NULL;
2957
2958 if (pmctx) {
2959 timer = &pmctx->ip6_mc_router_timer;
2960 rlist = &pmctx->ip6_rlist;
2961 }
2962
2963 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
2964 &brmctx->ip6_mc_router_list);
2965 #endif
2966 }
2967
/* Handle an IPv4 general query from another querier: if it wins the
 * querier election, refresh the other-querier timer and mark the ingress
 * port as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip4_multicast_select_querier(brmctx, pmctx->port, saddr->src.ip4))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}
2981
2982 #if IS_ENABLED(CONFIG_IPV6)
/* Handle an IPv6 general query from another querier: if it wins the
 * querier election, refresh the other-querier timer and mark the ingress
 * port as a multicast router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip6_multicast_select_querier(brmctx, pmctx->port, &saddr->src.ip6))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
2996 #endif
2997
/* Handle a received IGMP query.  A general query feeds the querier
 * election and router marking; a group-specific query lowers the group
 * and matching port-group timers so membership expires faster unless a
 * report refreshes it.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* v1/v2-sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1: no max response time; treat as general */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* ignore queries with sources, and (in v3 mode) suppressed
		 * group-specific queries
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query - run querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* lower the host-joined group timer if it would fire later */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): due to ?:/&& precedence the igmp version /
		 * filter-mode check below only guards the timer-not-pending
		 * branch - confirm this is intended
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3078
3079 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query (v1 or v2).  A general query feeds the
 * querier election and router marking; a group-specific query lowers the
 * group and matching port-group timers.  Returns 0 or -EINVAL for a
 * truncated packet.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* in v2 mode, ignore suppressed group-specific queries */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query - run querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	/* lower the host-joined group timer if it would fire later */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): due to ?:/&& precedence the mld version /
		 * filter-mode check below only guards the timer-not-pending
		 * branch - confirm this is intended
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
3169 #endif
3170
/* Core leave handling shared by the IPv4/IPv6 wrappers.  Depending on
 * configuration this either deletes the port group immediately (fast
 * leave), or sends a last-member query and shortens the relevant group /
 * port-group timers so membership expires unless refreshed.  @pmctx may
 * be NULL for a host (bridge-level) leave.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: delete the matching non-permanent entry now */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active - it will handle the leave */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* NOTE(review): this loop dereferences pmctx->port; it
		 * assumes pmctx != NULL on the querier path - confirm a
		 * host leave can't reach here with querier enabled and no
		 * other-querier timer pending
		 */
		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: only the host-joined group timer is lowered */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* port leave: lower the matching port group's timer */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3273
3274 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
3275 struct net_bridge_mcast_port *pmctx,
3276 __be32 group,
3277 __u16 vid,
3278 const unsigned char *src)
3279 {
3280 struct br_ip br_group;
3281 struct bridge_mcast_own_query *own_query;
3282
3283 if (ipv4_is_local_multicast(group))
3284 return;
3285
3286 own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
3287
3288 memset(&br_group, 0, sizeof(br_group));
3289 br_group.dst.ip4 = group;
3290 br_group.proto = htons(ETH_P_IP);
3291 br_group.vid = vid;
3292
3293 br_multicast_leave_group(brmctx, pmctx, &br_group,
3294 &brmctx->ip4_other_query,
3295 own_query, src);
3296 }
3297
3298 #if IS_ENABLED(CONFIG_IPV6)
3299 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
3300 struct net_bridge_mcast_port *pmctx,
3301 const struct in6_addr *group,
3302 __u16 vid,
3303 const unsigned char *src)
3304 {
3305 struct br_ip br_group;
3306 struct bridge_mcast_own_query *own_query;
3307
3308 if (ipv6_addr_is_ll_all_nodes(group))
3309 return;
3310
3311 own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;
3312
3313 memset(&br_group, 0, sizeof(br_group));
3314 br_group.dst.ip6 = *group;
3315 br_group.proto = htons(ETH_P_IPV6);
3316 br_group.vid = vid;
3317
3318 br_multicast_leave_group(brmctx, pmctx, &br_group,
3319 &brmctx->ip6_other_query,
3320 own_query, src);
3321 }
3322 #endif
3323
3324 static void br_multicast_err_count(const struct net_bridge *br,
3325 const struct net_bridge_port *p,
3326 __be16 proto)
3327 {
3328 struct bridge_mcast_stats __percpu *stats;
3329 struct bridge_mcast_stats *pstats;
3330
3331 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3332 return;
3333
3334 if (p)
3335 stats = p->mcast_stats;
3336 else
3337 stats = br->mcast_stats;
3338 if (WARN_ON(!stats))
3339 return;
3340
3341 pstats = this_cpu_ptr(stats);
3342
3343 u64_stats_update_begin(&pstats->syncp);
3344 switch (proto) {
3345 case htons(ETH_P_IP):
3346 pstats->mstats.igmp_parse_errors++;
3347 break;
3348 #if IS_ENABLED(CONFIG_IPV6)
3349 case htons(ETH_P_IPV6):
3350 pstats->mstats.mld_parse_errors++;
3351 break;
3352 #endif
3353 }
3354 u64_stats_update_end(&pstats->syncp);
3355 }
3356
/* Snoop a PIM packet: a valid PIMv2 HELLO received on a port indicates
 * a multicast router behind it, so mark the port as an IPv4 mrouter port.
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	/* copy the PIM header out if it is not linear in the skb */
	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3373
/* Multicast Router Discovery: an MRD Advertisement received on a port
 * marks it as an IPv4 multicast router port.
 * Returns 0 when handled, -ENOMSG when the packet is not an MRD advert.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}
3388
/* Validate and dispatch an IPv4 multicast packet for snooping.
 * Non-IGMP traffic may still be snooped for PIM hellos and MRD adverts;
 * IGMP messages are dispatched by type and accounted in the stats.
 * Returns 0 or a negative error from the per-type handler.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* valid IP multicast, but not IGMP */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			/* non-link-local data: forward to mrouter ports only */
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		/* malformed IGMP: count the parse error */
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* reports are only forwarded towards multicast routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3444
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery: an MRD Advertisement received on a
 * port marks it as an IPv6 multicast router port.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3457
/* Validate and dispatch an IPv6 multicast packet for snooping.
 * Mirrors br_multicast_ipv4_rcv() for MLD: non-MLD traffic may still be
 * snooped for MRD adverts; MLD messages are dispatched by type.
 * Returns 0 or a negative error from the per-type handler.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		/* valid ICMPv6/IPv6, but not an MLD message */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		/* malformed MLD: count the parse error */
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* reports are only forwarded towards multicast routers */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
3512
/* Snooping entry point for every packet received by the bridge.
 * Resets the per-skb snooping state, then dispatches to the IPv4/IPv6
 * handler when multicast snooping is enabled.  Returns 0 or a negative
 * error from the protocol handler.
 */
int br_multicast_rcv(struct net_bridge_mcast *brmctx,
		     struct net_bridge_mcast_port *pmctx,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(brmctx, pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(brmctx, pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
3538
/* Own-query timer expired: advance the startup-query counter (capped at
 * the configured startup count), drop any remembered querier port and
 * send the next general query.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(brmctx, NULL, query);
	spin_unlock(&brmctx->br->multicast_lock);
}
3551
3552 static void br_ip4_multicast_query_expired(struct timer_list *t)
3553 {
3554 struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3555 ip4_own_query.timer);
3556
3557 br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
3558 &brmctx->ip4_querier);
3559 }
3560
3561 #if IS_ENABLED(CONFIG_IPV6)
3562 static void br_ip6_multicast_query_expired(struct timer_list *t)
3563 {
3564 struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3565 ip6_own_query.timer);
3566
3567 br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
3568 &brmctx->ip6_querier);
3569 }
3570 #endif
3571
/* Deferred destruction: splice the pending gc list out under the
 * multicast lock, then free the entries outside of it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
3584
/* Initialize a multicast context, either the bridge-global one
 * (@vlan == NULL) or a per-vlan one: protocol timing defaults,
 * querier state and all router/query timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	/* default protocol timing values (in jiffies) */
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port = NULL;
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port = NULL;
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
3626
/* Tear down a multicast context: currently just stops all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
3631
/* One-time bridge-level multicast initialization: defaults, the global
 * multicast context, and the mdb/gc bookkeeping structures.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	/* snooping on, and assume an IPv6 address until proven otherwise */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
3646
/* Join the IPv4 all-snoopers group on the bridge device so that MRD
 * advertisements are delivered locally.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3657
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
3671
/* Join both the IPv4 and IPv6 all-snoopers groups; must be called
 * without the multicast lock held (see br_multicast_toggle()).
 */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
3677
/* Leave the IPv4 all-snoopers group; warns if the inet device vanished,
 * since a matching join must have happened earlier.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3688
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
3702
/* Leave both all-snoopers groups, undoing br_multicast_join_snoopers(). */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
3708
3709 static void __br_multicast_open_query(struct net_bridge *br,
3710 struct bridge_mcast_own_query *query)
3711 {
3712 query->startup_sent = 0;
3713
3714 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3715 return;
3716
3717 mod_timer(&query->timer, jiffies);
3718 }
3719
/* Kick off the IPv4 (and, if built, IPv6) own-query cycles for a context. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
3727
/* Bring multicast up on the bridge: start queriers for every enabled
 * per-vlan context that is a bridge entry, then for the global context.
 * Caller must hold RTNL (vlan list protection).
 */
void br_multicast_open(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	if (vg) {
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			struct net_bridge_mcast *brmctx;

			brmctx = &vlan->br_mcast_ctx;
			if (br_vlan_is_brentry(vlan) &&
			    !br_multicast_ctx_vlan_disabled(brmctx))
				__br_multicast_open(&vlan->br_mcast_ctx);
		}
	}

	__br_multicast_open(&br->multicast_ctx);
}
3749
/* Stop all timers of a multicast context, waiting for running handlers. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
3761
/* Enable or disable multicast processing for a single vlan, for both
 * the master (bridge) vlan entry and per-port vlans.  Flips
 * BR_VLFLAG_MCAST_ENABLED under the multicast lock and starts/stops the
 * corresponding context.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* don't enable when the vlan is not a bridge entry or vlan
		 * snooping is globally disabled
		 */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
3806
/* Bring multicast down on the bridge: stop every enabled per-vlan
 * bridge-entry context, then the global one.  Caller must hold RTNL.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	if (vg) {
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			struct net_bridge_mcast *brmctx;

			brmctx = &vlan->br_mcast_ctx;
			if (br_vlan_is_brentry(vlan) &&
			    !br_multicast_ctx_vlan_disabled(brmctx))
				__br_multicast_stop(&vlan->br_mcast_ctx);
		}
	}

	__br_multicast_stop(&br->multicast_ctx);
}
3828
/* Final multicast teardown when the bridge device is deleted: flush all
 * mdb entries into the gc list under the lock, stop timers, free
 * everything and wait for in-flight gc work and RCU callbacks.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	/* make sure all RCU readers/callbacks have finished */
	rcu_barrier();
}
3847
/* Configure the bridge's own multicast router mode from netlink/sysfs.
 * Only DISABLED, PERM and TEMP_QUERY are accepted for the bridge itself;
 * returns -EINVAL for anything else.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		/* fixed state: local router timers are meaningless */
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
3878
/* Notify userspace that a port stopped being a multicast router port,
 * but only once both the IPv4 and IPv6 router-list entries are gone.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx->port, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
3902
/* Configure a port's multicast router mode.  Setting the current mode
 * again only refreshes the TEMP timers; otherwise the router lists and
 * timers are adjusted to match the new mode.  Returns -EINVAL for an
 * unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &p->br->multicast_ctx;
	struct net_bridge_mcast_port *pmctx = &p->multicast_ctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	spin_lock(&p->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* never a router port: drop from lists and stop timers */
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* learn dynamically via queries; drop static entries */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timers, always in the lists */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* temporary router port with timer-based expiry */
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&p->br->multicast_lock);

	return err;
}
3965
/* Start the querier for @query and re-enable the matching own query on
 * every forwarding port (disabled/blocking ports are skipped).
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		/* pick the per-port query matching the bridge-level one */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(&port->multicast_ctx.ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->multicast_ctx.ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
3988
/* Enable or disable multicast snooping on the bridge.  Updates the
 * option, informs switchdev drivers, and (when enabling on a running
 * device) restarts queriers and per-port contexts.  Snooper group
 * membership is adjusted outside the lock — see the comment below.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* offload drivers may veto; -EOPNOTSUPP just means no offload */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4045
/* Report whether multicast snooping is enabled on @dev (a bridge). */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
4053
/* Report whether the bridge @dev currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
4065
/* Enable or disable the bridge acting as IGMP/MLD querier.  When
 * enabling, arm the other-querier delay windows and start sending own
 * queries for IPv4 (and IPv6 when built).  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	/* give a potential foreign querier time to show up first */
	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
4100
4101 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
4102 {
4103 /* Currently we support only version 2 and 3 */
4104 switch (val) {
4105 case 2:
4106 case 3:
4107 break;
4108 default:
4109 return -EINVAL;
4110 }
4111
4112 spin_lock_bh(&br->multicast_lock);
4113 br->multicast_ctx.multicast_igmp_version = val;
4114 spin_unlock_bh(&br->multicast_lock);
4115
4116 return 0;
4117 }
4118
4119 #if IS_ENABLED(CONFIG_IPV6)
4120 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
4121 {
4122 /* Currently we support version 1 and 2 */
4123 switch (val) {
4124 case 1:
4125 case 2:
4126 break;
4127 default:
4128 return -EINVAL;
4129 }
4130
4131 spin_lock_bh(&br->multicast_lock);
4132 br->multicast_ctx.multicast_mld_version = val;
4133 spin_unlock_bh(&br->multicast_lock);
4134
4135 return 0;
4136 }
4137 #endif
4138
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* on allocation failure, return what we have so far */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4195
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a minimal header carrying only the protocol for the check */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4232
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* a pending other-query timer means a foreign querier is
		 * active; it must not be behind @dev itself
		 */
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    rcu_dereference(brmctx->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    rcu_dereference(brmctx->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
4283
/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* any router-list entry on a port other than @dev counts */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
4341
4342 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
4343 const struct sk_buff *skb, u8 type, u8 dir)
4344 {
4345 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
4346 __be16 proto = skb->protocol;
4347 unsigned int t_len;
4348
4349 u64_stats_update_begin(&pstats->syncp);
4350 switch (proto) {
4351 case htons(ETH_P_IP):
4352 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
4353 switch (type) {
4354 case IGMP_HOST_MEMBERSHIP_REPORT:
4355 pstats->mstats.igmp_v1reports[dir]++;
4356 break;
4357 case IGMPV2_HOST_MEMBERSHIP_REPORT:
4358 pstats->mstats.igmp_v2reports[dir]++;
4359 break;
4360 case IGMPV3_HOST_MEMBERSHIP_REPORT:
4361 pstats->mstats.igmp_v3reports[dir]++;
4362 break;
4363 case IGMP_HOST_MEMBERSHIP_QUERY:
4364 if (t_len != sizeof(struct igmphdr)) {
4365 pstats->mstats.igmp_v3queries[dir]++;
4366 } else {
4367 unsigned int offset = skb_transport_offset(skb);
4368 struct igmphdr *ih, _ihdr;
4369
4370 ih = skb_header_pointer(skb, offset,
4371 sizeof(_ihdr), &_ihdr);
4372 if (!ih)
4373 break;
4374 if (!ih->code)
4375 pstats->mstats.igmp_v1queries[dir]++;
4376 else
4377 pstats->mstats.igmp_v2queries[dir]++;
4378 }
4379 break;
4380 case IGMP_HOST_LEAVE_MESSAGE:
4381 pstats->mstats.igmp_leaves[dir]++;
4382 break;
4383 }
4384 break;
4385 #if IS_ENABLED(CONFIG_IPV6)
4386 case htons(ETH_P_IPV6):
4387 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
4388 sizeof(struct ipv6hdr);
4389 t_len -= skb_network_header_len(skb);
4390 switch (type) {
4391 case ICMPV6_MGM_REPORT:
4392 pstats->mstats.mld_v1reports[dir]++;
4393 break;
4394 case ICMPV6_MLD2_REPORT:
4395 pstats->mstats.mld_v2reports[dir]++;
4396 break;
4397 case ICMPV6_MGM_QUERY:
4398 if (t_len != sizeof(struct mld_msg))
4399 pstats->mstats.mld_v2queries[dir]++;
4400 else
4401 pstats->mstats.mld_v1queries[dir]++;
4402 break;
4403 case ICMPV6_MGM_REDUCTION:
4404 pstats->mstats.mld_leaves[dir]++;
4405 break;
4406 }
4407 break;
4408 #endif /* CONFIG_IPV6 */
4409 }
4410 u64_stats_update_end(&pstats->syncp);
4411 }
4412
4413 void br_multicast_count(struct net_bridge *br,
4414 const struct net_bridge_port *p,
4415 const struct sk_buff *skb, u8 type, u8 dir)
4416 {
4417 struct bridge_mcast_stats __percpu *stats;
4418
4419 /* if multicast_disabled is true then igmp type can't be set */
4420 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
4421 return;
4422
4423 if (p)
4424 stats = p->mcast_stats;
4425 else
4426 stats = br->mcast_stats;
4427 if (WARN_ON(!stats))
4428 return;
4429
4430 br_mcast_stats_add(stats, skb, type, dir);
4431 }
4432
4433 int br_multicast_init_stats(struct net_bridge *br)
4434 {
4435 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4436 if (!br->mcast_stats)
4437 return -ENOMEM;
4438
4439 return 0;
4440 }
4441
/* Free the per-cpu statistics allocated by br_multicast_init_stats();
 * free_percpu(NULL) is a no-op, so this is safe if allocation failed.
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
4446
4447 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
4448 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
4449 {
4450 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
4451 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
4452 }
4453
4454 void br_multicast_get_stats(const struct net_bridge *br,
4455 const struct net_bridge_port *p,
4456 struct br_mcast_stats *dest)
4457 {
4458 struct bridge_mcast_stats __percpu *stats;
4459 struct br_mcast_stats tdst;
4460 int i;
4461
4462 memset(dest, 0, sizeof(*dest));
4463 if (p)
4464 stats = p->mcast_stats;
4465 else
4466 stats = br->mcast_stats;
4467 if (WARN_ON(!stats))
4468 return;
4469
4470 memset(&tdst, 0, sizeof(tdst));
4471 for_each_possible_cpu(i) {
4472 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
4473 struct br_mcast_stats temp;
4474 unsigned int start;
4475
4476 do {
4477 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4478 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
4479 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4480
4481 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
4482 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
4483 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
4484 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
4485 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
4486 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
4487 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
4488 tdst.igmp_parse_errors += temp.igmp_parse_errors;
4489
4490 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
4491 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
4492 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
4493 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
4494 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
4495 tdst.mld_parse_errors += temp.mld_parse_errors;
4496 }
4497 memcpy(dest, &tdst, sizeof(*dest));
4498 }
4499
4500 int br_mdb_hash_init(struct net_bridge *br)
4501 {
4502 int err;
4503
4504 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4505 if (err)
4506 return err;
4507
4508 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4509 if (err) {
4510 rhashtable_destroy(&br->sg_port_tbl);
4511 return err;
4512 }
4513
4514 return 0;
4515 }
4516
/* Tear down both multicast rhashtables; counterpart of br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}