// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
38 static const struct rhashtable_params br_mdb_rht_params
= {
39 .head_offset
= offsetof(struct net_bridge_mdb_entry
, rhnode
),
40 .key_offset
= offsetof(struct net_bridge_mdb_entry
, addr
),
41 .key_len
= sizeof(struct br_ip
),
42 .automatic_shrinking
= true,
45 static const struct rhashtable_params br_sg_port_rht_params
= {
46 .head_offset
= offsetof(struct net_bridge_port_group
, rhnode
),
47 .key_offset
= offsetof(struct net_bridge_port_group
, key
),
48 .key_len
= sizeof(struct net_bridge_port_group_sg_key
),
49 .automatic_shrinking
= true,
52 static void br_multicast_start_querier(struct net_bridge_mcast
*brmctx
,
53 struct bridge_mcast_own_query
*query
);
54 static void br_ip4_multicast_add_router(struct net_bridge_mcast
*brmctx
,
55 struct net_bridge_mcast_port
*pmctx
);
56 static void br_ip4_multicast_leave_group(struct net_bridge_mcast
*brmctx
,
57 struct net_bridge_mcast_port
*pmctx
,
60 const unsigned char *src
);
61 static void br_multicast_port_group_rexmit(struct timer_list
*t
);
64 br_multicast_rport_del_notify(struct net_bridge_mcast_port
*pmctx
, bool deleted
);
65 static void br_ip6_multicast_add_router(struct net_bridge_mcast
*brmctx
,
66 struct net_bridge_mcast_port
*pmctx
);
67 #if IS_ENABLED(CONFIG_IPV6)
68 static void br_ip6_multicast_leave_group(struct net_bridge_mcast
*brmctx
,
69 struct net_bridge_mcast_port
*pmctx
,
70 const struct in6_addr
*group
,
71 __u16 vid
, const unsigned char *src
);
73 static struct net_bridge_port_group
*
74 __br_multicast_add_group(struct net_bridge_mcast
*brmctx
,
75 struct net_bridge_mcast_port
*pmctx
,
77 const unsigned char *src
,
81 static void br_multicast_find_del_pg(struct net_bridge
*br
,
82 struct net_bridge_port_group
*pg
);
83 static void __br_multicast_stop(struct net_bridge_mcast
*brmctx
);
85 static struct net_bridge_port_group
*
86 br_sg_port_find(struct net_bridge
*br
,
87 struct net_bridge_port_group_sg_key
*sg_p
)
89 lockdep_assert_held_once(&br
->multicast_lock
);
91 return rhashtable_lookup_fast(&br
->sg_port_tbl
, sg_p
,
92 br_sg_port_rht_params
);
95 static struct net_bridge_mdb_entry
*br_mdb_ip_get_rcu(struct net_bridge
*br
,
98 return rhashtable_lookup(&br
->mdb_hash_tbl
, dst
, br_mdb_rht_params
);
101 struct net_bridge_mdb_entry
*br_mdb_ip_get(struct net_bridge
*br
,
104 struct net_bridge_mdb_entry
*ent
;
106 lockdep_assert_held_once(&br
->multicast_lock
);
109 ent
= rhashtable_lookup(&br
->mdb_hash_tbl
, dst
, br_mdb_rht_params
);
115 static struct net_bridge_mdb_entry
*br_mdb_ip4_get(struct net_bridge
*br
,
116 __be32 dst
, __u16 vid
)
120 memset(&br_dst
, 0, sizeof(br_dst
));
121 br_dst
.dst
.ip4
= dst
;
122 br_dst
.proto
= htons(ETH_P_IP
);
125 return br_mdb_ip_get(br
, &br_dst
);
128 #if IS_ENABLED(CONFIG_IPV6)
129 static struct net_bridge_mdb_entry
*br_mdb_ip6_get(struct net_bridge
*br
,
130 const struct in6_addr
*dst
,
135 memset(&br_dst
, 0, sizeof(br_dst
));
136 br_dst
.dst
.ip6
= *dst
;
137 br_dst
.proto
= htons(ETH_P_IPV6
);
140 return br_mdb_ip_get(br
, &br_dst
);
144 struct net_bridge_mdb_entry
*br_mdb_get(struct net_bridge_mcast
*brmctx
,
145 struct sk_buff
*skb
, u16 vid
)
147 struct net_bridge
*br
= brmctx
->br
;
150 if (!br_opt_get(br
, BROPT_MULTICAST_ENABLED
))
153 if (BR_INPUT_SKB_CB(skb
)->igmp
)
156 memset(&ip
, 0, sizeof(ip
));
157 ip
.proto
= skb
->protocol
;
160 switch (skb
->protocol
) {
161 case htons(ETH_P_IP
):
162 ip
.dst
.ip4
= ip_hdr(skb
)->daddr
;
163 if (brmctx
->multicast_igmp_version
== 3) {
164 struct net_bridge_mdb_entry
*mdb
;
166 ip
.src
.ip4
= ip_hdr(skb
)->saddr
;
167 mdb
= br_mdb_ip_get_rcu(br
, &ip
);
173 #if IS_ENABLED(CONFIG_IPV6)
174 case htons(ETH_P_IPV6
):
175 ip
.dst
.ip6
= ipv6_hdr(skb
)->daddr
;
176 if (brmctx
->multicast_mld_version
== 2) {
177 struct net_bridge_mdb_entry
*mdb
;
179 ip
.src
.ip6
= ipv6_hdr(skb
)->saddr
;
180 mdb
= br_mdb_ip_get_rcu(br
, &ip
);
183 memset(&ip
.src
.ip6
, 0, sizeof(ip
.src
.ip6
));
189 ether_addr_copy(ip
.dst
.mac_addr
, eth_hdr(skb
)->h_dest
);
192 return br_mdb_ip_get_rcu(br
, &ip
);
195 static bool br_port_group_equal(struct net_bridge_port_group
*p
,
196 struct net_bridge_port
*port
,
197 const unsigned char *src
)
199 if (p
->key
.port
!= port
)
202 if (!(port
->flags
& BR_MULTICAST_TO_UNICAST
))
205 return ether_addr_equal(src
, p
->eth_addr
);
208 static void __fwd_add_star_excl(struct net_bridge_mcast_port
*pmctx
,
209 struct net_bridge_port_group
*pg
,
212 struct net_bridge_port_group_sg_key sg_key
;
213 struct net_bridge_port_group
*src_pg
;
214 struct net_bridge_mcast
*brmctx
;
216 memset(&sg_key
, 0, sizeof(sg_key
));
217 brmctx
= br_multicast_port_ctx_get_global(pmctx
);
218 sg_key
.port
= pg
->key
.port
;
219 sg_key
.addr
= *sg_ip
;
220 if (br_sg_port_find(brmctx
->br
, &sg_key
))
223 src_pg
= __br_multicast_add_group(brmctx
, pmctx
,
225 MCAST_INCLUDE
, false, false);
226 if (IS_ERR_OR_NULL(src_pg
) ||
227 src_pg
->rt_protocol
!= RTPROT_KERNEL
)
230 src_pg
->flags
|= MDB_PG_FLAGS_STAR_EXCL
;
233 static void __fwd_del_star_excl(struct net_bridge_port_group
*pg
,
236 struct net_bridge_port_group_sg_key sg_key
;
237 struct net_bridge
*br
= pg
->key
.port
->br
;
238 struct net_bridge_port_group
*src_pg
;
240 memset(&sg_key
, 0, sizeof(sg_key
));
241 sg_key
.port
= pg
->key
.port
;
242 sg_key
.addr
= *sg_ip
;
243 src_pg
= br_sg_port_find(br
, &sg_key
);
244 if (!src_pg
|| !(src_pg
->flags
& MDB_PG_FLAGS_STAR_EXCL
) ||
245 src_pg
->rt_protocol
!= RTPROT_KERNEL
)
248 br_multicast_find_del_pg(br
, src_pg
);
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication; the assumption is that any S,G blocked entries
 * are already added, so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
259 void br_multicast_star_g_handle_mode(struct net_bridge_port_group
*pg
,
262 struct net_bridge
*br
= pg
->key
.port
->br
;
263 struct net_bridge_port_group
*pg_lst
;
264 struct net_bridge_mcast_port
*pmctx
;
265 struct net_bridge_mdb_entry
*mp
;
268 if (WARN_ON(!br_multicast_is_star_g(&pg
->key
.addr
)))
271 mp
= br_mdb_ip_get(br
, &pg
->key
.addr
);
274 pmctx
= &pg
->key
.port
->multicast_ctx
;
276 memset(&sg_ip
, 0, sizeof(sg_ip
));
277 sg_ip
= pg
->key
.addr
;
279 for (pg_lst
= mlock_dereference(mp
->ports
, br
);
281 pg_lst
= mlock_dereference(pg_lst
->next
, br
)) {
282 struct net_bridge_group_src
*src_ent
;
286 hlist_for_each_entry(src_ent
, &pg_lst
->src_list
, node
) {
287 if (!(src_ent
->flags
& BR_SGRP_F_INSTALLED
))
289 sg_ip
.src
= src_ent
->addr
.src
;
290 switch (filter_mode
) {
292 __fwd_del_star_excl(pg
, &sg_ip
);
295 __fwd_add_star_excl(pmctx
, pg
, &sg_ip
);
302 /* called when adding a new S,G with host_joined == false by default */
303 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry
*star_mp
,
304 struct net_bridge_port_group
*sg
)
306 struct net_bridge_mdb_entry
*sg_mp
;
308 if (WARN_ON(!br_multicast_is_star_g(&star_mp
->addr
)))
310 if (!star_mp
->host_joined
)
313 sg_mp
= br_mdb_ip_get(star_mp
->br
, &sg
->key
.addr
);
316 sg_mp
->host_joined
= true;
319 /* set the host_joined state of all of *,G's S,G entries */
320 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry
*star_mp
)
322 struct net_bridge
*br
= star_mp
->br
;
323 struct net_bridge_mdb_entry
*sg_mp
;
324 struct net_bridge_port_group
*pg
;
327 if (WARN_ON(!br_multicast_is_star_g(&star_mp
->addr
)))
330 memset(&sg_ip
, 0, sizeof(sg_ip
));
331 sg_ip
= star_mp
->addr
;
332 for (pg
= mlock_dereference(star_mp
->ports
, br
);
334 pg
= mlock_dereference(pg
->next
, br
)) {
335 struct net_bridge_group_src
*src_ent
;
337 hlist_for_each_entry(src_ent
, &pg
->src_list
, node
) {
338 if (!(src_ent
->flags
& BR_SGRP_F_INSTALLED
))
340 sg_ip
.src
= src_ent
->addr
.src
;
341 sg_mp
= br_mdb_ip_get(br
, &sg_ip
);
344 sg_mp
->host_joined
= star_mp
->host_joined
;
349 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry
*sgmp
)
351 struct net_bridge_port_group __rcu
**pp
;
352 struct net_bridge_port_group
*p
;
354 /* *,G exclude ports are only added to S,G entries */
355 if (WARN_ON(br_multicast_is_star_g(&sgmp
->addr
)))
358 /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
359 * we should ignore perm entries since they're managed by user-space
361 for (pp
= &sgmp
->ports
;
362 (p
= mlock_dereference(*pp
, sgmp
->br
)) != NULL
;
364 if (!(p
->flags
& (MDB_PG_FLAGS_STAR_EXCL
|
365 MDB_PG_FLAGS_PERMANENT
)))
368 /* currently the host can only have joined the *,G which means
369 * we treat it as EXCLUDE {}, so for an S,G it's considered a
370 * STAR_EXCLUDE entry and we can safely leave it
372 sgmp
->host_joined
= false;
374 for (pp
= &sgmp
->ports
;
375 (p
= mlock_dereference(*pp
, sgmp
->br
)) != NULL
;) {
376 if (!(p
->flags
& MDB_PG_FLAGS_PERMANENT
))
377 br_multicast_del_pg(sgmp
, p
, pp
);
383 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry
*star_mp
,
384 struct net_bridge_port_group
*sg
)
386 struct net_bridge_port_group_sg_key sg_key
;
387 struct net_bridge
*br
= star_mp
->br
;
388 struct net_bridge_mcast_port
*pmctx
;
389 struct net_bridge_port_group
*pg
;
390 struct net_bridge_mcast
*brmctx
;
392 if (WARN_ON(br_multicast_is_star_g(&sg
->key
.addr
)))
394 if (WARN_ON(!br_multicast_is_star_g(&star_mp
->addr
)))
397 br_multicast_sg_host_state(star_mp
, sg
);
398 memset(&sg_key
, 0, sizeof(sg_key
));
399 sg_key
.addr
= sg
->key
.addr
;
400 brmctx
= &br
->multicast_ctx
;
401 /* we need to add all exclude ports to the S,G */
402 for (pg
= mlock_dereference(star_mp
->ports
, br
);
404 pg
= mlock_dereference(pg
->next
, br
)) {
405 struct net_bridge_port_group
*src_pg
;
407 if (pg
== sg
|| pg
->filter_mode
== MCAST_INCLUDE
)
410 sg_key
.port
= pg
->key
.port
;
411 if (br_sg_port_find(br
, &sg_key
))
414 pmctx
= &pg
->key
.port
->multicast_ctx
;
415 src_pg
= __br_multicast_add_group(brmctx
, pmctx
,
418 MCAST_INCLUDE
, false, false);
419 if (IS_ERR_OR_NULL(src_pg
) ||
420 src_pg
->rt_protocol
!= RTPROT_KERNEL
)
422 src_pg
->flags
|= MDB_PG_FLAGS_STAR_EXCL
;
426 static void br_multicast_fwd_src_add(struct net_bridge_group_src
*src
)
428 struct net_bridge_mdb_entry
*star_mp
;
429 struct net_bridge_mcast_port
*pmctx
;
430 struct net_bridge_port_group
*sg
;
431 struct net_bridge_mcast
*brmctx
;
434 if (src
->flags
& BR_SGRP_F_INSTALLED
)
437 memset(&sg_ip
, 0, sizeof(sg_ip
));
438 pmctx
= &src
->pg
->key
.port
->multicast_ctx
;
439 brmctx
= br_multicast_port_ctx_get_global(pmctx
);
440 sg_ip
= src
->pg
->key
.addr
;
441 sg_ip
.src
= src
->addr
.src
;
443 sg
= __br_multicast_add_group(brmctx
, pmctx
, &sg_ip
,
444 src
->pg
->eth_addr
, MCAST_INCLUDE
, false,
445 !timer_pending(&src
->timer
));
446 if (IS_ERR_OR_NULL(sg
))
448 src
->flags
|= BR_SGRP_F_INSTALLED
;
449 sg
->flags
&= ~MDB_PG_FLAGS_STAR_EXCL
;
451 /* if it was added by user-space as perm we can skip next steps */
452 if (sg
->rt_protocol
!= RTPROT_KERNEL
&&
453 (sg
->flags
& MDB_PG_FLAGS_PERMANENT
))
456 /* the kernel is now responsible for removing this S,G */
457 del_timer(&sg
->timer
);
458 star_mp
= br_mdb_ip_get(src
->br
, &src
->pg
->key
.addr
);
462 br_multicast_sg_add_exclude_ports(star_mp
, sg
);
465 static void br_multicast_fwd_src_remove(struct net_bridge_group_src
*src
,
468 struct net_bridge_port_group
*p
, *pg
= src
->pg
;
469 struct net_bridge_port_group __rcu
**pp
;
470 struct net_bridge_mdb_entry
*mp
;
473 memset(&sg_ip
, 0, sizeof(sg_ip
));
474 sg_ip
= pg
->key
.addr
;
475 sg_ip
.src
= src
->addr
.src
;
477 mp
= br_mdb_ip_get(src
->br
, &sg_ip
);
481 for (pp
= &mp
->ports
;
482 (p
= mlock_dereference(*pp
, src
->br
)) != NULL
;
484 if (!br_port_group_equal(p
, pg
->key
.port
, pg
->eth_addr
))
487 if (p
->rt_protocol
!= RTPROT_KERNEL
&&
488 (p
->flags
& MDB_PG_FLAGS_PERMANENT
))
492 p
->flags
|= MDB_PG_FLAGS_FAST_LEAVE
;
493 br_multicast_del_pg(mp
, p
, pp
);
496 src
->flags
&= ~BR_SGRP_F_INSTALLED
;
499 /* install S,G and based on src's timer enable or disable forwarding */
500 static void br_multicast_fwd_src_handle(struct net_bridge_group_src
*src
)
502 struct net_bridge_port_group_sg_key sg_key
;
503 struct net_bridge_port_group
*sg
;
506 br_multicast_fwd_src_add(src
);
508 memset(&sg_key
, 0, sizeof(sg_key
));
509 sg_key
.addr
= src
->pg
->key
.addr
;
510 sg_key
.addr
.src
= src
->addr
.src
;
511 sg_key
.port
= src
->pg
->key
.port
;
513 sg
= br_sg_port_find(src
->br
, &sg_key
);
514 if (!sg
|| (sg
->flags
& MDB_PG_FLAGS_PERMANENT
))
517 old_flags
= sg
->flags
;
518 if (timer_pending(&src
->timer
))
519 sg
->flags
&= ~MDB_PG_FLAGS_BLOCKED
;
521 sg
->flags
|= MDB_PG_FLAGS_BLOCKED
;
523 if (old_flags
!= sg
->flags
) {
524 struct net_bridge_mdb_entry
*sg_mp
;
526 sg_mp
= br_mdb_ip_get(src
->br
, &sg_key
.addr
);
529 br_mdb_notify(src
->br
->dev
, sg_mp
, sg
, RTM_NEWMDB
);
533 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc
*gc
)
535 struct net_bridge_mdb_entry
*mp
;
537 mp
= container_of(gc
, struct net_bridge_mdb_entry
, mcast_gc
);
538 WARN_ON(!hlist_unhashed(&mp
->mdb_node
));
541 del_timer_sync(&mp
->timer
);
545 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry
*mp
)
547 struct net_bridge
*br
= mp
->br
;
549 rhashtable_remove_fast(&br
->mdb_hash_tbl
, &mp
->rhnode
,
551 hlist_del_init_rcu(&mp
->mdb_node
);
552 hlist_add_head(&mp
->mcast_gc
.gc_node
, &br
->mcast_gc_list
);
553 queue_work(system_long_wq
, &br
->mcast_gc_work
);
556 static void br_multicast_group_expired(struct timer_list
*t
)
558 struct net_bridge_mdb_entry
*mp
= from_timer(mp
, t
, timer
);
559 struct net_bridge
*br
= mp
->br
;
561 spin_lock(&br
->multicast_lock
);
562 if (hlist_unhashed(&mp
->mdb_node
) || !netif_running(br
->dev
) ||
563 timer_pending(&mp
->timer
))
566 br_multicast_host_leave(mp
, true);
570 br_multicast_del_mdb_entry(mp
);
572 spin_unlock(&br
->multicast_lock
);
575 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc
*gc
)
577 struct net_bridge_group_src
*src
;
579 src
= container_of(gc
, struct net_bridge_group_src
, mcast_gc
);
580 WARN_ON(!hlist_unhashed(&src
->node
));
582 del_timer_sync(&src
->timer
);
586 void br_multicast_del_group_src(struct net_bridge_group_src
*src
,
589 struct net_bridge
*br
= src
->pg
->key
.port
->br
;
591 br_multicast_fwd_src_remove(src
, fastleave
);
592 hlist_del_init_rcu(&src
->node
);
594 hlist_add_head(&src
->mcast_gc
.gc_node
, &br
->mcast_gc_list
);
595 queue_work(system_long_wq
, &br
->mcast_gc_work
);
598 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc
*gc
)
600 struct net_bridge_port_group
*pg
;
602 pg
= container_of(gc
, struct net_bridge_port_group
, mcast_gc
);
603 WARN_ON(!hlist_unhashed(&pg
->mglist
));
604 WARN_ON(!hlist_empty(&pg
->src_list
));
606 del_timer_sync(&pg
->rexmit_timer
);
607 del_timer_sync(&pg
->timer
);
611 void br_multicast_del_pg(struct net_bridge_mdb_entry
*mp
,
612 struct net_bridge_port_group
*pg
,
613 struct net_bridge_port_group __rcu
**pp
)
615 struct net_bridge
*br
= pg
->key
.port
->br
;
616 struct net_bridge_group_src
*ent
;
617 struct hlist_node
*tmp
;
619 rcu_assign_pointer(*pp
, pg
->next
);
620 hlist_del_init(&pg
->mglist
);
621 br_multicast_eht_clean_sets(pg
);
622 hlist_for_each_entry_safe(ent
, tmp
, &pg
->src_list
, node
)
623 br_multicast_del_group_src(ent
, false);
624 br_mdb_notify(br
->dev
, mp
, pg
, RTM_DELMDB
);
625 if (!br_multicast_is_star_g(&mp
->addr
)) {
626 rhashtable_remove_fast(&br
->sg_port_tbl
, &pg
->rhnode
,
627 br_sg_port_rht_params
);
628 br_multicast_sg_del_exclude_ports(mp
);
630 br_multicast_star_g_handle_mode(pg
, MCAST_INCLUDE
);
632 hlist_add_head(&pg
->mcast_gc
.gc_node
, &br
->mcast_gc_list
);
633 queue_work(system_long_wq
, &br
->mcast_gc_work
);
635 if (!mp
->ports
&& !mp
->host_joined
&& netif_running(br
->dev
))
636 mod_timer(&mp
->timer
, jiffies
);
639 static void br_multicast_find_del_pg(struct net_bridge
*br
,
640 struct net_bridge_port_group
*pg
)
642 struct net_bridge_port_group __rcu
**pp
;
643 struct net_bridge_mdb_entry
*mp
;
644 struct net_bridge_port_group
*p
;
646 mp
= br_mdb_ip_get(br
, &pg
->key
.addr
);
650 for (pp
= &mp
->ports
;
651 (p
= mlock_dereference(*pp
, br
)) != NULL
;
656 br_multicast_del_pg(mp
, pg
, pp
);
663 static void br_multicast_port_group_expired(struct timer_list
*t
)
665 struct net_bridge_port_group
*pg
= from_timer(pg
, t
, timer
);
666 struct net_bridge_group_src
*src_ent
;
667 struct net_bridge
*br
= pg
->key
.port
->br
;
668 struct hlist_node
*tmp
;
671 spin_lock(&br
->multicast_lock
);
672 if (!netif_running(br
->dev
) || timer_pending(&pg
->timer
) ||
673 hlist_unhashed(&pg
->mglist
) || pg
->flags
& MDB_PG_FLAGS_PERMANENT
)
676 changed
= !!(pg
->filter_mode
== MCAST_EXCLUDE
);
677 pg
->filter_mode
= MCAST_INCLUDE
;
678 hlist_for_each_entry_safe(src_ent
, tmp
, &pg
->src_list
, node
) {
679 if (!timer_pending(&src_ent
->timer
)) {
680 br_multicast_del_group_src(src_ent
, false);
685 if (hlist_empty(&pg
->src_list
)) {
686 br_multicast_find_del_pg(br
, pg
);
687 } else if (changed
) {
688 struct net_bridge_mdb_entry
*mp
= br_mdb_ip_get(br
, &pg
->key
.addr
);
690 if (changed
&& br_multicast_is_star_g(&pg
->key
.addr
))
691 br_multicast_star_g_handle_mode(pg
, MCAST_INCLUDE
);
695 br_mdb_notify(br
->dev
, mp
, pg
, RTM_NEWMDB
);
698 spin_unlock(&br
->multicast_lock
);
701 static void br_multicast_gc(struct hlist_head
*head
)
703 struct net_bridge_mcast_gc
*gcent
;
704 struct hlist_node
*tmp
;
706 hlist_for_each_entry_safe(gcent
, tmp
, head
, gc_node
) {
707 hlist_del_init(&gcent
->gc_node
);
708 gcent
->destroy(gcent
);
712 static struct sk_buff
*br_ip4_multicast_alloc_query(struct net_bridge_mcast
*brmctx
,
713 struct net_bridge_port_group
*pg
,
714 __be32 ip_dst
, __be32 group
,
715 bool with_srcs
, bool over_lmqt
,
716 u8 sflag
, u8
*igmp_type
,
719 struct net_bridge_port
*p
= pg
? pg
->key
.port
: NULL
;
720 struct net_bridge_group_src
*ent
;
721 size_t pkt_size
, igmp_hdr_size
;
722 unsigned long now
= jiffies
;
723 struct igmpv3_query
*ihv3
;
724 void *csum_start
= NULL
;
725 __sum16
*csum
= NULL
;
733 igmp_hdr_size
= sizeof(*ih
);
734 if (brmctx
->multicast_igmp_version
== 3) {
735 igmp_hdr_size
= sizeof(*ihv3
);
736 if (pg
&& with_srcs
) {
737 lmqt
= now
+ (brmctx
->multicast_last_member_interval
*
738 brmctx
->multicast_last_member_count
);
739 hlist_for_each_entry(ent
, &pg
->src_list
, node
) {
740 if (over_lmqt
== time_after(ent
->timer
.expires
,
742 ent
->src_query_rexmit_cnt
> 0)
748 igmp_hdr_size
+= lmqt_srcs
* sizeof(__be32
);
752 pkt_size
= sizeof(*eth
) + sizeof(*iph
) + 4 + igmp_hdr_size
;
753 if ((p
&& pkt_size
> p
->dev
->mtu
) ||
754 pkt_size
> brmctx
->br
->dev
->mtu
)
757 skb
= netdev_alloc_skb_ip_align(brmctx
->br
->dev
, pkt_size
);
761 skb
->protocol
= htons(ETH_P_IP
);
763 skb_reset_mac_header(skb
);
766 ether_addr_copy(eth
->h_source
, brmctx
->br
->dev
->dev_addr
);
767 ip_eth_mc_map(ip_dst
, eth
->h_dest
);
768 eth
->h_proto
= htons(ETH_P_IP
);
769 skb_put(skb
, sizeof(*eth
));
771 skb_set_network_header(skb
, skb
->len
);
773 iph
->tot_len
= htons(pkt_size
- sizeof(*eth
));
779 iph
->frag_off
= htons(IP_DF
);
781 iph
->protocol
= IPPROTO_IGMP
;
782 iph
->saddr
= br_opt_get(brmctx
->br
, BROPT_MULTICAST_QUERY_USE_IFADDR
) ?
783 inet_select_addr(brmctx
->br
->dev
, 0, RT_SCOPE_LINK
) : 0;
785 ((u8
*)&iph
[1])[0] = IPOPT_RA
;
786 ((u8
*)&iph
[1])[1] = 4;
787 ((u8
*)&iph
[1])[2] = 0;
788 ((u8
*)&iph
[1])[3] = 0;
792 skb_set_transport_header(skb
, skb
->len
);
793 *igmp_type
= IGMP_HOST_MEMBERSHIP_QUERY
;
795 switch (brmctx
->multicast_igmp_version
) {
798 ih
->type
= IGMP_HOST_MEMBERSHIP_QUERY
;
799 ih
->code
= (group
? brmctx
->multicast_last_member_interval
:
800 brmctx
->multicast_query_response_interval
) /
801 (HZ
/ IGMP_TIMER_SCALE
);
805 csum_start
= (void *)ih
;
808 ihv3
= igmpv3_query_hdr(skb
);
809 ihv3
->type
= IGMP_HOST_MEMBERSHIP_QUERY
;
810 ihv3
->code
= (group
? brmctx
->multicast_last_member_interval
:
811 brmctx
->multicast_query_response_interval
) /
812 (HZ
/ IGMP_TIMER_SCALE
);
814 ihv3
->qqic
= brmctx
->multicast_query_interval
/ HZ
;
815 ihv3
->nsrcs
= htons(lmqt_srcs
);
817 ihv3
->suppress
= sflag
;
821 csum_start
= (void *)ihv3
;
822 if (!pg
|| !with_srcs
)
826 hlist_for_each_entry(ent
, &pg
->src_list
, node
) {
827 if (over_lmqt
== time_after(ent
->timer
.expires
,
829 ent
->src_query_rexmit_cnt
> 0) {
830 ihv3
->srcs
[lmqt_srcs
++] = ent
->addr
.src
.ip4
;
831 ent
->src_query_rexmit_cnt
--;
832 if (need_rexmit
&& ent
->src_query_rexmit_cnt
)
836 if (WARN_ON(lmqt_srcs
!= ntohs(ihv3
->nsrcs
))) {
843 if (WARN_ON(!csum
|| !csum_start
)) {
848 *csum
= ip_compute_csum(csum_start
, igmp_hdr_size
);
849 skb_put(skb
, igmp_hdr_size
);
850 __skb_pull(skb
, sizeof(*eth
));
856 #if IS_ENABLED(CONFIG_IPV6)
857 static struct sk_buff
*br_ip6_multicast_alloc_query(struct net_bridge_mcast
*brmctx
,
858 struct net_bridge_port_group
*pg
,
859 const struct in6_addr
*ip6_dst
,
860 const struct in6_addr
*group
,
861 bool with_srcs
, bool over_llqt
,
862 u8 sflag
, u8
*igmp_type
,
865 struct net_bridge_port
*p
= pg
? pg
->key
.port
: NULL
;
866 struct net_bridge_group_src
*ent
;
867 size_t pkt_size
, mld_hdr_size
;
868 unsigned long now
= jiffies
;
869 struct mld2_query
*mld2q
;
870 void *csum_start
= NULL
;
871 unsigned long interval
;
872 __sum16
*csum
= NULL
;
873 struct ipv6hdr
*ip6h
;
874 struct mld_msg
*mldq
;
881 mld_hdr_size
= sizeof(*mldq
);
882 if (brmctx
->multicast_mld_version
== 2) {
883 mld_hdr_size
= sizeof(*mld2q
);
884 if (pg
&& with_srcs
) {
885 llqt
= now
+ (brmctx
->multicast_last_member_interval
*
886 brmctx
->multicast_last_member_count
);
887 hlist_for_each_entry(ent
, &pg
->src_list
, node
) {
888 if (over_llqt
== time_after(ent
->timer
.expires
,
890 ent
->src_query_rexmit_cnt
> 0)
896 mld_hdr_size
+= llqt_srcs
* sizeof(struct in6_addr
);
900 pkt_size
= sizeof(*eth
) + sizeof(*ip6h
) + 8 + mld_hdr_size
;
901 if ((p
&& pkt_size
> p
->dev
->mtu
) ||
902 pkt_size
> brmctx
->br
->dev
->mtu
)
905 skb
= netdev_alloc_skb_ip_align(brmctx
->br
->dev
, pkt_size
);
909 skb
->protocol
= htons(ETH_P_IPV6
);
911 /* Ethernet header */
912 skb_reset_mac_header(skb
);
915 ether_addr_copy(eth
->h_source
, brmctx
->br
->dev
->dev_addr
);
916 eth
->h_proto
= htons(ETH_P_IPV6
);
917 skb_put(skb
, sizeof(*eth
));
919 /* IPv6 header + HbH option */
920 skb_set_network_header(skb
, skb
->len
);
921 ip6h
= ipv6_hdr(skb
);
923 *(__force __be32
*)ip6h
= htonl(0x60000000);
924 ip6h
->payload_len
= htons(8 + mld_hdr_size
);
925 ip6h
->nexthdr
= IPPROTO_HOPOPTS
;
927 ip6h
->daddr
= *ip6_dst
;
928 if (ipv6_dev_get_saddr(dev_net(brmctx
->br
->dev
), brmctx
->br
->dev
,
929 &ip6h
->daddr
, 0, &ip6h
->saddr
)) {
931 br_opt_toggle(brmctx
->br
, BROPT_HAS_IPV6_ADDR
, false);
935 br_opt_toggle(brmctx
->br
, BROPT_HAS_IPV6_ADDR
, true);
936 ipv6_eth_mc_map(&ip6h
->daddr
, eth
->h_dest
);
938 hopopt
= (u8
*)(ip6h
+ 1);
939 hopopt
[0] = IPPROTO_ICMPV6
; /* next hdr */
940 hopopt
[1] = 0; /* length of HbH */
941 hopopt
[2] = IPV6_TLV_ROUTERALERT
; /* Router Alert */
942 hopopt
[3] = 2; /* Length of RA Option */
943 hopopt
[4] = 0; /* Type = 0x0000 (MLD) */
945 hopopt
[6] = IPV6_TLV_PAD1
; /* Pad1 */
946 hopopt
[7] = IPV6_TLV_PAD1
; /* Pad1 */
948 skb_put(skb
, sizeof(*ip6h
) + 8);
951 skb_set_transport_header(skb
, skb
->len
);
952 interval
= ipv6_addr_any(group
) ?
953 brmctx
->multicast_query_response_interval
:
954 brmctx
->multicast_last_member_interval
;
955 *igmp_type
= ICMPV6_MGM_QUERY
;
956 switch (brmctx
->multicast_mld_version
) {
958 mldq
= (struct mld_msg
*)icmp6_hdr(skb
);
959 mldq
->mld_type
= ICMPV6_MGM_QUERY
;
962 mldq
->mld_maxdelay
= htons((u16
)jiffies_to_msecs(interval
));
963 mldq
->mld_reserved
= 0;
964 mldq
->mld_mca
= *group
;
965 csum
= &mldq
->mld_cksum
;
966 csum_start
= (void *)mldq
;
969 mld2q
= (struct mld2_query
*)icmp6_hdr(skb
);
970 mld2q
->mld2q_mrc
= htons((u16
)jiffies_to_msecs(interval
));
971 mld2q
->mld2q_type
= ICMPV6_MGM_QUERY
;
972 mld2q
->mld2q_code
= 0;
973 mld2q
->mld2q_cksum
= 0;
974 mld2q
->mld2q_resv1
= 0;
975 mld2q
->mld2q_resv2
= 0;
976 mld2q
->mld2q_suppress
= sflag
;
977 mld2q
->mld2q_qrv
= 2;
978 mld2q
->mld2q_nsrcs
= htons(llqt_srcs
);
979 mld2q
->mld2q_qqic
= brmctx
->multicast_query_interval
/ HZ
;
980 mld2q
->mld2q_mca
= *group
;
981 csum
= &mld2q
->mld2q_cksum
;
982 csum_start
= (void *)mld2q
;
983 if (!pg
|| !with_srcs
)
987 hlist_for_each_entry(ent
, &pg
->src_list
, node
) {
988 if (over_llqt
== time_after(ent
->timer
.expires
,
990 ent
->src_query_rexmit_cnt
> 0) {
991 mld2q
->mld2q_srcs
[llqt_srcs
++] = ent
->addr
.src
.ip6
;
992 ent
->src_query_rexmit_cnt
--;
993 if (need_rexmit
&& ent
->src_query_rexmit_cnt
)
997 if (WARN_ON(llqt_srcs
!= ntohs(mld2q
->mld2q_nsrcs
))) {
1004 if (WARN_ON(!csum
|| !csum_start
)) {
1009 *csum
= csum_ipv6_magic(&ip6h
->saddr
, &ip6h
->daddr
, mld_hdr_size
,
1011 csum_partial(csum_start
, mld_hdr_size
, 0));
1012 skb_put(skb
, mld_hdr_size
);
1013 __skb_pull(skb
, sizeof(*eth
));
1020 static struct sk_buff
*br_multicast_alloc_query(struct net_bridge_mcast
*brmctx
,
1021 struct net_bridge_port_group
*pg
,
1022 struct br_ip
*ip_dst
,
1023 struct br_ip
*group
,
1024 bool with_srcs
, bool over_lmqt
,
1025 u8 sflag
, u8
*igmp_type
,
1030 switch (group
->proto
) {
1031 case htons(ETH_P_IP
):
1032 ip4_dst
= ip_dst
? ip_dst
->dst
.ip4
: htonl(INADDR_ALLHOSTS_GROUP
);
1033 return br_ip4_multicast_alloc_query(brmctx
, pg
,
1034 ip4_dst
, group
->dst
.ip4
,
1035 with_srcs
, over_lmqt
,
1038 #if IS_ENABLED(CONFIG_IPV6)
1039 case htons(ETH_P_IPV6
): {
1040 struct in6_addr ip6_dst
;
1043 ip6_dst
= ip_dst
->dst
.ip6
;
1045 ipv6_addr_set(&ip6_dst
, htonl(0xff020000), 0, 0,
1048 return br_ip6_multicast_alloc_query(brmctx
, pg
,
1049 &ip6_dst
, &group
->dst
.ip6
,
1050 with_srcs
, over_lmqt
,
1059 struct net_bridge_mdb_entry
*br_multicast_new_group(struct net_bridge
*br
,
1060 struct br_ip
*group
)
1062 struct net_bridge_mdb_entry
*mp
;
1065 mp
= br_mdb_ip_get(br
, group
);
1069 if (atomic_read(&br
->mdb_hash_tbl
.nelems
) >= br
->hash_max
) {
1070 br_opt_toggle(br
, BROPT_MULTICAST_ENABLED
, false);
1071 return ERR_PTR(-E2BIG
);
1074 mp
= kzalloc(sizeof(*mp
), GFP_ATOMIC
);
1076 return ERR_PTR(-ENOMEM
);
1080 mp
->mcast_gc
.destroy
= br_multicast_destroy_mdb_entry
;
1081 timer_setup(&mp
->timer
, br_multicast_group_expired
, 0);
1082 err
= rhashtable_lookup_insert_fast(&br
->mdb_hash_tbl
, &mp
->rhnode
,
1088 hlist_add_head_rcu(&mp
->mdb_node
, &br
->mdb_list
);
1094 static void br_multicast_group_src_expired(struct timer_list
*t
)
1096 struct net_bridge_group_src
*src
= from_timer(src
, t
, timer
);
1097 struct net_bridge_port_group
*pg
;
1098 struct net_bridge
*br
= src
->br
;
1100 spin_lock(&br
->multicast_lock
);
1101 if (hlist_unhashed(&src
->node
) || !netif_running(br
->dev
) ||
1102 timer_pending(&src
->timer
))
1106 if (pg
->filter_mode
== MCAST_INCLUDE
) {
1107 br_multicast_del_group_src(src
, false);
1108 if (!hlist_empty(&pg
->src_list
))
1110 br_multicast_find_del_pg(br
, pg
);
1112 br_multicast_fwd_src_handle(src
);
1116 spin_unlock(&br
->multicast_lock
);
1119 struct net_bridge_group_src
*
1120 br_multicast_find_group_src(struct net_bridge_port_group
*pg
, struct br_ip
*ip
)
1122 struct net_bridge_group_src
*ent
;
1124 switch (ip
->proto
) {
1125 case htons(ETH_P_IP
):
1126 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
1127 if (ip
->src
.ip4
== ent
->addr
.src
.ip4
)
1130 #if IS_ENABLED(CONFIG_IPV6)
1131 case htons(ETH_P_IPV6
):
1132 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
1133 if (!ipv6_addr_cmp(&ent
->addr
.src
.ip6
, &ip
->src
.ip6
))
1142 static struct net_bridge_group_src
*
1143 br_multicast_new_group_src(struct net_bridge_port_group
*pg
, struct br_ip
*src_ip
)
1145 struct net_bridge_group_src
*grp_src
;
1147 if (unlikely(pg
->src_ents
>= PG_SRC_ENT_LIMIT
))
1150 switch (src_ip
->proto
) {
1151 case htons(ETH_P_IP
):
1152 if (ipv4_is_zeronet(src_ip
->src
.ip4
) ||
1153 ipv4_is_multicast(src_ip
->src
.ip4
))
1156 #if IS_ENABLED(CONFIG_IPV6)
1157 case htons(ETH_P_IPV6
):
1158 if (ipv6_addr_any(&src_ip
->src
.ip6
) ||
1159 ipv6_addr_is_multicast(&src_ip
->src
.ip6
))
1165 grp_src
= kzalloc(sizeof(*grp_src
), GFP_ATOMIC
);
1166 if (unlikely(!grp_src
))
1170 grp_src
->br
= pg
->key
.port
->br
;
1171 grp_src
->addr
= *src_ip
;
1172 grp_src
->mcast_gc
.destroy
= br_multicast_destroy_group_src
;
1173 timer_setup(&grp_src
->timer
, br_multicast_group_src_expired
, 0);
1175 hlist_add_head_rcu(&grp_src
->node
, &pg
->src_list
);
1181 struct net_bridge_port_group
*br_multicast_new_port_group(
1182 struct net_bridge_port
*port
,
1183 struct br_ip
*group
,
1184 struct net_bridge_port_group __rcu
*next
,
1185 unsigned char flags
,
1186 const unsigned char *src
,
1190 struct net_bridge_port_group
*p
;
1192 p
= kzalloc(sizeof(*p
), GFP_ATOMIC
);
1196 p
->key
.addr
= *group
;
1199 p
->filter_mode
= filter_mode
;
1200 p
->rt_protocol
= rt_protocol
;
1201 p
->eht_host_tree
= RB_ROOT
;
1202 p
->eht_set_tree
= RB_ROOT
;
1203 p
->mcast_gc
.destroy
= br_multicast_destroy_port_group
;
1204 INIT_HLIST_HEAD(&p
->src_list
);
1206 if (!br_multicast_is_star_g(group
) &&
1207 rhashtable_lookup_insert_fast(&port
->br
->sg_port_tbl
, &p
->rhnode
,
1208 br_sg_port_rht_params
)) {
1213 rcu_assign_pointer(p
->next
, next
);
1214 timer_setup(&p
->timer
, br_multicast_port_group_expired
, 0);
1215 timer_setup(&p
->rexmit_timer
, br_multicast_port_group_rexmit
, 0);
1216 hlist_add_head(&p
->mglist
, &port
->mglist
);
1219 memcpy(p
->eth_addr
, src
, ETH_ALEN
);
1221 eth_broadcast_addr(p
->eth_addr
);
1226 void br_multicast_host_join(struct net_bridge_mdb_entry
*mp
, bool notify
)
1228 if (!mp
->host_joined
) {
1229 mp
->host_joined
= true;
1230 if (br_multicast_is_star_g(&mp
->addr
))
1231 br_multicast_star_g_host_state(mp
);
1233 br_mdb_notify(mp
->br
->dev
, mp
, NULL
, RTM_NEWMDB
);
1236 if (br_group_is_l2(&mp
->addr
))
1239 mod_timer(&mp
->timer
,
1240 jiffies
+ mp
->br
->multicast_ctx
.multicast_membership_interval
);
1243 void br_multicast_host_leave(struct net_bridge_mdb_entry
*mp
, bool notify
)
1245 if (!mp
->host_joined
)
1248 mp
->host_joined
= false;
1249 if (br_multicast_is_star_g(&mp
->addr
))
1250 br_multicast_star_g_host_state(mp
);
1252 br_mdb_notify(mp
->br
->dev
, mp
, NULL
, RTM_DELMDB
);
1255 static struct net_bridge_port_group
*
1256 __br_multicast_add_group(struct net_bridge_mcast
*brmctx
,
1257 struct net_bridge_mcast_port
*pmctx
,
1258 struct br_ip
*group
,
1259 const unsigned char *src
,
1264 struct net_bridge_port_group __rcu
**pp
;
1265 struct net_bridge_port_group
*p
= NULL
;
1266 struct net_bridge_mdb_entry
*mp
;
1267 unsigned long now
= jiffies
;
1269 if (!netif_running(brmctx
->br
->dev
) ||
1270 (pmctx
&& pmctx
->port
->state
== BR_STATE_DISABLED
))
1273 mp
= br_multicast_new_group(brmctx
->br
, group
);
1275 return ERR_CAST(mp
);
1278 br_multicast_host_join(mp
, true);
1282 for (pp
= &mp
->ports
;
1283 (p
= mlock_dereference(*pp
, brmctx
->br
)) != NULL
;
1285 if (br_port_group_equal(p
, pmctx
->port
, src
))
1287 if ((unsigned long)p
->key
.port
< (unsigned long)pmctx
->port
)
1291 p
= br_multicast_new_port_group(pmctx
->port
, group
, *pp
, 0, src
,
1292 filter_mode
, RTPROT_KERNEL
);
1294 p
= ERR_PTR(-ENOMEM
);
1297 rcu_assign_pointer(*pp
, p
);
1299 p
->flags
|= MDB_PG_FLAGS_BLOCKED
;
1300 br_mdb_notify(brmctx
->br
->dev
, mp
, p
, RTM_NEWMDB
);
1304 mod_timer(&p
->timer
,
1305 now
+ brmctx
->multicast_membership_interval
);
1311 static int br_multicast_add_group(struct net_bridge_mcast
*brmctx
,
1312 struct net_bridge_mcast_port
*pmctx
,
1313 struct br_ip
*group
,
1314 const unsigned char *src
,
1318 struct net_bridge_port_group
*pg
;
1321 spin_lock(&brmctx
->br
->multicast_lock
);
1322 pg
= __br_multicast_add_group(brmctx
, pmctx
, group
, src
, filter_mode
,
1323 igmpv2_mldv1
, false);
1324 /* NULL is considered valid for host joined groups */
1325 err
= PTR_ERR_OR_ZERO(pg
);
1326 spin_unlock(&brmctx
->br
->multicast_lock
);
1331 static int br_ip4_multicast_add_group(struct net_bridge_mcast
*brmctx
,
1332 struct net_bridge_mcast_port
*pmctx
,
1335 const unsigned char *src
,
1338 struct br_ip br_group
;
1341 if (ipv4_is_local_multicast(group
))
1344 memset(&br_group
, 0, sizeof(br_group
));
1345 br_group
.dst
.ip4
= group
;
1346 br_group
.proto
= htons(ETH_P_IP
);
1348 filter_mode
= igmpv2
? MCAST_EXCLUDE
: MCAST_INCLUDE
;
1350 return br_multicast_add_group(brmctx
, pmctx
, &br_group
, src
,
1351 filter_mode
, igmpv2
);
1354 #if IS_ENABLED(CONFIG_IPV6)
1355 static int br_ip6_multicast_add_group(struct net_bridge_mcast
*brmctx
,
1356 struct net_bridge_mcast_port
*pmctx
,
1357 const struct in6_addr
*group
,
1359 const unsigned char *src
,
1362 struct br_ip br_group
;
1365 if (ipv6_addr_is_ll_all_nodes(group
))
1368 memset(&br_group
, 0, sizeof(br_group
));
1369 br_group
.dst
.ip6
= *group
;
1370 br_group
.proto
= htons(ETH_P_IPV6
);
1372 filter_mode
= mldv1
? MCAST_EXCLUDE
: MCAST_INCLUDE
;
1374 return br_multicast_add_group(brmctx
, pmctx
, &br_group
, src
,
1375 filter_mode
, mldv1
);
1379 static bool br_multicast_rport_del(struct hlist_node
*rlist
)
1381 if (hlist_unhashed(rlist
))
1384 hlist_del_init_rcu(rlist
);
1388 static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port
*pmctx
)
1390 return br_multicast_rport_del(&pmctx
->ip4_rlist
);
1393 static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port
*pmctx
)
1395 #if IS_ENABLED(CONFIG_IPV6)
1396 return br_multicast_rport_del(&pmctx
->ip6_rlist
);
1402 static void br_multicast_router_expired(struct net_bridge_mcast_port
*pmctx
,
1403 struct timer_list
*t
,
1404 struct hlist_node
*rlist
)
1406 struct net_bridge
*br
= pmctx
->port
->br
;
1409 spin_lock(&br
->multicast_lock
);
1410 if (pmctx
->multicast_router
== MDB_RTR_TYPE_DISABLED
||
1411 pmctx
->multicast_router
== MDB_RTR_TYPE_PERM
||
1415 del
= br_multicast_rport_del(rlist
);
1416 br_multicast_rport_del_notify(pmctx
, del
);
1418 spin_unlock(&br
->multicast_lock
);
1421 static void br_ip4_multicast_router_expired(struct timer_list
*t
)
1423 struct net_bridge_mcast_port
*pmctx
= from_timer(pmctx
, t
,
1424 ip4_mc_router_timer
);
1426 br_multicast_router_expired(pmctx
, t
, &pmctx
->ip4_rlist
);
1429 #if IS_ENABLED(CONFIG_IPV6)
1430 static void br_ip6_multicast_router_expired(struct timer_list
*t
)
1432 struct net_bridge_mcast_port
*pmctx
= from_timer(pmctx
, t
,
1433 ip6_mc_router_timer
);
1435 br_multicast_router_expired(pmctx
, t
, &pmctx
->ip6_rlist
);
1439 static void br_mc_router_state_change(struct net_bridge
*p
,
1442 struct switchdev_attr attr
= {
1444 .id
= SWITCHDEV_ATTR_ID_BRIDGE_MROUTER
,
1445 .flags
= SWITCHDEV_F_DEFER
,
1446 .u
.mrouter
= is_mc_router
,
1449 switchdev_port_attr_set(p
->dev
, &attr
, NULL
);
1452 static void br_multicast_local_router_expired(struct net_bridge_mcast
*brmctx
,
1453 struct timer_list
*timer
)
1455 spin_lock(&brmctx
->br
->multicast_lock
);
1456 if (brmctx
->multicast_router
== MDB_RTR_TYPE_DISABLED
||
1457 brmctx
->multicast_router
== MDB_RTR_TYPE_PERM
||
1458 br_ip4_multicast_is_router(brmctx
) ||
1459 br_ip6_multicast_is_router(brmctx
))
1462 br_mc_router_state_change(brmctx
->br
, false);
1464 spin_unlock(&brmctx
->br
->multicast_lock
);
1467 static void br_ip4_multicast_local_router_expired(struct timer_list
*t
)
1469 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
1470 ip4_mc_router_timer
);
1472 br_multicast_local_router_expired(brmctx
, t
);
1475 #if IS_ENABLED(CONFIG_IPV6)
1476 static void br_ip6_multicast_local_router_expired(struct timer_list
*t
)
1478 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
1479 ip6_mc_router_timer
);
1481 br_multicast_local_router_expired(brmctx
, t
);
1485 static void br_multicast_querier_expired(struct net_bridge_mcast
*brmctx
,
1486 struct bridge_mcast_own_query
*query
)
1488 spin_lock(&brmctx
->br
->multicast_lock
);
1489 if (!netif_running(brmctx
->br
->dev
) ||
1490 !br_opt_get(brmctx
->br
, BROPT_MULTICAST_ENABLED
))
1493 br_multicast_start_querier(brmctx
, query
);
1496 spin_unlock(&brmctx
->br
->multicast_lock
);
1499 static void br_ip4_multicast_querier_expired(struct timer_list
*t
)
1501 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
1502 ip4_other_query
.timer
);
1504 br_multicast_querier_expired(brmctx
, &brmctx
->ip4_own_query
);
1507 #if IS_ENABLED(CONFIG_IPV6)
1508 static void br_ip6_multicast_querier_expired(struct timer_list
*t
)
1510 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
1511 ip6_other_query
.timer
);
1513 br_multicast_querier_expired(brmctx
, &brmctx
->ip6_own_query
);
1517 static void br_multicast_select_own_querier(struct net_bridge_mcast
*brmctx
,
1519 struct sk_buff
*skb
)
1521 if (ip
->proto
== htons(ETH_P_IP
))
1522 brmctx
->ip4_querier
.addr
.src
.ip4
= ip_hdr(skb
)->saddr
;
1523 #if IS_ENABLED(CONFIG_IPV6)
1525 brmctx
->ip6_querier
.addr
.src
.ip6
= ipv6_hdr(skb
)->saddr
;
1529 static void __br_multicast_send_query(struct net_bridge_mcast
*brmctx
,
1530 struct net_bridge_mcast_port
*pmctx
,
1531 struct net_bridge_port_group
*pg
,
1532 struct br_ip
*ip_dst
,
1533 struct br_ip
*group
,
1538 bool over_lmqt
= !!sflag
;
1539 struct sk_buff
*skb
;
1543 skb
= br_multicast_alloc_query(brmctx
, pg
, ip_dst
, group
, with_srcs
,
1544 over_lmqt
, sflag
, &igmp_type
,
1550 skb
->dev
= pmctx
->port
->dev
;
1551 br_multicast_count(brmctx
->br
, pmctx
->port
, skb
, igmp_type
,
1553 NF_HOOK(NFPROTO_BRIDGE
, NF_BR_LOCAL_OUT
,
1554 dev_net(pmctx
->port
->dev
), NULL
, skb
, NULL
, skb
->dev
,
1555 br_dev_queue_push_xmit
);
1557 if (over_lmqt
&& with_srcs
&& sflag
) {
1559 goto again_under_lmqt
;
1562 br_multicast_select_own_querier(brmctx
, group
, skb
);
1563 br_multicast_count(brmctx
->br
, NULL
, skb
, igmp_type
,
1569 static void br_multicast_send_query(struct net_bridge_mcast
*brmctx
,
1570 struct net_bridge_mcast_port
*pmctx
,
1571 struct bridge_mcast_own_query
*own_query
)
1573 struct bridge_mcast_other_query
*other_query
= NULL
;
1574 struct br_ip br_group
;
1577 if (!netif_running(brmctx
->br
->dev
) ||
1578 !br_opt_get(brmctx
->br
, BROPT_MULTICAST_ENABLED
) ||
1579 !br_opt_get(brmctx
->br
, BROPT_MULTICAST_QUERIER
))
1582 memset(&br_group
.dst
, 0, sizeof(br_group
.dst
));
1584 if (pmctx
? (own_query
== &pmctx
->ip4_own_query
) :
1585 (own_query
== &brmctx
->ip4_own_query
)) {
1586 other_query
= &brmctx
->ip4_other_query
;
1587 br_group
.proto
= htons(ETH_P_IP
);
1588 #if IS_ENABLED(CONFIG_IPV6)
1590 other_query
= &brmctx
->ip6_other_query
;
1591 br_group
.proto
= htons(ETH_P_IPV6
);
1595 if (!other_query
|| timer_pending(&other_query
->timer
))
1598 __br_multicast_send_query(brmctx
, pmctx
, NULL
, NULL
, &br_group
, false,
1602 time
+= own_query
->startup_sent
< brmctx
->multicast_startup_query_count
?
1603 brmctx
->multicast_startup_query_interval
:
1604 brmctx
->multicast_query_interval
;
1605 mod_timer(&own_query
->timer
, time
);
1609 br_multicast_port_query_expired(struct net_bridge_mcast_port
*pmctx
,
1610 struct bridge_mcast_own_query
*query
)
1612 struct net_bridge
*br
= pmctx
->port
->br
;
1614 spin_lock(&br
->multicast_lock
);
1615 if (pmctx
->port
->state
== BR_STATE_DISABLED
||
1616 pmctx
->port
->state
== BR_STATE_BLOCKING
)
1619 if (query
->startup_sent
< br
->multicast_ctx
.multicast_startup_query_count
)
1620 query
->startup_sent
++;
1622 br_multicast_send_query(&br
->multicast_ctx
, pmctx
, query
);
1625 spin_unlock(&br
->multicast_lock
);
1628 static void br_ip4_multicast_port_query_expired(struct timer_list
*t
)
1630 struct net_bridge_mcast_port
*pmctx
= from_timer(pmctx
, t
,
1631 ip4_own_query
.timer
);
1633 br_multicast_port_query_expired(pmctx
, &pmctx
->ip4_own_query
);
1636 #if IS_ENABLED(CONFIG_IPV6)
1637 static void br_ip6_multicast_port_query_expired(struct timer_list
*t
)
1639 struct net_bridge_mcast_port
*pmctx
= from_timer(pmctx
, t
,
1640 ip6_own_query
.timer
);
1642 br_multicast_port_query_expired(pmctx
, &pmctx
->ip6_own_query
);
1646 static void br_multicast_port_group_rexmit(struct timer_list
*t
)
1648 struct net_bridge_port_group
*pg
= from_timer(pg
, t
, rexmit_timer
);
1649 struct bridge_mcast_other_query
*other_query
= NULL
;
1650 struct net_bridge
*br
= pg
->key
.port
->br
;
1651 struct net_bridge_mcast_port
*pmctx
;
1652 struct net_bridge_mcast
*brmctx
;
1653 bool need_rexmit
= false;
1655 spin_lock(&br
->multicast_lock
);
1656 if (!netif_running(br
->dev
) || hlist_unhashed(&pg
->mglist
) ||
1657 !br_opt_get(br
, BROPT_MULTICAST_ENABLED
) ||
1658 !br_opt_get(br
, BROPT_MULTICAST_QUERIER
))
1661 brmctx
= &br
->multicast_ctx
;
1662 pmctx
= &pg
->key
.port
->multicast_ctx
;
1663 if (pg
->key
.addr
.proto
== htons(ETH_P_IP
))
1664 other_query
= &brmctx
->ip4_other_query
;
1665 #if IS_ENABLED(CONFIG_IPV6)
1667 other_query
= &brmctx
->ip6_other_query
;
1670 if (!other_query
|| timer_pending(&other_query
->timer
))
1673 if (pg
->grp_query_rexmit_cnt
) {
1674 pg
->grp_query_rexmit_cnt
--;
1675 __br_multicast_send_query(brmctx
, pmctx
, pg
, &pg
->key
.addr
,
1676 &pg
->key
.addr
, false, 1, NULL
);
1678 __br_multicast_send_query(brmctx
, pmctx
, pg
, &pg
->key
.addr
,
1679 &pg
->key
.addr
, true, 0, &need_rexmit
);
1681 if (pg
->grp_query_rexmit_cnt
|| need_rexmit
)
1682 mod_timer(&pg
->rexmit_timer
, jiffies
+
1683 brmctx
->multicast_last_member_interval
);
1685 spin_unlock(&br
->multicast_lock
);
1688 static int br_mc_disabled_update(struct net_device
*dev
, bool value
,
1689 struct netlink_ext_ack
*extack
)
1691 struct switchdev_attr attr
= {
1693 .id
= SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED
,
1694 .flags
= SWITCHDEV_F_DEFER
,
1695 .u
.mc_disabled
= !value
,
1698 return switchdev_port_attr_set(dev
, &attr
, extack
);
1701 void br_multicast_port_ctx_init(struct net_bridge_port
*port
,
1702 struct net_bridge_vlan
*vlan
,
1703 struct net_bridge_mcast_port
*pmctx
)
1707 pmctx
->multicast_router
= MDB_RTR_TYPE_TEMP_QUERY
;
1708 timer_setup(&pmctx
->ip4_mc_router_timer
,
1709 br_ip4_multicast_router_expired
, 0);
1710 timer_setup(&pmctx
->ip4_own_query
.timer
,
1711 br_ip4_multicast_port_query_expired
, 0);
1712 #if IS_ENABLED(CONFIG_IPV6)
1713 timer_setup(&pmctx
->ip6_mc_router_timer
,
1714 br_ip6_multicast_router_expired
, 0);
1715 timer_setup(&pmctx
->ip6_own_query
.timer
,
1716 br_ip6_multicast_port_query_expired
, 0);
1720 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port
*pmctx
)
1722 #if IS_ENABLED(CONFIG_IPV6)
1723 del_timer_sync(&pmctx
->ip6_mc_router_timer
);
1725 del_timer_sync(&pmctx
->ip4_mc_router_timer
);
1728 int br_multicast_add_port(struct net_bridge_port
*port
)
1732 port
->multicast_eht_hosts_limit
= BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT
;
1733 br_multicast_port_ctx_init(port
, NULL
, &port
->multicast_ctx
);
1735 err
= br_mc_disabled_update(port
->dev
,
1736 br_opt_get(port
->br
,
1737 BROPT_MULTICAST_ENABLED
),
1739 if (err
&& err
!= -EOPNOTSUPP
)
1742 port
->mcast_stats
= netdev_alloc_pcpu_stats(struct bridge_mcast_stats
);
1743 if (!port
->mcast_stats
)
1749 void br_multicast_del_port(struct net_bridge_port
*port
)
1751 struct net_bridge
*br
= port
->br
;
1752 struct net_bridge_port_group
*pg
;
1753 HLIST_HEAD(deleted_head
);
1754 struct hlist_node
*n
;
1756 /* Take care of the remaining groups, only perm ones should be left */
1757 spin_lock_bh(&br
->multicast_lock
);
1758 hlist_for_each_entry_safe(pg
, n
, &port
->mglist
, mglist
)
1759 br_multicast_find_del_pg(br
, pg
);
1760 hlist_move_list(&br
->mcast_gc_list
, &deleted_head
);
1761 spin_unlock_bh(&br
->multicast_lock
);
1762 br_multicast_gc(&deleted_head
);
1763 br_multicast_port_ctx_deinit(&port
->multicast_ctx
);
1764 free_percpu(port
->mcast_stats
);
1767 static void br_multicast_enable(struct bridge_mcast_own_query
*query
)
1769 query
->startup_sent
= 0;
1771 if (try_to_del_timer_sync(&query
->timer
) >= 0 ||
1772 del_timer(&query
->timer
))
1773 mod_timer(&query
->timer
, jiffies
);
1776 static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port
*pmctx
)
1778 struct net_bridge
*br
= pmctx
->port
->br
;
1779 struct net_bridge_mcast
*brmctx
;
1781 brmctx
= br_multicast_port_ctx_get_global(pmctx
);
1782 if (!br_opt_get(br
, BROPT_MULTICAST_ENABLED
) ||
1783 !netif_running(br
->dev
))
1786 br_multicast_enable(&pmctx
->ip4_own_query
);
1787 #if IS_ENABLED(CONFIG_IPV6)
1788 br_multicast_enable(&pmctx
->ip6_own_query
);
1790 if (pmctx
->multicast_router
== MDB_RTR_TYPE_PERM
) {
1791 br_ip4_multicast_add_router(brmctx
, pmctx
);
1792 br_ip6_multicast_add_router(brmctx
, pmctx
);
1796 void br_multicast_enable_port(struct net_bridge_port
*port
)
1798 struct net_bridge
*br
= port
->br
;
1800 spin_lock(&br
->multicast_lock
);
1801 __br_multicast_enable_port_ctx(&port
->multicast_ctx
);
1802 spin_unlock(&br
->multicast_lock
);
1805 static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port
*pmctx
)
1807 struct net_bridge_port_group
*pg
;
1808 struct hlist_node
*n
;
1811 hlist_for_each_entry_safe(pg
, n
, &pmctx
->port
->mglist
, mglist
)
1812 if (!(pg
->flags
& MDB_PG_FLAGS_PERMANENT
) &&
1813 (!br_multicast_port_ctx_is_vlan(pmctx
) ||
1814 pg
->key
.addr
.vid
== pmctx
->vlan
->vid
))
1815 br_multicast_find_del_pg(pmctx
->port
->br
, pg
);
1817 del
|= br_ip4_multicast_rport_del(pmctx
);
1818 del_timer(&pmctx
->ip4_mc_router_timer
);
1819 del_timer(&pmctx
->ip4_own_query
.timer
);
1820 del
|= br_ip6_multicast_rport_del(pmctx
);
1821 #if IS_ENABLED(CONFIG_IPV6)
1822 del_timer(&pmctx
->ip6_mc_router_timer
);
1823 del_timer(&pmctx
->ip6_own_query
.timer
);
1825 br_multicast_rport_del_notify(pmctx
, del
);
1828 void br_multicast_disable_port(struct net_bridge_port
*port
)
1830 spin_lock(&port
->br
->multicast_lock
);
1831 __br_multicast_disable_port_ctx(&port
->multicast_ctx
);
1832 spin_unlock(&port
->br
->multicast_lock
);
1835 static int __grp_src_delete_marked(struct net_bridge_port_group
*pg
)
1837 struct net_bridge_group_src
*ent
;
1838 struct hlist_node
*tmp
;
1841 hlist_for_each_entry_safe(ent
, tmp
, &pg
->src_list
, node
)
1842 if (ent
->flags
& BR_SGRP_F_DELETE
) {
1843 br_multicast_del_group_src(ent
, false);
1850 static void __grp_src_mod_timer(struct net_bridge_group_src
*src
,
1851 unsigned long expires
)
1853 mod_timer(&src
->timer
, expires
);
1854 br_multicast_fwd_src_handle(src
);
1857 static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast
*brmctx
,
1858 struct net_bridge_mcast_port
*pmctx
,
1859 struct net_bridge_port_group
*pg
)
1861 struct bridge_mcast_other_query
*other_query
= NULL
;
1862 u32 lmqc
= brmctx
->multicast_last_member_count
;
1863 unsigned long lmqt
, lmi
, now
= jiffies
;
1864 struct net_bridge_group_src
*ent
;
1866 if (!netif_running(brmctx
->br
->dev
) ||
1867 !br_opt_get(brmctx
->br
, BROPT_MULTICAST_ENABLED
))
1870 if (pg
->key
.addr
.proto
== htons(ETH_P_IP
))
1871 other_query
= &brmctx
->ip4_other_query
;
1872 #if IS_ENABLED(CONFIG_IPV6)
1874 other_query
= &brmctx
->ip6_other_query
;
1877 lmqt
= now
+ br_multicast_lmqt(brmctx
);
1878 hlist_for_each_entry(ent
, &pg
->src_list
, node
) {
1879 if (ent
->flags
& BR_SGRP_F_SEND
) {
1880 ent
->flags
&= ~BR_SGRP_F_SEND
;
1881 if (ent
->timer
.expires
> lmqt
) {
1882 if (br_opt_get(brmctx
->br
,
1883 BROPT_MULTICAST_QUERIER
) &&
1885 !timer_pending(&other_query
->timer
))
1886 ent
->src_query_rexmit_cnt
= lmqc
;
1887 __grp_src_mod_timer(ent
, lmqt
);
1892 if (!br_opt_get(brmctx
->br
, BROPT_MULTICAST_QUERIER
) ||
1893 !other_query
|| timer_pending(&other_query
->timer
))
1896 __br_multicast_send_query(brmctx
, pmctx
, pg
, &pg
->key
.addr
,
1897 &pg
->key
.addr
, true, 1, NULL
);
1899 lmi
= now
+ brmctx
->multicast_last_member_interval
;
1900 if (!timer_pending(&pg
->rexmit_timer
) ||
1901 time_after(pg
->rexmit_timer
.expires
, lmi
))
1902 mod_timer(&pg
->rexmit_timer
, lmi
);
1905 static void __grp_send_query_and_rexmit(struct net_bridge_mcast
*brmctx
,
1906 struct net_bridge_mcast_port
*pmctx
,
1907 struct net_bridge_port_group
*pg
)
1909 struct bridge_mcast_other_query
*other_query
= NULL
;
1910 unsigned long now
= jiffies
, lmi
;
1912 if (!netif_running(brmctx
->br
->dev
) ||
1913 !br_opt_get(brmctx
->br
, BROPT_MULTICAST_ENABLED
))
1916 if (pg
->key
.addr
.proto
== htons(ETH_P_IP
))
1917 other_query
= &brmctx
->ip4_other_query
;
1918 #if IS_ENABLED(CONFIG_IPV6)
1920 other_query
= &brmctx
->ip6_other_query
;
1923 if (br_opt_get(brmctx
->br
, BROPT_MULTICAST_QUERIER
) &&
1924 other_query
&& !timer_pending(&other_query
->timer
)) {
1925 lmi
= now
+ brmctx
->multicast_last_member_interval
;
1926 pg
->grp_query_rexmit_cnt
= brmctx
->multicast_last_member_count
- 1;
1927 __br_multicast_send_query(brmctx
, pmctx
, pg
, &pg
->key
.addr
,
1928 &pg
->key
.addr
, false, 0, NULL
);
1929 if (!timer_pending(&pg
->rexmit_timer
) ||
1930 time_after(pg
->rexmit_timer
.expires
, lmi
))
1931 mod_timer(&pg
->rexmit_timer
, lmi
);
1934 if (pg
->filter_mode
== MCAST_EXCLUDE
&&
1935 (!timer_pending(&pg
->timer
) ||
1936 time_after(pg
->timer
.expires
, now
+ br_multicast_lmqt(brmctx
))))
1937 mod_timer(&pg
->timer
, now
+ br_multicast_lmqt(brmctx
));
1940 /* State Msg type New state Actions
1941 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1942 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1943 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
1945 static bool br_multicast_isinc_allow(const struct net_bridge_mcast
*brmctx
,
1946 struct net_bridge_port_group
*pg
, void *h_addr
,
1947 void *srcs
, u32 nsrcs
, size_t addr_size
,
1950 struct net_bridge_group_src
*ent
;
1951 unsigned long now
= jiffies
;
1952 bool changed
= false;
1953 struct br_ip src_ip
;
1956 memset(&src_ip
, 0, sizeof(src_ip
));
1957 src_ip
.proto
= pg
->key
.addr
.proto
;
1958 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
1959 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
1960 ent
= br_multicast_find_group_src(pg
, &src_ip
);
1962 ent
= br_multicast_new_group_src(pg
, &src_ip
);
1968 __grp_src_mod_timer(ent
, now
+ br_multicast_gmi(brmctx
));
1971 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
1978 /* State Msg type New state Actions
1979 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1983 static void __grp_src_isexc_incl(const struct net_bridge_mcast
*brmctx
,
1984 struct net_bridge_port_group
*pg
, void *h_addr
,
1985 void *srcs
, u32 nsrcs
, size_t addr_size
,
1988 struct net_bridge_group_src
*ent
;
1989 struct br_ip src_ip
;
1992 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
1993 ent
->flags
|= BR_SGRP_F_DELETE
;
1995 memset(&src_ip
, 0, sizeof(src_ip
));
1996 src_ip
.proto
= pg
->key
.addr
.proto
;
1997 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
1998 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
1999 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2001 ent
->flags
&= ~BR_SGRP_F_DELETE
;
2003 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2005 br_multicast_fwd_src_handle(ent
);
2008 br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2011 __grp_src_delete_marked(pg
);
2014 /* State Msg type New state Actions
2015 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
2020 static bool __grp_src_isexc_excl(const struct net_bridge_mcast
*brmctx
,
2021 struct net_bridge_port_group
*pg
, void *h_addr
,
2022 void *srcs
, u32 nsrcs
, size_t addr_size
,
2025 struct net_bridge_group_src
*ent
;
2026 unsigned long now
= jiffies
;
2027 bool changed
= false;
2028 struct br_ip src_ip
;
2031 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2032 ent
->flags
|= BR_SGRP_F_DELETE
;
2034 memset(&src_ip
, 0, sizeof(src_ip
));
2035 src_ip
.proto
= pg
->key
.addr
.proto
;
2036 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2037 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2038 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2040 ent
->flags
&= ~BR_SGRP_F_DELETE
;
2042 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2044 __grp_src_mod_timer(ent
,
2045 now
+ br_multicast_gmi(brmctx
));
2051 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2055 if (__grp_src_delete_marked(pg
))
2061 static bool br_multicast_isexc(const struct net_bridge_mcast
*brmctx
,
2062 struct net_bridge_port_group
*pg
, void *h_addr
,
2063 void *srcs
, u32 nsrcs
, size_t addr_size
,
2066 bool changed
= false;
2068 switch (pg
->filter_mode
) {
2070 __grp_src_isexc_incl(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2072 br_multicast_star_g_handle_mode(pg
, MCAST_EXCLUDE
);
2076 changed
= __grp_src_isexc_excl(brmctx
, pg
, h_addr
, srcs
, nsrcs
,
2077 addr_size
, grec_type
);
2081 pg
->filter_mode
= MCAST_EXCLUDE
;
2082 mod_timer(&pg
->timer
, jiffies
+ br_multicast_gmi(brmctx
));
2087 /* State Msg type New state Actions
2088 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
2091 static bool __grp_src_toin_incl(struct net_bridge_mcast
*brmctx
,
2092 struct net_bridge_mcast_port
*pmctx
,
2093 struct net_bridge_port_group
*pg
, void *h_addr
,
2094 void *srcs
, u32 nsrcs
, size_t addr_size
,
2097 u32 src_idx
, to_send
= pg
->src_ents
;
2098 struct net_bridge_group_src
*ent
;
2099 unsigned long now
= jiffies
;
2100 bool changed
= false;
2101 struct br_ip src_ip
;
2103 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2104 ent
->flags
|= BR_SGRP_F_SEND
;
2106 memset(&src_ip
, 0, sizeof(src_ip
));
2107 src_ip
.proto
= pg
->key
.addr
.proto
;
2108 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2109 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2110 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2112 ent
->flags
&= ~BR_SGRP_F_SEND
;
2115 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2120 __grp_src_mod_timer(ent
, now
+ br_multicast_gmi(brmctx
));
2123 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2128 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2133 /* State Msg type New state Actions
2134 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
2138 static bool __grp_src_toin_excl(struct net_bridge_mcast
*brmctx
,
2139 struct net_bridge_mcast_port
*pmctx
,
2140 struct net_bridge_port_group
*pg
, void *h_addr
,
2141 void *srcs
, u32 nsrcs
, size_t addr_size
,
2144 u32 src_idx
, to_send
= pg
->src_ents
;
2145 struct net_bridge_group_src
*ent
;
2146 unsigned long now
= jiffies
;
2147 bool changed
= false;
2148 struct br_ip src_ip
;
2150 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2151 if (timer_pending(&ent
->timer
))
2152 ent
->flags
|= BR_SGRP_F_SEND
;
2154 memset(&src_ip
, 0, sizeof(src_ip
));
2155 src_ip
.proto
= pg
->key
.addr
.proto
;
2156 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2157 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2158 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2160 if (timer_pending(&ent
->timer
)) {
2161 ent
->flags
&= ~BR_SGRP_F_SEND
;
2165 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2170 __grp_src_mod_timer(ent
, now
+ br_multicast_gmi(brmctx
));
2173 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2178 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2180 __grp_send_query_and_rexmit(brmctx
, pmctx
, pg
);
2185 static bool br_multicast_toin(struct net_bridge_mcast
*brmctx
,
2186 struct net_bridge_mcast_port
*pmctx
,
2187 struct net_bridge_port_group
*pg
, void *h_addr
,
2188 void *srcs
, u32 nsrcs
, size_t addr_size
,
2191 bool changed
= false;
2193 switch (pg
->filter_mode
) {
2195 changed
= __grp_src_toin_incl(brmctx
, pmctx
, pg
, h_addr
, srcs
,
2196 nsrcs
, addr_size
, grec_type
);
2199 changed
= __grp_src_toin_excl(brmctx
, pmctx
, pg
, h_addr
, srcs
,
2200 nsrcs
, addr_size
, grec_type
);
2204 if (br_multicast_eht_should_del_pg(pg
)) {
2205 pg
->flags
|= MDB_PG_FLAGS_FAST_LEAVE
;
2206 br_multicast_find_del_pg(pg
->key
.port
->br
, pg
);
2207 /* a notification has already been sent and we shouldn't
2208 * access pg after the delete so we have to return false
2216 /* State Msg type New state Actions
2217 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2222 static void __grp_src_toex_incl(struct net_bridge_mcast
*brmctx
,
2223 struct net_bridge_mcast_port
*pmctx
,
2224 struct net_bridge_port_group
*pg
, void *h_addr
,
2225 void *srcs
, u32 nsrcs
, size_t addr_size
,
2228 struct net_bridge_group_src
*ent
;
2229 u32 src_idx
, to_send
= 0;
2230 struct br_ip src_ip
;
2232 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2233 ent
->flags
= (ent
->flags
& ~BR_SGRP_F_SEND
) | BR_SGRP_F_DELETE
;
2235 memset(&src_ip
, 0, sizeof(src_ip
));
2236 src_ip
.proto
= pg
->key
.addr
.proto
;
2237 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2238 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2239 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2241 ent
->flags
= (ent
->flags
& ~BR_SGRP_F_DELETE
) |
2245 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2248 br_multicast_fwd_src_handle(ent
);
2251 br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2254 __grp_src_delete_marked(pg
);
2256 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2259 /* State Msg type New state Actions
2260 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2266 static bool __grp_src_toex_excl(struct net_bridge_mcast
*brmctx
,
2267 struct net_bridge_mcast_port
*pmctx
,
2268 struct net_bridge_port_group
*pg
, void *h_addr
,
2269 void *srcs
, u32 nsrcs
, size_t addr_size
,
2272 struct net_bridge_group_src
*ent
;
2273 u32 src_idx
, to_send
= 0;
2274 bool changed
= false;
2275 struct br_ip src_ip
;
2277 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2278 ent
->flags
= (ent
->flags
& ~BR_SGRP_F_SEND
) | BR_SGRP_F_DELETE
;
2280 memset(&src_ip
, 0, sizeof(src_ip
));
2281 src_ip
.proto
= pg
->key
.addr
.proto
;
2282 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2283 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2284 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2286 ent
->flags
&= ~BR_SGRP_F_DELETE
;
2288 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2290 __grp_src_mod_timer(ent
, pg
->timer
.expires
);
2294 if (ent
&& timer_pending(&ent
->timer
)) {
2295 ent
->flags
|= BR_SGRP_F_SEND
;
2300 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2304 if (__grp_src_delete_marked(pg
))
2307 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2312 static bool br_multicast_toex(struct net_bridge_mcast
*brmctx
,
2313 struct net_bridge_mcast_port
*pmctx
,
2314 struct net_bridge_port_group
*pg
, void *h_addr
,
2315 void *srcs
, u32 nsrcs
, size_t addr_size
,
2318 bool changed
= false;
2320 switch (pg
->filter_mode
) {
2322 __grp_src_toex_incl(brmctx
, pmctx
, pg
, h_addr
, srcs
, nsrcs
,
2323 addr_size
, grec_type
);
2324 br_multicast_star_g_handle_mode(pg
, MCAST_EXCLUDE
);
2328 changed
= __grp_src_toex_excl(brmctx
, pmctx
, pg
, h_addr
, srcs
,
2329 nsrcs
, addr_size
, grec_type
);
2333 pg
->filter_mode
= MCAST_EXCLUDE
;
2334 mod_timer(&pg
->timer
, jiffies
+ br_multicast_gmi(brmctx
));
2339 /* State Msg type New state Actions
2340 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2342 static bool __grp_src_block_incl(struct net_bridge_mcast
*brmctx
,
2343 struct net_bridge_mcast_port
*pmctx
,
2344 struct net_bridge_port_group
*pg
, void *h_addr
,
2345 void *srcs
, u32 nsrcs
, size_t addr_size
, int grec_type
)
2347 struct net_bridge_group_src
*ent
;
2348 u32 src_idx
, to_send
= 0;
2349 bool changed
= false;
2350 struct br_ip src_ip
;
2352 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2353 ent
->flags
&= ~BR_SGRP_F_SEND
;
2355 memset(&src_ip
, 0, sizeof(src_ip
));
2356 src_ip
.proto
= pg
->key
.addr
.proto
;
2357 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2358 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2359 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2361 ent
->flags
|= BR_SGRP_F_SEND
;
2366 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2371 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2376 /* State Msg type New state Actions
2377 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2380 static bool __grp_src_block_excl(struct net_bridge_mcast
*brmctx
,
2381 struct net_bridge_mcast_port
*pmctx
,
2382 struct net_bridge_port_group
*pg
, void *h_addr
,
2383 void *srcs
, u32 nsrcs
, size_t addr_size
, int grec_type
)
2385 struct net_bridge_group_src
*ent
;
2386 u32 src_idx
, to_send
= 0;
2387 bool changed
= false;
2388 struct br_ip src_ip
;
2390 hlist_for_each_entry(ent
, &pg
->src_list
, node
)
2391 ent
->flags
&= ~BR_SGRP_F_SEND
;
2393 memset(&src_ip
, 0, sizeof(src_ip
));
2394 src_ip
.proto
= pg
->key
.addr
.proto
;
2395 for (src_idx
= 0; src_idx
< nsrcs
; src_idx
++) {
2396 memcpy(&src_ip
.src
, srcs
+ (src_idx
* addr_size
), addr_size
);
2397 ent
= br_multicast_find_group_src(pg
, &src_ip
);
2399 ent
= br_multicast_new_group_src(pg
, &src_ip
);
2401 __grp_src_mod_timer(ent
, pg
->timer
.expires
);
2405 if (ent
&& timer_pending(&ent
->timer
)) {
2406 ent
->flags
|= BR_SGRP_F_SEND
;
2411 if (br_multicast_eht_handle(brmctx
, pg
, h_addr
, srcs
, nsrcs
, addr_size
,
2416 __grp_src_query_marked_and_rexmit(brmctx
, pmctx
, pg
);
2421 static bool br_multicast_block(struct net_bridge_mcast
*brmctx
,
2422 struct net_bridge_mcast_port
*pmctx
,
2423 struct net_bridge_port_group
*pg
, void *h_addr
,
2424 void *srcs
, u32 nsrcs
, size_t addr_size
, int grec_type
)
2426 bool changed
= false;
2428 switch (pg
->filter_mode
) {
2430 changed
= __grp_src_block_incl(brmctx
, pmctx
, pg
, h_addr
, srcs
,
2431 nsrcs
, addr_size
, grec_type
);
2434 changed
= __grp_src_block_excl(brmctx
, pmctx
, pg
, h_addr
, srcs
,
2435 nsrcs
, addr_size
, grec_type
);
2439 if ((pg
->filter_mode
== MCAST_INCLUDE
&& hlist_empty(&pg
->src_list
)) ||
2440 br_multicast_eht_should_del_pg(pg
)) {
2441 if (br_multicast_eht_should_del_pg(pg
))
2442 pg
->flags
|= MDB_PG_FLAGS_FAST_LEAVE
;
2443 br_multicast_find_del_pg(pg
->key
.port
->br
, pg
);
2444 /* a notification has already been sent and we shouldn't
2445 * access pg after the delete so we have to return false
2453 static struct net_bridge_port_group
*
2454 br_multicast_find_port(struct net_bridge_mdb_entry
*mp
,
2455 struct net_bridge_port
*p
,
2456 const unsigned char *src
)
2458 struct net_bridge
*br __maybe_unused
= mp
->br
;
2459 struct net_bridge_port_group
*pg
;
2461 for (pg
= mlock_dereference(mp
->ports
, br
);
2463 pg
= mlock_dereference(pg
->next
, br
))
2464 if (br_port_group_equal(pg
, p
, src
))
2470 static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast
*brmctx
,
2471 struct net_bridge_mcast_port
*pmctx
,
2472 struct sk_buff
*skb
,
2475 bool igmpv2
= brmctx
->multicast_igmp_version
== 2;
2476 struct net_bridge_mdb_entry
*mdst
;
2477 struct net_bridge_port_group
*pg
;
2478 const unsigned char *src
;
2479 struct igmpv3_report
*ih
;
2480 struct igmpv3_grec
*grec
;
2481 int i
, len
, num
, type
;
2482 __be32 group
, *h_addr
;
2483 bool changed
= false;
2487 ih
= igmpv3_report_hdr(skb
);
2488 num
= ntohs(ih
->ngrec
);
2489 len
= skb_transport_offset(skb
) + sizeof(*ih
);
2491 for (i
= 0; i
< num
; i
++) {
2492 len
+= sizeof(*grec
);
2493 if (!ip_mc_may_pull(skb
, len
))
2496 grec
= (void *)(skb
->data
+ len
- sizeof(*grec
));
2497 group
= grec
->grec_mca
;
2498 type
= grec
->grec_type
;
2499 nsrcs
= ntohs(grec
->grec_nsrcs
);
2502 if (!ip_mc_may_pull(skb
, len
))
2506 case IGMPV3_MODE_IS_INCLUDE
:
2507 case IGMPV3_MODE_IS_EXCLUDE
:
2508 case IGMPV3_CHANGE_TO_INCLUDE
:
2509 case IGMPV3_CHANGE_TO_EXCLUDE
:
2510 case IGMPV3_ALLOW_NEW_SOURCES
:
2511 case IGMPV3_BLOCK_OLD_SOURCES
:
2518 src
= eth_hdr(skb
)->h_source
;
2520 (type
== IGMPV3_CHANGE_TO_INCLUDE
||
2521 type
== IGMPV3_MODE_IS_INCLUDE
)) {
2522 if (!pmctx
|| igmpv2
) {
2523 br_ip4_multicast_leave_group(brmctx
, pmctx
,
2528 err
= br_ip4_multicast_add_group(brmctx
, pmctx
, group
,
2534 if (!pmctx
|| igmpv2
)
2537 spin_lock_bh(&brmctx
->br
->multicast_lock
);
2538 mdst
= br_mdb_ip4_get(brmctx
->br
, group
, vid
);
2540 goto unlock_continue
;
2541 pg
= br_multicast_find_port(mdst
, pmctx
->port
, src
);
2542 if (!pg
|| (pg
->flags
& MDB_PG_FLAGS_PERMANENT
))
2543 goto unlock_continue
;
2544 /* reload grec and host addr */
2545 grec
= (void *)(skb
->data
+ len
- sizeof(*grec
) - (nsrcs
* 4));
2546 h_addr
= &ip_hdr(skb
)->saddr
;
2548 case IGMPV3_ALLOW_NEW_SOURCES
:
2549 changed
= br_multicast_isinc_allow(brmctx
, pg
, h_addr
,
2551 nsrcs
, sizeof(__be32
), type
);
2553 case IGMPV3_MODE_IS_INCLUDE
:
2554 changed
= br_multicast_isinc_allow(brmctx
, pg
, h_addr
,
2556 nsrcs
, sizeof(__be32
), type
);
2558 case IGMPV3_MODE_IS_EXCLUDE
:
2559 changed
= br_multicast_isexc(brmctx
, pg
, h_addr
,
2561 nsrcs
, sizeof(__be32
), type
);
2563 case IGMPV3_CHANGE_TO_INCLUDE
:
2564 changed
= br_multicast_toin(brmctx
, pmctx
, pg
, h_addr
,
2566 nsrcs
, sizeof(__be32
), type
);
2568 case IGMPV3_CHANGE_TO_EXCLUDE
:
2569 changed
= br_multicast_toex(brmctx
, pmctx
, pg
, h_addr
,
2571 nsrcs
, sizeof(__be32
), type
);
2573 case IGMPV3_BLOCK_OLD_SOURCES
:
2574 changed
= br_multicast_block(brmctx
, pmctx
, pg
, h_addr
,
2576 nsrcs
, sizeof(__be32
), type
);
2580 br_mdb_notify(brmctx
->br
->dev
, mdst
, pg
, RTM_NEWMDB
);
2582 spin_unlock_bh(&brmctx
->br
->multicast_lock
);
2588 #if IS_ENABLED(CONFIG_IPV6)
2589 static int br_ip6_multicast_mld2_report(struct net_bridge_mcast
*brmctx
,
2590 struct net_bridge_mcast_port
*pmctx
,
2591 struct sk_buff
*skb
,
2594 bool mldv1
= brmctx
->multicast_mld_version
== 1;
2595 struct net_bridge_mdb_entry
*mdst
;
2596 struct net_bridge_port_group
*pg
;
2597 unsigned int nsrcs_offset
;
2598 const unsigned char *src
;
2599 struct icmp6hdr
*icmp6h
;
2600 struct in6_addr
*h_addr
;
2601 struct mld2_grec
*grec
;
2602 unsigned int grec_len
;
2603 bool changed
= false;
2607 if (!ipv6_mc_may_pull(skb
, sizeof(*icmp6h
)))
2610 icmp6h
= icmp6_hdr(skb
);
2611 num
= ntohs(icmp6h
->icmp6_dataun
.un_data16
[1]);
2612 len
= skb_transport_offset(skb
) + sizeof(*icmp6h
);
2614 for (i
= 0; i
< num
; i
++) {
2615 __be16
*_nsrcs
, __nsrcs
;
2618 nsrcs_offset
= len
+ offsetof(struct mld2_grec
, grec_nsrcs
);
2620 if (skb_transport_offset(skb
) + ipv6_transport_len(skb
) <
2621 nsrcs_offset
+ sizeof(__nsrcs
))
2624 _nsrcs
= skb_header_pointer(skb
, nsrcs_offset
,
2625 sizeof(__nsrcs
), &__nsrcs
);
2629 nsrcs
= ntohs(*_nsrcs
);
2630 grec_len
= struct_size(grec
, grec_src
, nsrcs
);
2632 if (!ipv6_mc_may_pull(skb
, len
+ grec_len
))
2635 grec
= (struct mld2_grec
*)(skb
->data
+ len
);
2638 switch (grec
->grec_type
) {
2639 case MLD2_MODE_IS_INCLUDE
:
2640 case MLD2_MODE_IS_EXCLUDE
:
2641 case MLD2_CHANGE_TO_INCLUDE
:
2642 case MLD2_CHANGE_TO_EXCLUDE
:
2643 case MLD2_ALLOW_NEW_SOURCES
:
2644 case MLD2_BLOCK_OLD_SOURCES
:
2651 src
= eth_hdr(skb
)->h_source
;
2652 if ((grec
->grec_type
== MLD2_CHANGE_TO_INCLUDE
||
2653 grec
->grec_type
== MLD2_MODE_IS_INCLUDE
) &&
2655 if (!pmctx
|| mldv1
) {
2656 br_ip6_multicast_leave_group(brmctx
, pmctx
,
2662 err
= br_ip6_multicast_add_group(brmctx
, pmctx
,
2663 &grec
->grec_mca
, vid
,
2669 if (!pmctx
|| mldv1
)
2672 spin_lock_bh(&brmctx
->br
->multicast_lock
);
2673 mdst
= br_mdb_ip6_get(brmctx
->br
, &grec
->grec_mca
, vid
);
2675 goto unlock_continue
;
2676 pg
= br_multicast_find_port(mdst
, pmctx
->port
, src
);
2677 if (!pg
|| (pg
->flags
& MDB_PG_FLAGS_PERMANENT
))
2678 goto unlock_continue
;
2679 h_addr
= &ipv6_hdr(skb
)->saddr
;
2680 switch (grec
->grec_type
) {
2681 case MLD2_ALLOW_NEW_SOURCES
:
2682 changed
= br_multicast_isinc_allow(brmctx
, pg
, h_addr
,
2683 grec
->grec_src
, nsrcs
,
2684 sizeof(struct in6_addr
),
2687 case MLD2_MODE_IS_INCLUDE
:
2688 changed
= br_multicast_isinc_allow(brmctx
, pg
, h_addr
,
2689 grec
->grec_src
, nsrcs
,
2690 sizeof(struct in6_addr
),
2693 case MLD2_MODE_IS_EXCLUDE
:
2694 changed
= br_multicast_isexc(brmctx
, pg
, h_addr
,
2695 grec
->grec_src
, nsrcs
,
2696 sizeof(struct in6_addr
),
2699 case MLD2_CHANGE_TO_INCLUDE
:
2700 changed
= br_multicast_toin(brmctx
, pmctx
, pg
, h_addr
,
2701 grec
->grec_src
, nsrcs
,
2702 sizeof(struct in6_addr
),
2705 case MLD2_CHANGE_TO_EXCLUDE
:
2706 changed
= br_multicast_toex(brmctx
, pmctx
, pg
, h_addr
,
2707 grec
->grec_src
, nsrcs
,
2708 sizeof(struct in6_addr
),
2711 case MLD2_BLOCK_OLD_SOURCES
:
2712 changed
= br_multicast_block(brmctx
, pmctx
, pg
, h_addr
,
2713 grec
->grec_src
, nsrcs
,
2714 sizeof(struct in6_addr
),
2719 br_mdb_notify(brmctx
->br
->dev
, mdst
, pg
, RTM_NEWMDB
);
2721 spin_unlock_bh(&brmctx
->br
->multicast_lock
);
2728 static bool br_ip4_multicast_select_querier(struct net_bridge_mcast
*brmctx
,
2729 struct net_bridge_port
*port
,
2732 if (!timer_pending(&brmctx
->ip4_own_query
.timer
) &&
2733 !timer_pending(&brmctx
->ip4_other_query
.timer
))
2736 if (!brmctx
->ip4_querier
.addr
.src
.ip4
)
2739 if (ntohl(saddr
) <= ntohl(brmctx
->ip4_querier
.addr
.src
.ip4
))
2745 brmctx
->ip4_querier
.addr
.src
.ip4
= saddr
;
2747 /* update protected by general multicast_lock by caller */
2748 rcu_assign_pointer(brmctx
->ip4_querier
.port
, port
);
2753 #if IS_ENABLED(CONFIG_IPV6)
2754 static bool br_ip6_multicast_select_querier(struct net_bridge_mcast
*brmctx
,
2755 struct net_bridge_port
*port
,
2756 struct in6_addr
*saddr
)
2758 if (!timer_pending(&brmctx
->ip6_own_query
.timer
) &&
2759 !timer_pending(&brmctx
->ip6_other_query
.timer
))
2762 if (ipv6_addr_cmp(saddr
, &brmctx
->ip6_querier
.addr
.src
.ip6
) <= 0)
2768 brmctx
->ip6_querier
.addr
.src
.ip6
= *saddr
;
2770 /* update protected by general multicast_lock by caller */
2771 rcu_assign_pointer(brmctx
->ip6_querier
.port
, port
);
2778 br_multicast_update_query_timer(struct net_bridge_mcast
*brmctx
,
2779 struct bridge_mcast_other_query
*query
,
2780 unsigned long max_delay
)
2782 if (!timer_pending(&query
->timer
))
2783 query
->delay_time
= jiffies
+ max_delay
;
2785 mod_timer(&query
->timer
, jiffies
+ brmctx
->multicast_querier_interval
);
2788 static void br_port_mc_router_state_change(struct net_bridge_port
*p
,
2791 struct switchdev_attr attr
= {
2793 .id
= SWITCHDEV_ATTR_ID_PORT_MROUTER
,
2794 .flags
= SWITCHDEV_F_DEFER
,
2795 .u
.mrouter
= is_mc_router
,
2798 switchdev_port_attr_set(p
->dev
, &attr
, NULL
);
2801 static struct net_bridge_port
*
2802 br_multicast_rport_from_node(struct net_bridge_mcast
*brmctx
,
2803 struct hlist_head
*mc_router_list
,
2804 struct hlist_node
*rlist
)
2806 struct net_bridge_mcast_port
*pmctx
;
2808 #if IS_ENABLED(CONFIG_IPV6)
2809 if (mc_router_list
== &brmctx
->ip6_mc_router_list
)
2810 pmctx
= hlist_entry(rlist
, struct net_bridge_mcast_port
,
2814 pmctx
= hlist_entry(rlist
, struct net_bridge_mcast_port
,
2820 static struct hlist_node
*
2821 br_multicast_get_rport_slot(struct net_bridge_mcast
*brmctx
,
2822 struct net_bridge_port
*port
,
2823 struct hlist_head
*mc_router_list
)
2826 struct hlist_node
*slot
= NULL
;
2827 struct net_bridge_port
*p
;
2828 struct hlist_node
*rlist
;
2830 hlist_for_each(rlist
, mc_router_list
) {
2831 p
= br_multicast_rport_from_node(brmctx
, mc_router_list
, rlist
);
2833 if ((unsigned long)port
>= (unsigned long)p
)
2842 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port
*pmctx
,
2843 struct hlist_node
*rnode
)
2845 #if IS_ENABLED(CONFIG_IPV6)
2846 if (rnode
!= &pmctx
->ip6_rlist
)
2847 return hlist_unhashed(&pmctx
->ip6_rlist
);
2849 return hlist_unhashed(&pmctx
->ip4_rlist
);
2855 /* Add port to router_list
2856 * list is maintained ordered by pointer value
2857 * and locked by br->multicast_lock and RCU
2859 static void br_multicast_add_router(struct net_bridge_mcast
*brmctx
,
2860 struct net_bridge_mcast_port
*pmctx
,
2861 struct hlist_node
*rlist
,
2862 struct hlist_head
*mc_router_list
)
2864 struct hlist_node
*slot
;
2866 if (!hlist_unhashed(rlist
))
2869 slot
= br_multicast_get_rport_slot(brmctx
, pmctx
->port
, mc_router_list
);
2872 hlist_add_behind_rcu(rlist
, slot
);
2874 hlist_add_head_rcu(rlist
, mc_router_list
);
2876 /* For backwards compatibility for now, only notify if we
2877 * switched from no IPv4/IPv6 multicast router to a new
2878 * IPv4 or IPv6 multicast router.
2880 if (br_multicast_no_router_otherpf(pmctx
, rlist
)) {
2881 br_rtr_notify(pmctx
->port
->br
->dev
, pmctx
->port
, RTM_NEWMDB
);
2882 br_port_mc_router_state_change(pmctx
->port
, true);
2886 /* Add port to router_list
2887 * list is maintained ordered by pointer value
2888 * and locked by br->multicast_lock and RCU
2890 static void br_ip4_multicast_add_router(struct net_bridge_mcast
*brmctx
,
2891 struct net_bridge_mcast_port
*pmctx
)
2893 br_multicast_add_router(brmctx
, pmctx
, &pmctx
->ip4_rlist
,
2894 &brmctx
->ip4_mc_router_list
);
2897 /* Add port to router_list
2898 * list is maintained ordered by pointer value
2899 * and locked by br->multicast_lock and RCU
2901 static void br_ip6_multicast_add_router(struct net_bridge_mcast
*brmctx
,
2902 struct net_bridge_mcast_port
*pmctx
)
2904 #if IS_ENABLED(CONFIG_IPV6)
2905 br_multicast_add_router(brmctx
, pmctx
, &pmctx
->ip6_rlist
,
2906 &brmctx
->ip6_mc_router_list
);
2910 static void br_multicast_mark_router(struct net_bridge_mcast
*brmctx
,
2911 struct net_bridge_mcast_port
*pmctx
,
2912 struct timer_list
*timer
,
2913 struct hlist_node
*rlist
,
2914 struct hlist_head
*mc_router_list
)
2916 unsigned long now
= jiffies
;
2919 if (brmctx
->multicast_router
== MDB_RTR_TYPE_TEMP_QUERY
) {
2920 if (!br_ip4_multicast_is_router(brmctx
) &&
2921 !br_ip6_multicast_is_router(brmctx
))
2922 br_mc_router_state_change(brmctx
->br
, true);
2923 mod_timer(timer
, now
+ brmctx
->multicast_querier_interval
);
2928 if (pmctx
->multicast_router
== MDB_RTR_TYPE_DISABLED
||
2929 pmctx
->multicast_router
== MDB_RTR_TYPE_PERM
)
2932 br_multicast_add_router(brmctx
, pmctx
, rlist
, mc_router_list
);
2933 mod_timer(timer
, now
+ brmctx
->multicast_querier_interval
);
2936 static void br_ip4_multicast_mark_router(struct net_bridge_mcast
*brmctx
,
2937 struct net_bridge_mcast_port
*pmctx
)
2939 struct timer_list
*timer
= &brmctx
->ip4_mc_router_timer
;
2940 struct hlist_node
*rlist
= NULL
;
2943 timer
= &pmctx
->ip4_mc_router_timer
;
2944 rlist
= &pmctx
->ip4_rlist
;
2947 br_multicast_mark_router(brmctx
, pmctx
, timer
, rlist
,
2948 &brmctx
->ip4_mc_router_list
);
2951 static void br_ip6_multicast_mark_router(struct net_bridge_mcast
*brmctx
,
2952 struct net_bridge_mcast_port
*pmctx
)
2954 #if IS_ENABLED(CONFIG_IPV6)
2955 struct timer_list
*timer
= &brmctx
->ip6_mc_router_timer
;
2956 struct hlist_node
*rlist
= NULL
;
2959 timer
= &pmctx
->ip6_mc_router_timer
;
2960 rlist
= &pmctx
->ip6_rlist
;
2963 br_multicast_mark_router(brmctx
, pmctx
, timer
, rlist
,
2964 &brmctx
->ip6_mc_router_list
);
2969 br_ip4_multicast_query_received(struct net_bridge_mcast
*brmctx
,
2970 struct net_bridge_mcast_port
*pmctx
,
2971 struct bridge_mcast_other_query
*query
,
2972 struct br_ip
*saddr
,
2973 unsigned long max_delay
)
2975 if (!br_ip4_multicast_select_querier(brmctx
, pmctx
->port
, saddr
->src
.ip4
))
2978 br_multicast_update_query_timer(brmctx
, query
, max_delay
);
2979 br_ip4_multicast_mark_router(brmctx
, pmctx
);
2982 #if IS_ENABLED(CONFIG_IPV6)
2984 br_ip6_multicast_query_received(struct net_bridge_mcast
*brmctx
,
2985 struct net_bridge_mcast_port
*pmctx
,
2986 struct bridge_mcast_other_query
*query
,
2987 struct br_ip
*saddr
,
2988 unsigned long max_delay
)
2990 if (!br_ip6_multicast_select_querier(brmctx
, pmctx
->port
, &saddr
->src
.ip6
))
2993 br_multicast_update_query_timer(brmctx
, query
, max_delay
);
2994 br_ip6_multicast_mark_router(brmctx
, pmctx
);
2998 static void br_ip4_multicast_query(struct net_bridge_mcast
*brmctx
,
2999 struct net_bridge_mcast_port
*pmctx
,
3000 struct sk_buff
*skb
,
3003 unsigned int transport_len
= ip_transport_len(skb
);
3004 const struct iphdr
*iph
= ip_hdr(skb
);
3005 struct igmphdr
*ih
= igmp_hdr(skb
);
3006 struct net_bridge_mdb_entry
*mp
;
3007 struct igmpv3_query
*ih3
;
3008 struct net_bridge_port_group
*p
;
3009 struct net_bridge_port_group __rcu
**pp
;
3011 unsigned long max_delay
;
3012 unsigned long now
= jiffies
;
3015 spin_lock(&brmctx
->br
->multicast_lock
);
3016 if (!netif_running(brmctx
->br
->dev
) ||
3017 (pmctx
&& pmctx
->port
->state
== BR_STATE_DISABLED
))
3022 if (transport_len
== sizeof(*ih
)) {
3023 max_delay
= ih
->code
* (HZ
/ IGMP_TIMER_SCALE
);
3026 max_delay
= 10 * HZ
;
3029 } else if (transport_len
>= sizeof(*ih3
)) {
3030 ih3
= igmpv3_query_hdr(skb
);
3032 (brmctx
->multicast_igmp_version
== 3 && group
&&
3036 max_delay
= ih3
->code
?
3037 IGMPV3_MRC(ih3
->code
) * (HZ
/ IGMP_TIMER_SCALE
) : 1;
3043 saddr
.proto
= htons(ETH_P_IP
);
3044 saddr
.src
.ip4
= iph
->saddr
;
3046 br_ip4_multicast_query_received(brmctx
, pmctx
,
3047 &brmctx
->ip4_other_query
,
3052 mp
= br_mdb_ip4_get(brmctx
->br
, group
, vid
);
3056 max_delay
*= brmctx
->multicast_last_member_count
;
3058 if (mp
->host_joined
&&
3059 (timer_pending(&mp
->timer
) ?
3060 time_after(mp
->timer
.expires
, now
+ max_delay
) :
3061 try_to_del_timer_sync(&mp
->timer
) >= 0))
3062 mod_timer(&mp
->timer
, now
+ max_delay
);
3064 for (pp
= &mp
->ports
;
3065 (p
= mlock_dereference(*pp
, brmctx
->br
)) != NULL
;
3067 if (timer_pending(&p
->timer
) ?
3068 time_after(p
->timer
.expires
, now
+ max_delay
) :
3069 try_to_del_timer_sync(&p
->timer
) >= 0 &&
3070 (brmctx
->multicast_igmp_version
== 2 ||
3071 p
->filter_mode
== MCAST_EXCLUDE
))
3072 mod_timer(&p
->timer
, now
+ max_delay
);
3076 spin_unlock(&brmctx
->br
->multicast_lock
);
3079 #if IS_ENABLED(CONFIG_IPV6)
3080 static int br_ip6_multicast_query(struct net_bridge_mcast
*brmctx
,
3081 struct net_bridge_mcast_port
*pmctx
,
3082 struct sk_buff
*skb
,
3085 unsigned int transport_len
= ipv6_transport_len(skb
);
3086 struct mld_msg
*mld
;
3087 struct net_bridge_mdb_entry
*mp
;
3088 struct mld2_query
*mld2q
;
3089 struct net_bridge_port_group
*p
;
3090 struct net_bridge_port_group __rcu
**pp
;
3092 unsigned long max_delay
;
3093 unsigned long now
= jiffies
;
3094 unsigned int offset
= skb_transport_offset(skb
);
3095 const struct in6_addr
*group
= NULL
;
3096 bool is_general_query
;
3099 spin_lock(&brmctx
->br
->multicast_lock
);
3100 if (!netif_running(brmctx
->br
->dev
) ||
3101 (pmctx
&& pmctx
->port
->state
== BR_STATE_DISABLED
))
3104 if (transport_len
== sizeof(*mld
)) {
3105 if (!pskb_may_pull(skb
, offset
+ sizeof(*mld
))) {
3109 mld
= (struct mld_msg
*) icmp6_hdr(skb
);
3110 max_delay
= msecs_to_jiffies(ntohs(mld
->mld_maxdelay
));
3112 group
= &mld
->mld_mca
;
3114 if (!pskb_may_pull(skb
, offset
+ sizeof(*mld2q
))) {
3118 mld2q
= (struct mld2_query
*)icmp6_hdr(skb
);
3119 if (!mld2q
->mld2q_nsrcs
)
3120 group
= &mld2q
->mld2q_mca
;
3121 if (brmctx
->multicast_mld_version
== 2 &&
3122 !ipv6_addr_any(&mld2q
->mld2q_mca
) &&
3123 mld2q
->mld2q_suppress
)
3126 max_delay
= max(msecs_to_jiffies(mldv2_mrc(mld2q
)), 1UL);
3129 is_general_query
= group
&& ipv6_addr_any(group
);
3131 if (is_general_query
) {
3132 saddr
.proto
= htons(ETH_P_IPV6
);
3133 saddr
.src
.ip6
= ipv6_hdr(skb
)->saddr
;
3135 br_ip6_multicast_query_received(brmctx
, pmctx
,
3136 &brmctx
->ip6_other_query
,
3139 } else if (!group
) {
3143 mp
= br_mdb_ip6_get(brmctx
->br
, group
, vid
);
3147 max_delay
*= brmctx
->multicast_last_member_count
;
3148 if (mp
->host_joined
&&
3149 (timer_pending(&mp
->timer
) ?
3150 time_after(mp
->timer
.expires
, now
+ max_delay
) :
3151 try_to_del_timer_sync(&mp
->timer
) >= 0))
3152 mod_timer(&mp
->timer
, now
+ max_delay
);
3154 for (pp
= &mp
->ports
;
3155 (p
= mlock_dereference(*pp
, brmctx
->br
)) != NULL
;
3157 if (timer_pending(&p
->timer
) ?
3158 time_after(p
->timer
.expires
, now
+ max_delay
) :
3159 try_to_del_timer_sync(&p
->timer
) >= 0 &&
3160 (brmctx
->multicast_mld_version
== 1 ||
3161 p
->filter_mode
== MCAST_EXCLUDE
))
3162 mod_timer(&p
->timer
, now
+ max_delay
);
3166 spin_unlock(&brmctx
->br
->multicast_lock
);
3172 br_multicast_leave_group(struct net_bridge_mcast
*brmctx
,
3173 struct net_bridge_mcast_port
*pmctx
,
3174 struct br_ip
*group
,
3175 struct bridge_mcast_other_query
*other_query
,
3176 struct bridge_mcast_own_query
*own_query
,
3177 const unsigned char *src
)
3179 struct net_bridge_mdb_entry
*mp
;
3180 struct net_bridge_port_group
*p
;
3184 spin_lock(&brmctx
->br
->multicast_lock
);
3185 if (!netif_running(brmctx
->br
->dev
) ||
3186 (pmctx
&& pmctx
->port
->state
== BR_STATE_DISABLED
))
3189 mp
= br_mdb_ip_get(brmctx
->br
, group
);
3193 if (pmctx
&& (pmctx
->port
->flags
& BR_MULTICAST_FAST_LEAVE
)) {
3194 struct net_bridge_port_group __rcu
**pp
;
3196 for (pp
= &mp
->ports
;
3197 (p
= mlock_dereference(*pp
, brmctx
->br
)) != NULL
;
3199 if (!br_port_group_equal(p
, pmctx
->port
, src
))
3202 if (p
->flags
& MDB_PG_FLAGS_PERMANENT
)
3205 p
->flags
|= MDB_PG_FLAGS_FAST_LEAVE
;
3206 br_multicast_del_pg(mp
, p
, pp
);
3211 if (timer_pending(&other_query
->timer
))
3214 if (br_opt_get(brmctx
->br
, BROPT_MULTICAST_QUERIER
)) {
3215 __br_multicast_send_query(brmctx
, pmctx
, NULL
, NULL
, &mp
->addr
,
3218 time
= jiffies
+ brmctx
->multicast_last_member_count
*
3219 brmctx
->multicast_last_member_interval
;
3221 mod_timer(&own_query
->timer
, time
);
3223 for (p
= mlock_dereference(mp
->ports
, brmctx
->br
);
3225 p
= mlock_dereference(p
->next
, brmctx
->br
)) {
3226 if (!br_port_group_equal(p
, pmctx
->port
, src
))
3229 if (!hlist_unhashed(&p
->mglist
) &&
3230 (timer_pending(&p
->timer
) ?
3231 time_after(p
->timer
.expires
, time
) :
3232 try_to_del_timer_sync(&p
->timer
) >= 0)) {
3233 mod_timer(&p
->timer
, time
);
3241 time
= now
+ brmctx
->multicast_last_member_count
*
3242 brmctx
->multicast_last_member_interval
;
3245 if (mp
->host_joined
&&
3246 (timer_pending(&mp
->timer
) ?
3247 time_after(mp
->timer
.expires
, time
) :
3248 try_to_del_timer_sync(&mp
->timer
) >= 0)) {
3249 mod_timer(&mp
->timer
, time
);
3255 for (p
= mlock_dereference(mp
->ports
, brmctx
->br
);
3257 p
= mlock_dereference(p
->next
, brmctx
->br
)) {
3258 if (p
->key
.port
!= pmctx
->port
)
3261 if (!hlist_unhashed(&p
->mglist
) &&
3262 (timer_pending(&p
->timer
) ?
3263 time_after(p
->timer
.expires
, time
) :
3264 try_to_del_timer_sync(&p
->timer
) >= 0)) {
3265 mod_timer(&p
->timer
, time
);
3271 spin_unlock(&brmctx
->br
->multicast_lock
);
3274 static void br_ip4_multicast_leave_group(struct net_bridge_mcast
*brmctx
,
3275 struct net_bridge_mcast_port
*pmctx
,
3278 const unsigned char *src
)
3280 struct br_ip br_group
;
3281 struct bridge_mcast_own_query
*own_query
;
3283 if (ipv4_is_local_multicast(group
))
3286 own_query
= pmctx
? &pmctx
->ip4_own_query
: &brmctx
->ip4_own_query
;
3288 memset(&br_group
, 0, sizeof(br_group
));
3289 br_group
.dst
.ip4
= group
;
3290 br_group
.proto
= htons(ETH_P_IP
);
3293 br_multicast_leave_group(brmctx
, pmctx
, &br_group
,
3294 &brmctx
->ip4_other_query
,
3298 #if IS_ENABLED(CONFIG_IPV6)
3299 static void br_ip6_multicast_leave_group(struct net_bridge_mcast
*brmctx
,
3300 struct net_bridge_mcast_port
*pmctx
,
3301 const struct in6_addr
*group
,
3303 const unsigned char *src
)
3305 struct br_ip br_group
;
3306 struct bridge_mcast_own_query
*own_query
;
3308 if (ipv6_addr_is_ll_all_nodes(group
))
3311 own_query
= pmctx
? &pmctx
->ip6_own_query
: &brmctx
->ip6_own_query
;
3313 memset(&br_group
, 0, sizeof(br_group
));
3314 br_group
.dst
.ip6
= *group
;
3315 br_group
.proto
= htons(ETH_P_IPV6
);
3318 br_multicast_leave_group(brmctx
, pmctx
, &br_group
,
3319 &brmctx
->ip6_other_query
,
3324 static void br_multicast_err_count(const struct net_bridge
*br
,
3325 const struct net_bridge_port
*p
,
3328 struct bridge_mcast_stats __percpu
*stats
;
3329 struct bridge_mcast_stats
*pstats
;
3331 if (!br_opt_get(br
, BROPT_MULTICAST_STATS_ENABLED
))
3335 stats
= p
->mcast_stats
;
3337 stats
= br
->mcast_stats
;
3338 if (WARN_ON(!stats
))
3341 pstats
= this_cpu_ptr(stats
);
3343 u64_stats_update_begin(&pstats
->syncp
);
3345 case htons(ETH_P_IP
):
3346 pstats
->mstats
.igmp_parse_errors
++;
3348 #if IS_ENABLED(CONFIG_IPV6)
3349 case htons(ETH_P_IPV6
):
3350 pstats
->mstats
.mld_parse_errors
++;
3354 u64_stats_update_end(&pstats
->syncp
);
3357 static void br_multicast_pim(struct net_bridge_mcast
*brmctx
,
3358 struct net_bridge_mcast_port
*pmctx
,
3359 const struct sk_buff
*skb
)
3361 unsigned int offset
= skb_transport_offset(skb
);
3362 struct pimhdr
*pimhdr
, _pimhdr
;
3364 pimhdr
= skb_header_pointer(skb
, offset
, sizeof(_pimhdr
), &_pimhdr
);
3365 if (!pimhdr
|| pim_hdr_version(pimhdr
) != PIM_VERSION
||
3366 pim_hdr_type(pimhdr
) != PIM_TYPE_HELLO
)
3369 spin_lock(&brmctx
->br
->multicast_lock
);
3370 br_ip4_multicast_mark_router(brmctx
, pmctx
);
3371 spin_unlock(&brmctx
->br
->multicast_lock
);
3374 static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast
*brmctx
,
3375 struct net_bridge_mcast_port
*pmctx
,
3376 struct sk_buff
*skb
)
3378 if (ip_hdr(skb
)->protocol
!= IPPROTO_IGMP
||
3379 igmp_hdr(skb
)->type
!= IGMP_MRDISC_ADV
)
3382 spin_lock(&brmctx
->br
->multicast_lock
);
3383 br_ip4_multicast_mark_router(brmctx
, pmctx
);
3384 spin_unlock(&brmctx
->br
->multicast_lock
);
3389 static int br_multicast_ipv4_rcv(struct net_bridge_mcast
*brmctx
,
3390 struct net_bridge_mcast_port
*pmctx
,
3391 struct sk_buff
*skb
,
3394 struct net_bridge_port
*p
= pmctx
? pmctx
->port
: NULL
;
3395 const unsigned char *src
;
3399 err
= ip_mc_check_igmp(skb
);
3401 if (err
== -ENOMSG
) {
3402 if (!ipv4_is_local_multicast(ip_hdr(skb
)->daddr
)) {
3403 BR_INPUT_SKB_CB(skb
)->mrouters_only
= 1;
3404 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb
)->daddr
)) {
3405 if (ip_hdr(skb
)->protocol
== IPPROTO_PIM
)
3406 br_multicast_pim(brmctx
, pmctx
, skb
);
3407 } else if (ipv4_is_all_snoopers(ip_hdr(skb
)->daddr
)) {
3408 br_ip4_multicast_mrd_rcv(brmctx
, pmctx
, skb
);
3412 } else if (err
< 0) {
3413 br_multicast_err_count(brmctx
->br
, p
, skb
->protocol
);
3418 src
= eth_hdr(skb
)->h_source
;
3419 BR_INPUT_SKB_CB(skb
)->igmp
= ih
->type
;
3422 case IGMP_HOST_MEMBERSHIP_REPORT
:
3423 case IGMPV2_HOST_MEMBERSHIP_REPORT
:
3424 BR_INPUT_SKB_CB(skb
)->mrouters_only
= 1;
3425 err
= br_ip4_multicast_add_group(brmctx
, pmctx
, ih
->group
, vid
,
3428 case IGMPV3_HOST_MEMBERSHIP_REPORT
:
3429 err
= br_ip4_multicast_igmp3_report(brmctx
, pmctx
, skb
, vid
);
3431 case IGMP_HOST_MEMBERSHIP_QUERY
:
3432 br_ip4_multicast_query(brmctx
, pmctx
, skb
, vid
);
3434 case IGMP_HOST_LEAVE_MESSAGE
:
3435 br_ip4_multicast_leave_group(brmctx
, pmctx
, ih
->group
, vid
, src
);
3439 br_multicast_count(brmctx
->br
, p
, skb
, BR_INPUT_SKB_CB(skb
)->igmp
,
3445 #if IS_ENABLED(CONFIG_IPV6)
3446 static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast
*brmctx
,
3447 struct net_bridge_mcast_port
*pmctx
,
3448 struct sk_buff
*skb
)
3450 if (icmp6_hdr(skb
)->icmp6_type
!= ICMPV6_MRDISC_ADV
)
3453 spin_lock(&brmctx
->br
->multicast_lock
);
3454 br_ip6_multicast_mark_router(brmctx
, pmctx
);
3455 spin_unlock(&brmctx
->br
->multicast_lock
);
3458 static int br_multicast_ipv6_rcv(struct net_bridge_mcast
*brmctx
,
3459 struct net_bridge_mcast_port
*pmctx
,
3460 struct sk_buff
*skb
,
3463 struct net_bridge_port
*p
= pmctx
? pmctx
->port
: NULL
;
3464 const unsigned char *src
;
3465 struct mld_msg
*mld
;
3468 err
= ipv6_mc_check_mld(skb
);
3470 if (err
== -ENOMSG
|| err
== -ENODATA
) {
3471 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb
)->daddr
))
3472 BR_INPUT_SKB_CB(skb
)->mrouters_only
= 1;
3473 if (err
== -ENODATA
&&
3474 ipv6_addr_is_all_snoopers(&ipv6_hdr(skb
)->daddr
))
3475 br_ip6_multicast_mrd_rcv(brmctx
, pmctx
, skb
);
3478 } else if (err
< 0) {
3479 br_multicast_err_count(brmctx
->br
, p
, skb
->protocol
);
3483 mld
= (struct mld_msg
*)skb_transport_header(skb
);
3484 BR_INPUT_SKB_CB(skb
)->igmp
= mld
->mld_type
;
3486 switch (mld
->mld_type
) {
3487 case ICMPV6_MGM_REPORT
:
3488 src
= eth_hdr(skb
)->h_source
;
3489 BR_INPUT_SKB_CB(skb
)->mrouters_only
= 1;
3490 err
= br_ip6_multicast_add_group(brmctx
, pmctx
, &mld
->mld_mca
,
3493 case ICMPV6_MLD2_REPORT
:
3494 err
= br_ip6_multicast_mld2_report(brmctx
, pmctx
, skb
, vid
);
3496 case ICMPV6_MGM_QUERY
:
3497 err
= br_ip6_multicast_query(brmctx
, pmctx
, skb
, vid
);
3499 case ICMPV6_MGM_REDUCTION
:
3500 src
= eth_hdr(skb
)->h_source
;
3501 br_ip6_multicast_leave_group(brmctx
, pmctx
, &mld
->mld_mca
, vid
,
3506 br_multicast_count(brmctx
->br
, p
, skb
, BR_INPUT_SKB_CB(skb
)->igmp
,
3513 int br_multicast_rcv(struct net_bridge_mcast
*brmctx
,
3514 struct net_bridge_mcast_port
*pmctx
,
3515 struct sk_buff
*skb
, u16 vid
)
3519 BR_INPUT_SKB_CB(skb
)->igmp
= 0;
3520 BR_INPUT_SKB_CB(skb
)->mrouters_only
= 0;
3522 if (!br_opt_get(brmctx
->br
, BROPT_MULTICAST_ENABLED
))
3525 switch (skb
->protocol
) {
3526 case htons(ETH_P_IP
):
3527 ret
= br_multicast_ipv4_rcv(brmctx
, pmctx
, skb
, vid
);
3529 #if IS_ENABLED(CONFIG_IPV6)
3530 case htons(ETH_P_IPV6
):
3531 ret
= br_multicast_ipv6_rcv(brmctx
, pmctx
, skb
, vid
);
3539 static void br_multicast_query_expired(struct net_bridge_mcast
*brmctx
,
3540 struct bridge_mcast_own_query
*query
,
3541 struct bridge_mcast_querier
*querier
)
3543 spin_lock(&brmctx
->br
->multicast_lock
);
3544 if (query
->startup_sent
< brmctx
->multicast_startup_query_count
)
3545 query
->startup_sent
++;
3547 RCU_INIT_POINTER(querier
->port
, NULL
);
3548 br_multicast_send_query(brmctx
, NULL
, query
);
3549 spin_unlock(&brmctx
->br
->multicast_lock
);
3552 static void br_ip4_multicast_query_expired(struct timer_list
*t
)
3554 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
3555 ip4_own_query
.timer
);
3557 br_multicast_query_expired(brmctx
, &brmctx
->ip4_own_query
,
3558 &brmctx
->ip4_querier
);
3561 #if IS_ENABLED(CONFIG_IPV6)
3562 static void br_ip6_multicast_query_expired(struct timer_list
*t
)
3564 struct net_bridge_mcast
*brmctx
= from_timer(brmctx
, t
,
3565 ip6_own_query
.timer
);
3567 br_multicast_query_expired(brmctx
, &brmctx
->ip6_own_query
,
3568 &brmctx
->ip6_querier
);
3572 static void br_multicast_gc_work(struct work_struct
*work
)
3574 struct net_bridge
*br
= container_of(work
, struct net_bridge
,
3576 HLIST_HEAD(deleted_head
);
3578 spin_lock_bh(&br
->multicast_lock
);
3579 hlist_move_list(&br
->mcast_gc_list
, &deleted_head
);
3580 spin_unlock_bh(&br
->multicast_lock
);
3582 br_multicast_gc(&deleted_head
);
3585 void br_multicast_ctx_init(struct net_bridge
*br
,
3586 struct net_bridge_vlan
*vlan
,
3587 struct net_bridge_mcast
*brmctx
)
3590 brmctx
->vlan
= vlan
;
3591 brmctx
->multicast_router
= MDB_RTR_TYPE_TEMP_QUERY
;
3592 brmctx
->multicast_last_member_count
= 2;
3593 brmctx
->multicast_startup_query_count
= 2;
3595 brmctx
->multicast_last_member_interval
= HZ
;
3596 brmctx
->multicast_query_response_interval
= 10 * HZ
;
3597 brmctx
->multicast_startup_query_interval
= 125 * HZ
/ 4;
3598 brmctx
->multicast_query_interval
= 125 * HZ
;
3599 brmctx
->multicast_querier_interval
= 255 * HZ
;
3600 brmctx
->multicast_membership_interval
= 260 * HZ
;
3602 brmctx
->ip4_other_query
.delay_time
= 0;
3603 brmctx
->ip4_querier
.port
= NULL
;
3604 brmctx
->multicast_igmp_version
= 2;
3605 #if IS_ENABLED(CONFIG_IPV6)
3606 brmctx
->multicast_mld_version
= 1;
3607 brmctx
->ip6_other_query
.delay_time
= 0;
3608 brmctx
->ip6_querier
.port
= NULL
;
3611 timer_setup(&brmctx
->ip4_mc_router_timer
,
3612 br_ip4_multicast_local_router_expired
, 0);
3613 timer_setup(&brmctx
->ip4_other_query
.timer
,
3614 br_ip4_multicast_querier_expired
, 0);
3615 timer_setup(&brmctx
->ip4_own_query
.timer
,
3616 br_ip4_multicast_query_expired
, 0);
3617 #if IS_ENABLED(CONFIG_IPV6)
3618 timer_setup(&brmctx
->ip6_mc_router_timer
,
3619 br_ip6_multicast_local_router_expired
, 0);
3620 timer_setup(&brmctx
->ip6_other_query
.timer
,
3621 br_ip6_multicast_querier_expired
, 0);
3622 timer_setup(&brmctx
->ip6_own_query
.timer
,
3623 br_ip6_multicast_query_expired
, 0);
/* Tear down a multicast context by stopping all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
3632 void br_multicast_init(struct net_bridge
*br
)
3634 br
->hash_max
= BR_MULTICAST_DEFAULT_HASH_MAX
;
3636 br_multicast_ctx_init(br
, NULL
, &br
->multicast_ctx
);
3638 br_opt_toggle(br
, BROPT_MULTICAST_ENABLED
, true);
3639 br_opt_toggle(br
, BROPT_HAS_IPV6_ADDR
, true);
3641 spin_lock_init(&br
->multicast_lock
);
3642 INIT_HLIST_HEAD(&br
->mdb_list
);
3643 INIT_HLIST_HEAD(&br
->mcast_gc_list
);
3644 INIT_WORK(&br
->mcast_gc_work
, br_multicast_gc_work
);
3647 static void br_ip4_multicast_join_snoopers(struct net_bridge
*br
)
3649 struct in_device
*in_dev
= in_dev_get(br
->dev
);
3654 __ip_mc_inc_group(in_dev
, htonl(INADDR_ALLSNOOPERS_GROUP
), GFP_ATOMIC
);
3658 #if IS_ENABLED(CONFIG_IPV6)
3659 static void br_ip6_multicast_join_snoopers(struct net_bridge
*br
)
3661 struct in6_addr addr
;
3663 ipv6_addr_set(&addr
, htonl(0xff020000), 0, 0, htonl(0x6a));
3664 ipv6_dev_mc_inc(br
->dev
, &addr
);
3667 static inline void br_ip6_multicast_join_snoopers(struct net_bridge
*br
)
/* Join both the IPv4 and IPv6 all-snoopers groups. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
3678 static void br_ip4_multicast_leave_snoopers(struct net_bridge
*br
)
3680 struct in_device
*in_dev
= in_dev_get(br
->dev
);
3682 if (WARN_ON(!in_dev
))
3685 __ip_mc_dec_group(in_dev
, htonl(INADDR_ALLSNOOPERS_GROUP
), GFP_ATOMIC
);
3689 #if IS_ENABLED(CONFIG_IPV6)
3690 static void br_ip6_multicast_leave_snoopers(struct net_bridge
*br
)
3692 struct in6_addr addr
;
3694 ipv6_addr_set(&addr
, htonl(0xff020000), 0, 0, htonl(0x6a));
3695 ipv6_dev_mc_dec(br
->dev
, &addr
);
3698 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge
*br
)
/* Leave both the IPv4 and IPv6 all-snoopers groups. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
3709 static void __br_multicast_open_query(struct net_bridge
*br
,
3710 struct bridge_mcast_own_query
*query
)
3712 query
->startup_sent
= 0;
3714 if (!br_opt_get(br
, BROPT_MULTICAST_ENABLED
))
3717 mod_timer(&query
->timer
, jiffies
);
3720 static void __br_multicast_open(struct net_bridge_mcast
*brmctx
)
3722 __br_multicast_open_query(brmctx
->br
, &brmctx
->ip4_own_query
);
3723 #if IS_ENABLED(CONFIG_IPV6)
3724 __br_multicast_open_query(brmctx
->br
, &brmctx
->ip6_own_query
);
3728 void br_multicast_open(struct net_bridge
*br
)
3730 struct net_bridge_vlan_group
*vg
;
3731 struct net_bridge_vlan
*vlan
;
3735 vg
= br_vlan_group(br
);
3737 list_for_each_entry(vlan
, &vg
->vlan_list
, vlist
) {
3738 struct net_bridge_mcast
*brmctx
;
3740 brmctx
= &vlan
->br_mcast_ctx
;
3741 if (br_vlan_is_brentry(vlan
) &&
3742 !br_multicast_ctx_vlan_disabled(brmctx
))
3743 __br_multicast_open(&vlan
->br_mcast_ctx
);
3747 __br_multicast_open(&br
->multicast_ctx
);
3750 static void __br_multicast_stop(struct net_bridge_mcast
*brmctx
)
3752 del_timer_sync(&brmctx
->ip4_mc_router_timer
);
3753 del_timer_sync(&brmctx
->ip4_other_query
.timer
);
3754 del_timer_sync(&brmctx
->ip4_own_query
.timer
);
3755 #if IS_ENABLED(CONFIG_IPV6)
3756 del_timer_sync(&brmctx
->ip6_mc_router_timer
);
3757 del_timer_sync(&brmctx
->ip6_other_query
.timer
);
3758 del_timer_sync(&brmctx
->ip6_own_query
.timer
);
3762 void br_multicast_toggle_one_vlan(struct net_bridge_vlan
*vlan
, bool on
)
3764 struct net_bridge
*br
;
3766 /* it's okay to check for the flag without the multicast lock because it
3767 * can only change under RTNL -> multicast_lock, we need the latter to
3768 * sync with timers and packets
3770 if (on
== !!(vlan
->priv_flags
& BR_VLFLAG_MCAST_ENABLED
))
3773 if (br_vlan_is_master(vlan
)) {
3776 if (!br_vlan_is_brentry(vlan
) ||
3778 br_multicast_ctx_vlan_global_disabled(&vlan
->br_mcast_ctx
)))
3781 spin_lock_bh(&br
->multicast_lock
);
3782 vlan
->priv_flags
^= BR_VLFLAG_MCAST_ENABLED
;
3783 spin_unlock_bh(&br
->multicast_lock
);
3786 __br_multicast_open(&vlan
->br_mcast_ctx
);
3788 __br_multicast_stop(&vlan
->br_mcast_ctx
);
3790 struct net_bridge_mcast
*brmctx
;
3792 brmctx
= br_multicast_port_ctx_get_global(&vlan
->port_mcast_ctx
);
3793 if (on
&& br_multicast_ctx_vlan_global_disabled(brmctx
))
3796 br
= vlan
->port
->br
;
3797 spin_lock_bh(&br
->multicast_lock
);
3798 vlan
->priv_flags
^= BR_VLFLAG_MCAST_ENABLED
;
3800 __br_multicast_enable_port_ctx(&vlan
->port_mcast_ctx
);
3802 __br_multicast_disable_port_ctx(&vlan
->port_mcast_ctx
);
3803 spin_unlock_bh(&br
->multicast_lock
);
3807 void br_multicast_stop(struct net_bridge
*br
)
3809 struct net_bridge_vlan_group
*vg
;
3810 struct net_bridge_vlan
*vlan
;
3814 vg
= br_vlan_group(br
);
3816 list_for_each_entry(vlan
, &vg
->vlan_list
, vlist
) {
3817 struct net_bridge_mcast
*brmctx
;
3819 brmctx
= &vlan
->br_mcast_ctx
;
3820 if (br_vlan_is_brentry(vlan
) &&
3821 !br_multicast_ctx_vlan_disabled(brmctx
))
3822 __br_multicast_stop(&vlan
->br_mcast_ctx
);
3826 __br_multicast_stop(&br
->multicast_ctx
);
3829 void br_multicast_dev_del(struct net_bridge
*br
)
3831 struct net_bridge_mdb_entry
*mp
;
3832 HLIST_HEAD(deleted_head
);
3833 struct hlist_node
*tmp
;
3835 spin_lock_bh(&br
->multicast_lock
);
3836 hlist_for_each_entry_safe(mp
, tmp
, &br
->mdb_list
, mdb_node
)
3837 br_multicast_del_mdb_entry(mp
);
3838 hlist_move_list(&br
->mcast_gc_list
, &deleted_head
);
3839 spin_unlock_bh(&br
->multicast_lock
);
3841 br_multicast_ctx_deinit(&br
->multicast_ctx
);
3842 br_multicast_gc(&deleted_head
);
3843 cancel_work_sync(&br
->mcast_gc_work
);
3848 int br_multicast_set_router(struct net_bridge
*br
, unsigned long val
)
3850 struct net_bridge_mcast
*brmctx
= &br
->multicast_ctx
;
3853 spin_lock_bh(&br
->multicast_lock
);
3856 case MDB_RTR_TYPE_DISABLED
:
3857 case MDB_RTR_TYPE_PERM
:
3858 br_mc_router_state_change(br
, val
== MDB_RTR_TYPE_PERM
);
3859 del_timer(&brmctx
->ip4_mc_router_timer
);
3860 #if IS_ENABLED(CONFIG_IPV6)
3861 del_timer(&brmctx
->ip6_mc_router_timer
);
3863 brmctx
->multicast_router
= val
;
3866 case MDB_RTR_TYPE_TEMP_QUERY
:
3867 if (brmctx
->multicast_router
!= MDB_RTR_TYPE_TEMP_QUERY
)
3868 br_mc_router_state_change(br
, false);
3869 brmctx
->multicast_router
= val
;
3874 spin_unlock_bh(&br
->multicast_lock
);
3880 br_multicast_rport_del_notify(struct net_bridge_mcast_port
*pmctx
, bool deleted
)
3885 /* For backwards compatibility for now, only notify if there is
3886 * no multicast router anymore for both IPv4 and IPv6.
3888 if (!hlist_unhashed(&pmctx
->ip4_rlist
))
3890 #if IS_ENABLED(CONFIG_IPV6)
3891 if (!hlist_unhashed(&pmctx
->ip6_rlist
))
3895 br_rtr_notify(pmctx
->port
->br
->dev
, pmctx
->port
, RTM_DELMDB
);
3896 br_port_mc_router_state_change(pmctx
->port
, false);
3898 /* don't allow timer refresh */
3899 if (pmctx
->multicast_router
== MDB_RTR_TYPE_TEMP
)
3900 pmctx
->multicast_router
= MDB_RTR_TYPE_TEMP_QUERY
;
3903 int br_multicast_set_port_router(struct net_bridge_port
*p
, unsigned long val
)
3905 struct net_bridge_mcast
*brmctx
= &p
->br
->multicast_ctx
;
3906 struct net_bridge_mcast_port
*pmctx
= &p
->multicast_ctx
;
3907 unsigned long now
= jiffies
;
3911 spin_lock(&p
->br
->multicast_lock
);
3912 if (pmctx
->multicast_router
== val
) {
3913 /* Refresh the temp router port timer */
3914 if (pmctx
->multicast_router
== MDB_RTR_TYPE_TEMP
) {
3915 mod_timer(&pmctx
->ip4_mc_router_timer
,
3916 now
+ brmctx
->multicast_querier_interval
);
3917 #if IS_ENABLED(CONFIG_IPV6)
3918 mod_timer(&pmctx
->ip6_mc_router_timer
,
3919 now
+ brmctx
->multicast_querier_interval
);
3926 case MDB_RTR_TYPE_DISABLED
:
3927 pmctx
->multicast_router
= MDB_RTR_TYPE_DISABLED
;
3928 del
|= br_ip4_multicast_rport_del(pmctx
);
3929 del_timer(&pmctx
->ip4_mc_router_timer
);
3930 del
|= br_ip6_multicast_rport_del(pmctx
);
3931 #if IS_ENABLED(CONFIG_IPV6)
3932 del_timer(&pmctx
->ip6_mc_router_timer
);
3934 br_multicast_rport_del_notify(pmctx
, del
);
3936 case MDB_RTR_TYPE_TEMP_QUERY
:
3937 pmctx
->multicast_router
= MDB_RTR_TYPE_TEMP_QUERY
;
3938 del
|= br_ip4_multicast_rport_del(pmctx
);
3939 del
|= br_ip6_multicast_rport_del(pmctx
);
3940 br_multicast_rport_del_notify(pmctx
, del
);
3942 case MDB_RTR_TYPE_PERM
:
3943 pmctx
->multicast_router
= MDB_RTR_TYPE_PERM
;
3944 del_timer(&pmctx
->ip4_mc_router_timer
);
3945 br_ip4_multicast_add_router(brmctx
, pmctx
);
3946 #if IS_ENABLED(CONFIG_IPV6)
3947 del_timer(&pmctx
->ip6_mc_router_timer
);
3949 br_ip6_multicast_add_router(brmctx
, pmctx
);
3951 case MDB_RTR_TYPE_TEMP
:
3952 pmctx
->multicast_router
= MDB_RTR_TYPE_TEMP
;
3953 br_ip4_multicast_mark_router(brmctx
, pmctx
);
3954 br_ip6_multicast_mark_router(brmctx
, pmctx
);
3961 spin_unlock(&p
->br
->multicast_lock
);
3966 static void br_multicast_start_querier(struct net_bridge_mcast
*brmctx
,
3967 struct bridge_mcast_own_query
*query
)
3969 struct net_bridge_port
*port
;
3971 __br_multicast_open_query(brmctx
->br
, query
);
3974 list_for_each_entry_rcu(port
, &brmctx
->br
->port_list
, list
) {
3975 if (port
->state
== BR_STATE_DISABLED
||
3976 port
->state
== BR_STATE_BLOCKING
)
3979 if (query
== &brmctx
->ip4_own_query
)
3980 br_multicast_enable(&port
->multicast_ctx
.ip4_own_query
);
3981 #if IS_ENABLED(CONFIG_IPV6)
3983 br_multicast_enable(&port
->multicast_ctx
.ip6_own_query
);
3989 int br_multicast_toggle(struct net_bridge
*br
, unsigned long val
,
3990 struct netlink_ext_ack
*extack
)
3992 struct net_bridge_port
*port
;
3993 bool change_snoopers
= false;
3996 spin_lock_bh(&br
->multicast_lock
);
3997 if (!!br_opt_get(br
, BROPT_MULTICAST_ENABLED
) == !!val
)
4000 err
= br_mc_disabled_update(br
->dev
, val
, extack
);
4001 if (err
== -EOPNOTSUPP
)
4006 br_opt_toggle(br
, BROPT_MULTICAST_ENABLED
, !!val
);
4007 if (!br_opt_get(br
, BROPT_MULTICAST_ENABLED
)) {
4008 change_snoopers
= true;
4012 if (!netif_running(br
->dev
))
4015 br_multicast_open(br
);
4016 list_for_each_entry(port
, &br
->port_list
, list
)
4017 __br_multicast_enable_port_ctx(&port
->multicast_ctx
);
4019 change_snoopers
= true;
4022 spin_unlock_bh(&br
->multicast_lock
);
4024 /* br_multicast_join_snoopers has the potential to cause
4025 * an MLD Report/Leave to be delivered to br_multicast_rcv,
4026 * which would in turn call br_multicast_add_group, which would
4027 * attempt to acquire multicast_lock. This function should be
4028 * called after the lock has been released to avoid deadlocks on
4031 * br_multicast_leave_snoopers does not have the problem since
4032 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
4033 * returns without calling br_multicast_ipv4/6_rcv if it's not
4034 * enabled. Moved both functions out just for symmetry.
4036 if (change_snoopers
) {
4037 if (br_opt_get(br
, BROPT_MULTICAST_ENABLED
))
4038 br_multicast_join_snoopers(br
);
4040 br_multicast_leave_snoopers(br
);
4046 bool br_multicast_enabled(const struct net_device
*dev
)
4048 struct net_bridge
*br
= netdev_priv(dev
);
4050 return !!br_opt_get(br
, BROPT_MULTICAST_ENABLED
);
4052 EXPORT_SYMBOL_GPL(br_multicast_enabled
);
4054 bool br_multicast_router(const struct net_device
*dev
)
4056 struct net_bridge
*br
= netdev_priv(dev
);
4059 spin_lock_bh(&br
->multicast_lock
);
4060 is_router
= br_multicast_is_router(&br
->multicast_ctx
, NULL
);
4061 spin_unlock_bh(&br
->multicast_lock
);
4064 EXPORT_SYMBOL_GPL(br_multicast_router
);
4066 int br_multicast_set_querier(struct net_bridge
*br
, unsigned long val
)
4068 struct net_bridge_mcast
*brmctx
= &br
->multicast_ctx
;
4069 unsigned long max_delay
;
4073 spin_lock_bh(&br
->multicast_lock
);
4074 if (br_opt_get(br
, BROPT_MULTICAST_QUERIER
) == val
)
4077 br_opt_toggle(br
, BROPT_MULTICAST_QUERIER
, !!val
);
4081 max_delay
= brmctx
->multicast_query_response_interval
;
4083 if (!timer_pending(&brmctx
->ip4_other_query
.timer
))
4084 brmctx
->ip4_other_query
.delay_time
= jiffies
+ max_delay
;
4086 br_multicast_start_querier(brmctx
, &brmctx
->ip4_own_query
);
4088 #if IS_ENABLED(CONFIG_IPV6)
4089 if (!timer_pending(&brmctx
->ip6_other_query
.timer
))
4090 brmctx
->ip6_other_query
.delay_time
= jiffies
+ max_delay
;
4092 br_multicast_start_querier(brmctx
, &brmctx
->ip6_own_query
);
4096 spin_unlock_bh(&br
->multicast_lock
);
4101 int br_multicast_set_igmp_version(struct net_bridge
*br
, unsigned long val
)
4103 /* Currently we support only version 2 and 3 */
4112 spin_lock_bh(&br
->multicast_lock
);
4113 br
->multicast_ctx
.multicast_igmp_version
= val
;
4114 spin_unlock_bh(&br
->multicast_lock
);
4119 #if IS_ENABLED(CONFIG_IPV6)
4120 int br_multicast_set_mld_version(struct net_bridge
*br
, unsigned long val
)
4122 /* Currently we support version 1 and 2 */
4131 spin_lock_bh(&br
->multicast_lock
);
4132 br
->multicast_ctx
.multicast_mld_version
= val
;
4133 spin_unlock_bh(&br
->multicast_lock
);
4140 * br_multicast_list_adjacent - Returns snooped multicast addresses
4141 * @dev: The bridge port adjacent to which to retrieve addresses
4142 * @br_ip_list: The list to store found, snooped multicast IP addresses in
4144 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
4145 * snooping feature on all bridge ports of dev's bridge device, excluding
4146 * the addresses from dev itself.
4148 * Returns the number of items added to br_ip_list.
4151 * - br_ip_list needs to be initialized by caller
4152 * - br_ip_list might contain duplicates in the end
4153 * (needs to be taken care of by caller)
4154 * - br_ip_list needs to be freed by caller
4156 int br_multicast_list_adjacent(struct net_device
*dev
,
4157 struct list_head
*br_ip_list
)
4159 struct net_bridge
*br
;
4160 struct net_bridge_port
*port
;
4161 struct net_bridge_port_group
*group
;
4162 struct br_ip_list
*entry
;
4166 if (!br_ip_list
|| !netif_is_bridge_port(dev
))
4169 port
= br_port_get_rcu(dev
);
4170 if (!port
|| !port
->br
)
4175 list_for_each_entry_rcu(port
, &br
->port_list
, list
) {
4176 if (!port
->dev
|| port
->dev
== dev
)
4179 hlist_for_each_entry_rcu(group
, &port
->mglist
, mglist
) {
4180 entry
= kmalloc(sizeof(*entry
), GFP_ATOMIC
);
4184 entry
->addr
= group
->key
.addr
;
4185 list_add(&entry
->list
, br_ip_list
);
4194 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent
);
4197 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4198 * @dev: The bridge port providing the bridge on which to check for a querier
4199 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4201 * Checks whether the given interface has a bridge on top and if so returns
4202 * true if a valid querier exists anywhere on the bridged link layer.
4203 * Otherwise returns false.
4205 bool br_multicast_has_querier_anywhere(struct net_device
*dev
, int proto
)
4207 struct net_bridge
*br
;
4208 struct net_bridge_port
*port
;
4213 if (!netif_is_bridge_port(dev
))
4216 port
= br_port_get_rcu(dev
);
4217 if (!port
|| !port
->br
)
4222 memset(ð
, 0, sizeof(eth
));
4223 eth
.h_proto
= htons(proto
);
4225 ret
= br_multicast_querier_exists(&br
->multicast_ctx
, ð
, NULL
);
4231 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere
);
4234 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
4235 * @dev: The bridge port adjacent to which to check for a querier
4236 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4238 * Checks whether the given interface has a bridge on top and if so returns
4239 * true if a selected querier is behind one of the other ports of this
4240 * bridge. Otherwise returns false.
4242 bool br_multicast_has_querier_adjacent(struct net_device
*dev
, int proto
)
4244 struct net_bridge_mcast
*brmctx
;
4245 struct net_bridge
*br
;
4246 struct net_bridge_port
*port
;
4250 if (!netif_is_bridge_port(dev
))
4253 port
= br_port_get_rcu(dev
);
4254 if (!port
|| !port
->br
)
4258 brmctx
= &br
->multicast_ctx
;
4262 if (!timer_pending(&brmctx
->ip4_other_query
.timer
) ||
4263 rcu_dereference(brmctx
->ip4_querier
.port
) == port
)
4266 #if IS_ENABLED(CONFIG_IPV6)
4268 if (!timer_pending(&brmctx
->ip6_other_query
.timer
) ||
4269 rcu_dereference(brmctx
->ip6_querier
.port
) == port
)
4282 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent
);
4285 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
4286 * @dev: The bridge port adjacent to which to check for a multicast router
4287 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4289 * Checks whether the given interface has a bridge on top and if so returns
4290 * true if a multicast router is behind one of the other ports of this
4291 * bridge. Otherwise returns false.
4293 bool br_multicast_has_router_adjacent(struct net_device
*dev
, int proto
)
4295 struct net_bridge_mcast_port
*pmctx
;
4296 struct net_bridge_mcast
*brmctx
;
4297 struct net_bridge_port
*port
;
4301 port
= br_port_get_check_rcu(dev
);
4305 brmctx
= &port
->br
->multicast_ctx
;
4308 hlist_for_each_entry_rcu(pmctx
, &brmctx
->ip4_mc_router_list
,
4310 if (pmctx
->port
== port
)
4317 #if IS_ENABLED(CONFIG_IPV6)
4319 hlist_for_each_entry_rcu(pmctx
, &brmctx
->ip6_mc_router_list
,
4321 if (pmctx
->port
== port
)
4330 /* when compiled without IPv6 support, be conservative and
4331 * always assume presence of an IPv6 multicast router
4340 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent
);
4342 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu
*stats
,
4343 const struct sk_buff
*skb
, u8 type
, u8 dir
)
4345 struct bridge_mcast_stats
*pstats
= this_cpu_ptr(stats
);
4346 __be16 proto
= skb
->protocol
;
4349 u64_stats_update_begin(&pstats
->syncp
);
4351 case htons(ETH_P_IP
):
4352 t_len
= ntohs(ip_hdr(skb
)->tot_len
) - ip_hdrlen(skb
);
4354 case IGMP_HOST_MEMBERSHIP_REPORT
:
4355 pstats
->mstats
.igmp_v1reports
[dir
]++;
4357 case IGMPV2_HOST_MEMBERSHIP_REPORT
:
4358 pstats
->mstats
.igmp_v2reports
[dir
]++;
4360 case IGMPV3_HOST_MEMBERSHIP_REPORT
:
4361 pstats
->mstats
.igmp_v3reports
[dir
]++;
4363 case IGMP_HOST_MEMBERSHIP_QUERY
:
4364 if (t_len
!= sizeof(struct igmphdr
)) {
4365 pstats
->mstats
.igmp_v3queries
[dir
]++;
4367 unsigned int offset
= skb_transport_offset(skb
);
4368 struct igmphdr
*ih
, _ihdr
;
4370 ih
= skb_header_pointer(skb
, offset
,
4371 sizeof(_ihdr
), &_ihdr
);
4375 pstats
->mstats
.igmp_v1queries
[dir
]++;
4377 pstats
->mstats
.igmp_v2queries
[dir
]++;
4380 case IGMP_HOST_LEAVE_MESSAGE
:
4381 pstats
->mstats
.igmp_leaves
[dir
]++;
4385 #if IS_ENABLED(CONFIG_IPV6)
4386 case htons(ETH_P_IPV6
):
4387 t_len
= ntohs(ipv6_hdr(skb
)->payload_len
) +
4388 sizeof(struct ipv6hdr
);
4389 t_len
-= skb_network_header_len(skb
);
4391 case ICMPV6_MGM_REPORT
:
4392 pstats
->mstats
.mld_v1reports
[dir
]++;
4394 case ICMPV6_MLD2_REPORT
:
4395 pstats
->mstats
.mld_v2reports
[dir
]++;
4397 case ICMPV6_MGM_QUERY
:
4398 if (t_len
!= sizeof(struct mld_msg
))
4399 pstats
->mstats
.mld_v2queries
[dir
]++;
4401 pstats
->mstats
.mld_v1queries
[dir
]++;
4403 case ICMPV6_MGM_REDUCTION
:
4404 pstats
->mstats
.mld_leaves
[dir
]++;
4408 #endif /* CONFIG_IPV6 */
4410 u64_stats_update_end(&pstats
->syncp
);
4413 void br_multicast_count(struct net_bridge
*br
,
4414 const struct net_bridge_port
*p
,
4415 const struct sk_buff
*skb
, u8 type
, u8 dir
)
4417 struct bridge_mcast_stats __percpu
*stats
;
4419 /* if multicast_disabled is true then igmp type can't be set */
4420 if (!type
|| !br_opt_get(br
, BROPT_MULTICAST_STATS_ENABLED
))
4424 stats
= p
->mcast_stats
;
4426 stats
= br
->mcast_stats
;
4427 if (WARN_ON(!stats
))
4430 br_mcast_stats_add(stats
, skb
, type
, dir
);
4433 int br_multicast_init_stats(struct net_bridge
*br
)
4435 br
->mcast_stats
= netdev_alloc_pcpu_stats(struct bridge_mcast_stats
);
4436 if (!br
->mcast_stats
)
4442 void br_multicast_uninit_stats(struct net_bridge
*br
)
4444 free_percpu(br
->mcast_stats
);
4447 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
4448 static noinline_for_stack
void mcast_stats_add_dir(u64
*dst
, u64
*src
)
4450 dst
[BR_MCAST_DIR_RX
] += src
[BR_MCAST_DIR_RX
];
4451 dst
[BR_MCAST_DIR_TX
] += src
[BR_MCAST_DIR_TX
];
4454 void br_multicast_get_stats(const struct net_bridge
*br
,
4455 const struct net_bridge_port
*p
,
4456 struct br_mcast_stats
*dest
)
4458 struct bridge_mcast_stats __percpu
*stats
;
4459 struct br_mcast_stats tdst
;
4462 memset(dest
, 0, sizeof(*dest
));
4464 stats
= p
->mcast_stats
;
4466 stats
= br
->mcast_stats
;
4467 if (WARN_ON(!stats
))
4470 memset(&tdst
, 0, sizeof(tdst
));
4471 for_each_possible_cpu(i
) {
4472 struct bridge_mcast_stats
*cpu_stats
= per_cpu_ptr(stats
, i
);
4473 struct br_mcast_stats temp
;
4477 start
= u64_stats_fetch_begin_irq(&cpu_stats
->syncp
);
4478 memcpy(&temp
, &cpu_stats
->mstats
, sizeof(temp
));
4479 } while (u64_stats_fetch_retry_irq(&cpu_stats
->syncp
, start
));
4481 mcast_stats_add_dir(tdst
.igmp_v1queries
, temp
.igmp_v1queries
);
4482 mcast_stats_add_dir(tdst
.igmp_v2queries
, temp
.igmp_v2queries
);
4483 mcast_stats_add_dir(tdst
.igmp_v3queries
, temp
.igmp_v3queries
);
4484 mcast_stats_add_dir(tdst
.igmp_leaves
, temp
.igmp_leaves
);
4485 mcast_stats_add_dir(tdst
.igmp_v1reports
, temp
.igmp_v1reports
);
4486 mcast_stats_add_dir(tdst
.igmp_v2reports
, temp
.igmp_v2reports
);
4487 mcast_stats_add_dir(tdst
.igmp_v3reports
, temp
.igmp_v3reports
);
4488 tdst
.igmp_parse_errors
+= temp
.igmp_parse_errors
;
4490 mcast_stats_add_dir(tdst
.mld_v1queries
, temp
.mld_v1queries
);
4491 mcast_stats_add_dir(tdst
.mld_v2queries
, temp
.mld_v2queries
);
4492 mcast_stats_add_dir(tdst
.mld_leaves
, temp
.mld_leaves
);
4493 mcast_stats_add_dir(tdst
.mld_v1reports
, temp
.mld_v1reports
);
4494 mcast_stats_add_dir(tdst
.mld_v2reports
, temp
.mld_v2reports
);
4495 tdst
.mld_parse_errors
+= temp
.mld_parse_errors
;
4497 memcpy(dest
, &tdst
, sizeof(*dest
));
4500 int br_mdb_hash_init(struct net_bridge
*br
)
4504 err
= rhashtable_init(&br
->sg_port_tbl
, &br_sg_port_rht_params
);
4508 err
= rhashtable_init(&br
->mdb_hash_tbl
, &br_mdb_rht_params
);
4510 rhashtable_destroy(&br
->sg_port_tbl
);
4517 void br_mdb_hash_fini(struct net_bridge
*br
)
4519 rhashtable_destroy(&br
->sg_port_tbl
);
4520 rhashtable_destroy(&br
->mdb_hash_tbl
);