/* net/bridge/br_multicast.c — bridge multicast (IGMP/MLD) snooping support,
 * including per-vlan multicast snooping contexts.
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge multicast support.
4 *
5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6 */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
/* Hash table parameters for the bridge MDB (multicast group database).
 * Entries are keyed by the full struct br_ip (protocol, address, vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
44
/* Hash table parameters for per-port S,G entries, keyed by the
 * (port, address) pair in struct net_bridge_port_group_sg_key.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
51
52 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
53 struct bridge_mcast_own_query *query);
54 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
55 struct net_bridge_mcast_port *pmctx);
56 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
57 struct net_bridge_mcast_port *pmctx,
58 __be32 group,
59 __u16 vid,
60 const unsigned char *src);
61 static void br_multicast_port_group_rexmit(struct timer_list *t);
62
63 static void
64 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
65 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
66 struct net_bridge_mcast_port *pmctx);
67 #if IS_ENABLED(CONFIG_IPV6)
68 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
69 struct net_bridge_mcast_port *pmctx,
70 const struct in6_addr *group,
71 __u16 vid, const unsigned char *src);
72 #endif
73 static struct net_bridge_port_group *
74 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
75 struct net_bridge_mcast_port *pmctx,
76 struct br_ip *group,
77 const unsigned char *src,
78 u8 filter_mode,
79 bool igmpv2_mldv1,
80 bool blocked);
81 static void br_multicast_find_del_pg(struct net_bridge *br,
82 struct net_bridge_port_group *pg);
83 static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
84
85 static struct net_bridge_port_group *
86 br_sg_port_find(struct net_bridge *br,
87 struct net_bridge_port_group_sg_key *sg_p)
88 {
89 lockdep_assert_held_once(&br->multicast_lock);
90
91 return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
92 br_sg_port_rht_params);
93 }
94
95 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
96 struct br_ip *dst)
97 {
98 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
99 }
100
101 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
102 struct br_ip *dst)
103 {
104 struct net_bridge_mdb_entry *ent;
105
106 lockdep_assert_held_once(&br->multicast_lock);
107
108 rcu_read_lock();
109 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
110 rcu_read_unlock();
111
112 return ent;
113 }
114
115 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
116 __be32 dst, __u16 vid)
117 {
118 struct br_ip br_dst;
119
120 memset(&br_dst, 0, sizeof(br_dst));
121 br_dst.dst.ip4 = dst;
122 br_dst.proto = htons(ETH_P_IP);
123 br_dst.vid = vid;
124
125 return br_mdb_ip_get(br, &br_dst);
126 }
127
128 #if IS_ENABLED(CONFIG_IPV6)
129 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
130 const struct in6_addr *dst,
131 __u16 vid)
132 {
133 struct br_ip br_dst;
134
135 memset(&br_dst, 0, sizeof(br_dst));
136 br_dst.dst.ip6 = *dst;
137 br_dst.proto = htons(ETH_P_IPV6);
138 br_dst.vid = vid;
139
140 return br_mdb_ip_get(br, &br_dst);
141 }
142 #endif
143
/* Look up the MDB entry used to forward @skb.
 * Returns NULL when snooping is disabled (globally or for the packet's
 * vlan context) or when the packet is itself IGMP/MLD control traffic.
 * For IGMPv3/MLDv2 an S,G lookup is tried first, falling back to *,G.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	/* IGMP/MLD control packets are never forwarded via the MDB */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* source-specific S,G entry takes precedence */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match: fall back to the *,G lookup below */
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* source-specific S,G entry takes precedence */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP traffic is matched on destination MAC address */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
195
196 /* IMPORTANT: this function must be used only when the contexts cannot be
197 * passed down (e.g. timer) and must be used for read-only purposes because
198 * the vlan snooping option can change, so it can return any context
199 * (non-vlan or vlan). Its initial intended purpose is to read timer values
200 * from the *current* context based on the option. At worst that could lead
201 * to inconsistent timers when the contexts are changed, i.e. src timer
202 * which needs to re-arm with a specific delay taken from the old context
203 */
/* Resolve the multicast port context for @pg: the vlan's per-port context
 * when per-vlan snooping applies to the group's vid, otherwise the port's
 * own context.  Returns NULL when the vlan context exists but has
 * multicast disabled.  See the comment above about read-only usage.
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}
233
234 /* when snooping we need to check if the contexts should be used
235 * in the following order:
236 * - if pmctx is non-NULL (port), check if it should be used
237 * - if pmctx is NULL (bridge), check if brmctx should be used
238 */
239 static bool
240 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
241 const struct net_bridge_mcast_port *pmctx)
242 {
243 if (!netif_running(brmctx->br->dev))
244 return false;
245
246 if (pmctx)
247 return !br_multicast_port_ctx_state_disabled(pmctx);
248 else
249 return !br_multicast_ctx_vlan_disabled(brmctx);
250 }
251
252 static bool br_port_group_equal(struct net_bridge_port_group *p,
253 struct net_bridge_port *port,
254 const unsigned char *src)
255 {
256 if (p->key.port != port)
257 return false;
258
259 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
260 return true;
261
262 return ether_addr_equal(src, p->eth_addr);
263 }
264
/* Install an S,G entry on @pg's port for a *,G EXCLUDE group so traffic
 * from that source keeps being replicated to the port.  No-op when an
 * S,G entry for this (port, sg_ip) already exists.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	/* zero the whole key (incl. padding) - it is used for hash lookups */
	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	/* mark as auto-installed so it can be cleaned up on mode change */
	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
289
/* Remove the automatically installed (STAR_EXCL, kernel-managed) S,G
 * entry for @pg's port; user-installed entries are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	/* zero the whole key (incl. padding) - it is used for hash lookups */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
307
308 /* When a port group transitions to (or is added as) EXCLUDE we need to add it
309 * to all other ports' S,G entries which are not blocked by the current group
310 * for proper replication, the assumption is that any S,G blocked entries
311 * are already added so the S,G,port lookup should skip them.
312 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
313 * deleted we need to remove it from all ports' S,G entries where it was
314 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
315 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* only meaningful for *,G entries (see comment above) */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* sg_ip starts as the *,G address; only the source varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of the *,G entry and add/remove our
	 * S,G entries for each of their installed sources
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
360
361 /* called when adding a new S,G with host_joined == false by default */
362 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
363 struct net_bridge_port_group *sg)
364 {
365 struct net_bridge_mdb_entry *sg_mp;
366
367 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
368 return;
369 if (!star_mp->host_joined)
370 return;
371
372 sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
373 if (!sg_mp)
374 return;
375 sg_mp->host_joined = true;
376 }
377
378 /* set the host_joined state of all of *,G's S,G entries */
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	/* only a *,G entry can drive S,G host state */
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* sg_ip starts as the *,G address; only the source varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	/* copy star_mp's host_joined to every matching S,G mdb entry of
	 * every installed source on every port group
	 */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
407
/* Remove the automatically added ports from an S,G entry once only
 * auto-installed (STAR_EXCL) and permanent ports remain on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* delete every non-permanent port group left on the entry */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
441
/* A new S,G port group @sg appeared under *,G entry @star_mp: sync host
 * join state and add all of *,G's EXCLUDE-mode ports to the S,G so they
 * keep receiving traffic from that source.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	/* zero the whole key (incl. padding) - it is used for hash lookups */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		/* skip ports that already have this S,G installed */
		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		/* mark as auto-installed for later cleanup */
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
487
/* Install the S,G port group corresponding to source entry @src of its
 * *,G port group, and let the kernel take over its lifetime unless it
 * was added as permanent by user-space.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* start blocked when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
528
/* Remove the installed S,G port group that corresponds to source entry
 * @src, unless it was made permanent by user-space.  @fastleave marks
 * the port group for fast-leave in the deletion notification.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* leave user-space managed permanent entries alone */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
562
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	/* zero the whole key (incl. padding) - it is used for hash lookups */
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* forward only while the source timer is running */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only if the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
596
597 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
598 {
599 struct net_bridge_mdb_entry *mp;
600
601 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
602 WARN_ON(!hlist_unhashed(&mp->mdb_node));
603 WARN_ON(mp->ports);
604
605 del_timer_sync(&mp->timer);
606 kfree_rcu(mp, rcu);
607 }
608
/* Unlink an mdb entry from the hash table and the mdb list, then queue
 * it on the gc list for deferred destruction via the gc workqueue.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
619
/* Timer callback: the membership interval of an mdb entry ran out.
 * Drops host join state and deletes the entry if no ports remain.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* bail out if already unlinked, bridge down or timer re-armed */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
638
639 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
640 {
641 struct net_bridge_group_src *src;
642
643 src = container_of(gc, struct net_bridge_group_src, mcast_gc);
644 WARN_ON(!hlist_unhashed(&src->node));
645
646 del_timer_sync(&src->timer);
647 kfree_rcu(src, rcu);
648 }
649
/* Unlink source entry @src from its port group, remove its S,G entry and
 * queue it for deferred destruction.  @fastleave is propagated to the
 * S,G removal for the deletion notification.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
661
662 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
663 {
664 struct net_bridge_port_group *pg;
665
666 pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
667 WARN_ON(!hlist_unhashed(&pg->mglist));
668 WARN_ON(!hlist_empty(&pg->src_list));
669
670 del_timer_sync(&pg->rexmit_timer);
671 del_timer_sync(&pg->timer);
672 kfree_rcu(pg, rcu);
673 }
674
/* Unlink port group @pg from mdb entry @mp (@pp points to @pg's link in
 * the port list), clean up its sources and S,G state, notify user-space
 * and queue the group for deferred destruction.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G port groups also live in the per-port S,G hash */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		/* removing a *,G port group: drop its auto-installed
		 * S,G entries on the other ports
		 */
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* expire the mdb entry right away if nothing references it */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
702
/* Find @pg's link in its mdb entry's port list and delete the group.
 * WARNs if the entry or the link cannot be found (inconsistent state).
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}
726
727 static void br_multicast_port_group_expired(struct timer_list *t)
728 {
729 struct net_bridge_port_group *pg = from_timer(pg, t, timer);
730 struct net_bridge_group_src *src_ent;
731 struct net_bridge *br = pg->key.port->br;
732 struct hlist_node *tmp;
733 bool changed;
734
735 spin_lock(&br->multicast_lock);
736 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
737 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
738 goto out;
739
740 changed = !!(pg->filter_mode == MCAST_EXCLUDE);
741 pg->filter_mode = MCAST_INCLUDE;
742 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
743 if (!timer_pending(&src_ent->timer)) {
744 br_multicast_del_group_src(src_ent, false);
745 changed = true;
746 }
747 }
748
749 if (hlist_empty(&pg->src_list)) {
750 br_multicast_find_del_pg(br, pg);
751 } else if (changed) {
752 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
753
754 if (changed && br_multicast_is_star_g(&pg->key.addr))
755 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
756
757 if (WARN_ON(!mp))
758 goto out;
759 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
760 }
761 out:
762 spin_unlock(&br->multicast_lock);
763 }
764
765 static void br_multicast_gc(struct hlist_head *head)
766 {
767 struct net_bridge_mcast_gc *gcent;
768 struct hlist_node *tmp;
769
770 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
771 hlist_del_init(&gcent->gc_node);
772 gcent->destroy(gcent);
773 }
774 }
775
/* Tag a locally generated query skb with the context's vlan id, unless
 * the vlan egresses untagged or no vlan context applies.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	/* prefer the per-port vlan context when present */
	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}
795
/* Build an IGMP query skb for transmission on the bridge.
 * @ip_dst/@group: destination IP and queried group (0 for general query)
 * @with_srcs: build an IGMPv3 group-and-source specific query from @pg's
 *	source list
 * @over_lmqt: select sources whose timers expire after (true) or up to
 *	(false) the last member query time
 * @sflag: IGMPv3 "suppress router-side processing" flag
 * @igmp_type: out - IGMP type of the built packet
 * @need_rexmit: out - set true when some source still has
 *	retransmissions pending
 * Returns NULL when nothing needs to be sent, the packet would exceed
 * the MTU, or allocation fails.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* pre-count the sources to include so the header
			 * size (and later nsrcs) is known up front
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 bytes for the Router Alert IP option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	/* IPv4 header + Router Alert option */
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20 header + 4 Router Alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* iph->ihl * 4 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill the source list; the count must match the first pass */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
941
942 #if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (IPv6 counterpart of the IGMP query builder,
 * same parameter semantics with LLQT instead of LMQT).  Returns NULL
 * when nothing needs to be sent, allocation fails or no usable IPv6
 * source address exists on the bridge device.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* pre-count the sources to include so the header
			 * size (and later nsrcs) is known up front
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 bytes for the hop-by-hop header carrying Router Alert */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* no usable IPv6 source address - remember that and give up */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* fill the source list; the count must match the first pass */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
1106 #endif
1107
1108 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
1109 struct net_bridge_mcast_port *pmctx,
1110 struct net_bridge_port_group *pg,
1111 struct br_ip *ip_dst,
1112 struct br_ip *group,
1113 bool with_srcs, bool over_lmqt,
1114 u8 sflag, u8 *igmp_type,
1115 bool *need_rexmit)
1116 {
1117 __be32 ip4_dst;
1118
1119 switch (group->proto) {
1120 case htons(ETH_P_IP):
1121 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1122 return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
1123 ip4_dst, group->dst.ip4,
1124 with_srcs, over_lmqt,
1125 sflag, igmp_type,
1126 need_rexmit);
1127 #if IS_ENABLED(CONFIG_IPV6)
1128 case htons(ETH_P_IPV6): {
1129 struct in6_addr ip6_dst;
1130
1131 if (ip_dst)
1132 ip6_dst = ip_dst->dst.ip6;
1133 else
1134 ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1135 htonl(1));
1136
1137 return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
1138 &ip6_dst, &group->dst.ip6,
1139 with_srcs, over_lmqt,
1140 sflag, igmp_type,
1141 need_rexmit);
1142 }
1143 #endif
1144 }
1145 return NULL;
1146 }
1147
1148 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1149 struct br_ip *group)
1150 {
1151 struct net_bridge_mdb_entry *mp;
1152 int err;
1153
1154 mp = br_mdb_ip_get(br, group);
1155 if (mp)
1156 return mp;
1157
1158 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1159 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1160 return ERR_PTR(-E2BIG);
1161 }
1162
1163 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
1164 if (unlikely(!mp))
1165 return ERR_PTR(-ENOMEM);
1166
1167 mp->br = br;
1168 mp->addr = *group;
1169 mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1170 timer_setup(&mp->timer, br_multicast_group_expired, 0);
1171 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1172 br_mdb_rht_params);
1173 if (err) {
1174 kfree(mp);
1175 mp = ERR_PTR(err);
1176 } else {
1177 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1178 }
1179
1180 return mp;
1181 }
1182
/* Timer callback for a single (S,G) source entry: no report refreshed
 * this source before its timer ran out.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* Bail out if the entry was already unlinked, the bridge is down,
	 * or the timer was re-armed after it fired.
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		/* INCLUDE mode: drop the expired source; if it was the
		 * last one, remove the whole port group as well.
		 */
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		/* EXCLUDE mode: keep the entry, refresh forwarding state. */
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1207
1208 struct net_bridge_group_src *
1209 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1210 {
1211 struct net_bridge_group_src *ent;
1212
1213 switch (ip->proto) {
1214 case htons(ETH_P_IP):
1215 hlist_for_each_entry(ent, &pg->src_list, node)
1216 if (ip->src.ip4 == ent->addr.src.ip4)
1217 return ent;
1218 break;
1219 #if IS_ENABLED(CONFIG_IPV6)
1220 case htons(ETH_P_IPV6):
1221 hlist_for_each_entry(ent, &pg->src_list, node)
1222 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1223 return ent;
1224 break;
1225 #endif
1226 }
1227
1228 return NULL;
1229 }
1230
1231 static struct net_bridge_group_src *
1232 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1233 {
1234 struct net_bridge_group_src *grp_src;
1235
1236 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1237 return NULL;
1238
1239 switch (src_ip->proto) {
1240 case htons(ETH_P_IP):
1241 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1242 ipv4_is_multicast(src_ip->src.ip4))
1243 return NULL;
1244 break;
1245 #if IS_ENABLED(CONFIG_IPV6)
1246 case htons(ETH_P_IPV6):
1247 if (ipv6_addr_any(&src_ip->src.ip6) ||
1248 ipv6_addr_is_multicast(&src_ip->src.ip6))
1249 return NULL;
1250 break;
1251 #endif
1252 }
1253
1254 grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
1255 if (unlikely(!grp_src))
1256 return NULL;
1257
1258 grp_src->pg = pg;
1259 grp_src->br = pg->key.port->br;
1260 grp_src->addr = *src_ip;
1261 grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1262 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1263
1264 hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1265 pg->src_ents++;
1266
1267 return grp_src;
1268 }
1269
/* Allocate and link a new port group entry for @group on @port.
 *
 * Caller holds the bridge multicast_lock. Source-specific (non *,G)
 * groups are also inserted into the per-bridge S,G port rhashtable
 * before any list linking; insert failure aborts the whole operation.
 *
 * Returns the new entry or NULL on allocation/insert failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	/* Remember the reporting host's MAC when known; otherwise match
	 * any host (broadcast address).
	 */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
1314
1315 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
1316 {
1317 if (!mp->host_joined) {
1318 mp->host_joined = true;
1319 if (br_multicast_is_star_g(&mp->addr))
1320 br_multicast_star_g_host_state(mp);
1321 if (notify)
1322 br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
1323 }
1324
1325 if (br_group_is_l2(&mp->addr))
1326 return;
1327
1328 mod_timer(&mp->timer,
1329 jiffies + mp->br->multicast_ctx.multicast_membership_interval);
1330 }
1331
1332 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1333 {
1334 if (!mp->host_joined)
1335 return;
1336
1337 mp->host_joined = false;
1338 if (br_multicast_is_star_g(&mp->addr))
1339 br_multicast_star_g_host_state(mp);
1340 if (notify)
1341 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1342 }
1343
/* Core add-group handler, called with the bridge multicast_lock held.
 * Creates the MDB entry if needed and joins either the host (NULL
 * @pmctx) or the given port to it.
 *
 * Returns the port group (NULL is valid for host joins and for
 * contexts that should not be used), or an ERR_PTR on failure.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	/* NULL pmctx means the join is on behalf of the bridge (host)
	 * itself rather than a port.
	 */
	if (!pmctx) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* Walk the port list (kept sorted by descending port pointer)
	 * to find an existing matching entry or the insertion point.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* Publish the new entry to RCU readers only after it is fully set up. */
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* IGMPv2/MLDv1 reports carry no source lists; just refresh the
	 * group membership timer.
	 */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1398
1399 static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
1400 struct net_bridge_mcast_port *pmctx,
1401 struct br_ip *group,
1402 const unsigned char *src,
1403 u8 filter_mode,
1404 bool igmpv2_mldv1)
1405 {
1406 struct net_bridge_port_group *pg;
1407 int err;
1408
1409 spin_lock(&brmctx->br->multicast_lock);
1410 pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
1411 igmpv2_mldv1, false);
1412 /* NULL is considered valid for host joined groups */
1413 err = PTR_ERR_OR_ZERO(pg);
1414 spin_unlock(&brmctx->br->multicast_lock);
1415
1416 return err;
1417 }
1418
1419 static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
1420 struct net_bridge_mcast_port *pmctx,
1421 __be32 group,
1422 __u16 vid,
1423 const unsigned char *src,
1424 bool igmpv2)
1425 {
1426 struct br_ip br_group;
1427 u8 filter_mode;
1428
1429 if (ipv4_is_local_multicast(group))
1430 return 0;
1431
1432 memset(&br_group, 0, sizeof(br_group));
1433 br_group.dst.ip4 = group;
1434 br_group.proto = htons(ETH_P_IP);
1435 br_group.vid = vid;
1436 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1437
1438 return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1439 filter_mode, igmpv2);
1440 }
1441
1442 #if IS_ENABLED(CONFIG_IPV6)
1443 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1444 struct net_bridge_mcast_port *pmctx,
1445 const struct in6_addr *group,
1446 __u16 vid,
1447 const unsigned char *src,
1448 bool mldv1)
1449 {
1450 struct br_ip br_group;
1451 u8 filter_mode;
1452
1453 if (ipv6_addr_is_ll_all_nodes(group))
1454 return 0;
1455
1456 memset(&br_group, 0, sizeof(br_group));
1457 br_group.dst.ip6 = *group;
1458 br_group.proto = htons(ETH_P_IPV6);
1459 br_group.vid = vid;
1460 filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1461
1462 return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1463 filter_mode, mldv1);
1464 }
1465 #endif
1466
1467 static bool br_multicast_rport_del(struct hlist_node *rlist)
1468 {
1469 if (hlist_unhashed(rlist))
1470 return false;
1471
1472 hlist_del_init_rcu(rlist);
1473 return true;
1474 }
1475
/* Unlink the port's IPv4 multicast-router list entry, if linked. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1480
/* Unlink the port's IPv6 multicast-router list entry, if linked.
 * Always reports "nothing removed" when IPv6 is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1489
/* Common handler for a per-port router timer expiry: remove the port
 * from the router list unless its router type pins it there or the
 * timer has been re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
1508
/* Timer callback: the port's IPv4 multicast-router presence timed out. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1516
1517 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the port's IPv6 multicast-router presence timed out. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
1525 #endif
1526
1527 static void br_mc_router_state_change(struct net_bridge *p,
1528 bool is_mc_router)
1529 {
1530 struct switchdev_attr attr = {
1531 .orig_dev = p->dev,
1532 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
1533 .flags = SWITCHDEV_F_DEFER,
1534 .u.mrouter = is_mc_router,
1535 };
1536
1537 switchdev_port_attr_set(p->dev, &attr, NULL);
1538 }
1539
/* Common handler for the bridge's own router timers: clear the
 * router state unless it is pinned or the other family's timer still
 * indicates router presence.
 *
 * NOTE(review): @timer is currently unused by the body - presumably
 * kept for symmetry with the per-port variant; confirm before removal.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1554
/* Timer callback: the bridge's own IPv4 router presence timed out. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1562
1563 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the bridge's own IPv6 router presence timed out. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1571 #endif
1572
/* Common handler for "other querier present" timer expiry: if snooping
 * is still active for this context, take over as querier and start
 * sending our own queries.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1587
/* Timer callback: the other IPv4 querier on the segment went silent. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1595
1596 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the other IPv6 querier on the segment went silent. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
1604 #endif
1605
1606 static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
1607 struct br_ip *ip,
1608 struct sk_buff *skb)
1609 {
1610 if (ip->proto == htons(ETH_P_IP))
1611 brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
1612 #if IS_ENABLED(CONFIG_IPV6)
1613 else
1614 brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
1615 #endif
1616 }
1617
/* Build and emit one query. With a port context the query is
 * transmitted on that port; without one it is looped back into the
 * bridge (we are our own querier).
 *
 * NOTE(review): over_lmqt is seeded from sflag, so a suppressed
 * source-list query is first sent for sources over LMQT and then
 * repeated once for those under it - confirm against RFC 3376 §6.6.3.2
 * retransmission semantics.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* Second pass: also query the sources under LMQT. */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1660
/* Send a general query for @own_query's family and re-arm its timer.
 * Suppressed while another querier is active on the segment, or when
 * snooping/querier functionality is disabled.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Pick family by which own_query struct we were handed. Without
	 * CONFIG_IPV6 the else branch does not exist, so other_query can
	 * stay NULL and we bail out below.
	 */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	/* Startup queries are sent at a shorter interval until the
	 * configured startup count is reached.
	 */
	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1699
1700 static void
1701 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
1702 struct bridge_mcast_own_query *query)
1703 {
1704 struct net_bridge *br = pmctx->port->br;
1705 struct net_bridge_mcast *brmctx;
1706
1707 spin_lock(&br->multicast_lock);
1708 if (br_multicast_port_ctx_state_stopped(pmctx))
1709 goto out;
1710
1711 brmctx = br_multicast_port_ctx_get_global(pmctx);
1712 if (query->startup_sent < brmctx->multicast_startup_query_count)
1713 query->startup_sent++;
1714
1715 br_multicast_send_query(brmctx, pmctx, query);
1716
1717 out:
1718 spin_unlock(&br->multicast_lock);
1719 }
1720
/* Timer callback: time to send the port's next IPv4 general query. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1728
1729 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: time to send the port's next IPv6 general query. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
1737 #endif
1738
/* Timer callback: retransmit pending group and group-and-source
 * specific queries for a port group, while we are the active querier.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* Another querier is active - it handles retransmissions. */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	/* Group-specific query, limited by the remaining rexmit budget. */
	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* Group-and-source query; need_rexmit is set if any source still
	 * has retransmissions outstanding.
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1782
1783 static int br_mc_disabled_update(struct net_device *dev, bool value,
1784 struct netlink_ext_ack *extack)
1785 {
1786 struct switchdev_attr attr = {
1787 .orig_dev = dev,
1788 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1789 .flags = SWITCHDEV_F_DEFER,
1790 .u.mc_disabled = !value,
1791 };
1792
1793 return switchdev_port_attr_set(dev, &attr, extack);
1794 }
1795
/* Initialize a per-port (or per port-vlan, when @vlan is set) multicast
 * context: default router type plus router and own-query timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
1814
/* Tear down a port multicast context: synchronously stop the router
 * timers so their callbacks cannot run after the context is freed.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}
1822
/* Set up multicast state when a port is added to a bridge.
 * Returns 0 or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	/* Propagate the bridge's snooping state to offloading drivers;
	 * lack of driver support (-EOPNOTSUPP) is not an error.
	 */
	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1843
/* Tear down multicast state when a port leaves the bridge. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	/* Steal the GC list so the actual freeing happens unlocked. */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
1861
/* Restart an own-query cycle from scratch: reset the startup counter
 * and make the timer fire immediately. The try_to_del/del pair only
 * re-arms when we could stop the timer, i.e. its callback is not
 * currently running on another CPU.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1870
/* Enable multicast processing for a port context: kick the own-query
 * timers and, for permanently-marked router ports, re-join the router
 * lists. No-op while snooping is off or the bridge is down.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}
1890
1891 void br_multicast_enable_port(struct net_bridge_port *port)
1892 {
1893 struct net_bridge *br = port->br;
1894
1895 spin_lock_bh(&br->multicast_lock);
1896 __br_multicast_enable_port_ctx(&port->multicast_ctx);
1897 spin_unlock_bh(&br->multicast_lock);
1898 }
1899
/* Disable multicast processing for a port context: flush non-permanent
 * groups (only the context's vlan when it is a vlan context), drop the
 * port from the router lists and stop its timers.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	/* ip6 rport_del is safe without CONFIG_IPV6 (stub returns false);
	 * the ip6 timers only exist when IPv6 is compiled in.
	 */
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
1922
1923 void br_multicast_disable_port(struct net_bridge_port *port)
1924 {
1925 spin_lock_bh(&port->br->multicast_lock);
1926 __br_multicast_disable_port_ctx(&port->multicast_ctx);
1927 spin_unlock_bh(&port->br->multicast_lock);
1928 }
1929
1930 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1931 {
1932 struct net_bridge_group_src *ent;
1933 struct hlist_node *tmp;
1934 int deleted = 0;
1935
1936 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1937 if (ent->flags & BR_SGRP_F_DELETE) {
1938 br_multicast_del_group_src(ent, false);
1939 deleted++;
1940 }
1941
1942 return deleted;
1943 }
1944
/* Re-arm a source entry's timer and refresh its forwarding state. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
1951
/* Lower the timers of all sources marked BR_SGRP_F_SEND to LMQT and,
 * when we are the active querier, send a group-and-source specific
 * query and schedule its retransmissions.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* Only budget retransmissions when we are
				 * the querier for this family.
				 */
				if (br_opt_get(brmctx->br,
					       BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	/* Another querier handles the actual queries. */
	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	/* Pull the rexmit timer forward to the last-member interval. */
	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
1999
/* Send a group-specific query (if we are the querier) and lower the
 * group timer to LMQT when the group is in EXCLUDE mode.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* One query now, last_member_count - 1 retransmissions. */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
2034
2035 /* State Msg type New state Actions
2036 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
2037 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
2038 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
2039 */
/* Merge the report's source list into @pg for IS_IN/ALLOW records (see
 * the state table above): create missing sources and set each source
 * timer to GMI. Returns true if anything changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* Zero the whole key (including padding) - br_ip is hashed. */
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		/* Copy the raw address out of the packet's record. */
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2072
2073 /* State Msg type New state Actions
2074 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2075 * Delete (A-B)
2076 * Group Timer=GMI
2077 */
/* IS_EX record received while @pg is in INCLUDE mode (see the state
 * table above): keep only sources present in the report (A*B, B-A with
 * zero timers) and delete the rest (A-B).
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* Tentatively mark everything for deletion, then unmark the
	 * sources the report still lists.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2108
2109 /* State Msg type New state Actions
2110 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
2111 * Delete (X-A)
2112 * Delete (Y-A)
2113 * Group Timer=GMI
2114 */
/* IS_EX record received while @pg is already in EXCLUDE mode (see the
 * state table above): keep the intersection, start new sources (A-X-Y)
 * at GMI, and delete sources absent from the report.
 * Returns true if anything changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* Tentatively mark everything for deletion, then unmark the
	 * sources the report still lists.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2155
/* Process an IS_EXCLUDE group record for @pg, dispatching on the port
 * group's current filter mode.  In both cases the group ends up in
 * EXCLUDE mode with its group timer set to the group membership
 * interval, matching the state tables above.
 * Returns true if the MDB entry changed and a notification is needed.
 */
2156 static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
2157 struct net_bridge_port_group *pg, void *h_addr,
2158 void *srcs, u32 nsrcs, size_t addr_size,
2159 int grec_type)
2160 {
2161 bool changed = false;
2162
2163 switch (pg->filter_mode) {
2164 case MCAST_INCLUDE:
2165 __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2166 grec_type);
/* INCLUDE -> EXCLUDE transition always changes (*,G) handling */
2167 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2168 changed = true;
2169 break;
2170 case MCAST_EXCLUDE:
2171 changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
2172 addr_size, grec_type);
2173 break;
2174 }
2175
2176 pg->filter_mode = MCAST_EXCLUDE;
/* Group Timer=GMI */
2177 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2178
2179 return changed;
2180 }
2181
2182 /* State Msg type New state Actions
2183 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
2184 * Send Q(G,A-B)
2185 */
/* INCLUDE (A) mode handler for a TO_INCLUDE (B) record: refresh/create
 * the reported sources with GMI timers ((B)=GMI) and group-and-source
 * query the remainder (Q(G,A-B)), per the state table above.
 * Returns true if new source entries were created or EHT state changed.
 */
2186 static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
2187 struct net_bridge_mcast_port *pmctx,
2188 struct net_bridge_port_group *pg, void *h_addr,
2189 void *srcs, u32 nsrcs, size_t addr_size,
2190 int grec_type)
2191 {
/* to_send counts sources that will get a group-and-source query */
2192 u32 src_idx, to_send = pg->src_ents;
2193 struct net_bridge_group_src *ent;
2194 unsigned long now = jiffies;
2195 bool changed = false;
2196 struct br_ip src_ip;
2197
/* provisionally query every current source; reported ones opt out below */
2198 hlist_for_each_entry(ent, &pg->src_list, node)
2199 ent->flags |= BR_SGRP_F_SEND;
2200
2201 memset(&src_ip, 0, sizeof(src_ip));
2202 src_ip.proto = pg->key.addr.proto;
2203 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2204 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2205 ent = br_multicast_find_group_src(pg, &src_ip);
2206 if (ent) {
2207 ent->flags &= ~BR_SGRP_F_SEND;
2208 to_send--;
2209 } else {
2210 ent = br_multicast_new_group_src(pg, &src_ip);
2211 if (ent)
2212 changed = true;
2213 }
/* (B)=GMI: refresh timer for each reported source */
2214 if (ent)
2215 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2216 }
2217
2218 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2219 grec_type))
2220 changed = true;
2221
/* Send Q(G,A-B) */
2222 if (to_send)
2223 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2224
2225 return changed;
2226 }
2227
2228 /* State Msg type New state Actions
2229 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
2230 * Send Q(G,X-A)
2231 * Send Q(G)
2232 */
/* EXCLUDE (X,Y) mode handler for a TO_INCLUDE (A) record: refresh/create
 * the reported sources with GMI timers ((A)=GMI), group-and-source query
 * the active sources not reported (Q(G,X-A)), and send a group query
 * (Q(G)), per the state table above.  Only sources with a pending timer
 * (i.e. in the X set, not the Y "blocked" set) are candidates for Q(G,X-A).
 * Returns true if new source entries were created or EHT state changed.
 */
2233 static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
2234 struct net_bridge_mcast_port *pmctx,
2235 struct net_bridge_port_group *pg, void *h_addr,
2236 void *srcs, u32 nsrcs, size_t addr_size,
2237 int grec_type)
2238 {
2239 u32 src_idx, to_send = pg->src_ents;
2240 struct net_bridge_group_src *ent;
2241 unsigned long now = jiffies;
2242 bool changed = false;
2243 struct br_ip src_ip;
2244
/* only active (timer-pending, X set) sources are queried */
2245 hlist_for_each_entry(ent, &pg->src_list, node)
2246 if (timer_pending(&ent->timer))
2247 ent->flags |= BR_SGRP_F_SEND;
2248
2249 memset(&src_ip, 0, sizeof(src_ip));
2250 src_ip.proto = pg->key.addr.proto;
2251 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2252 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2253 ent = br_multicast_find_group_src(pg, &src_ip);
2254 if (ent) {
2255 if (timer_pending(&ent->timer)) {
2256 ent->flags &= ~BR_SGRP_F_SEND;
2257 to_send--;
2258 }
2259 } else {
2260 ent = br_multicast_new_group_src(pg, &src_ip);
2261 if (ent)
2262 changed = true;
2263 }
/* (A)=GMI */
2264 if (ent)
2265 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2266 }
2267
2268 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2269 grec_type))
2270 changed = true;
2271
/* Send Q(G,X-A) */
2272 if (to_send)
2273 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2274
/* Send Q(G) */
2275 __grp_send_query_and_rexmit(brmctx, pmctx, pg);
2276
2277 return changed;
2278 }
2279
/* Process a TO_INCLUDE group record for @pg, dispatching on filter mode.
 * If explicit host tracking says no hosts remain interested, the port
 * group is deleted (fast-leave).  Returns true if the MDB entry changed
 * and a notification should be sent; always false after a delete since
 * the deletion path has already notified and @pg must not be touched.
 */
2280 static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
2281 struct net_bridge_mcast_port *pmctx,
2282 struct net_bridge_port_group *pg, void *h_addr,
2283 void *srcs, u32 nsrcs, size_t addr_size,
2284 int grec_type)
2285 {
2286 bool changed = false;
2287
2288 switch (pg->filter_mode) {
2289 case MCAST_INCLUDE:
2290 changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
2291 nsrcs, addr_size, grec_type);
2292 break;
2293 case MCAST_EXCLUDE:
2294 changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
2295 nsrcs, addr_size, grec_type);
2296 break;
2297 }
2298
2299 if (br_multicast_eht_should_del_pg(pg)) {
2300 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2301 br_multicast_find_del_pg(pg->key.port->br, pg);
2302 /* a notification has already been sent and we shouldn't
2303 * access pg after the delete so we have to return false
2304 */
2305 changed = false;
2306 }
2307
2308 return changed;
2309 }
2310
2311 /* State Msg type New state Actions
2312 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2313 * Delete (A-B)
2314 * Send Q(G,A*B)
2315 * Group Timer=GMI
2316 */
/* INCLUDE (A) mode handler for a TO_EXCLUDE (B) record: sources in both
 * sets (A*B) are kept and queried (Q(G,A*B)); sources only in the report
 * (B-A) are created with no timer ((B-A)=0); sources only in the current
 * set (A-B) are deleted.  Forwarding state is synced for every surviving
 * source.  The caller switches the group to EXCLUDE and arms the group
 * timer, so nothing is returned here.
 */
2317 static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
2318 struct net_bridge_mcast_port *pmctx,
2319 struct net_bridge_port_group *pg, void *h_addr,
2320 void *srcs, u32 nsrcs, size_t addr_size,
2321 int grec_type)
2322 {
2323 struct net_bridge_group_src *ent;
2324 u32 src_idx, to_send = 0;
2325 struct br_ip src_ip;
2326
/* default: delete; reported sources flip to "send query" below */
2327 hlist_for_each_entry(ent, &pg->src_list, node)
2328 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2329
2330 memset(&src_ip, 0, sizeof(src_ip));
2331 src_ip.proto = pg->key.addr.proto;
2332 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2333 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2334 ent = br_multicast_find_group_src(pg, &src_ip);
2335 if (ent) {
2336 ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2337 BR_SGRP_F_SEND;
2338 to_send++;
2339 } else {
2340 ent = br_multicast_new_group_src(pg, &src_ip);
2341 }
2342 if (ent)
2343 br_multicast_fwd_src_handle(ent);
2344 }
2345
2346 br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2347 grec_type);
2348
/* Delete (A-B), then Send Q(G,A*B) */
2349 __grp_src_delete_marked(pg);
2350 if (to_send)
2351 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2352 }
2353
2354 /* State Msg type New state Actions
2355 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2356 * Delete (X-A)
2357 * Delete (Y-A)
2358 * Send Q(G,A-Y)
2359 * Group Timer=GMI
2360 */
/* EXCLUDE (X,Y) mode handler for a TO_EXCLUDE (A) record: mark all
 * current sources for deletion, keep the reported ones, and create
 * unknown ones inheriting the group timer ((A-X-Y)=Group Timer).
 * Reported sources whose timer is running (A-Y) are additionally queried
 * (Q(G,A-Y)); leftovers (X-A, Y-A) are deleted.
 * Returns true if the port group's state changed.
 */
2361 static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
2362 struct net_bridge_mcast_port *pmctx,
2363 struct net_bridge_port_group *pg, void *h_addr,
2364 void *srcs, u32 nsrcs, size_t addr_size,
2365 int grec_type)
2366 {
2367 struct net_bridge_group_src *ent;
2368 u32 src_idx, to_send = 0;
2369 bool changed = false;
2370 struct br_ip src_ip;
2371
2372 hlist_for_each_entry(ent, &pg->src_list, node)
2373 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2374
2375 memset(&src_ip, 0, sizeof(src_ip));
2376 src_ip.proto = pg->key.addr.proto;
2377 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2378 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2379 ent = br_multicast_find_group_src(pg, &src_ip);
2380 if (ent) {
2381 ent->flags &= ~BR_SGRP_F_DELETE;
2382 } else {
/* (A-X-Y)=Group Timer: inherit the group's remaining time */
2383 ent = br_multicast_new_group_src(pg, &src_ip);
2384 if (ent) {
2385 __grp_src_mod_timer(ent, pg->timer.expires);
2386 changed = true;
2387 }
2388 }
/* active reported sources (A-Y) get the group-and-source query */
2389 if (ent && timer_pending(&ent->timer)) {
2390 ent->flags |= BR_SGRP_F_SEND;
2391 to_send++;
2392 }
2393 }
2394
2395 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2396 grec_type))
2397 changed = true;
2398
/* Delete (X-A) and (Y-A), then Send Q(G,A-Y) */
2399 if (__grp_src_delete_marked(pg))
2400 changed = true;
2401 if (to_send)
2402 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2403
2404 return changed;
2405 }
2406
/* Process a TO_EXCLUDE group record for @pg, dispatching on filter mode.
 * In both cases the group ends up in EXCLUDE mode with its group timer
 * set to the group membership interval (Group Timer=GMI).
 * Returns true if the MDB entry changed and a notification is needed.
 */
2407 static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
2408 struct net_bridge_mcast_port *pmctx,
2409 struct net_bridge_port_group *pg, void *h_addr,
2410 void *srcs, u32 nsrcs, size_t addr_size,
2411 int grec_type)
2412 {
2413 bool changed = false;
2414
2415 switch (pg->filter_mode) {
2416 case MCAST_INCLUDE:
2417 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
2418 addr_size, grec_type);
/* INCLUDE -> EXCLUDE transition always changes (*,G) handling */
2419 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2420 changed = true;
2421 break;
2422 case MCAST_EXCLUDE:
2423 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
2424 nsrcs, addr_size, grec_type);
2425 break;
2426 }
2427
2428 pg->filter_mode = MCAST_EXCLUDE;
2429 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2430
2431 return changed;
2432 }
2433
2434 /* State Msg type New state Actions
2435 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2436 */
/* INCLUDE (A) mode handler for a BLOCK (B) record: the source set is not
 * modified; sources present in both the current set and the report (A*B)
 * are group-and-source queried (Q(G,A*B)) to check for remaining
 * listeners.  Returns true only if EHT per-host state changed.
 */
2437 static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
2438 struct net_bridge_mcast_port *pmctx,
2439 struct net_bridge_port_group *pg, void *h_addr,
2440 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2441 {
2442 struct net_bridge_group_src *ent;
2443 u32 src_idx, to_send = 0;
2444 bool changed = false;
2445 struct br_ip src_ip;
2446
2447 hlist_for_each_entry(ent, &pg->src_list, node)
2448 ent->flags &= ~BR_SGRP_F_SEND;
2449
2450 memset(&src_ip, 0, sizeof(src_ip));
2451 src_ip.proto = pg->key.addr.proto;
2452 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2453 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2454 ent = br_multicast_find_group_src(pg, &src_ip);
2455 if (ent) {
2456 ent->flags |= BR_SGRP_F_SEND;
2457 to_send++;
2458 }
2459 }
2460
2461 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2462 grec_type))
2463 changed = true;
2464
/* Send Q(G,A*B) */
2465 if (to_send)
2466 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2467
2468 return changed;
2469 }
2470
2471 /* State Msg type New state Actions
2472 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2473 * Send Q(G,A-Y)
2474 */
/* EXCLUDE (X,Y) mode handler for a BLOCK (A) record: blocked sources not
 * yet known (A-X-Y) are created inheriting the group timer; every
 * reported source that is still active (timer pending, i.e. A-Y) is
 * group-and-source queried (Q(G,A-Y)).  Returns true if new source
 * entries were created or EHT state changed.
 */
2475 static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
2476 struct net_bridge_mcast_port *pmctx,
2477 struct net_bridge_port_group *pg, void *h_addr,
2478 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2479 {
2480 struct net_bridge_group_src *ent;
2481 u32 src_idx, to_send = 0;
2482 bool changed = false;
2483 struct br_ip src_ip;
2484
2485 hlist_for_each_entry(ent, &pg->src_list, node)
2486 ent->flags &= ~BR_SGRP_F_SEND;
2487
2488 memset(&src_ip, 0, sizeof(src_ip));
2489 src_ip.proto = pg->key.addr.proto;
2490 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2491 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2492 ent = br_multicast_find_group_src(pg, &src_ip);
2493 if (!ent) {
/* (A-X-Y)=Group Timer */
2494 ent = br_multicast_new_group_src(pg, &src_ip);
2495 if (ent) {
2496 __grp_src_mod_timer(ent, pg->timer.expires);
2497 changed = true;
2498 }
2499 }
2500 if (ent && timer_pending(&ent->timer)) {
2501 ent->flags |= BR_SGRP_F_SEND;
2502 to_send++;
2503 }
2504 }
2505
2506 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2507 grec_type))
2508 changed = true;
2509
/* Send Q(G,A-Y) */
2510 if (to_send)
2511 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2512
2513 return changed;
2514 }
2515
/* Process a BLOCK group record for @pg, dispatching on filter mode.
 * The port group is deleted when an INCLUDE-mode group has no sources
 * left, or when explicit host tracking reports no interested hosts
 * (fast-leave).  Returns true if the MDB entry changed; always false
 * after a delete since that path already notified and @pg is gone.
 */
2516 static bool br_multicast_block(struct net_bridge_mcast *brmctx,
2517 struct net_bridge_mcast_port *pmctx,
2518 struct net_bridge_port_group *pg, void *h_addr,
2519 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2520 {
2521 bool changed = false;
2522
2523 switch (pg->filter_mode) {
2524 case MCAST_INCLUDE:
2525 changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
2526 nsrcs, addr_size, grec_type);
2527 break;
2528 case MCAST_EXCLUDE:
2529 changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
2530 nsrcs, addr_size, grec_type);
2531 break;
2532 }
2533
2534 if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
2535 br_multicast_eht_should_del_pg(pg)) {
2536 if (br_multicast_eht_should_del_pg(pg))
2537 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2538 br_multicast_find_del_pg(pg->key.port->br, pg);
2539 /* a notification has already been sent and we shouldn't
2540 * access pg after the delete so we have to return false
2541 */
2542 changed = false;
2543 }
2544
2545 return changed;
2546 }
2547
/* Walk @mp's port group list looking for the entry matching bridge port
 * @p and (if source-address tracking is active) host source MAC @src.
 * Must be called with the bridge's multicast_lock held (the list is
 * dereferenced via mlock_dereference).  Returns the match or NULL.
 */
2548 static struct net_bridge_port_group *
2549 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2550 struct net_bridge_port *p,
2551 const unsigned char *src)
2552 {
2553 struct net_bridge *br __maybe_unused = mp->br;
2554 struct net_bridge_port_group *pg;
2555
2556 for (pg = mlock_dereference(mp->ports, br);
2557 pg;
2558 pg = mlock_dereference(pg->next, br))
2559 if (br_port_group_equal(pg, p, src))
2560 return pg;
2561
2562 return NULL;
2563 }
2564
/* Parse and process an IGMPv3 membership report from @skb.
 * Each group record is length-checked (ip_mc_may_pull) before access;
 * records with unknown types are skipped.  Join/leave bookkeeping is
 * done first; full per-source processing only happens when a port
 * context exists and the bridge is not forced into IGMPv2 mode.  The
 * per-record source handling runs under br->multicast_lock, and an MDB
 * notification is sent whenever the port group state changed.
 * Returns 0 on success or -EINVAL on a malformed/truncated packet.
 */
2565 static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
2566 struct net_bridge_mcast_port *pmctx,
2567 struct sk_buff *skb,
2568 u16 vid)
2569 {
2570 bool igmpv2 = brmctx->multicast_igmp_version == 2;
2571 struct net_bridge_mdb_entry *mdst;
2572 struct net_bridge_port_group *pg;
2573 const unsigned char *src;
2574 struct igmpv3_report *ih;
2575 struct igmpv3_grec *grec;
2576 int i, len, num, type;
2577 __be32 group, *h_addr;
2578 bool changed = false;
2579 int err = 0;
2580 u16 nsrcs;
2581
2582 ih = igmpv3_report_hdr(skb);
2583 num = ntohs(ih->ngrec);
2584 len = skb_transport_offset(skb) + sizeof(*ih);
2585
2586 for (i = 0; i < num; i++) {
/* validate the fixed grec header before dereferencing it */
2587 len += sizeof(*grec);
2588 if (!ip_mc_may_pull(skb, len))
2589 return -EINVAL;
2590
2591 grec = (void *)(skb->data + len - sizeof(*grec));
2592 group = grec->grec_mca;
2593 type = grec->grec_type;
2594 nsrcs = ntohs(grec->grec_nsrcs);
2595
/* then validate the variable-length IPv4 source array (4B each) */
2596 len += nsrcs * 4;
2597 if (!ip_mc_may_pull(skb, len))
2598 return -EINVAL;
2599
2600 switch (type) {
2601 case IGMPV3_MODE_IS_INCLUDE:
2602 case IGMPV3_MODE_IS_EXCLUDE:
2603 case IGMPV3_CHANGE_TO_INCLUDE:
2604 case IGMPV3_CHANGE_TO_EXCLUDE:
2605 case IGMPV3_ALLOW_NEW_SOURCES:
2606 case IGMPV3_BLOCK_OLD_SOURCES:
2607 break;
2608
2609 default:
2610 continue;
2611 }
2612
2613 src = eth_hdr(skb)->h_source;
/* an INCLUDE record with no sources is a leave */
2614 if (nsrcs == 0 &&
2615 (type == IGMPV3_CHANGE_TO_INCLUDE ||
2616 type == IGMPV3_MODE_IS_INCLUDE)) {
2617 if (!pmctx || igmpv2) {
2618 br_ip4_multicast_leave_group(brmctx, pmctx,
2619 group, vid, src);
2620 continue;
2621 }
2622 } else {
2623 err = br_ip4_multicast_add_group(brmctx, pmctx, group,
2624 vid, src, igmpv2);
2625 if (err)
2626 break;
2627 }
2628
/* per-source processing needs a port and IGMPv3 operation */
2629 if (!pmctx || igmpv2)
2630 continue;
2631
2632 spin_lock_bh(&brmctx->br->multicast_lock);
2633 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2634 goto unlock_continue;
2635
2636 mdst = br_mdb_ip4_get(brmctx->br, group, vid);
2637 if (!mdst)
2638 goto unlock_continue;
2639 pg = br_multicast_find_port(mdst, pmctx->port, src);
2640 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2641 goto unlock_continue;
2642 /* reload grec and host addr */
2643 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
2644 h_addr = &ip_hdr(skb)->saddr;
2645 switch (type) {
2646 case IGMPV3_ALLOW_NEW_SOURCES:
2647 changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2648 grec->grec_src,
2649 nsrcs, sizeof(__be32), type);
2650 break;
2651 case IGMPV3_MODE_IS_INCLUDE:
2652 changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2653 grec->grec_src,
2654 nsrcs, sizeof(__be32), type);
2655 break;
2656 case IGMPV3_MODE_IS_EXCLUDE:
2657 changed = br_multicast_isexc(brmctx, pg, h_addr,
2658 grec->grec_src,
2659 nsrcs, sizeof(__be32), type);
2660 break;
2661 case IGMPV3_CHANGE_TO_INCLUDE:
2662 changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2663 grec->grec_src,
2664 nsrcs, sizeof(__be32), type);
2665 break;
2666 case IGMPV3_CHANGE_TO_EXCLUDE:
2667 changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2668 grec->grec_src,
2669 nsrcs, sizeof(__be32), type);
2670 break;
2671 case IGMPV3_BLOCK_OLD_SOURCES:
2672 changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2673 grec->grec_src,
2674 nsrcs, sizeof(__be32), type);
2675 break;
2676 }
2677 if (changed)
2678 br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2679 unlock_continue:
2680 spin_unlock_bh(&brmctx->br->multicast_lock);
2681 }
2682
2683 return err;
2684 }
2685
2686 #if IS_ENABLED(CONFIG_IPV6)
/* Parse and process an MLDv2 listener report from @skb; IPv6 analogue of
 * br_ip4_multicast_igmp3_report().  grec_nsrcs is read via
 * skb_header_pointer after an explicit bounds check, then the full
 * record (struct_size of grec + sources) is length-checked before use.
 * Join/leave bookkeeping runs first; per-source processing happens under
 * br->multicast_lock when a port context exists and the bridge is not in
 * MLDv1 mode, and an MDB notification is sent on change.
 * Returns 0 on success or -EINVAL on a malformed/truncated packet.
 */
2687 static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
2688 struct net_bridge_mcast_port *pmctx,
2689 struct sk_buff *skb,
2690 u16 vid)
2691 {
2692 bool mldv1 = brmctx->multicast_mld_version == 1;
2693 struct net_bridge_mdb_entry *mdst;
2694 struct net_bridge_port_group *pg;
2695 unsigned int nsrcs_offset;
2696 const unsigned char *src;
2697 struct icmp6hdr *icmp6h;
2698 struct in6_addr *h_addr;
2699 struct mld2_grec *grec;
2700 unsigned int grec_len;
2701 bool changed = false;
2702 int i, len, num;
2703 int err = 0;
2704
2705 if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
2706 return -EINVAL;
2707
2708 icmp6h = icmp6_hdr(skb);
/* number of group records lives in the ICMPv6 data field */
2709 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
2710 len = skb_transport_offset(skb) + sizeof(*icmp6h);
2711
2712 for (i = 0; i < num; i++) {
2713 __be16 *_nsrcs, __nsrcs;
2714 u16 nsrcs;
2715
2716 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
2717
/* make sure grec_nsrcs itself is within the transport payload */
2718 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
2719 nsrcs_offset + sizeof(__nsrcs))
2720 return -EINVAL;
2721
2722 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
2723 sizeof(__nsrcs), &__nsrcs);
2724 if (!_nsrcs)
2725 return -EINVAL;
2726
2727 nsrcs = ntohs(*_nsrcs);
/* header + nsrcs in6_addr entries, overflow-safe via struct_size */
2728 grec_len = struct_size(grec, grec_src, nsrcs);
2729
2730 if (!ipv6_mc_may_pull(skb, len + grec_len))
2731 return -EINVAL;
2732
2733 grec = (struct mld2_grec *)(skb->data + len);
2734 len += grec_len;
2735
2736 switch (grec->grec_type) {
2737 case MLD2_MODE_IS_INCLUDE:
2738 case MLD2_MODE_IS_EXCLUDE:
2739 case MLD2_CHANGE_TO_INCLUDE:
2740 case MLD2_CHANGE_TO_EXCLUDE:
2741 case MLD2_ALLOW_NEW_SOURCES:
2742 case MLD2_BLOCK_OLD_SOURCES:
2743 break;
2744
2745 default:
2746 continue;
2747 }
2748
2749 src = eth_hdr(skb)->h_source;
/* an INCLUDE record with no sources is a leave */
2750 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2751 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2752 nsrcs == 0) {
2753 if (!pmctx || mldv1) {
2754 br_ip6_multicast_leave_group(brmctx, pmctx,
2755 &grec->grec_mca,
2756 vid, src);
2757 continue;
2758 }
2759 } else {
2760 err = br_ip6_multicast_add_group(brmctx, pmctx,
2761 &grec->grec_mca, vid,
2762 src, mldv1);
2763 if (err)
2764 break;
2765 }
2766
/* per-source processing needs a port and MLDv2 operation */
2767 if (!pmctx || mldv1)
2768 continue;
2769
2770 spin_lock_bh(&brmctx->br->multicast_lock);
2771 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2772 goto unlock_continue;
2773
2774 mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
2775 if (!mdst)
2776 goto unlock_continue;
2777 pg = br_multicast_find_port(mdst, pmctx->port, src);
2778 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2779 goto unlock_continue;
2780 h_addr = &ipv6_hdr(skb)->saddr;
2781 switch (grec->grec_type) {
2782 case MLD2_ALLOW_NEW_SOURCES:
2783 changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2784 grec->grec_src, nsrcs,
2785 sizeof(struct in6_addr),
2786 grec->grec_type);
2787 break;
2788 case MLD2_MODE_IS_INCLUDE:
2789 changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2790 grec->grec_src, nsrcs,
2791 sizeof(struct in6_addr),
2792 grec->grec_type);
2793 break;
2794 case MLD2_MODE_IS_EXCLUDE:
2795 changed = br_multicast_isexc(brmctx, pg, h_addr,
2796 grec->grec_src, nsrcs,
2797 sizeof(struct in6_addr),
2798 grec->grec_type);
2799 break;
2800 case MLD2_CHANGE_TO_INCLUDE:
2801 changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2802 grec->grec_src, nsrcs,
2803 sizeof(struct in6_addr),
2804 grec->grec_type);
2805 break;
2806 case MLD2_CHANGE_TO_EXCLUDE:
2807 changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2808 grec->grec_src, nsrcs,
2809 sizeof(struct in6_addr),
2810 grec->grec_type);
2811 break;
2812 case MLD2_BLOCK_OLD_SOURCES:
2813 changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2814 grec->grec_src, nsrcs,
2815 sizeof(struct in6_addr),
2816 grec->grec_type);
2817 break;
2818 }
2819 if (changed)
2820 br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2821 unlock_continue:
2822 spin_unlock_bh(&brmctx->br->multicast_lock);
2823 }
2824
2825 return err;
2826 }
2827 #endif
2828
/* IGMP querier election: adopt @saddr as the selected querier if no
 * querier is currently active (neither own- nor other-query timer is
 * pending), if none was recorded yet, or if @saddr is numerically lower
 * than or equal to the current one (lowest-address-wins election).
 * Called with multicast_lock held (see comment on the rcu_assign_pointer
 * below).  Returns true if @saddr/@port became the selected querier.
 */
2829 static bool br_ip4_multicast_select_querier(struct net_bridge_mcast *brmctx,
2830 struct net_bridge_port *port,
2831 __be32 saddr)
2832 {
2833 if (!timer_pending(&brmctx->ip4_own_query.timer) &&
2834 !timer_pending(&brmctx->ip4_other_query.timer))
2835 goto update;
2836
2837 if (!brmctx->ip4_querier.addr.src.ip4)
2838 goto update;
2839
/* compare in host byte order so the numeric ordering is correct */
2840 if (ntohl(saddr) <= ntohl(brmctx->ip4_querier.addr.src.ip4))
2841 goto update;
2842
2843 return false;
2844
2845 update:
2846 brmctx->ip4_querier.addr.src.ip4 = saddr;
2847
2848 /* update protected by general multicast_lock by caller */
2849 rcu_assign_pointer(brmctx->ip4_querier.port, port);
2850
2851 return true;
2852 }
2853
2854 #if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election; IPv6 analogue of the IPv4 version above, using
 * ipv6_addr_cmp for the lowest-address-wins comparison.
 */
2855 static bool br_ip6_multicast_select_querier(struct net_bridge_mcast *brmctx,
2856 struct net_bridge_port *port,
2857 struct in6_addr *saddr)
2858 {
2859 if (!timer_pending(&brmctx->ip6_own_query.timer) &&
2860 !timer_pending(&brmctx->ip6_other_query.timer))
2861 goto update;
2862
2863 if (ipv6_addr_cmp(saddr, &brmctx->ip6_querier.addr.src.ip6) <= 0)
2864 goto update;
2865
2866 return false;
2867
2868 update:
2869 brmctx->ip6_querier.addr.src.ip6 = *saddr;
2870
2871 /* update protected by general multicast_lock by caller */
2872 rcu_assign_pointer(brmctx->ip6_querier.port, port);
2873
2874 return true;
2875 }
2876 #endif
2877
/* Re-arm the other-querier-present timer for @query.  delay_time (the
 * earliest moment we may take over as querier) is only refreshed when
 * the timer was not already running; the expiry itself is always pushed
 * out by the full querier interval.
 */
2878 static void
2879 br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
2880 struct bridge_mcast_other_query *query,
2881 unsigned long max_delay)
2882 {
2883 if (!timer_pending(&query->timer))
2884 query->delay_time = jiffies + max_delay;
2885
2886 mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
2887 }
2888
/* Offload the port's multicast-router state to switchdev drivers via a
 * deferred SWITCHDEV_ATTR_ID_PORT_MROUTER attribute set; errors from the
 * driver are intentionally ignored (best effort).
 */
2889 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2890 bool is_mc_router)
2891 {
2892 struct switchdev_attr attr = {
2893 .orig_dev = p->dev,
2894 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2895 .flags = SWITCHDEV_F_DEFER,
2896 .u.mrouter = is_mc_router,
2897 };
2898
2899 switchdev_port_attr_set(p->dev, &attr, NULL);
2900 }
2901
/* Map a router-list node back to its bridge port.  The same pmctx is
 * linked into the IPv4 and (when enabled) IPv6 router lists through
 * different hlist_node members, so which container field to use depends
 * on which list @rlist came from.
 */
2902 static struct net_bridge_port *
2903 br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
2904 struct hlist_head *mc_router_list,
2905 struct hlist_node *rlist)
2906 {
2907 struct net_bridge_mcast_port *pmctx;
2908
2909 #if IS_ENABLED(CONFIG_IPV6)
2910 if (mc_router_list == &brmctx->ip6_mc_router_list)
2911 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
2912 ip6_rlist);
2913 else
2914 #endif
2915 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
2916 ip4_rlist);
2917
2918 return pmctx->port;
2919 }
2920
/* Find the insertion slot for @port in @mc_router_list, which is kept
 * sorted by descending port pointer value.  Returns the node to insert
 * behind, or NULL to insert at the head.
 */
2921 static struct hlist_node *
2922 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
2923 struct net_bridge_port *port,
2924 struct hlist_head *mc_router_list)
2925
2926 {
2927 struct hlist_node *slot = NULL;
2928 struct net_bridge_port *p;
2929 struct hlist_node *rlist;
2930
2931 hlist_for_each(rlist, mc_router_list) {
2932 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
2933
/* stop at the first entry whose pointer is <= ours */
2934 if ((unsigned long)port >= (unsigned long)p)
2935 break;
2936
2937 slot = rlist;
2938 }
2939
2940 return slot;
2941 }
2942
/* Return true if @pmctx is NOT a multicast router for the other protocol
 * family than the one @rnode belongs to (i.e. its other-family rlist
 * node is unhashed).  Without IPv6 there is no other family, so this is
 * trivially true.
 */
2943 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
2944 struct hlist_node *rnode)
2945 {
2946 #if IS_ENABLED(CONFIG_IPV6)
2947 if (rnode != &pmctx->ip6_rlist)
2948 return hlist_unhashed(&pmctx->ip6_rlist);
2949 else
2950 return hlist_unhashed(&pmctx->ip4_rlist);
2951 #else
2952 return true;
2953 #endif
2954 }
2955
2956 /* Add port to router_list
2957 * list is maintained ordered by pointer value
2958 * and locked by br->multicast_lock and RCU
2959 */
2960 static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
2961 struct net_bridge_mcast_port *pmctx,
2962 struct hlist_node *rlist,
2963 struct hlist_head *mc_router_list)
2964 {
2965 struct hlist_node *slot;
2966
/* already on a router list - nothing to do */
2967 if (!hlist_unhashed(rlist))
2968 return;
2969
2970 slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
2971
/* keep the list ordered by pointer value; RCU-safe insertion */
2972 if (slot)
2973 hlist_add_behind_rcu(rlist, slot);
2974 else
2975 hlist_add_head_rcu(rlist, mc_router_list);
2976
2977 /* For backwards compatibility for now, only notify if we
2978 * switched from no IPv4/IPv6 multicast router to a new
2979 * IPv4 or IPv6 multicast router.
2980 */
2981 if (br_multicast_no_router_otherpf(pmctx, rlist)) {
2982 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
2983 br_port_mc_router_state_change(pmctx->port, true);
2984 }
2985 }
2986
2987 /* Add port to router_list
2988 * list is maintained ordered by pointer value
2989 * and locked by br->multicast_lock and RCU
2990 */
/* Per-family wrapper: link @pmctx into the IPv4 router list. */
2991 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
2992 struct net_bridge_mcast_port *pmctx)
2993 {
2994 br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
2995 &brmctx->ip4_mc_router_list);
2996 }
2997
2998 /* Add port to router_list
2999 * list is maintained ordered by pointer value
3000 * and locked by br->multicast_lock and RCU
3001 */
/* Per-family wrapper: link @pmctx into the IPv6 router list; a no-op
 * when IPv6 is not built in.
 */
3002 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
3003 struct net_bridge_mcast_port *pmctx)
3004 {
3005 #if IS_ENABLED(CONFIG_IPV6)
3006 br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
3007 &brmctx->ip6_mc_router_list);
3008 #endif
3009 }
3010
/* (Re)mark a multicast router and refresh its expiry @timer.  With no
 * port context (@pmctx == NULL) the bridge itself is the router: only
 * the temporary-query mode re-arms the timer, signalling a bridge-level
 * router state change when neither family was a router before.  With a
 * port context, ports configured as disabled or permanent routers are
 * left alone; otherwise the port is (re)inserted into @mc_router_list
 * and its timer refreshed.
 */
3011 static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
3012 struct net_bridge_mcast_port *pmctx,
3013 struct timer_list *timer,
3014 struct hlist_node *rlist,
3015 struct hlist_head *mc_router_list)
3016 {
3017 unsigned long now = jiffies;
3018
3019 if (!br_multicast_ctx_should_use(brmctx, pmctx))
3020 return;
3021
3022 if (!pmctx) {
3023 if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
3024 if (!br_ip4_multicast_is_router(brmctx) &&
3025 !br_ip6_multicast_is_router(brmctx))
3026 br_mc_router_state_change(brmctx->br, true);
3027 mod_timer(timer, now + brmctx->multicast_querier_interval);
3028 }
3029 return;
3030 }
3031
3032 if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
3033 pmctx->multicast_router == MDB_RTR_TYPE_PERM)
3034 return;
3035
3036 br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
3037 mod_timer(timer, now + brmctx->multicast_querier_interval);
3038 }
3039
/* Per-family wrapper: mark an IPv4 multicast router.  Uses the port's
 * timer/list node when a port context is given, otherwise the bridge's.
 */
3040 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3041 struct net_bridge_mcast_port *pmctx)
3042 {
3043 struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3044 struct hlist_node *rlist = NULL;
3045
3046 if (pmctx) {
3047 timer = &pmctx->ip4_mc_router_timer;
3048 rlist = &pmctx->ip4_rlist;
3049 }
3050
3051 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3052 &brmctx->ip4_mc_router_list);
3053 }
3054
/* Per-family wrapper: mark an IPv6 multicast router; no-op without IPv6. */
3055 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
3056 struct net_bridge_mcast_port *pmctx)
3057 {
3058 #if IS_ENABLED(CONFIG_IPV6)
3059 struct timer_list *timer = &brmctx->ip6_mc_router_timer;
3060 struct hlist_node *rlist = NULL;
3061
3062 if (pmctx) {
3063 timer = &pmctx->ip6_mc_router_timer;
3064 rlist = &pmctx->ip6_rlist;
3065 }
3066
3067 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3068 &brmctx->ip6_mc_router_list);
3069 #endif
3070 }
3071
3072 static void
3073 br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
3074 struct net_bridge_mcast_port *pmctx,
3075 struct bridge_mcast_other_query *query,
3076 struct br_ip *saddr,
3077 unsigned long max_delay)
3078 {
3079 if (!br_ip4_multicast_select_querier(brmctx, pmctx->port, saddr->src.ip4))
3080 return;
3081
3082 br_multicast_update_query_timer(brmctx, query, max_delay);
3083 br_ip4_multicast_mark_router(brmctx, pmctx);
3084 }
3085
3086 #if IS_ENABLED(CONFIG_IPV6)
3087 static void
3088 br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
3089 struct net_bridge_mcast_port *pmctx,
3090 struct bridge_mcast_other_query *query,
3091 struct br_ip *saddr,
3092 unsigned long max_delay)
3093 {
3094 if (!br_ip6_multicast_select_querier(brmctx, pmctx->port, &saddr->src.ip6))
3095 return;
3096
3097 br_multicast_update_query_timer(brmctx, query, max_delay);
3098 br_ip6_multicast_mark_router(brmctx, pmctx);
3099 }
3100 #endif
3101
/* Handle a received IGMP query (v1/v2/v3) under br->multicast_lock.
 * The max response delay is derived per IGMP version; a general query
 * (group == 0) feeds the other-querier election, while a group-specific
 * query shortens the group and per-port membership timers so entries
 * expire last-member-count * max_delay after the query, unless they
 * already expire later.
 */
3102 static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
3103 struct net_bridge_mcast_port *pmctx,
3104 struct sk_buff *skb,
3105 u16 vid)
3106 {
3107 unsigned int transport_len = ip_transport_len(skb);
3108 const struct iphdr *iph = ip_hdr(skb);
3109 struct igmphdr *ih = igmp_hdr(skb);
3110 struct net_bridge_mdb_entry *mp;
3111 struct igmpv3_query *ih3;
3112 struct net_bridge_port_group *p;
3113 struct net_bridge_port_group __rcu **pp;
3114 struct br_ip saddr;
3115 unsigned long max_delay;
3116 unsigned long now = jiffies;
3117 __be32 group;
3118
3119 spin_lock(&brmctx->br->multicast_lock);
3120 if (!br_multicast_ctx_should_use(brmctx, pmctx))
3121 goto out;
3122
3123 group = ih->group;
3124
/* bare igmphdr length means an IGMPv1/v2 query */
3125 if (transport_len == sizeof(*ih)) {
3126 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
3127
/* IGMPv1 (code == 0): fixed 10s delay, treat as general query */
3128 if (!max_delay) {
3129 max_delay = 10 * HZ;
3130 group = 0;
3131 }
3132 } else if (transport_len >= sizeof(*ih3)) {
3133 ih3 = igmpv3_query_hdr(skb);
/* ignore source-specific queries and suppressed v3 queries */
3134 if (ih3->nsrcs ||
3135 (brmctx->multicast_igmp_version == 3 && group &&
3136 ih3->suppress))
3137 goto out;
3138
3139 max_delay = ih3->code ?
3140 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
3141 } else {
3142 goto out;
3143 }
3144
3145 if (!group) {
3146 saddr.proto = htons(ETH_P_IP);
3147 saddr.src.ip4 = iph->saddr;
3148
3149 br_ip4_multicast_query_received(brmctx, pmctx,
3150 &brmctx->ip4_other_query,
3151 &saddr, max_delay);
3152 goto out;
3153 }
3154
3155 mp = br_mdb_ip4_get(brmctx->br, group, vid);
3156 if (!mp)
3157 goto out;
3158
3159 max_delay *= brmctx->multicast_last_member_count;
3160
/* shorten the host-joined group timer if it would outlive the query */
3161 if (mp->host_joined &&
3162 (timer_pending(&mp->timer) ?
3163 time_after(mp->timer.expires, now + max_delay) :
3164 try_to_del_timer_sync(&mp->timer) >= 0))
3165 mod_timer(&mp->timer, now + max_delay);
3166
3167 for (pp = &mp->ports;
3168 (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3169 pp = &p->next) {
3170 if (timer_pending(&p->timer) ?
3171 time_after(p->timer.expires, now + max_delay) :
3172 try_to_del_timer_sync(&p->timer) >= 0 &&
3173 (brmctx->multicast_igmp_version == 2 ||
3174 p->filter_mode == MCAST_EXCLUDE))
3175 mod_timer(&p->timer, now + max_delay);
3176 }
3177
3178 out:
3179 spin_unlock(&brmctx->br->multicast_lock);
3180 }
3181
3182 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query (v1 or v2) under br->multicast_lock; IPv6
 * analogue of br_ip4_multicast_query().  A general query (unspecified
 * group address) feeds the other-querier election; a group-specific
 * query shortens the group and per-port membership timers.
 * Returns 0 on success or -EINVAL on a truncated packet.
 */
3183 static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
3184 struct net_bridge_mcast_port *pmctx,
3185 struct sk_buff *skb,
3186 u16 vid)
3187 {
3188 unsigned int transport_len = ipv6_transport_len(skb);
3189 struct mld_msg *mld;
3190 struct net_bridge_mdb_entry *mp;
3191 struct mld2_query *mld2q;
3192 struct net_bridge_port_group *p;
3193 struct net_bridge_port_group __rcu **pp;
3194 struct br_ip saddr;
3195 unsigned long max_delay;
3196 unsigned long now = jiffies;
3197 unsigned int offset = skb_transport_offset(skb);
3198 const struct in6_addr *group = NULL;
3199 bool is_general_query;
3200 int err = 0;
3201
3202 spin_lock(&brmctx->br->multicast_lock);
3203 if (!br_multicast_ctx_should_use(brmctx, pmctx))
3204 goto out;
3205
/* bare mld_msg length means an MLDv1 query */
3206 if (transport_len == sizeof(*mld)) {
3207 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
3208 err = -EINVAL;
3209 goto out;
3210 }
3211 mld = (struct mld_msg *) icmp6_hdr(skb);
3212 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
3213 if (max_delay)
3214 group = &mld->mld_mca;
3215 } else {
3216 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
3217 err = -EINVAL;
3218 goto out;
3219 }
3220 mld2q = (struct mld2_query *)icmp6_hdr(skb);
3221 if (!mld2q->mld2q_nsrcs)
3222 group = &mld2q->mld2q_mca;
/* ignore suppressed MLDv2 group-specific queries */
3223 if (brmctx->multicast_mld_version == 2 &&
3224 !ipv6_addr_any(&mld2q->mld2q_mca) &&
3225 mld2q->mld2q_suppress)
3226 goto out;
3227
3228 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
3229 }
3230
3231 is_general_query = group && ipv6_addr_any(group);
3232
3233 if (is_general_query) {
3234 saddr.proto = htons(ETH_P_IPV6);
3235 saddr.src.ip6 = ipv6_hdr(skb)->saddr;
3236
3237 br_ip6_multicast_query_received(brmctx, pmctx,
3238 &brmctx->ip6_other_query,
3239 &saddr, max_delay);
3240 goto out;
3241 } else if (!group) {
3242 goto out;
3243 }
3244
3245 mp = br_mdb_ip6_get(brmctx->br, group, vid);
3246 if (!mp)
3247 goto out;
3248
3249 max_delay *= brmctx->multicast_last_member_count;
/* shorten the host-joined group timer if it would outlive the query */
3250 if (mp->host_joined &&
3251 (timer_pending(&mp->timer) ?
3252 time_after(mp->timer.expires, now + max_delay) :
3253 try_to_del_timer_sync(&mp->timer) >= 0))
3254 mod_timer(&mp->timer, now + max_delay);
3255
3256 for (pp = &mp->ports;
3257 (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3258 pp = &p->next) {
3259 if (timer_pending(&p->timer) ?
3260 time_after(p->timer.expires, now + max_delay) :
3261 try_to_del_timer_sync(&p->timer) >= 0 &&
3262 (brmctx->multicast_mld_version == 1 ||
3263 p->filter_mode == MCAST_EXCLUDE))
3264 mod_timer(&p->timer, now + max_delay);
3265 }
3266
3267 out:
3268 spin_unlock(&brmctx->br->multicast_lock);
3269 return err;
3270 }
3271 #endif
3272
/* Common IGMP/MLD leave processing.  With fast-leave enabled on the port
 * the matching non-permanent port group is deleted immediately; otherwise
 * (when we may act as querier) a group-specific query is sent and the
 * group/port membership timers are shortened to
 * last_member_count * last_member_interval so the entry expires soon
 * unless a report refreshes it.  Runs under br->multicast_lock.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	/* Fast leave: remove the matching port group right away rather than
	 * waiting for its membership timer; permanent entries are kept.
	 */
	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* Another querier is active; let it handle the leave. */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* NOTE(review): pmctx is dereferenced here without a NULL
		 * check while the !pmctx case is only handled further below;
		 * confirm callers cannot reach this branch with pmctx == NULL.
		 */
		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	/* Leave from the bridge itself: only lower the host-joined timer. */
	if (!pmctx) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* Lower the membership timer of the port the leave arrived on. */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3374
/* IGMP leave wrapper: builds the IPv4 br_ip key and forwards to the
 * protocol-independent br_multicast_leave_group().  Link-local groups
 * (224.0.0.x) are never snooped, so leaves for them are ignored.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	/* Port-level own query when a port context exists, else bridge-level. */
	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}
3398
#if IS_ENABLED(CONFIG_IPV6)
/* MLD "done" (leave) wrapper: builds the IPv6 br_ip key and forwards to
 * br_multicast_leave_group().  The link-local all-nodes group (ff02::1)
 * is never snooped, so leaves for it are ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	/* Port-level own query when a port context exists, else bridge-level. */
	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif
3424
/* Bump the per-CPU IGMP/MLD parse-error counter for @p (or the bridge
 * itself when @p is NULL), keyed by the ethertype of the bad packet.
 * No-op unless multicast stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	/* Port stats when the error came in on a port, else bridge stats. */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}
3457
/* Inspect a PIM packet; a valid PIMv2 Hello marks the ingress port as a
 * multicast router port (PIM routers forward multicast traffic).
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3474
/* Handle an IPv4 Multicast Router Discovery advertisement: mark the
 * ingress port as a router port.  Returns -ENOMSG when the packet is not
 * an IGMP MRD advertisement, 0 otherwise.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}
3489
/* IPv4 multicast snooping entry point: validate the packet with
 * ip_mc_check_igmp() and dispatch IGMP reports/queries/leaves to the
 * matching handlers.  Non-IGMP multicast (-ENOMSG) is not an error: it
 * is flagged mrouters_only (unless link-local) and may still be a PIM
 * hello or MRD advertisement.  Per-packet stats are updated on exit.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Not IGMP: restrict forwarding of non-link-local multicast
		 * to router ports, and sniff PIM/MRD router announcements.
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3545
3546 #if IS_ENABLED(CONFIG_IPV6)
/* Handle an IPv6 Multicast Router Discovery advertisement: mark the
 * ingress port as a router port.  Silently ignores other ICMPv6 types.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
3558
/* IPv6 multicast snooping entry point: validate with ipv6_mc_check_mld()
 * and dispatch MLD reports/queries/leaves.  -ENOMSG/-ENODATA mean
 * "not MLD" and are not errors: such traffic is flagged mrouters_only
 * (unless all-nodes) and -ENODATA to the all-snoopers address may be an
 * MRD advertisement.  Per-packet stats are updated on exit.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3612 #endif
3613
/* Top-level snooping receive hook.  When per-vlan snooping is enabled,
 * *brmctx/*pmctx are retargeted to the vlan's multicast contexts (which
 * is why they are passed by reference) before dispatching to the IPv4 or
 * IPv6 handler.  Returns the handler's result, or 0 when snooping is
 * disabled globally or for the packet's vlan.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* Snooping disabled globally for this vlan: nothing to do. */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
3660
/* Own-query timer callback body: send the next (startup) query and clear
 * the cached querier port.  Skipped when the context belongs to a vlan
 * with snooping disabled.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	/* Count startup queries until the configured amount has been sent. */
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3677
/* Timer callback: IPv4 own-query timer fired for this multicast context. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}
3686
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: IPv6 own-query timer fired for this multicast context. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif
3697
/* Deferred garbage collection: detach the pending-deletion list under the
 * multicast lock, then free the entries outside of it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
3710
/* Initialize a bridge multicast context (global when @vlan is NULL,
 * per-vlan otherwise): default protocol intervals and versions, querier
 * state and all IGMP/MLD timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	/* Default timing values (in jiffies), matching the usual IGMP/MLD
	 * protocol defaults (e.g. 125s query interval, 260s membership).
	 */
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port = NULL;
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port = NULL;
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
3752
/* Tear down a multicast context by stopping (synchronously) all its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
3757
/* One-time bridge-level multicast initialization: default hash limit,
 * the global multicast context, option defaults, lock and GC machinery.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
3772
/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * MRD messages addressed to snooping switches reach us.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3783
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* No-op stub when IPv6 is not built. */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
3797
/* Join both IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
3803
/* Leave the IPv4 all-snoopers group; WARNs if the inet device vanished,
 * which would indicate an unbalanced join/leave.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3814
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* No-op stub when IPv6 is not built. */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
3828
/* Leave both IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
3834
/* Restart an own-query cycle: reset the startup counter and, if snooping
 * is enabled, fire the query timer immediately.
 */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}
3845
/* Kick off the IPv4 (and, when built, IPv6) own-query cycles for @brmctx. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
3853
/* Bridge-up hook: start querying on the global context and, when per-vlan
 * snooping is enabled, on every enabled bridge-vlan context as well.
 * Must run under RTNL.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				/* Only bridge-level vlan entries with snooping on. */
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	}

	__br_multicast_open(&br->multicast_ctx);
}
3877
/* Synchronously stop all router/querier/own-query timers of a context. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
3889
/* Toggle multicast snooping for a single vlan (bridge-level master entry
 * or a port vlan).  Flips BR_VLFLAG_MCAST_ENABLED under the multicast
 * lock and starts/stops the corresponding context.  Called under RTNL.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* Ignore non-bridge entries and enables while the vlan's
		 * global mcast state is disabled.
		 */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
3934
/* Propagate a vlan snooping toggle from the master (bridge) vlan entry to
 * the matching vlan on every bridge port.
 */
void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}
}
3951
/* Enable/disable per-vlan multicast snooping for the whole bridge.
 * Requires vlan filtering to be on; switches between the global
 * (non-vlan) multicast contexts and the per-vlan ones and toggles every
 * existing vlan.  Returns 0 or -EINVAL with an extack message.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	/* Switch every vlan's own snooping state to match. */
	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
3990
/* Toggle the global (administrative) multicast state of a vlan and
 * propagate it to all ports.  Returns true if the state changed.
 * Must run under RTNL.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4006
/* Bridge-down hook: stop the global context's timers and, when per-vlan
 * snooping is enabled, those of every enabled bridge-vlan context.
 * Mirror of br_multicast_open(); must run under RTNL.
 */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	}

	__br_multicast_stop(&br->multicast_ctx);
}
4030
/* Bridge device teardown: delete every MDB entry, flush the pending GC
 * list, deinit the global context and wait for in-flight GC/RCU work.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	/* Make sure all RCU callbacks freeing mdb memory have completed. */
	rcu_barrier();
}
4049
/* Netlink/sysfs handler: set the bridge's own multicast router mode.
 * DISABLED/PERM stop the learning timers and pin the state; TEMP_QUERY
 * returns to query-based learning.  Returns 0 or -EINVAL for an
 * unsupported value.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
4080
/* After a router-port removal attempt, send the RTM_DELMDB notification
 * once the port is on neither the IPv4 nor the IPv6 router list.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4104
/* Netlink/sysfs handler: set a port's multicast router mode.  Re-setting
 * MDB_RTR_TYPE_TEMP refreshes the temporary router timers; changing the
 * mode adds/removes the port from the router lists accordingly.
 * Returns 0 or -EINVAL for an unsupported value.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &p->br->multicast_ctx;
	struct net_bridge_mcast_port *pmctx = &p->multicast_ctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	spin_lock(&p->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		/* br_ip6_multicast_rport_del() is called outside the #if;
		 * NOTE(review): presumably a no-op stub exists when IPv6 is
		 * not built — confirm in br_private.h.
		 */
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&p->br->multicast_lock);

	return err;
}
4167
/* (Re)start the own-query cycle for @query and re-enable the matching
 * own query on every active port — using the per-vlan port contexts when
 * @brmctx is a vlan context, the plain port contexts otherwise.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group(port), brmctx->vlan->vid);
			/* Skip ports without this vlan or with it stopped. */
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		/* @query identifies which address family is being started. */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4213
/* Netlink/sysfs handler: enable or disable multicast snooping bridge-wide.
 * Notifies switchdev drivers, toggles BROPT_MULTICAST_ENABLED, restarts
 * querying and port contexts on enable, and joins/leaves the all-snoopers
 * groups outside the lock (see comment below).  Returns 0 or a
 * switchdev error.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* -EOPNOTSUPP means no offloading driver cares; not an error. */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4270
/* Exported helper: is multicast snooping enabled on this bridge device? */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
4278
/* Exported helper: does the bridge itself currently act as a multicast
 * router?  Takes the multicast lock to read consistent state.
 */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
4290
/* Netlink/sysfs handler: enable or disable the bridge acting as IGMP/MLD
 * querier.  On enable, arm the other-querier grace periods and start the
 * own-query cycles for both address families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	/* Give a possible foreign querier time to announce itself before
	 * we start acting as one.
	 */
	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
4325
4326 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
4327 {
4328 /* Currently we support only version 2 and 3 */
4329 switch (val) {
4330 case 2:
4331 case 3:
4332 break;
4333 default:
4334 return -EINVAL;
4335 }
4336
4337 spin_lock_bh(&br->multicast_lock);
4338 br->multicast_ctx.multicast_igmp_version = val;
4339 spin_unlock_bh(&br->multicast_lock);
4340
4341 return 0;
4342 }
4343
4344 #if IS_ENABLED(CONFIG_IPV6)
4345 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
4346 {
4347 /* Currently we support version 1 and 2 */
4348 switch (val) {
4349 case 1:
4350 case 2:
4351 break;
4352 default:
4353 return -EINVAL;
4354 }
4355
4356 spin_lock_bh(&br->multicast_lock);
4357 br->multicast_ctx.multicast_mld_version = val;
4358 spin_unlock_bh(&br->multicast_lock);
4359
4360 return 0;
4361 }
4362 #endif
4363
4364 /**
4365 * br_multicast_list_adjacent - Returns snooped multicast addresses
4366 * @dev: The bridge port adjacent to which to retrieve addresses
4367 * @br_ip_list: The list to store found, snooped multicast IP addresses in
4368 *
4369 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
4370 * snooping feature on all bridge ports of dev's bridge device, excluding
4371 * the addresses from dev itself.
4372 *
4373 * Returns the number of items added to br_ip_list.
4374 *
4375 * Notes:
4376 * - br_ip_list needs to be initialized by caller
4377 * - br_ip_list might contain duplicates in the end
4378 * (needs to be taken care of by caller)
4379 * - br_ip_list needs to be freed by caller
4380 */
4381 int br_multicast_list_adjacent(struct net_device *dev,
4382 struct list_head *br_ip_list)
4383 {
4384 struct net_bridge *br;
4385 struct net_bridge_port *port;
4386 struct net_bridge_port_group *group;
4387 struct br_ip_list *entry;
4388 int count = 0;
4389
4390 rcu_read_lock();
4391 if (!br_ip_list || !netif_is_bridge_port(dev))
4392 goto unlock;
4393
4394 port = br_port_get_rcu(dev);
4395 if (!port || !port->br)
4396 goto unlock;
4397
4398 br = port->br;
4399
4400 list_for_each_entry_rcu(port, &br->port_list, list) {
4401 if (!port->dev || port->dev == dev)
4402 continue;
4403
4404 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
4405 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
4406 if (!entry)
4407 goto unlock;
4408
4409 entry->addr = group->key.addr;
4410 list_add(&entry->list, br_ip_list);
4411 count++;
4412 }
4413 }
4414
4415 unlock:
4416 rcu_read_unlock();
4417 return count;
4418 }
4419 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4420
4421 /**
4422 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4423 * @dev: The bridge port providing the bridge on which to check for a querier
4424 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4425 *
4426 * Checks whether the given interface has a bridge on top and if so returns
4427 * true if a valid querier exists anywhere on the bridged link layer.
4428 * Otherwise returns false.
4429 */
4430 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
4431 {
4432 struct net_bridge *br;
4433 struct net_bridge_port *port;
4434 struct ethhdr eth;
4435 bool ret = false;
4436
4437 rcu_read_lock();
4438 if (!netif_is_bridge_port(dev))
4439 goto unlock;
4440
4441 port = br_port_get_rcu(dev);
4442 if (!port || !port->br)
4443 goto unlock;
4444
4445 br = port->br;
4446
4447 memset(&eth, 0, sizeof(eth));
4448 eth.h_proto = htons(proto);
4449
4450 ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);
4451
4452 unlock:
4453 rcu_read_unlock();
4454 return ret;
4455 }
4456 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4457
4458 /**
4459 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
4460 * @dev: The bridge port adjacent to which to check for a querier
4461 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4462 *
4463 * Checks whether the given interface has a bridge on top and if so returns
4464 * true if a selected querier is behind one of the other ports of this
4465 * bridge. Otherwise returns false.
4466 */
4467 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
4468 {
4469 struct net_bridge_mcast *brmctx;
4470 struct net_bridge *br;
4471 struct net_bridge_port *port;
4472 bool ret = false;
4473
4474 rcu_read_lock();
4475 if (!netif_is_bridge_port(dev))
4476 goto unlock;
4477
4478 port = br_port_get_rcu(dev);
4479 if (!port || !port->br)
4480 goto unlock;
4481
4482 br = port->br;
4483 brmctx = &br->multicast_ctx;
4484
4485 switch (proto) {
4486 case ETH_P_IP:
4487 if (!timer_pending(&brmctx->ip4_other_query.timer) ||
4488 rcu_dereference(brmctx->ip4_querier.port) == port)
4489 goto unlock;
4490 break;
4491 #if IS_ENABLED(CONFIG_IPV6)
4492 case ETH_P_IPV6:
4493 if (!timer_pending(&brmctx->ip6_other_query.timer) ||
4494 rcu_dereference(brmctx->ip6_querier.port) == port)
4495 goto unlock;
4496 break;
4497 #endif
4498 default:
4499 goto unlock;
4500 }
4501
4502 ret = true;
4503 unlock:
4504 rcu_read_unlock();
4505 return ret;
4506 }
4507 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
4508
4509 /**
4510 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
4511 * @dev: The bridge port adjacent to which to check for a multicast router
4512 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4513 *
4514 * Checks whether the given interface has a bridge on top and if so returns
4515 * true if a multicast router is behind one of the other ports of this
4516 * bridge. Otherwise returns false.
4517 */
4518 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
4519 {
4520 struct net_bridge_mcast_port *pmctx;
4521 struct net_bridge_mcast *brmctx;
4522 struct net_bridge_port *port;
4523 bool ret = false;
4524
4525 rcu_read_lock();
4526 port = br_port_get_check_rcu(dev);
4527 if (!port)
4528 goto unlock;
4529
4530 brmctx = &port->br->multicast_ctx;
4531 switch (proto) {
4532 case ETH_P_IP:
4533 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
4534 ip4_rlist) {
4535 if (pmctx->port == port)
4536 continue;
4537
4538 ret = true;
4539 goto unlock;
4540 }
4541 break;
4542 #if IS_ENABLED(CONFIG_IPV6)
4543 case ETH_P_IPV6:
4544 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
4545 ip6_rlist) {
4546 if (pmctx->port == port)
4547 continue;
4548
4549 ret = true;
4550 goto unlock;
4551 }
4552 break;
4553 #endif
4554 default:
4555 /* when compiled without IPv6 support, be conservative and
4556 * always assume presence of an IPv6 multicast router
4557 */
4558 ret = true;
4559 }
4560
4561 unlock:
4562 rcu_read_unlock();
4563 return ret;
4564 }
4565 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
4566
/* Account one IGMP/MLD packet in the per-cpu multicast statistics.
 * @stats: per-cpu counter block to update
 * @skb:   the packet being counted; its IP/IPv6 headers are parsed to
 *         size the transport payload
 * @type:  IGMP/ICMPv6 message type of the packet
 * @dir:   direction index into each counter pair (BR_MCAST_DIR_RX/_TX)
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header length */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* IGMPv3 queries carry extra fields, so anything
			 * other than a bare struct igmphdr is counted as v3
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				/* header not linearizable: count nothing */
				if (!ih)
					break;
				/* an IGMPv1 query carries a zero Max Resp
				 * Time (code) field (RFC 3376, sec. 7.1)
				 */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* full packet length minus the network headers (fixed IPv6
		 * header plus any extension headers) leaves the MLD length
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are larger than struct mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
4637
4638 void br_multicast_count(struct net_bridge *br,
4639 const struct net_bridge_port *p,
4640 const struct sk_buff *skb, u8 type, u8 dir)
4641 {
4642 struct bridge_mcast_stats __percpu *stats;
4643
4644 /* if multicast_disabled is true then igmp type can't be set */
4645 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
4646 return;
4647
4648 if (p)
4649 stats = p->mcast_stats;
4650 else
4651 stats = br->mcast_stats;
4652 if (WARN_ON(!stats))
4653 return;
4654
4655 br_mcast_stats_add(stats, skb, type, dir);
4656 }
4657
4658 int br_multicast_init_stats(struct net_bridge *br)
4659 {
4660 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4661 if (!br->mcast_stats)
4662 return -ENOMEM;
4663
4664 return 0;
4665 }
4666
/* Release the per-cpu statistics allocated by br_multicast_init_stats().
 * free_percpu() tolerates a NULL pointer, so this is safe even if the
 * allocation never happened.
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
4671
4672 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
4673 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
4674 {
4675 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
4676 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
4677 }
4678
/* Aggregate the per-cpu multicast statistics into @dest.
 * @br:   the bridge whose counters to read
 * @p:    the port to aggregate for, or NULL for the bridge-wide counters
 * @dest: receives the summed statistics; always zeroed first
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	/* counters must have been allocated at init time */
	if (WARN_ON(!stats))
		return;

	/* sum into a local copy first, then publish to @dest in one go */
	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* u64_stats retry loop: re-read this cpu's counters until a
		 * snapshot is taken without a concurrent writer update
		 */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
4724
4725 int br_mdb_hash_init(struct net_bridge *br)
4726 {
4727 int err;
4728
4729 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4730 if (err)
4731 return err;
4732
4733 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4734 if (err) {
4735 rhashtable_destroy(&br->sg_port_tbl);
4736 return err;
4737 }
4738
4739 return 0;
4740 }
4741
/* Tear down the rhashtables created by br_mdb_hash_init(). Per the
 * rhashtable API, callers must ensure no concurrent table access.
 */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}