// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)              \
do {                                            \
        if (level <= NEIGH_DEBUG)               \
                pr_debug(fmt, ##__VA_ARGS__);   \
} while (0)

#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
                           u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
                                    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans of and updates to the hash buckets MUST be made under
     this lock.
   - NOTHING clever should be done under this lock: no callbacks into
     protocol backends, no attempts to send anything to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase its
     reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock is also used to protect other
   entry fields:
   - the timer
   - the resolution queue

   Again, nothing clever should be done under neigh->lock; the most
   complicated operation we allow there is dev->hard_header. It is
   assumed that dev->hard_header is simple and does not call back into
   the neighbour tables.
 */
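
/* Illustrative sketch (not part of this file) of the locking rule above:
 * take a reference under tbl->lock, drop the lock, and only then do any
 * non-trivial work on the entry. pick_some_entry() and do_slow_work()
 * are hypothetical helpers standing in for a real bucket scan.
 *
 *      struct neighbour *n;
 *
 *      write_lock_bh(&tbl->lock);
 *      n = pick_some_entry(tbl);               // hypothetical helper
 *      if (n)
 *              neigh_hold(n);                  // pin the entry
 *      write_unlock_bh(&tbl->lock);
 *      if (n) {
 *              do_slow_work(n);                // e.g. send a probe
 *              neigh_release(n);               // drop the pin
 *      }
 */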

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
        trace_neigh_cleanup_and_release(neigh, 0);
        __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
        neigh_release(neigh);
}

/*
 * It is a uniform random distribution in the interval
 * (1/2)*base ... (3/2)*base. It corresponds to the default IPv6
 * settings and is not overridable, because it is a really
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
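
/* Example of the jitter above (illustrative): with base == HZ (i.e. a
 * 1 s BASE_REACHABLE_TIME), the result is (base >> 1) plus a random
 * value below base, i.e. uniform in [HZ/2, 3*HZ/2) -- anywhere from
 * half a second to one and a half seconds.
 */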

static void neigh_mark_dead(struct neighbour *n)
{
        n->dead = 1;
        if (!list_empty(&n->gc_list)) {
                list_del_init(&n->gc_list);
                atomic_dec(&n->tbl->gc_entries);
        }
        if (!list_empty(&n->managed_list))
                list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
        bool on_gc_list, exempt_from_gc;

        write_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;

        /* remove from the gc list if new state is permanent or if neighbor
         * is externally learned; otherwise entry should be on the gc list
         */
        exempt_from_gc = n->nud_state & NUD_PERMANENT ||
                         n->flags & NTF_EXT_LEARNED;
        on_gc_list = !list_empty(&n->gc_list);

        if (exempt_from_gc && on_gc_list) {
                list_del_init(&n->gc_list);
                atomic_dec(&n->tbl->gc_entries);
        } else if (!exempt_from_gc && !on_gc_list) {
                /* add entries to the tail; cleaning removes from the front */
                list_add_tail(&n->gc_list, &n->tbl->gc_list);
                atomic_inc(&n->tbl->gc_entries);
        }
out:
        write_unlock(&n->lock);
        write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
        bool on_managed_list, add_to_managed;

        write_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;

        add_to_managed = n->flags & NTF_MANAGED;
        on_managed_list = !list_empty(&n->managed_list);

        if (!add_to_managed && on_managed_list)
                list_del_init(&n->managed_list);
        else if (add_to_managed && !on_managed_list)
                list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
        write_unlock(&n->lock);
        write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
                               bool *gc_update, bool *managed_update)
{
        u32 ndm_flags, old_flags = neigh->flags;

        if (!(flags & NEIGH_UPDATE_F_ADMIN))
                return;

        ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
        ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

        if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
                if (ndm_flags & NTF_EXT_LEARNED)
                        neigh->flags |= NTF_EXT_LEARNED;
                else
                        neigh->flags &= ~NTF_EXT_LEARNED;
                *notify = 1;
                *gc_update = true;
        }
        if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
                if (ndm_flags & NTF_MANAGED)
                        neigh->flags |= NTF_MANAGED;
                else
                        neigh->flags &= ~NTF_MANAGED;
                *notify = 1;
                *managed_update = true;
        }
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
                      struct neigh_table *tbl)
{
        bool retval = false;

        write_lock(&n->lock);
        if (refcount_read(&n->refcnt) == 1) {
                struct neighbour *neigh;

                neigh = rcu_dereference_protected(n->next,
                                                  lockdep_is_held(&tbl->lock));
                rcu_assign_pointer(*np, neigh);
                neigh_mark_dead(n);
                retval = true;
        }
        write_unlock(&n->lock);
        if (retval)
                neigh_cleanup_and_release(n);
        return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
        struct neigh_hash_table *nht;
        void *pkey = ndel->primary_key;
        u32 hash_val;
        struct neighbour *n;
        struct neighbour __rcu **np;

        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
        hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
        hash_val = hash_val >> (32 - nht->hash_shift);

        np = &nht->hash_buckets[hash_val];
        while ((n = rcu_dereference_protected(*np,
                                              lockdep_is_held(&tbl->lock)))) {
                if (n == ndel)
                        return neigh_del(n, np, tbl);
                np = &n->next;
        }
        return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
        int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
        unsigned long tref = jiffies - 5 * HZ;
        struct neighbour *n, *tmp;
        int shrunk = 0;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);

        list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
                if (refcount_read(&n->refcnt) == 1) {
                        bool remove = false;

                        write_lock(&n->lock);
                        if ((n->nud_state == NUD_FAILED) ||
                            (n->nud_state == NUD_NOARP) ||
                            (tbl->is_multicast &&
                             tbl->is_multicast(n->primary_key)) ||
                            !time_in_range(n->updated, tref, jiffies))
                                remove = true;
                        write_unlock(&n->lock);

                        if (remove && neigh_remove_one(n, tbl))
                                shrunk++;
                        if (shrunk >= max_clean)
                                break;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        /* Use a safe distance from the jiffies - LONG_MAX point while the
         * timer is running in DELAY/PROBE state, but still show user space
         * large times in the past.
         */
        unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);

        neigh_hold(n);
        if (!time_in_range(n->confirmed, mint, jiffies))
                n->confirmed = mint;
        if (time_before(n->used, n->confirmed))
                n->used = n->confirmed;
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}

static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
                                                   int family)
{
        switch (family) {
        case AF_INET:
                return __in_dev_arp_parms_get_rcu(dev);
        case AF_INET6:
                return __in6_dev_nd_parms_get_rcu(dev);
        }
        return NULL;
}

static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
        struct neigh_parms *p;

        rcu_read_lock();
        p = neigh_get_dev_parms_rcu(dev, family);
        if (p)
                p->qlen--;
        rcu_read_unlock();
}

static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
                               int family)
{
        struct sk_buff_head tmp;
        unsigned long flags;
        struct sk_buff *skb;

        skb_queue_head_init(&tmp);
        spin_lock_irqsave(&list->lock, flags);
        skb = skb_peek(list);
        while (skb != NULL) {
                struct sk_buff *skb_next = skb_peek_next(skb, list);
                struct net_device *dev = skb->dev;

                if (net == NULL || net_eq(dev_net(dev), net)) {
                        neigh_parms_qlen_dec(dev, family);
                        __skb_unlink(skb, list);
                        __skb_queue_tail(&tmp, skb);
                }
                skb = skb_next;
        }
        spin_unlock_irqrestore(&list->lock, flags);

        while ((skb = __skb_dequeue(&tmp))) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
                            bool skip_perm)
{
        int i;
        struct neigh_hash_table *nht;

        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        for (i = 0; i < (1 << nht->hash_shift); i++) {
                struct neighbour *n;
                struct neighbour __rcu **np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        if (skip_perm && n->nud_state & NUD_PERMANENT) {
                                np = &n->next;
                                continue;
                        }
                        rcu_assign_pointer(*np,
                                   rcu_dereference_protected(n->next,
                                                lockdep_is_held(&tbl->lock)));
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        neigh_mark_dead(n);
                        if (refcount_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy the neighbour entry,
                                   but someone still uses it.

                                   The destruction will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to a safe state.
                                 */
                                __skb_queue_purge(&n->arp_queue);
                                n->arp_queue_len_bytes = 0;
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                neigh_dbg(2, "neigh %p is stray\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, false);
        write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
                          bool skip_perm)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);
        pneigh_ifdown_and_unlock(tbl, dev);
        pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
                           tbl->family);
        if (skb_queue_empty_lockless(&tbl->proxy_queue))
                del_timer_sync(&tbl->proxy_timer);
        return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
        __neigh_ifdown(tbl, dev, true);
        return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        __neigh_ifdown(tbl, dev, false);
        return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
                                     struct net_device *dev,
                                     u32 flags, bool exempt_from_gc)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        if (exempt_from_gc)
                goto do_alloc;

        entries = atomic_inc_return(&tbl->gc_entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3) {
                        net_info_ratelimited("%s: neighbor table overflow!\n",
                                             tbl->id);
                        NEIGH_CACHE_STAT_INC(tbl, table_fulls);
                        goto out_entries;
                }
        }

do_alloc:
        n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
        if (!n)
                goto out_entries;

        __skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        seqlock_init(&n->ha_lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->flags          = flags;
        seqlock_init(&n->hh.hh_lock);
        n->parms          = neigh_parms_clone(&tbl->parms);
        timer_setup(&n->timer, neigh_timer_handler, 0);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
        refcount_set(&n->refcnt, 1);
        n->dead           = 1;
        INIT_LIST_HEAD(&n->gc_list);
        INIT_LIST_HEAD(&n->managed_list);

        atomic_inc(&tbl->entries);
out:
        return n;

out_entries:
        if (!exempt_from_gc)
                atomic_dec(&tbl->gc_entries);
        goto out;
}
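
/* Illustrative note (not from this file, assuming the usual IPv4 ARP
 * defaults of gc_thresh1/2/3 = 128/512/1024): at 600 gc-eligible entries,
 * crossing gc_thresh2 triggers a synchronous neigh_forced_gc() only if the
 * last flush was more than 5 s ago; allocation is refused outright only
 * when entries >= gc_thresh3 and forced GC cannot make room.
 */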

static void neigh_get_hash_rnd(u32 *x)
{
        *x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
        size_t size = (1 << shift) * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
        struct neighbour __rcu **buckets;
        int i;

        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
                return NULL;
        if (size <= PAGE_SIZE) {
                buckets = kzalloc(size, GFP_ATOMIC);
        } else {
                buckets = (struct neighbour __rcu **)
                          __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                           get_order(size));
                kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
        }
        if (!buckets) {
                kfree(ret);
                return NULL;
        }
        ret->hash_buckets = buckets;
        ret->hash_shift = shift;
        for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
                neigh_get_hash_rnd(&ret->hash_rnd[i]);
        return ret;
}
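
/* Sizing example (illustrative): the initial table is allocated with
 * shift == 3, i.e. (1 << 3) = 8 buckets; with 8-byte pointers that is
 * 64 bytes, well under PAGE_SIZE, so the kzalloc() path is taken. Only
 * once a grown table needs more than a page of bucket pointers does the
 * __get_free_pages() path kick in.
 */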

static void neigh_hash_free_rcu(struct rcu_head *head)
{
        struct neigh_hash_table *nht = container_of(head,
                                                    struct neigh_hash_table,
                                                    rcu);
        size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
        struct neighbour __rcu **buckets = nht->hash_buckets;

        if (size <= PAGE_SIZE) {
                kfree(buckets);
        } else {
                kmemleak_free(buckets);
                free_pages((unsigned long)buckets, get_order(size));
        }
        kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
                                                unsigned long new_shift)
{
        unsigned int i, hash;
        struct neigh_hash_table *new_nht, *old_nht;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        old_nht = rcu_dereference_protected(tbl->nht,
                                            lockdep_is_held(&tbl->lock));
        new_nht = neigh_hash_alloc(new_shift);
        if (!new_nht)
                return old_nht;

        for (i = 0; i < (1 << old_nht->hash_shift); i++) {
                struct neighbour *n, *next;

                for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
                                                   lockdep_is_held(&tbl->lock));
                     n != NULL;
                     n = next) {
                        hash = tbl->hash(n->primary_key, n->dev,
                                         new_nht->hash_rnd);

                        hash >>= (32 - new_nht->hash_shift);
                        next = rcu_dereference_protected(n->next,
                                                lockdep_is_held(&tbl->lock));

                        rcu_assign_pointer(n->next,
                                           rcu_dereference_protected(
                                                new_nht->hash_buckets[hash],
                                                lockdep_is_held(&tbl->lock)));
                        rcu_assign_pointer(new_nht->hash_buckets[hash], n);
                }
        }

        rcu_assign_pointer(tbl->nht, new_nht);
        call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
        return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        rcu_read_lock();
        n = __neigh_lookup_noref(tbl, pkey, dev);
        if (n) {
                if (!refcount_inc_not_zero(&n->refcnt))
                        n = NULL;
                NEIGH_CACHE_STAT_INC(tbl, hits);
        }

        rcu_read_unlock();
        return n;
}
EXPORT_SYMBOL(neigh_lookup);
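
/* Usage sketch (illustrative): resolve an IPv4 next hop on a device and
 * drop the reference when done. arp_tbl is the ARP table defined in
 * net/ipv4/arp.c.
 *
 *      __be32 gw = htonl(0xc0000201);          // 192.0.2.1
 *      struct neighbour *n;
 *
 *      n = neigh_lookup(&arp_tbl, &gw, dev);
 *      if (n) {
 *              ... inspect n->nud_state, n->ha ...
 *              neigh_release(n);               // paired with the lookup's hold
 *      }
 */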

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
                struct net_device *dev, u32 flags,
                bool exempt_from_gc, bool want_ref)
{
        u32 hash_val, key_len = tbl->key_len;
        struct neighbour *n1, *rc, *n;
        struct neigh_hash_table *nht;
        int error;

        n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
        trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        if (dev->netdev_ops->ndo_neigh_construct) {
                error = dev->netdev_ops->ndo_neigh_construct(dev, n);
                if (error < 0) {
                        rc = ERR_PTR(error);
                        goto out_neigh_release;
                }
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
                nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

        hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
                                            lockdep_is_held(&tbl->lock));
             n1 != NULL;
             n1 = rcu_dereference_protected(n1->next,
                        lockdep_is_held(&tbl->lock))) {
                if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
                        if (want_ref)
                                neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->dead = 0;
        if (!exempt_from_gc)
                list_add_tail(&n->gc_list, &n->tbl->gc_list);
        if (n->flags & NTF_MANAGED)
                list_add_tail(&n->managed_list, &n->tbl->managed_list);
        if (want_ref)
                neigh_hold(n);
        rcu_assign_pointer(n->next,
                           rcu_dereference_protected(nht->hash_buckets[hash_val],
                                                     lockdep_is_held(&tbl->lock)));
        rcu_assign_pointer(nht->hash_buckets[hash_val], n);
        write_unlock_bh(&tbl->lock);
        neigh_dbg(2, "neigh %p is created\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        if (!exempt_from_gc)
                atomic_dec(&tbl->gc_entries);
        neigh_release(n);
        goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
                                 struct net_device *dev, bool want_ref)
{
        return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

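/* Usage sketch (illustrative): create-or-find an entry for a next hop,
 * taking a reference on the result; this mirrors how output paths
 * typically obtain entries. gw and dev are assumed from the caller.
 *
 *      struct neighbour *n;
 *
 *      n = __neigh_create(&arp_tbl, &gw, dev, true);
 *      if (IS_ERR(n))
 *              return PTR_ERR(n);      // e.g. -ENOBUFS when the table is full
 *      ...
 *      neigh_release(n);
 */
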
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
        u32 hash_val = *(u32 *)(pkey + key_len - 4);
        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;
        return hash_val;
}
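
/* Example (illustrative): for an IPv4 key the last 4 bytes are the whole
 * address. The 16-, 8- and 4-bit xor-folds mix all eight nibbles of the
 * address into the low nibble, and PNEIGH_HASHMASK (0xF) then selects one
 * of the 16 proxy hash buckets.
 */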

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
                                              struct net *net,
                                              const void *pkey,
                                              unsigned int key_len,
                                              struct net_device *dev)
{
        while (n) {
                if (!memcmp(n->key, pkey, key_len) &&
                    net_eq(pneigh_net(n), net) &&
                    (n->dev == dev || !n->dev))
                        return n;
                n = n->next;
        }
        return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
                struct net *net, const void *pkey, struct net_device *dev)
{
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                                 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
                                   struct net *net, const void *pkey,
                                   struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        read_lock_bh(&tbl->lock);
        n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                              net, pkey, key_len, dev);
        read_unlock_bh(&tbl->lock);

        if (n || !creat)
                goto out;

        ASSERT_RTNL();

        n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                netdev_put(dev, &n->dev_tracker);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
EXPORT_SYMBOL(pneigh_lookup);

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    net_eq(pneigh_net(n), net)) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        netdev_put(n->dev, &n->dev_tracker);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
                                    struct net_device *dev)
{
        struct pneigh_entry *n, **np, *freelist = NULL;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                n->next = freelist;
                                freelist = n;
                                continue;
                        }
                        np = &n->next;
                }
        }
        write_unlock_bh(&tbl->lock);
        while ((n = freelist)) {
                freelist = n->next;
                n->next = NULL;
                if (tbl->pdestructor)
                        tbl->pdestructor(n);
                netdev_put(n->dev, &n->dev_tracker);
                kfree(n);
        }
        return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
        if (refcount_dec_and_test(&parms->refcnt))
                neigh_parms_destroy(parms);
}

/*
 *      The neighbour entry must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct net_device *dev = neigh->dev;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                pr_warn("Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                pr_warn("Impossible event\n");

        write_lock_bh(&neigh->lock);
        __skb_queue_purge(&neigh->arp_queue);
        write_unlock_bh(&neigh->lock);
        neigh->arp_queue_len_bytes = 0;

        if (dev->netdev_ops->ndo_neigh_destroy)
                dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

        netdev_put(dev, &neigh->dev_tracker);
        neigh_parms_put(neigh->parms);

        neigh_dbg(2, "neigh %p is destroyed\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with the neighbour write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        neigh_dbg(2, "neigh %p is suspected\n", neigh);

        neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with the neighbour write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
        neigh_dbg(2, "neigh %p is connected\n", neigh);

        neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
        struct neighbour *n;
        struct neighbour __rcu **np;
        unsigned int i;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        /*
         *      periodically recompute ReachableTime from random function
         */

        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = jiffies;
                list_for_each_entry(p, &tbl->parms_list, list)
                        p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }

        if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
                goto out;

        for (i = 0; i < (1 << nht->hash_shift); i++) {
                np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        unsigned int state;

                        write_lock(&n->lock);

                        state = n->nud_state;
                        if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
                            (n->flags & NTF_EXT_LEARNED)) {
                                write_unlock(&n->lock);
                                goto next_elt;
                        }

                        if (time_before(n->used, n->confirmed) &&
                            time_is_before_eq_jiffies(n->confirmed))
                                n->used = n->confirmed;

                        if (refcount_read(&n->refcnt) == 1 &&
                            (state == NUD_FAILED ||
                             !time_in_range_open(jiffies, n->used,
                                                 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
                                *np = n->next;
                                neigh_mark_dead(n);
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);

next_elt:
                        np = &n->next;
                }
                /*
                 * It's fine to release lock here, even if hash table
                 * grows while we are preempted.
                 */
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
                nht = rcu_dereference_protected(tbl->nht,
                                                lockdep_is_held(&tbl->lock));
        }
out:
        /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
         * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
         * BASE_REACHABLE_TIME.
         */
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                           NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
        write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
               (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
                NEIGH_VAR(p, MCAST_PROBES));
}
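
/* Arithmetic example (illustrative, assuming the common ARP defaults of
 * ucast_solicit = 3, app_solicit = 0, mcast_resolicit = 0 and
 * mcast_solicit = 3): an entry in NUD_INCOMPLETE gets 3 + 0 + 3 = 6
 * probes before it is failed, while one in NUD_PROBE gets 3 + 0 + 0 = 3.
 */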

static void neigh_invalidate(struct neighbour *neigh)
        __releases(neigh->lock)
        __acquires(neigh->lock)
{
        struct sk_buff *skb;

        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        neigh_dbg(2, "neigh %p is failed\n", neigh);
        neigh->updated = jiffies;

        /* This is a very thin place. report_unreachable is a very
           complicated routine. In particular, it can hit the same
           neighbour entry!

           So we try to be careful here and avoid an endless loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                write_unlock(&neigh->lock);
                neigh->ops->error_report(neigh, skb);
                write_lock(&neigh->lock);
        }
        __skb_queue_purge(&neigh->arp_queue);
        neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
{
        struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_clone(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
        if (neigh->ops->solicit)
                neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
        unsigned long now, next;
        struct neighbour *neigh = from_timer(neigh, t, timer);
        unsigned int state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER))
                goto out;

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        neigh_dbg(2, "neigh %p is still alive\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used +
                                          NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
                        neigh_dbg(2, "neigh %p is delayed\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_DELAY);
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
                } else {
                        neigh_dbg(2, "neigh %p is suspected\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_STALE);
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed +
                                   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
                        neigh_dbg(2, "neigh %p is now reachable\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        neigh_dbg(2, "neigh %p is probed\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_PROBE);
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        notify = 1;
                        next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
                                         HZ/100);
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                WRITE_ONCE(neigh->nud_state, NUD_FAILED);
                notify = 1;
                neigh_invalidate(neigh);
                goto out;
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/100))
                        next = jiffies + HZ/100;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                neigh_probe(neigh);
        } else {
out:
                write_unlock(&neigh->lock);
        }

        if (notify)
                neigh_update_notify(neigh, 0);

        trace_neigh_timer_handler(neigh, 0);

        neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
                       const bool immediate_ok)
{
        int rc;
        bool immediate_probe = false;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;
        if (neigh->dead)
                goto out_dead;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
                    NEIGH_VAR(neigh->parms, APP_PROBES)) {
                        unsigned long next, now = jiffies;

                        atomic_set(&neigh->probes,
                                   NEIGH_VAR(neigh->parms, UCAST_PROBES));
                        neigh_del_timer(neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
                        neigh->updated = now;
                        if (!immediate_ok) {
                                next = now + 1;
                        } else {
                                immediate_probe = true;
                                next = now + max(NEIGH_VAR(neigh->parms,
                                                           RETRANS_TIME),
                                                 HZ / 100);
                        }
                        neigh_add_timer(neigh, next);
                } else {
                        WRITE_ONCE(neigh->nud_state, NUD_FAILED);
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                neigh_dbg(2, "neigh %p is delayed\n", neigh);
                neigh_del_timer(neigh);
                WRITE_ONCE(neigh->nud_state, NUD_DELAY);
                neigh->updated = jiffies;
                neigh_add_timer(neigh, jiffies +
                                NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        while (neigh->arp_queue_len_bytes + skb->truesize >
                               NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
                                struct sk_buff *buff;

                                buff = __skb_dequeue(&neigh->arp_queue);
                                if (!buff)
                                        break;
                                neigh->arp_queue_len_bytes -= buff->truesize;
                                kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
                        neigh->arp_queue_len_bytes += skb->truesize;
                }
                rc = 1;
        }
out_unlock_bh:
        if (immediate_probe)
                neigh_probe(neigh);
        else
                write_unlock(&neigh->lock);
        local_bh_enable();
        trace_neigh_event_send_done(neigh, rc);
        return rc;

out_dead:
        if (neigh->nud_state & NUD_STALE)
                goto out_unlock_bh;
        write_unlock_bh(&neigh->lock);
        kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
        trace_neigh_event_send_dead(neigh, 1);
        return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
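
/* Usage sketch (illustrative): callers normally go through the
 * neigh_event_send() wrapper from <net/neighbour.h>, which only drops
 * into this slow path when the entry is not usable as-is:
 *
 *      if (!neigh_event_send(n, skb)) {
 *              // zero: address valid, go ahead and build/transmit
 *      } else {
 *              // nonzero: skb was queued for later or dropped
 *      }
 */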

static void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
                = NULL;

        if (neigh->dev->header_ops)
                update = neigh->dev->header_ops->cache_update;

        if (update) {
                hh = &neigh->hh;
                if (READ_ONCE(hh->hh_len)) {
                        write_seqlock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_sequnlock_bh(&hh->hh_lock);
                }
        }
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it is different.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.
        NEIGH_UPDATE_F_USE      means that the entry is user triggered.
        NEIGH_UPDATE_F_MANAGED  means that the entry will be auto-refreshed.
        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
                                a router.

   Caller MUST hold a reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                          u8 new, u32 flags, u32 nlmsg_pid,
                          struct netlink_ext_ack *extack)
{
        bool gc_update = false, managed_update = false;
        int update_isrouter = 0;
        struct net_device *dev;
        int err, notify = 0;
        u8 old;

        trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        if (neigh->dead) {
                NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
                new = old;
                goto out;
        }
        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
        if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
                new = old & ~NUD_PERMANENT;
                WRITE_ONCE(neigh->nud_state, new);
                err = 0;
                goto out;
        }

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                WRITE_ONCE(neigh->nud_state, new);
                err = 0;
                notify = old & NUD_VALID;
                if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
                    (new & NUD_FAILED)) {
                        neigh_invalidate(neigh);
                        notify = 1;
                }
                goto out;
        }

        /* Compare the new lladdr with the cached one. */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check the override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID)) {
                        NL_SET_ERR_MSG(extack, "No link layer address given");
                        goto out;
                }
                lladdr = neigh->ha;
        }

        /* Update the confirmed timestamp for the neighbour entry after we
         * receive an ARP packet, even if it doesn't change the IP-to-MAC
         * binding.
         */
        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;

        /* If the entry was valid and the address has not changed,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            !(flags & NEIGH_UPDATE_F_ADMIN))
                                new = old;
                }
        }

        /* Update the timestamp only once we know we will make a change to
         * the neighbour entry. Otherwise we risk moving the locktime window
         * with no-op updates and ignoring relevant ARP updates.
         */
        if (new != old || lladdr != neigh->ha)
                neigh->updated = jiffies;

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_PROBE)
                        atomic_set(&neigh->probes, 0);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                WRITE_ONCE(neigh->nud_state, new);
                notify = 1;
        }

        if (lladdr != neigh->ha) {
                write_seqlock(&neigh->ha_lock);
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                write_sequnlock(&neigh->ha_lock);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid an endless loop if something went wrong. */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct dst_entry *dst = skb_dst(skb);
                        struct neighbour *n2, *n1 = neigh;
                        write_unlock_bh(&neigh->lock);

                        rcu_read_lock();

                        /* Why not just use 'neigh' as-is? The problem is that
                         * things such as shaper, eql, and sch_teql can end up
                         * using alternative, different, neigh objects to output
                         * the packet in the output path. So what we need to do
                         * here is re-lookup the top-level neigh in the path so
                         * we can reinject the packet there.
                         */
                        n2 = NULL;
                        if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
                        }
                        n1->output(n1, skb);
                        if (n2)
                                neigh_release(n2);
                        rcu_read_unlock();

                        write_lock_bh(&neigh->lock);
                }
                __skb_queue_purge(&neigh->arp_queue);
                neigh->arp_queue_len_bytes = 0;
        }
out:
        if (update_isrouter)
                neigh_update_is_router(neigh, flags, &notify);
        write_unlock_bh(&neigh->lock);
        if (((new ^ old) & NUD_PERMANENT) || gc_update)
                neigh_update_gc_list(neigh);
        if (managed_update)
                neigh_update_managed_list(neigh);
        if (notify)
                neigh_update_notify(neigh, nlmsg_pid);
        trace_neigh_update_done(neigh, err);
        return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags, u32 nlmsg_pid)
{
        return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
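
/* Usage sketch (illustrative): this is how a protocol typically confirms
 * a binding, e.g. on receipt of an ARP reply carrying sender hardware
 * address "sha":
 *
 *      neigh_update(n, sha, NUD_REACHABLE,
 *                   NEIGH_UPDATE_F_OVERRIDE, 0);
 *
 * An unsolicited/gratuitous advertisement would usually pass NUD_STALE
 * instead, leaving it to traffic to re-confirm the entry.
 */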

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
        if (neigh->dead)
                return;
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
        WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
        atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
                                      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE, 0);
        return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
        struct net_device *dev = n->dev;
        __be16 prot = n->tbl->protocol;
        struct hh_cache *hh = &n->hh;

        write_lock_bh(&n->lock);

        /* Only one thread can come in here and initialize the
         * hh_cache entry.
         */
        if (!hh->hh_len)
                dev->header_ops->cache(n, hh, prot);

        write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc = 0;

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                unsigned int seq;

                if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
                        neigh_hh_init(neigh);

                do {
                        __skb_pull(skb, skb_network_offset(skb));
                        seq = read_seqbegin(&neigh->ha_lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                } while (read_seqretry(&neigh->ha_lock, seq));

                if (err >= 0)
                        rc = dev_queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
        struct net_device *dev = neigh->dev;
        unsigned int seq;
        int err;

        do {
                __skb_pull(skb, skb_network_offset(skb));
                seq = read_seqbegin(&neigh->ha_lock);
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                      neigh->ha, NULL, skb->len);
        } while (read_seqretry(&neigh->ha_lock, seq));

        if (err >= 0)
                err = dev_queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
        return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_managed_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table,
                                               managed_work.work);
        struct neighbour *neigh;

        write_lock_bh(&tbl->lock);
        list_for_each_entry(neigh, &tbl->managed_list, managed_list)
                neigh_event_send_probe(neigh, NULL, false);
        queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
                           NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
        write_unlock_bh(&tbl->lock);
}

static void neigh_proxy_process(struct timer_list *t)
{
        struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb, *n;

        spin_lock(&tbl->proxy_queue.lock);

        skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
                long tdif = NEIGH_CB(skb)->sched_next - now;

                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;

                        neigh_parms_qlen_dec(dev, tbl->family);
                        __skb_unlink(skb, &tbl->proxy_queue);

                        if (tbl->proxy_redo && netif_running(dev)) {
                                rcu_read_lock();
                                tbl->proxy_redo(skb);
                                rcu_read_unlock();
                        } else {
                                kfree_skb(skb);
                        }

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

static unsigned long neigh_proxy_delay(struct neigh_parms *p)
{
        /* If proxy_delay is zero, do not call get_random_u32_below()
         * as it is undefined behavior.
         */
        unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);

        return proxy_delay ?
               jiffies + get_random_u32_below(proxy_delay) : jiffies;
}
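
/* Example (illustrative): with proxy_delay set to 80 jiffies, a proxied
 * request is answered after a random 0..79 jiffy delay, so that several
 * proxies on one segment do not all reply at the same instant; with
 * proxy_delay == 0 the reply is scheduled for the current jiffy.
 */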

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long sched_next = neigh_proxy_delay(p);

        if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        skb_dst_drop(skb);
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        p->qlen++;
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
                                                     struct net *net, int ifindex)
{
        struct neigh_parms *p;

        list_for_each_entry(p, &tbl->parms_list, list) {
                if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
                    (!p->dev && !ifindex && net_eq(net, &init_net)))
                        return p;
        }

        return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p;
        struct net *net = dev_net(dev);
        const struct net_device_ops *ops = dev->netdev_ops;

        p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl            = tbl;
                refcount_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                p->qlen = 0;
                netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
                p->dev = dev;
                write_pnet(&p->net, net);
                p->sysctl_table = NULL;

                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                        netdev_put(dev, &p->dev_tracker);
                        kfree(p);
                        return NULL;
                }

                write_lock_bh(&tbl->lock);
                list_add(&p->list, &tbl->parms.list);
                write_unlock_bh(&tbl->lock);

                neigh_parms_data_state_cleanall(p);
        }
        return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
        struct neigh_parms *parms =
                container_of(head, struct neigh_parms, rcu_head);

        neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        list_del(&parms->list);
        parms->dead = 1;
        write_unlock_bh(&tbl->lock);
        netdev_put(parms->dev, &parms->dev_tracker);
        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
        kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        INIT_LIST_HEAD(&tbl->parms_list);
        INIT_LIST_HEAD(&tbl->gc_list);
        INIT_LIST_HEAD(&tbl->managed_list);

        list_add(&tbl->parms.list, &tbl->parms_list);
        write_pnet(&tbl->parms.net, &init_net);
        refcount_set(&tbl->parms.refcnt, 1);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
        tbl->parms.qlen = 0;

        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
                                  &neigh_stat_seq_ops, tbl))
                panic("cannot create neighbour proc dir entry");
#endif

        RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->nht || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        if (!tbl->entry_size)
                tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
                                        tbl->key_len, NEIGH_PRIV_ALIGN);
        else
                WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

        rwlock_init(&tbl->lock);

        INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                        tbl->parms.reachable_time);
        INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
        queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

        timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;

        neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
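
/* Usage sketch (illustrative): each address family registers its table
 * once at init time under a fixed index, e.g. IPv4 ARP does roughly
 *
 *      void __init arp_init(void)
 *      {
 *              neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *              ...
 *      }
 *
 * so that neigh_find_table() below can map AF_INET to it.
 */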

int neigh_table_clear(int index, struct neigh_table *tbl)
{
        neigh_tables[index] = NULL;
        /* It is not clean... Fix it to unload IPv6 module safely */
        cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                pr_crit("neighbour leakage\n");

        call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
                 neigh_hash_free_rcu);
        tbl->nht = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

        remove_proc_entry(tbl->id, init_net.proc_net_stat);

        free_percpu(tbl->stats);
        tbl->stats = NULL;

        return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
        struct neigh_table *tbl = NULL;

        switch (family) {
        case AF_INET:
                tbl = neigh_tables[NEIGH_ARP_TABLE];
                break;
        case AF_INET6:
                tbl = neigh_tables[NEIGH_ND_TABLE];
                break;
        }

        return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
        [NDA_UNSPEC]            = { .strict_start_type = NDA_NH_ID },
        [NDA_DST]               = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
        [NDA_LLADDR]            = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
        [NDA_CACHEINFO]         = { .len = sizeof(struct nda_cacheinfo) },
        [NDA_PROBES]            = { .type = NLA_U32 },
        [NDA_VLAN]              = { .type = NLA_U16 },
        [NDA_PORT]              = { .type = NLA_U16 },
        [NDA_VNI]               = { .type = NLA_U32 },
        [NDA_IFINDEX]           = { .type = NLA_U32 },
        [NDA_MASTER]            = { .type = NLA_U32 },
        [NDA_PROTOCOL]          = { .type = NLA_U8 },
        [NDA_NH_ID]             = { .type = NLA_U32 },
        [NDA_FLAGS_EXT]         = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
        [NDA_FDB_EXT_ATTRS]     = { .type = NLA_NESTED },
};

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
                        struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        struct nlattr *dst_attr;
        struct neigh_table *tbl;
        struct neighbour *neigh;
        struct net_device *dev = NULL;
        int err = -EINVAL;

        ASSERT_RTNL();
        if (nlmsg_len(nlh) < sizeof(*ndm))
                goto out;

        dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
        if (!dst_attr) {
                NL_SET_ERR_MSG(extack, "Network address not specified");
                goto out;
        }

        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
                dev = __dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }
        }

        tbl = neigh_find_table(ndm->ndm_family);
        if (tbl == NULL)
                return -EAFNOSUPPORT;

        if (nla_len(dst_attr) < (int)tbl->key_len) {
                NL_SET_ERR_MSG(extack, "Invalid network address");
                goto out;
        }

        if (ndm->ndm_flags & NTF_PROXY) {
                err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
                goto out;
        }

        if (dev == NULL)
                goto out;

        neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
        if (neigh == NULL) {
                err = -ENOENT;
                goto out;
        }

        err = __neigh_update(neigh, NULL, NUD_FAILED,
                             NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
                             NETLINK_CB(skb).portid, extack);
        write_lock_bh(&tbl->lock);
        neigh_release(neigh);
        neigh_remove_one(neigh, tbl);
        write_unlock_bh(&tbl->lock);

out:
        return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                     struct netlink_ext_ack *extack)
{
        int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
                    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        struct nlattr *tb[NDA_MAX+1];
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        struct neighbour *neigh;
        void *dst, *lladdr;
        u8 protocol = 0;
        u32 ndm_flags;
        int err;

        ASSERT_RTNL();
        err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
                                     nda_policy, extack);
        if (err < 0)
                goto out;

        err = -EINVAL;
        if (!tb[NDA_DST]) {
                NL_SET_ERR_MSG(extack, "Network address not specified");
                goto out;
        }

        ndm = nlmsg_data(nlh);
        ndm_flags = ndm->ndm_flags;
        if (tb[NDA_FLAGS_EXT]) {
                u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

                BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
                             (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
                              hweight32(NTF_EXT_MASK)));
                ndm_flags |= (ext << NTF_EXT_SHIFT);
        }
        if (ndm->ndm_ifindex) {
                dev = __dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }

                if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
                        NL_SET_ERR_MSG(extack, "Invalid link address");
                        goto out;
                }
        }

        tbl = neigh_find_table(ndm->ndm_family);
        if (tbl == NULL)
                return -EAFNOSUPPORT;

        if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
                NL_SET_ERR_MSG(extack, "Invalid network address");
                goto out;
        }

        dst = nla_data(tb[NDA_DST]);
        lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

        if (tb[NDA_PROTOCOL])
                protocol = nla_get_u8(tb[NDA_PROTOCOL]);
        if (ndm_flags & NTF_PROXY) {
                struct pneigh_entry *pn;

                if (ndm_flags & NTF_MANAGED) {
                        NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
                        goto out;
                }

                err = -ENOBUFS;
                pn = pneigh_lookup(tbl, net, dst, dev, 1);
                if (pn) {
                        pn->flags = ndm_flags;
                        if (protocol)
                                pn->protocol = protocol;
                        err = 0;
                }
                goto out;
        }

        if (!dev) {
                NL_SET_ERR_MSG(extack, "Device not specified");
                goto out;
        }

        if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
                err = -EINVAL;
                goto out;
        }

        neigh = neigh_lookup(tbl, dst, dev);
        if (neigh == NULL) {
                bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
                bool exempt_from_gc = ndm_permanent ||
                                      ndm_flags & NTF_EXT_LEARNED;

                if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                        err = -ENOENT;
                        goto out;
                }
                if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
                        NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
                        err = -EINVAL;
                        goto out;
                }

                neigh = ___neigh_create(tbl, dst, dev,
                                        ndm_flags &
                                        (NTF_EXT_LEARNED | NTF_MANAGED),
                                        exempt_from_gc, true);
                if (IS_ERR(neigh)) {
                        err = PTR_ERR(neigh);
                        goto out;
                }
        } else {
                if (nlh->nlmsg_flags & NLM_F_EXCL) {
                        err = -EEXIST;
                        neigh_release(neigh);
                        goto out;
                }

                if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
                        flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
                                   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
        }

        if (protocol)
                neigh->protocol = protocol;
        if (ndm_flags & NTF_EXT_LEARNED)
                flags |= NEIGH_UPDATE_F_EXT_LEARNED;
        if (ndm_flags & NTF_ROUTER)
                flags |= NEIGH_UPDATE_F_ISROUTER;
        if (ndm_flags & NTF_MANAGED)
                flags |= NEIGH_UPDATE_F_MANAGED;
        if (ndm_flags & NTF_USE)
                flags |= NEIGH_UPDATE_F_USE;

        err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
                             NETLINK_CB(skb).portid, extack);
        if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
                neigh_event_send(neigh, NULL);
                err = 0;
        }
        neigh_release(neigh);
out:
        return err;
}
2099
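/* Emit one NDTA_PARMS nest describing a neigh_parms instance; used by
 * both neightbl_fill_info() and neightbl_fill_param_info() below.
 */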
2100 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2101 {
2102 struct nlattr *nest;
2103
2104 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2105 if (nest == NULL)
2106 return -ENOBUFS;
2107
2108 if ((parms->dev &&
2109 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2110 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2111 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2112 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2113 /* approximate value for the deprecated QUEUE_LEN (in packets) */
2114 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2115 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2116 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2117 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2118 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2119 NEIGH_VAR(parms, UCAST_PROBES)) ||
2120 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2121 NEIGH_VAR(parms, MCAST_PROBES)) ||
2122 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2123 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2124 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2125 NDTPA_PAD) ||
2126 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2127 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2128 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2129 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2130 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2131 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2132 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2133 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2134 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2135 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2136 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2137 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2138 nla_put_msecs(skb, NDTPA_LOCKTIME,
2139 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2140 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2141 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2142 goto nla_put_failure;
2143 return nla_nest_end(skb, nest);
2144
2145 nla_put_failure:
2146 nla_nest_cancel(skb, nest);
2147 return -EMSGSIZE;
2148 }
2149
2150 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2151 u32 pid, u32 seq, int type, int flags)
2152 {
2153 struct nlmsghdr *nlh;
2154 struct ndtmsg *ndtmsg;
2155
2156 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2157 if (nlh == NULL)
2158 return -EMSGSIZE;
2159
2160 ndtmsg = nlmsg_data(nlh);
2161
2162 read_lock_bh(&tbl->lock);
2163 ndtmsg->ndtm_family = tbl->family;
2164 ndtmsg->ndtm_pad1 = 0;
2165 ndtmsg->ndtm_pad2 = 0;
2166
2167 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2168 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2169 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2170 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2171 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2172 goto nla_put_failure;
2173 {
2174 unsigned long now = jiffies;
2175 long flush_delta = now - tbl->last_flush;
2176 long rand_delta = now - tbl->last_rand;
2177 struct neigh_hash_table *nht;
2178 struct ndt_config ndc = {
2179 .ndtc_key_len = tbl->key_len,
2180 .ndtc_entry_size = tbl->entry_size,
2181 .ndtc_entries = atomic_read(&tbl->entries),
2182 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2183 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2184 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2185 };
2186
2187 rcu_read_lock();
2188 nht = rcu_dereference(tbl->nht);
2189 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2190 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2191 rcu_read_unlock();
2192
2193 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2194 goto nla_put_failure;
2195 }
2196
2197 {
2198 int cpu;
2199 struct ndt_stats ndst;
2200
2201 memset(&ndst, 0, sizeof(ndst));
2202
2203 for_each_possible_cpu(cpu) {
2204 struct neigh_statistics *st;
2205
2206 st = per_cpu_ptr(tbl->stats, cpu);
2207 ndst.ndts_allocs += st->allocs;
2208 ndst.ndts_destroys += st->destroys;
2209 ndst.ndts_hash_grows += st->hash_grows;
2210 ndst.ndts_res_failed += st->res_failed;
2211 ndst.ndts_lookups += st->lookups;
2212 ndst.ndts_hits += st->hits;
2213 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2214 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2215 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2216 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
2217 ndst.ndts_table_fulls += st->table_fulls;
2218 }
2219
2220 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2221 NDTA_PAD))
2222 goto nla_put_failure;
2223 }
2224
2225 BUG_ON(tbl->parms.dev);
2226 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2227 goto nla_put_failure;
2228
2229 read_unlock_bh(&tbl->lock);
2230 nlmsg_end(skb, nlh);
2231 return 0;
2232
2233 nla_put_failure:
2234 read_unlock_bh(&tbl->lock);
2235 nlmsg_cancel(skb, nlh);
2236 return -EMSGSIZE;
2237 }
2238
2239 static int neightbl_fill_param_info(struct sk_buff *skb,
2240 struct neigh_table *tbl,
2241 struct neigh_parms *parms,
2242 u32 pid, u32 seq, int type,
2243 unsigned int flags)
2244 {
2245 struct ndtmsg *ndtmsg;
2246 struct nlmsghdr *nlh;
2247
2248 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2249 if (nlh == NULL)
2250 return -EMSGSIZE;
2251
2252 ndtmsg = nlmsg_data(nlh);
2253
2254 read_lock_bh(&tbl->lock);
2255 ndtmsg->ndtm_family = tbl->family;
2256 ndtmsg->ndtm_pad1 = 0;
2257 ndtmsg->ndtm_pad2 = 0;
2258
2259 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2260 neightbl_fill_parms(skb, parms) < 0)
2261 goto errout;
2262
2263 read_unlock_bh(&tbl->lock);
2264 nlmsg_end(skb, nlh);
2265 return 0;
2266 errout:
2267 read_unlock_bh(&tbl->lock);
2268 nlmsg_cancel(skb, nlh);
2269 return -EMSGSIZE;
2270 }
2271
2272 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2273 [NDTA_NAME] = { .type = NLA_STRING },
2274 [NDTA_THRESH1] = { .type = NLA_U32 },
2275 [NDTA_THRESH2] = { .type = NLA_U32 },
2276 [NDTA_THRESH3] = { .type = NLA_U32 },
2277 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2278 [NDTA_PARMS] = { .type = NLA_NESTED },
2279 };
2280
2281 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2282 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2283 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2284 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2285 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2286 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2287 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2288 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2289 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2290 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2291 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2292 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2293 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2294 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2295 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2296 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 },
2297 };
2298
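/* RTM_SETNEIGHTBL handler: locates the table by NDTA_NAME, then applies
 * the NDTPA_* tunables under tbl->lock.  The gc_thresh1..gc_thresh3 and
 * gc_interval knobs are global, so they are only writable from the
 * initial network namespace.
 */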
2299 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2300 struct netlink_ext_ack *extack)
2301 {
2302 struct net *net = sock_net(skb->sk);
2303 struct neigh_table *tbl;
2304 struct ndtmsg *ndtmsg;
2305 struct nlattr *tb[NDTA_MAX+1];
2306 bool found = false;
2307 int err, tidx;
2308
2309 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2310 nl_neightbl_policy, extack);
2311 if (err < 0)
2312 goto errout;
2313
2314 if (tb[NDTA_NAME] == NULL) {
2315 err = -EINVAL;
2316 goto errout;
2317 }
2318
2319 ndtmsg = nlmsg_data(nlh);
2320
2321 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2322 tbl = neigh_tables[tidx];
2323 if (!tbl)
2324 continue;
2325 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2326 continue;
2327 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2328 found = true;
2329 break;
2330 }
2331 }
2332
2333 if (!found)
2334 return -ENOENT;
2335
2336 /*
2337 * We acquire tbl->lock to be nice to the periodic timers and
2338 * make sure they always see a consistent set of values.
2339 */
2340 write_lock_bh(&tbl->lock);
2341
2342 if (tb[NDTA_PARMS]) {
2343 struct nlattr *tbp[NDTPA_MAX+1];
2344 struct neigh_parms *p;
2345 int i, ifindex = 0;
2346
2347 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2348 tb[NDTA_PARMS],
2349 nl_ntbl_parm_policy, extack);
2350 if (err < 0)
2351 goto errout_tbl_lock;
2352
2353 if (tbp[NDTPA_IFINDEX])
2354 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2355
2356 p = lookup_neigh_parms(tbl, net, ifindex);
2357 if (p == NULL) {
2358 err = -ENOENT;
2359 goto errout_tbl_lock;
2360 }
2361
2362 for (i = 1; i <= NDTPA_MAX; i++) {
2363 if (tbp[i] == NULL)
2364 continue;
2365
2366 switch (i) {
2367 case NDTPA_QUEUE_LEN:
2368 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2369 nla_get_u32(tbp[i]) *
2370 SKB_TRUESIZE(ETH_FRAME_LEN));
2371 break;
2372 case NDTPA_QUEUE_LENBYTES:
2373 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2374 nla_get_u32(tbp[i]));
2375 break;
2376 case NDTPA_PROXY_QLEN:
2377 NEIGH_VAR_SET(p, PROXY_QLEN,
2378 nla_get_u32(tbp[i]));
2379 break;
2380 case NDTPA_APP_PROBES:
2381 NEIGH_VAR_SET(p, APP_PROBES,
2382 nla_get_u32(tbp[i]));
2383 break;
2384 case NDTPA_UCAST_PROBES:
2385 NEIGH_VAR_SET(p, UCAST_PROBES,
2386 nla_get_u32(tbp[i]));
2387 break;
2388 case NDTPA_MCAST_PROBES:
2389 NEIGH_VAR_SET(p, MCAST_PROBES,
2390 nla_get_u32(tbp[i]));
2391 break;
2392 case NDTPA_MCAST_REPROBES:
2393 NEIGH_VAR_SET(p, MCAST_REPROBES,
2394 nla_get_u32(tbp[i]));
2395 break;
2396 case NDTPA_BASE_REACHABLE_TIME:
2397 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2398 nla_get_msecs(tbp[i]));
2399 /* update reachable_time as well; otherwise the change will
2400 * only take effect the next time neigh_periodic_work
2401 * decides to recompute it (which can be multiple minutes)
2402 */
2403 p->reachable_time =
2404 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2405 break;
2406 case NDTPA_GC_STALETIME:
2407 NEIGH_VAR_SET(p, GC_STALETIME,
2408 nla_get_msecs(tbp[i]));
2409 break;
2410 case NDTPA_DELAY_PROBE_TIME:
2411 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2412 nla_get_msecs(tbp[i]));
2413 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2414 break;
2415 case NDTPA_INTERVAL_PROBE_TIME_MS:
2416 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2417 nla_get_msecs(tbp[i]));
2418 break;
2419 case NDTPA_RETRANS_TIME:
2420 NEIGH_VAR_SET(p, RETRANS_TIME,
2421 nla_get_msecs(tbp[i]));
2422 break;
2423 case NDTPA_ANYCAST_DELAY:
2424 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2425 nla_get_msecs(tbp[i]));
2426 break;
2427 case NDTPA_PROXY_DELAY:
2428 NEIGH_VAR_SET(p, PROXY_DELAY,
2429 nla_get_msecs(tbp[i]));
2430 break;
2431 case NDTPA_LOCKTIME:
2432 NEIGH_VAR_SET(p, LOCKTIME,
2433 nla_get_msecs(tbp[i]));
2434 break;
2435 }
2436 }
2437 }
2438
2439 err = -ENOENT;
2440 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2441 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2442 !net_eq(net, &init_net))
2443 goto errout_tbl_lock;
2444
2445 if (tb[NDTA_THRESH1])
2446 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2447
2448 if (tb[NDTA_THRESH2])
2449 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2450
2451 if (tb[NDTA_THRESH3])
2452 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2453
2454 if (tb[NDTA_GC_INTERVAL])
2455 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2456
2457 err = 0;
2458
2459 errout_tbl_lock:
2460 write_unlock_bh(&tbl->lock);
2461 errout:
2462 return err;
2463 }
2464
2465 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2466 struct netlink_ext_ack *extack)
2467 {
2468 struct ndtmsg *ndtm;
2469
2470 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2471 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2472 return -EINVAL;
2473 }
2474
2475 ndtm = nlmsg_data(nlh);
2476 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2477 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2478 return -EINVAL;
2479 }
2480
2481 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2482 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2483 return -EINVAL;
2484 }
2485
2486 return 0;
2487 }
2488
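/* RTM_GETNEIGHTBL dump: walks every table, emitting one table message
 * followed by one message per non-default neigh_parms; cb->args[0] and
 * cb->args[1] record the resume position across dump callbacks.
 */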
2489 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2490 {
2491 const struct nlmsghdr *nlh = cb->nlh;
2492 struct net *net = sock_net(skb->sk);
2493 int family, tidx, nidx = 0;
2494 int tbl_skip = cb->args[0];
2495 int neigh_skip = cb->args[1];
2496 struct neigh_table *tbl;
2497
2498 if (cb->strict_check) {
2499 int err = neightbl_valid_dump_info(nlh, cb->extack);
2500
2501 if (err < 0)
2502 return err;
2503 }
2504
2505 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2506
2507 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2508 struct neigh_parms *p;
2509
2510 tbl = neigh_tables[tidx];
2511 if (!tbl)
2512 continue;
2513
2514 if (tidx < tbl_skip || (family && tbl->family != family))
2515 continue;
2516
2517 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2518 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2519 NLM_F_MULTI) < 0)
2520 break;
2521
2522 nidx = 0;
2523 p = list_next_entry(&tbl->parms, list);
2524 list_for_each_entry_from(p, &tbl->parms_list, list) {
2525 if (!net_eq(neigh_parms_net(p), net))
2526 continue;
2527
2528 if (nidx < neigh_skip)
2529 goto next;
2530
2531 if (neightbl_fill_param_info(skb, tbl, p,
2532 NETLINK_CB(cb->skb).portid,
2533 nlh->nlmsg_seq,
2534 RTM_NEWNEIGHTBL,
2535 NLM_F_MULTI) < 0)
2536 goto out;
2537 next:
2538 nidx++;
2539 }
2540
2541 neigh_skip = 0;
2542 }
2543 out:
2544 cb->args[0] = tidx;
2545 cb->args[1] = nidx;
2546
2547 return skb->len;
2548 }
2549
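/* Fill one RTM_NEWNEIGH message for a regular neighbour.  The link-layer
 * address and cacheinfo are snapshotted under neigh->lock so the message
 * reflects a consistent state.
 */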
2550 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2551 u32 pid, u32 seq, int type, unsigned int flags)
2552 {
2553 u32 neigh_flags, neigh_flags_ext;
2554 unsigned long now = jiffies;
2555 struct nda_cacheinfo ci;
2556 struct nlmsghdr *nlh;
2557 struct ndmsg *ndm;
2558
2559 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2560 if (nlh == NULL)
2561 return -EMSGSIZE;
2562
2563 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2564 neigh_flags = neigh->flags & NTF_OLD_MASK;
2565
2566 ndm = nlmsg_data(nlh);
2567 ndm->ndm_family = neigh->ops->family;
2568 ndm->ndm_pad1 = 0;
2569 ndm->ndm_pad2 = 0;
2570 ndm->ndm_flags = neigh_flags;
2571 ndm->ndm_type = neigh->type;
2572 ndm->ndm_ifindex = neigh->dev->ifindex;
2573
2574 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2575 goto nla_put_failure;
2576
2577 read_lock_bh(&neigh->lock);
2578 ndm->ndm_state = neigh->nud_state;
2579 if (neigh->nud_state & NUD_VALID) {
2580 char haddr[MAX_ADDR_LEN];
2581
2582 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2583 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2584 read_unlock_bh(&neigh->lock);
2585 goto nla_put_failure;
2586 }
2587 }
2588
2589 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2590 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2591 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2592 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2593 read_unlock_bh(&neigh->lock);
2594
2595 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2596 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2597 goto nla_put_failure;
2598
2599 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2600 goto nla_put_failure;
2601 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2602 goto nla_put_failure;
2603
2604 nlmsg_end(skb, nlh);
2605 return 0;
2606
2607 nla_put_failure:
2608 nlmsg_cancel(skb, nlh);
2609 return -EMSGSIZE;
2610 }
2611
2612 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2613 u32 pid, u32 seq, int type, unsigned int flags,
2614 struct neigh_table *tbl)
2615 {
2616 u32 neigh_flags, neigh_flags_ext;
2617 struct nlmsghdr *nlh;
2618 struct ndmsg *ndm;
2619
2620 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2621 if (nlh == NULL)
2622 return -EMSGSIZE;
2623
2624 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2625 neigh_flags = pn->flags & NTF_OLD_MASK;
2626
2627 ndm = nlmsg_data(nlh);
2628 ndm->ndm_family = tbl->family;
2629 ndm->ndm_pad1 = 0;
2630 ndm->ndm_pad2 = 0;
2631 ndm->ndm_flags = neigh_flags | NTF_PROXY;
2632 ndm->ndm_type = RTN_UNICAST;
2633 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2634 ndm->ndm_state = NUD_NONE;
2635
2636 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2637 goto nla_put_failure;
2638
2639 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2640 goto nla_put_failure;
2641 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2642 goto nla_put_failure;
2643
2644 nlmsg_end(skb, nlh);
2645 return 0;
2646
2647 nla_put_failure:
2648 nlmsg_cancel(skb, nlh);
2649 return -EMSGSIZE;
2650 }
2651
2652 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2653 {
2654 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2655 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2656 }
2657
2658 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2659 {
2660 struct net_device *master;
2661
2662 if (!master_idx)
2663 return false;
2664
2665 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2666
2667 /* 0 already denotes that NDA_MASTER wasn't passed, so we need another
2668 * invalid ifindex value to denote "no master".
2669 */
2670 if (master_idx == -1)
2671 return !!master;
2672
2673 if (!master || master->ifindex != master_idx)
2674 return true;
2675
2676 return false;
2677 }
2678
2679 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2680 {
2681 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2682 return true;
2683
2684 return false;
2685 }
2686
2687 struct neigh_dump_filter {
2688 int master_idx;
2689 int dev_idx;
2690 };
2691
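/* Dump the main hash table under RCU.  cb->args[1] (bucket) and
 * cb->args[2] (index within the bucket) let a partially filled skb be
 * resumed on the next netlink dump callback.
 */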
2692 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2693 struct netlink_callback *cb,
2694 struct neigh_dump_filter *filter)
2695 {
2696 struct net *net = sock_net(skb->sk);
2697 struct neighbour *n;
2698 int rc, h, s_h = cb->args[1];
2699 int idx, s_idx = idx = cb->args[2];
2700 struct neigh_hash_table *nht;
2701 unsigned int flags = NLM_F_MULTI;
2702
2703 if (filter->dev_idx || filter->master_idx)
2704 flags |= NLM_F_DUMP_FILTERED;
2705
2706 rcu_read_lock();
2707 nht = rcu_dereference(tbl->nht);
2708
2709 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2710 if (h > s_h)
2711 s_idx = 0;
2712 for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0;
2713 n != NULL;
2714 n = rcu_dereference(n->next)) {
2715 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2716 goto next;
2717 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2718 neigh_master_filtered(n->dev, filter->master_idx))
2719 goto next;
2720 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2721 cb->nlh->nlmsg_seq,
2722 RTM_NEWNEIGH,
2723 flags) < 0) {
2724 rc = -1;
2725 goto out;
2726 }
2727 next:
2728 idx++;
2729 }
2730 }
2731 rc = skb->len;
2732 out:
2733 rcu_read_unlock();
2734 cb->args[1] = h;
2735 cb->args[2] = idx;
2736 return rc;
2737 }
2738
2739 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2740 struct netlink_callback *cb,
2741 struct neigh_dump_filter *filter)
2742 {
2743 struct pneigh_entry *n;
2744 struct net *net = sock_net(skb->sk);
2745 int rc, h, s_h = cb->args[3];
2746 int idx, s_idx = idx = cb->args[4];
2747 unsigned int flags = NLM_F_MULTI;
2748
2749 if (filter->dev_idx || filter->master_idx)
2750 flags |= NLM_F_DUMP_FILTERED;
2751
2752 read_lock_bh(&tbl->lock);
2753
2754 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2755 if (h > s_h)
2756 s_idx = 0;
2757 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2758 if (idx < s_idx || pneigh_net(n) != net)
2759 goto next;
2760 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2761 neigh_master_filtered(n->dev, filter->master_idx))
2762 goto next;
2763 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2764 cb->nlh->nlmsg_seq,
2765 RTM_NEWNEIGH, flags, tbl) < 0) {
2766 read_unlock_bh(&tbl->lock);
2767 rc = -1;
2768 goto out;
2769 }
2770 next:
2771 idx++;
2772 }
2773 }
2774
2775 read_unlock_bh(&tbl->lock);
2776 rc = skb->len;
2777 out:
2778 cb->args[3] = h;
2779 cb->args[4] = idx;
2780 return rc;
2781
2782 }
2783
2784 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2785 bool strict_check,
2786 struct neigh_dump_filter *filter,
2787 struct netlink_ext_ack *extack)
2788 {
2789 struct nlattr *tb[NDA_MAX + 1];
2790 int err, i;
2791
2792 if (strict_check) {
2793 struct ndmsg *ndm;
2794
2795 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2796 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2797 return -EINVAL;
2798 }
2799
2800 ndm = nlmsg_data(nlh);
2801 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2802 ndm->ndm_state || ndm->ndm_type) {
2803 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2804 return -EINVAL;
2805 }
2806
2807 if (ndm->ndm_flags & ~NTF_PROXY) {
2808 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2809 return -EINVAL;
2810 }
2811
2812 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2813 tb, NDA_MAX, nda_policy,
2814 extack);
2815 } else {
2816 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2817 NDA_MAX, nda_policy, extack);
2818 }
2819 if (err < 0)
2820 return err;
2821
2822 for (i = 0; i <= NDA_MAX; ++i) {
2823 if (!tb[i])
2824 continue;
2825
2826 /* all new attributes should require strict_check */
2827 switch (i) {
2828 case NDA_IFINDEX:
2829 filter->dev_idx = nla_get_u32(tb[i]);
2830 break;
2831 case NDA_MASTER:
2832 filter->master_idx = nla_get_u32(tb[i]);
2833 break;
2834 default:
2835 if (strict_check) {
2836 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2837 return -EINVAL;
2838 }
2839 }
2840 }
2841
2842 return 0;
2843 }
2844
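/* RTM_GETNEIGH dump entry point: dispatches to the proxy or main table
 * walker per family, honouring the NDA_IFINDEX/NDA_MASTER filters
 * validated above.
 */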
2845 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2846 {
2847 const struct nlmsghdr *nlh = cb->nlh;
2848 struct neigh_dump_filter filter = {};
2849 struct neigh_table *tbl;
2850 int t, family, s_t;
2851 int proxy = 0;
2852 int err;
2853
2854 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2855
2856 /* check that a full ndmsg structure is present; the family
2857 * member is at the same offset in both structures
2858 */
2859 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2860 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2861 proxy = 1;
2862
2863 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2864 if (err < 0 && cb->strict_check)
2865 return err;
2866
2867 s_t = cb->args[0];
2868
2869 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2870 tbl = neigh_tables[t];
2871
2872 if (!tbl)
2873 continue;
2874 if (t < s_t || (family && tbl->family != family))
2875 continue;
2876 if (t > s_t)
2877 memset(&cb->args[1], 0, sizeof(cb->args) -
2878 sizeof(cb->args[0]));
2879 if (proxy)
2880 err = pneigh_dump_table(tbl, skb, cb, &filter);
2881 else
2882 err = neigh_dump_table(tbl, skb, cb, &filter);
2883 if (err < 0)
2884 break;
2885 }
2886
2887 cb->args[0] = t;
2888 return skb->len;
2889 }
2890
2891 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2892 struct neigh_table **tbl,
2893 void **dst, int *dev_idx, u8 *ndm_flags,
2894 struct netlink_ext_ack *extack)
2895 {
2896 struct nlattr *tb[NDA_MAX + 1];
2897 struct ndmsg *ndm;
2898 int err, i;
2899
2900 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2901 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2902 return -EINVAL;
2903 }
2904
2905 ndm = nlmsg_data(nlh);
2906 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2907 ndm->ndm_type) {
2908 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2909 return -EINVAL;
2910 }
2911
2912 if (ndm->ndm_flags & ~NTF_PROXY) {
2913 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2914 return -EINVAL;
2915 }
2916
2917 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2918 NDA_MAX, nda_policy, extack);
2919 if (err < 0)
2920 return err;
2921
2922 *ndm_flags = ndm->ndm_flags;
2923 *dev_idx = ndm->ndm_ifindex;
2924 *tbl = neigh_find_table(ndm->ndm_family);
2925 if (*tbl == NULL) {
2926 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2927 return -EAFNOSUPPORT;
2928 }
2929
2930 for (i = 0; i <= NDA_MAX; ++i) {
2931 if (!tb[i])
2932 continue;
2933
2934 switch (i) {
2935 case NDA_DST:
2936 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2937 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2938 return -EINVAL;
2939 }
2940 *dst = nla_data(tb[i]);
2941 break;
2942 default:
2943 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2944 return -EINVAL;
2945 }
2946 }
2947
2948 return 0;
2949 }
2950
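/* Worst-case message size used when allocating reply/notify skbs; a
 * mismatch with neigh_fill_info() would surface as -EMSGSIZE (see the
 * WARN_ON in __neigh_notify() below).
 */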
2951 static inline size_t neigh_nlmsg_size(void)
2952 {
2953 return NLMSG_ALIGN(sizeof(struct ndmsg))
2954 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2955 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2956 + nla_total_size(sizeof(struct nda_cacheinfo))
2957 + nla_total_size(4) /* NDA_PROBES */
2958 + nla_total_size(4) /* NDA_FLAGS_EXT */
2959 + nla_total_size(1); /* NDA_PROTOCOL */
2960 }
2961
2962 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2963 u32 pid, u32 seq)
2964 {
2965 struct sk_buff *skb;
2966 int err = 0;
2967
2968 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2969 if (!skb)
2970 return -ENOBUFS;
2971
2972 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2973 if (err) {
2974 kfree_skb(skb);
2975 goto errout;
2976 }
2977
2978 err = rtnl_unicast(skb, net, pid);
2979 errout:
2980 return err;
2981 }
2982
2983 static inline size_t pneigh_nlmsg_size(void)
2984 {
2985 return NLMSG_ALIGN(sizeof(struct ndmsg))
2986 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2987 + nla_total_size(4) /* NDA_FLAGS_EXT */
2988 + nla_total_size(1); /* NDA_PROTOCOL */
2989 }
2990
2991 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2992 u32 pid, u32 seq, struct neigh_table *tbl)
2993 {
2994 struct sk_buff *skb;
2995 int err = 0;
2996
2997 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2998 if (!skb)
2999 return -ENOBUFS;
3000
3001 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3002 if (err) {
3003 kfree_skb(skb);
3004 goto errout;
3005 }
3006
3007 err = rtnl_unicast(skb, net, pid);
3008 errout:
3009 return err;
3010 }
3011
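/* RTM_GETNEIGH doit handler (non-dump): look up a single proxy or
 * regular entry and unicast the reply to the requester.  An illustrative
 * (not from this file) userspace equivalent:
 *	ip neigh get 192.0.2.1 dev eth0
 */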
3012 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3013 struct netlink_ext_ack *extack)
3014 {
3015 struct net *net = sock_net(in_skb->sk);
3016 struct net_device *dev = NULL;
3017 struct neigh_table *tbl = NULL;
3018 struct neighbour *neigh;
3019 void *dst = NULL;
3020 u8 ndm_flags = 0;
3021 int dev_idx = 0;
3022 int err;
3023
3024 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3025 extack);
3026 if (err < 0)
3027 return err;
3028
3029 if (dev_idx) {
3030 dev = __dev_get_by_index(net, dev_idx);
3031 if (!dev) {
3032 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3033 return -ENODEV;
3034 }
3035 }
3036
3037 if (!dst) {
3038 NL_SET_ERR_MSG(extack, "Network address not specified");
3039 return -EINVAL;
3040 }
3041
3042 if (ndm_flags & NTF_PROXY) {
3043 struct pneigh_entry *pn;
3044
3045 pn = pneigh_lookup(tbl, net, dst, dev, 0);
3046 if (!pn) {
3047 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3048 return -ENOENT;
3049 }
3050 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3051 nlh->nlmsg_seq, tbl);
3052 }
3053
3054 if (!dev) {
3055 NL_SET_ERR_MSG(extack, "No device specified");
3056 return -EINVAL;
3057 }
3058
3059 neigh = neigh_lookup(tbl, dst, dev);
3060 if (!neigh) {
3061 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3062 return -ENOENT;
3063 }
3064
3065 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3066 nlh->nlmsg_seq);
3067
3068 neigh_release(neigh);
3069
3070 return err;
3071 }
3072
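/* Iterate over every entry, invoking cb(n, cookie) for each.  The table
 * lock is taken only to fend off resizes; cb runs with BHs disabled and
 * therefore must not sleep.
 */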
3073 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3074 {
3075 int chain;
3076 struct neigh_hash_table *nht;
3077
3078 rcu_read_lock();
3079 nht = rcu_dereference(tbl->nht);
3080
3081 read_lock_bh(&tbl->lock); /* avoid resizes */
3082 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3083 struct neighbour *n;
3084
3085 for (n = rcu_dereference(nht->hash_buckets[chain]);
3086 n != NULL;
3087 n = rcu_dereference(n->next))
3088 cb(n, cookie);
3089 }
3090 read_unlock_bh(&tbl->lock);
3091 rcu_read_unlock();
3092 }
3093 EXPORT_SYMBOL(neigh_for_each);
3094
3095 /* The tbl->lock must be held as a writer and BH disabled. */
3096 void __neigh_for_each_release(struct neigh_table *tbl,
3097 int (*cb)(struct neighbour *))
3098 {
3099 int chain;
3100 struct neigh_hash_table *nht;
3101
3102 nht = rcu_dereference_protected(tbl->nht,
3103 lockdep_is_held(&tbl->lock));
3104 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3105 struct neighbour *n;
3106 struct neighbour __rcu **np;
3107
3108 np = &nht->hash_buckets[chain];
3109 while ((n = rcu_dereference_protected(*np,
3110 lockdep_is_held(&tbl->lock))) != NULL) {
3111 int release;
3112
3113 write_lock(&n->lock);
3114 release = cb(n);
3115 if (release) {
3116 rcu_assign_pointer(*np,
3117 rcu_dereference_protected(n->next,
3118 lockdep_is_held(&tbl->lock)));
3119 neigh_mark_dead(n);
3120 } else
3121 np = &n->next;
3122 write_unlock(&n->lock);
3123 if (release)
3124 neigh_cleanup_and_release(n);
3125 }
3126 }
3127 }
3128 EXPORT_SYMBOL(__neigh_for_each_release);
3129
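/* Transmit skb via the neighbour entry for addr in table index, creating
 * the entry on demand.  NEIGH_LINK_TABLE bypasses address resolution and
 * just builds the hard header; on resolution or header-build failure the
 * skb is freed.
 */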
3130 int neigh_xmit(int index, struct net_device *dev,
3131 const void *addr, struct sk_buff *skb)
3132 {
3133 int err = -EAFNOSUPPORT;
3134 if (likely(index < NEIGH_NR_TABLES)) {
3135 struct neigh_table *tbl;
3136 struct neighbour *neigh;
3137
3138 tbl = neigh_tables[index];
3139 if (!tbl)
3140 goto out;
3141 rcu_read_lock();
3142 if (index == NEIGH_ARP_TABLE) {
3143 u32 key = *((u32 *)addr);
3144
3145 neigh = __ipv4_neigh_lookup_noref(dev, key);
3146 } else {
3147 neigh = __neigh_lookup_noref(tbl, addr, dev);
3148 }
3149 if (!neigh)
3150 neigh = __neigh_create(tbl, addr, dev, false);
3151 err = PTR_ERR(neigh);
3152 if (IS_ERR(neigh)) {
3153 rcu_read_unlock();
3154 goto out_kfree_skb;
3155 }
3156 err = neigh->output(neigh, skb);
3157 rcu_read_unlock();
3158 } else if (index == NEIGH_LINK_TABLE) {
3160 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3161 addr, NULL, skb->len);
3162 if (err < 0)
3163 goto out_kfree_skb;
3164 err = dev_queue_xmit(skb);
3165 }
3166 out:
3167 return err;
3168 out_kfree_skb:
3169 kfree_skb(skb);
3170 goto out;
3171 }
3172 EXPORT_SYMBOL(neigh_xmit);
3173
3174 #ifdef CONFIG_PROC_FS
3175
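/* /proc seq_file iteration: walk the RCU hash table first, then (unless
 * NEIGH_SEQ_NEIGH_ONLY is set) fall through to the proxy buckets via
 * pneigh_get_first()/pneigh_get_next().
 */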
3176 static struct neighbour *neigh_get_first(struct seq_file *seq)
3177 {
3178 struct neigh_seq_state *state = seq->private;
3179 struct net *net = seq_file_net(seq);
3180 struct neigh_hash_table *nht = state->nht;
3181 struct neighbour *n = NULL;
3182 int bucket;
3183
3184 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3185 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3186 n = rcu_dereference(nht->hash_buckets[bucket]);
3187
3188 while (n) {
3189 if (!net_eq(dev_net(n->dev), net))
3190 goto next;
3191 if (state->neigh_sub_iter) {
3192 loff_t fakep = 0;
3193 void *v;
3194
3195 v = state->neigh_sub_iter(state, n, &fakep);
3196 if (!v)
3197 goto next;
3198 }
3199 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3200 break;
3201 if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3202 break;
3203 next:
3204 n = rcu_dereference(n->next);
3205 }
3206
3207 if (n)
3208 break;
3209 }
3210 state->bucket = bucket;
3211
3212 return n;
3213 }
3214
3215 static struct neighbour *neigh_get_next(struct seq_file *seq,
3216 struct neighbour *n,
3217 loff_t *pos)
3218 {
3219 struct neigh_seq_state *state = seq->private;
3220 struct net *net = seq_file_net(seq);
3221 struct neigh_hash_table *nht = state->nht;
3222
3223 if (state->neigh_sub_iter) {
3224 void *v = state->neigh_sub_iter(state, n, pos);
3225 if (v)
3226 return n;
3227 }
3228 n = rcu_dereference(n->next);
3229
3230 while (1) {
3231 while (n) {
3232 if (!net_eq(dev_net(n->dev), net))
3233 goto next;
3234 if (state->neigh_sub_iter) {
3235 void *v = state->neigh_sub_iter(state, n, pos);
3236 if (v)
3237 return n;
3238 goto next;
3239 }
3240 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3241 break;
3242
3243 if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3244 break;
3245 next:
3246 n = rcu_dereference(n->next);
3247 }
3248
3249 if (n)
3250 break;
3251
3252 if (++state->bucket >= (1 << nht->hash_shift))
3253 break;
3254
3255 n = rcu_dereference(nht->hash_buckets[state->bucket]);
3256 }
3257
3258 if (n && pos)
3259 --(*pos);
3260 return n;
3261 }
3262
3263 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3264 {
3265 struct neighbour *n = neigh_get_first(seq);
3266
3267 if (n) {
3268 --(*pos);
3269 while (*pos) {
3270 n = neigh_get_next(seq, n, pos);
3271 if (!n)
3272 break;
3273 }
3274 }
3275 return *pos ? NULL : n;
3276 }
3277
3278 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3279 {
3280 struct neigh_seq_state *state = seq->private;
3281 struct net *net = seq_file_net(seq);
3282 struct neigh_table *tbl = state->tbl;
3283 struct pneigh_entry *pn = NULL;
3284 int bucket;
3285
3286 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3287 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3288 pn = tbl->phash_buckets[bucket];
3289 while (pn && !net_eq(pneigh_net(pn), net))
3290 pn = pn->next;
3291 if (pn)
3292 break;
3293 }
3294 state->bucket = bucket;
3295
3296 return pn;
3297 }
3298
3299 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3300 struct pneigh_entry *pn,
3301 loff_t *pos)
3302 {
3303 struct neigh_seq_state *state = seq->private;
3304 struct net *net = seq_file_net(seq);
3305 struct neigh_table *tbl = state->tbl;
3306
3307 do {
3308 pn = pn->next;
3309 } while (pn && !net_eq(pneigh_net(pn), net));
3310
3311 while (!pn) {
3312 if (++state->bucket > PNEIGH_HASHMASK)
3313 break;
3314 pn = tbl->phash_buckets[state->bucket];
3315 while (pn && !net_eq(pneigh_net(pn), net))
3316 pn = pn->next;
3317 if (pn)
3318 break;
3319 }
3320
3321 if (pn && pos)
3322 --(*pos);
3323
3324 return pn;
3325 }
3326
3327 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3328 {
3329 struct pneigh_entry *pn = pneigh_get_first(seq);
3330
3331 if (pn) {
3332 --(*pos);
3333 while (*pos) {
3334 pn = pneigh_get_next(seq, pn, pos);
3335 if (!pn)
3336 break;
3337 }
3338 }
3339 return *pos ? NULL : pn;
3340 }
3341
3342 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3343 {
3344 struct neigh_seq_state *state = seq->private;
3345 void *rc;
3346 loff_t idxpos = *pos;
3347
3348 rc = neigh_get_idx(seq, &idxpos);
3349 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3350 rc = pneigh_get_idx(seq, &idxpos);
3351
3352 return rc;
3353 }
3354
3355 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3356 __acquires(tbl->lock)
3357 __acquires(rcu)
3358 {
3359 struct neigh_seq_state *state = seq->private;
3360
3361 state->tbl = tbl;
3362 state->bucket = 0;
3363 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3364
3365 rcu_read_lock();
3366 state->nht = rcu_dereference(tbl->nht);
3367 read_lock_bh(&tbl->lock);
3368
3369 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3370 }
3371 EXPORT_SYMBOL(neigh_seq_start);
3372
3373 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3374 {
3375 struct neigh_seq_state *state;
3376 void *rc;
3377
3378 if (v == SEQ_START_TOKEN) {
3379 rc = neigh_get_first(seq);
3380 goto out;
3381 }
3382
3383 state = seq->private;
3384 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3385 rc = neigh_get_next(seq, v, NULL);
3386 if (rc)
3387 goto out;
3388 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3389 rc = pneigh_get_first(seq);
3390 } else {
3391 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3392 rc = pneigh_get_next(seq, v, NULL);
3393 }
3394 out:
3395 ++(*pos);
3396 return rc;
3397 }
3398 EXPORT_SYMBOL(neigh_seq_next);
3399
3400 void neigh_seq_stop(struct seq_file *seq, void *v)
3401 __releases(tbl->lock)
3402 __releases(rcu)
3403 {
3404 struct neigh_seq_state *state = seq->private;
3405 struct neigh_table *tbl = state->tbl;
3406
3407 read_unlock_bh(&tbl->lock);
3408 rcu_read_unlock();
3409 }
3410 EXPORT_SYMBOL(neigh_seq_stop);
3411
3412 /* statistics via seq_file */
3413
3414 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3415 {
3416 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3417 int cpu;
3418
3419 if (*pos == 0)
3420 return SEQ_START_TOKEN;
3421
3422 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3423 if (!cpu_possible(cpu))
3424 continue;
3425 *pos = cpu+1;
3426 return per_cpu_ptr(tbl->stats, cpu);
3427 }
3428 return NULL;
3429 }
3430
3431 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3432 {
3433 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3434 int cpu;
3435
3436 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3437 if (!cpu_possible(cpu))
3438 continue;
3439 *pos = cpu+1;
3440 return per_cpu_ptr(tbl->stats, cpu);
3441 }
3442 (*pos)++;
3443 return NULL;
3444 }
3445
3446 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3447 {
3448
3449 }
3450
3451 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3452 {
3453 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3454 struct neigh_statistics *st = v;
3455
3456 if (v == SEQ_START_TOKEN) {
3457 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3458 return 0;
3459 }
3460
3461 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3462 "%08lx %08lx %08lx "
3463 "%08lx %08lx %08lx\n",
3464 atomic_read(&tbl->entries),
3465
3466 st->allocs,
3467 st->destroys,
3468 st->hash_grows,
3469
3470 st->lookups,
3471 st->hits,
3472
3473 st->res_failed,
3474
3475 st->rcv_probes_mcast,
3476 st->rcv_probes_ucast,
3477
3478 st->periodic_gc_runs,
3479 st->forced_gc_runs,
3480 st->unres_discards,
3481 st->table_fulls
3482 );
3483
3484 return 0;
3485 }
3486
3487 static const struct seq_operations neigh_stat_seq_ops = {
3488 .start = neigh_stat_seq_start,
3489 .next = neigh_stat_seq_next,
3490 .stop = neigh_stat_seq_stop,
3491 .show = neigh_stat_seq_show,
3492 };
3493 #endif /* CONFIG_PROC_FS */
3494
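/* Multicast an RTM_NEWNEIGH/RTM_DELNEIGH notification to RTNLGRP_NEIGH
 * listeners; allocation is GFP_ATOMIC since callers may hold locks or
 * run in softirq context.
 */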
3495 static void __neigh_notify(struct neighbour *n, int type, int flags,
3496 u32 pid)
3497 {
3498 struct net *net = dev_net(n->dev);
3499 struct sk_buff *skb;
3500 int err = -ENOBUFS;
3501
3502 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3503 if (skb == NULL)
3504 goto errout;
3505
3506 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3507 if (err < 0) {
3508 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3509 WARN_ON(err == -EMSGSIZE);
3510 kfree_skb(skb);
3511 goto errout;
3512 }
3513 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3514 return;
3515 errout:
3516 if (err < 0)
3517 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3518 }
3519
3520 void neigh_app_ns(struct neighbour *n)
3521 {
3522 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3523 }
3524 EXPORT_SYMBOL(neigh_app_ns);
3525
3526 #ifdef CONFIG_SYSCTL
3527 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3528
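/* unres_qlen is exposed in packets but stored in bytes; convert using
 * the true size of an ethernet frame.  For example, writing 3 sets
 * QUEUE_LEN_BYTES to 3 * SKB_TRUESIZE(ETH_FRAME_LEN).
 */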
3529 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3530 void *buffer, size_t *lenp, loff_t *ppos)
3531 {
3532 int size, ret;
3533 struct ctl_table tmp = *ctl;
3534
3535 tmp.extra1 = SYSCTL_ZERO;
3536 tmp.extra2 = &unres_qlen_max;
3537 tmp.data = &size;
3538
3539 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3540 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3541
3542 if (write && !ret)
3543 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3544 return ret;
3545 }
3546
3547 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3548 int index)
3549 {
3550 struct net_device *dev;
3551 int family = neigh_parms_family(p);
3552
3553 rcu_read_lock();
3554 for_each_netdev_rcu(net, dev) {
3555 struct neigh_parms *dst_p =
3556 neigh_get_dev_parms_rcu(dev, family);
3557
3558 if (dst_p && !test_bit(index, dst_p->data_state))
3559 dst_p->data[index] = p->data[index];
3560 }
3561 rcu_read_unlock();
3562 }
3563
3564 static void neigh_proc_update(struct ctl_table *ctl, int write)
3565 {
3566 struct net_device *dev = ctl->extra1;
3567 struct neigh_parms *p = ctl->extra2;
3568 struct net *net = neigh_parms_net(p);
3569 int index = (int *) ctl->data - p->data;
3570
3571 if (!write)
3572 return;
3573
3574 set_bit(index, p->data_state);
3575 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3576 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3577 if (!dev) /* a NULL dev means this is the default value */
3578 neigh_copy_dflt_parms(net, p, index);
3579 }
3580
3581 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3582 void *buffer, size_t *lenp,
3583 loff_t *ppos)
3584 {
3585 struct ctl_table tmp = *ctl;
3586 int ret;
3587
3588 tmp.extra1 = SYSCTL_ZERO;
3589 tmp.extra2 = SYSCTL_INT_MAX;
3590
3591 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3592 neigh_proc_update(ctl, write);
3593 return ret;
3594 }
3595
3596 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
3597 void *buffer, size_t *lenp, loff_t *ppos)
3598 {
3599 struct ctl_table tmp = *ctl;
3600 int ret;
3601
3602 int min = msecs_to_jiffies(1);
3603
3604 tmp.extra1 = &min;
3605 tmp.extra2 = NULL;
3606
3607 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3608 neigh_proc_update(ctl, write);
3609 return ret;
3610 }
3611
3612 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3613 size_t *lenp, loff_t *ppos)
3614 {
3615 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3616
3617 neigh_proc_update(ctl, write);
3618 return ret;
3619 }
3620 EXPORT_SYMBOL(neigh_proc_dointvec);
3621
3622 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3623 size_t *lenp, loff_t *ppos)
3624 {
3625 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3626
3627 neigh_proc_update(ctl, write);
3628 return ret;
3629 }
3630 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3631
3632 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3633 void *buffer, size_t *lenp,
3634 loff_t *ppos)
3635 {
3636 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3637
3638 neigh_proc_update(ctl, write);
3639 return ret;
3640 }
3641
3642 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3643 void *buffer, size_t *lenp, loff_t *ppos)
3644 {
3645 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3646
3647 neigh_proc_update(ctl, write);
3648 return ret;
3649 }
3650 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3651
3652 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3653 void *buffer, size_t *lenp,
3654 loff_t *ppos)
3655 {
3656 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3657
3658 neigh_proc_update(ctl, write);
3659 return ret;
3660 }
3661
3662 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3663 void *buffer, size_t *lenp,
3664 loff_t *ppos)
3665 {
3666 struct neigh_parms *p = ctl->extra2;
3667 int ret;
3668
3669 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3670 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3671 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3672 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3673 else
3674 ret = -1;
3675
3676 if (write && ret == 0) {
3677 /* update reachable_time as well; otherwise the change will
3678 * only take effect the next time neigh_periodic_work
3679 * decides to recompute it
3680 */
3681 p->reachable_time =
3682 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3683 }
3684 return ret;
3685 }
3686
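/* Offset-of trick: the template below stores the offset of data[index]
 * within struct neigh_parms; neigh_sysctl_register() later rebases it
 * by adding the actual neigh_parms pointer.
 */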
3687 #define NEIGH_PARMS_DATA_OFFSET(index) \
3688 (&((struct neigh_parms *) 0)->data[index])
3689
3690 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3691 [NEIGH_VAR_ ## attr] = { \
3692 .procname = name, \
3693 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3694 .maxlen = sizeof(int), \
3695 .mode = mval, \
3696 .proc_handler = proc, \
3697 }
3698
3699 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3700 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3701
3702 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3703 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3704
3705 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3706 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3707
3708 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3709 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3710
3711 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3712 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3713
3714 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3715 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3716
3717 static struct neigh_sysctl_table {
3718 struct ctl_table_header *sysctl_header;
3719 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3720 } neigh_sysctl_template __read_mostly = {
3721 .neigh_vars = {
3722 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3723 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3724 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3725 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3726 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3727 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3728 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3729 NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3730 "interval_probe_time_ms"),
3731 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3732 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3733 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3734 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3735 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3736 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3737 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3738 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3739 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3740 [NEIGH_VAR_GC_INTERVAL] = {
3741 .procname = "gc_interval",
3742 .maxlen = sizeof(int),
3743 .mode = 0644,
3744 .proc_handler = proc_dointvec_jiffies,
3745 },
3746 [NEIGH_VAR_GC_THRESH1] = {
3747 .procname = "gc_thresh1",
3748 .maxlen = sizeof(int),
3749 .mode = 0644,
3750 .extra1 = SYSCTL_ZERO,
3751 .extra2 = SYSCTL_INT_MAX,
3752 .proc_handler = proc_dointvec_minmax,
3753 },
3754 [NEIGH_VAR_GC_THRESH2] = {
3755 .procname = "gc_thresh2",
3756 .maxlen = sizeof(int),
3757 .mode = 0644,
3758 .extra1 = SYSCTL_ZERO,
3759 .extra2 = SYSCTL_INT_MAX,
3760 .proc_handler = proc_dointvec_minmax,
3761 },
3762 [NEIGH_VAR_GC_THRESH3] = {
3763 .procname = "gc_thresh3",
3764 .maxlen = sizeof(int),
3765 .mode = 0644,
3766 .extra1 = SYSCTL_ZERO,
3767 .extra2 = SYSCTL_INT_MAX,
3768 .proc_handler = proc_dointvec_minmax,
3769 },
3770 {},
3771 },
3772 };
3773
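/* Clone the sysctl template for one device (or the per-family default)
 * and register it under net/<ipv4|ipv6>/neigh/<devname|default>/.
 */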
3774 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3775 proc_handler *handler)
3776 {
3777 int i;
3778 struct neigh_sysctl_table *t;
3779 const char *dev_name_source;
3780 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3781 char *p_name;
3782 size_t neigh_vars_size;
3783
3784 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3785 if (!t)
3786 goto err;
3787
3788 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3789 t->neigh_vars[i].data += (long) p;
3790 t->neigh_vars[i].extra1 = dev;
3791 t->neigh_vars[i].extra2 = p;
3792 }
3793
3794 neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
3795 if (dev) {
3796 dev_name_source = dev->name;
3797 /* Terminate the table early */
3798 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3799 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3800 neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
3801 } else {
3802 struct neigh_table *tbl = p->tbl;
3803 dev_name_source = "default";
3804 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3805 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3806 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3807 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3808 }
3809
3810 if (handler) {
3811 /* RetransTime */
3812 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3813 /* ReachableTime */
3814 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3815 /* RetransTime (in milliseconds)*/
3816 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3817 /* ReachableTime (in milliseconds) */
3818 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3819 } else {
3820 /* These handlers update p->reachable_time after
3821 * base_reachable_time(_ms) is set, so the new interval applies
3822 * from the next neighbour update instead of waiting for
3823 * neigh_periodic_work to recompute it (can be multiple minutes).
3824 * Any handler that replaces them should do this as well.
3825 */
3826 /* ReachableTime */
3827 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3828 neigh_proc_base_reachable_time;
3829 /* ReachableTime (in milliseconds) */
3830 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3831 neigh_proc_base_reachable_time;
3832 }
3833
3834 switch (neigh_parms_family(p)) {
3835 case AF_INET:
3836 p_name = "ipv4";
3837 break;
3838 case AF_INET6:
3839 p_name = "ipv6";
3840 break;
3841 default:
3842 BUG();
3843 }
3844
3845 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3846 p_name, dev_name_source);
3847 t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
3848 neigh_path, t->neigh_vars,
3849 neigh_vars_size);
3850 if (!t->sysctl_header)
3851 goto free;
3852
3853 p->sysctl_table = t;
3854 return 0;
3855
3856 free:
3857 kfree(t);
3858 err:
3859 return -ENOBUFS;
3860 }
3861 EXPORT_SYMBOL(neigh_sysctl_register);
3862
3863 void neigh_sysctl_unregister(struct neigh_parms *p)
3864 {
3865 if (p->sysctl_table) {
3866 struct neigh_sysctl_table *t = p->sysctl_table;
3867 p->sysctl_table = NULL;
3868 unregister_net_sysctl_table(t->sysctl_header);
3869 kfree(t);
3870 }
3871 }
3872 EXPORT_SYMBOL(neigh_sysctl_unregister);
3873
3874 #endif /* CONFIG_SYSCTL */
3875
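/* Register the PF_UNSPEC RTM_*NEIGH and RTM_*NEIGHTBL handlers with
 * rtnetlink at subsys init time.
 */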
3876 static int __init neigh_init(void)
3877 {
3878 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3879 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3880 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3881
3882 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3883 0);
3884 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3885
3886 return 0;
3887 }
3888
3889 subsys_initcall(neigh_init);