2 * Packet matching code for ARP packets.
4 * Based heavily, if not almost entirely, upon ip_tables.c framework.
6 * Some ARP specific bits are:
8 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
12 #include <linux/config.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/if_arp.h>
17 #include <linux/kmod.h>
18 #include <linux/vmalloc.h>
19 #include <linux/proc_fs.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
24 #include <asm/semaphore.h>
26 #include <linux/netfilter_arp/arp_tables.h>
28 MODULE_LICENSE("GPL");
29 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
30 MODULE_DESCRIPTION("arptables core");
32 /*#define DEBUG_ARP_TABLES*/
33 /*#define DEBUG_ARP_TABLES_USER*/
35 #ifdef DEBUG_ARP_TABLES
36 #define dprintf(format, args...) printk(format , ## args)
38 #define dprintf(format, args...)
41 #ifdef DEBUG_ARP_TABLES_USER
42 #define duprintf(format, args...) printk(format , ## args)
44 #define duprintf(format, args...)
47 #ifdef CONFIG_NETFILTER_DEBUG
48 #define ARP_NF_ASSERT(x) \
51 printk("ARP_NF_ASSERT: %s:%s:%u\n", \
52 __FUNCTION__, __FILE__, __LINE__); \
55 #define ARP_NF_ASSERT(x)
57 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
59 static DECLARE_MUTEX(arpt_mutex
);
61 #define ASSERT_READ_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
62 #define ASSERT_WRITE_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
63 #include <linux/netfilter_ipv4/lockhelp.h>
64 #include <linux/netfilter_ipv4/listhelp.h>
66 struct arpt_table_info
{
69 unsigned int initial_entries
;
70 unsigned int hook_entry
[NF_ARP_NUMHOOKS
];
71 unsigned int underflow
[NF_ARP_NUMHOOKS
];
72 char entries
[0] __attribute__((aligned(SMP_CACHE_BYTES
)));
/* Registered targets and tables; both protected by arpt_mutex. */
static LIST_HEAD(arpt_target);
static LIST_HEAD(arpt_tables);

#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

/* Offset of CPU p's private copy of the rule blob. */
#ifdef CONFIG_SMP
#define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
#else
#define TABLE_OFFSET(t,p) 0
#endif
85 static inline int arp_devaddr_compare(const struct arpt_devaddr_info
*ap
,
86 char *hdr_addr
, int len
)
90 if (len
> ARPT_DEV_ADDR_LEN_MAX
)
91 len
= ARPT_DEV_ADDR_LEN_MAX
;
94 for (i
= 0; i
< len
; i
++)
95 ret
|= (hdr_addr
[i
] ^ ap
->addr
[i
]) & ap
->mask
[i
];
100 /* Returns whether packet matches rule or not. */
101 static inline int arp_packet_match(const struct arphdr
*arphdr
,
102 struct net_device
*dev
,
105 const struct arpt_arp
*arpinfo
)
107 char *arpptr
= (char *)(arphdr
+ 1);
108 char *src_devaddr
, *tgt_devaddr
;
109 u32 src_ipaddr
, tgt_ipaddr
;
112 #define FWINV(bool,invflg) ((bool) ^ !!(arpinfo->invflags & invflg))
114 if (FWINV((arphdr
->ar_op
& arpinfo
->arpop_mask
) != arpinfo
->arpop
,
116 dprintf("ARP operation field mismatch.\n");
117 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
118 arphdr
->ar_op
, arpinfo
->arpop
, arpinfo
->arpop_mask
);
122 if (FWINV((arphdr
->ar_hrd
& arpinfo
->arhrd_mask
) != arpinfo
->arhrd
,
124 dprintf("ARP hardware address format mismatch.\n");
125 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
126 arphdr
->ar_hrd
, arpinfo
->arhrd
, arpinfo
->arhrd_mask
);
130 if (FWINV((arphdr
->ar_pro
& arpinfo
->arpro_mask
) != arpinfo
->arpro
,
132 dprintf("ARP protocol address format mismatch.\n");
133 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
134 arphdr
->ar_pro
, arpinfo
->arpro
, arpinfo
->arpro_mask
);
138 if (FWINV((arphdr
->ar_hln
& arpinfo
->arhln_mask
) != arpinfo
->arhln
,
140 dprintf("ARP hardware address length mismatch.\n");
141 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
142 arphdr
->ar_hln
, arpinfo
->arhln
, arpinfo
->arhln_mask
);
146 src_devaddr
= arpptr
;
147 arpptr
+= dev
->addr_len
;
148 memcpy(&src_ipaddr
, arpptr
, sizeof(u32
));
149 arpptr
+= sizeof(u32
);
150 tgt_devaddr
= arpptr
;
151 arpptr
+= dev
->addr_len
;
152 memcpy(&tgt_ipaddr
, arpptr
, sizeof(u32
));
154 if (FWINV(arp_devaddr_compare(&arpinfo
->src_devaddr
, src_devaddr
, dev
->addr_len
),
155 ARPT_INV_SRCDEVADDR
) ||
156 FWINV(arp_devaddr_compare(&arpinfo
->tgt_devaddr
, tgt_devaddr
, dev
->addr_len
),
157 ARPT_INV_TGTDEVADDR
)) {
158 dprintf("Source or target device address mismatch.\n");
163 if (FWINV((src_ipaddr
& arpinfo
->smsk
.s_addr
) != arpinfo
->src
.s_addr
,
165 FWINV(((tgt_ipaddr
& arpinfo
->tmsk
.s_addr
) != arpinfo
->tgt
.s_addr
),
167 dprintf("Source or target IP address mismatch.\n");
169 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
171 NIPQUAD(arpinfo
->smsk
.s_addr
),
172 NIPQUAD(arpinfo
->src
.s_addr
),
173 arpinfo
->invflags
& ARPT_INV_SRCIP
? " (INV)" : "");
174 dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
176 NIPQUAD(arpinfo
->tmsk
.s_addr
),
177 NIPQUAD(arpinfo
->tgt
.s_addr
),
178 arpinfo
->invflags
& ARPT_INV_TGTIP
? " (INV)" : "");
182 /* Look for ifname matches. */
183 for (i
= 0, ret
= 0; i
< IFNAMSIZ
; i
++) {
184 ret
|= (indev
[i
] ^ arpinfo
->iniface
[i
])
185 & arpinfo
->iniface_mask
[i
];
188 if (FWINV(ret
!= 0, ARPT_INV_VIA_IN
)) {
189 dprintf("VIA in mismatch (%s vs %s).%s\n",
190 indev
, arpinfo
->iniface
,
191 arpinfo
->invflags
&ARPT_INV_VIA_IN
?" (INV)":"");
195 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
197 memcpy(&odev
, outdev
+ i
*sizeof(unsigned long),
198 sizeof(unsigned long));
200 ^ ((const unsigned long *)arpinfo
->outiface
)[i
])
201 & ((const unsigned long *)arpinfo
->outiface_mask
)[i
];
204 if (FWINV(ret
!= 0, ARPT_INV_VIA_OUT
)) {
205 dprintf("VIA out mismatch (%s vs %s).%s\n",
206 outdev
, arpinfo
->outiface
,
207 arpinfo
->invflags
&ARPT_INV_VIA_OUT
?" (INV)":"");
214 static inline int arp_checkentry(const struct arpt_arp
*arp
)
216 if (arp
->flags
& ~ARPT_F_MASK
) {
217 duprintf("Unknown flag bits set: %08X\n",
218 arp
->flags
& ~ARPT_F_MASK
);
221 if (arp
->invflags
& ~ARPT_INV_MASK
) {
222 duprintf("Unknown invflag bits set: %08X\n",
223 arp
->invflags
& ~ARPT_INV_MASK
);
230 static unsigned int arpt_error(struct sk_buff
**pskb
,
231 unsigned int hooknum
,
232 const struct net_device
*in
,
233 const struct net_device
*out
,
234 const void *targinfo
,
238 printk("arp_tables: error: '%s'\n", (char *)targinfo
);
/* Translate a byte offset within the rule blob into an entry pointer. */
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
248 unsigned int arpt_do_table(struct sk_buff
**pskb
,
250 const struct net_device
*in
,
251 const struct net_device
*out
,
252 struct arpt_table
*table
,
255 static const char nulldevname
[IFNAMSIZ
];
256 unsigned int verdict
= NF_DROP
;
259 struct arpt_entry
*e
, *back
;
260 const char *indev
, *outdev
;
263 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
264 if (!pskb_may_pull((*pskb
), (sizeof(struct arphdr
) +
265 (2 * (*pskb
)->dev
->addr_len
) +
269 indev
= in
? in
->name
: nulldevname
;
270 outdev
= out
? out
->name
: nulldevname
;
272 read_lock_bh(&table
->lock
);
273 table_base
= (void *)table
->private->entries
274 + TABLE_OFFSET(table
->private,
276 e
= get_entry(table_base
, table
->private->hook_entry
[hook
]);
277 back
= get_entry(table_base
, table
->private->underflow
[hook
]);
279 arp
= (*pskb
)->nh
.arph
;
281 if (arp_packet_match(arp
, (*pskb
)->dev
, indev
, outdev
, &e
->arp
)) {
282 struct arpt_entry_target
*t
;
285 hdr_len
= sizeof(*arp
) + (2 * sizeof(struct in_addr
)) +
286 (2 * (*pskb
)->dev
->addr_len
);
287 ADD_COUNTER(e
->counters
, hdr_len
, 1);
289 t
= arpt_get_target(e
);
291 /* Standard target? */
292 if (!t
->u
.kernel
.target
->target
) {
295 v
= ((struct arpt_standard_target
*)t
)->verdict
;
297 /* Pop from stack? */
298 if (v
!= ARPT_RETURN
) {
299 verdict
= (unsigned)(-v
) - 1;
303 back
= get_entry(table_base
,
308 != (void *)e
+ e
->next_offset
) {
309 /* Save old back ptr in next entry */
310 struct arpt_entry
*next
311 = (void *)e
+ e
->next_offset
;
313 (void *)back
- table_base
;
315 /* set back pointer to next entry */
319 e
= get_entry(table_base
, v
);
321 /* Targets which reenter must return
324 verdict
= t
->u
.kernel
.target
->target(pskb
,
330 /* Target might have changed stuff. */
331 arp
= (*pskb
)->nh
.arph
;
333 if (verdict
== ARPT_CONTINUE
)
334 e
= (void *)e
+ e
->next_offset
;
340 e
= (void *)e
+ e
->next_offset
;
343 read_unlock_bh(&table
->lock
);
351 static inline void *find_inlist_lock_noload(struct list_head
*head
,
354 struct semaphore
*mutex
)
358 *error
= down_interruptible(mutex
);
362 ret
= list_named_find(head
, name
);
#ifndef CONFIG_KMOD
#define find_inlist_lock(h,n,p,e,m) find_inlist_lock_noload((h),(n),(e),(m))
#else
/* As find_inlist_lock_noload, but on a miss try loading the module named
 * "<prefix><name>" and look again. */
static void *
find_inlist_lock(struct list_head *head,
		 const char *name,
		 const char *prefix,
		 int *error,
		 struct semaphore *mutex)
{
	void *ret;

	ret = find_inlist_lock_noload(head, name, error, mutex);
	if (!ret) {
		duprintf("find_inlist: loading `%s%s'.\n", prefix, name);
		request_module("%s%s", prefix, name);
		ret = find_inlist_lock_noload(head, name, error, mutex);
	}

	return ret;
}
#endif
393 static inline struct arpt_table
*arpt_find_table_lock(const char *name
, int *error
, struct semaphore
*mutex
)
395 return find_inlist_lock(&arpt_tables
, name
, "arptable_", error
, mutex
);
398 static struct arpt_target
*arpt_find_target_lock(const char *name
, int *error
, struct semaphore
*mutex
)
400 return find_inlist_lock(&arpt_target
, name
, "arpt_", error
, mutex
);
403 /* All zeroes == unconditional rule. */
404 static inline int unconditional(const struct arpt_arp
*arp
)
408 for (i
= 0; i
< sizeof(*arp
)/sizeof(__u32
); i
++)
409 if (((__u32
*)arp
)[i
])
415 /* Figures out from what hook each rule can be called: returns 0 if
416 * there are loops. Puts hook bitmask in comefrom.
418 static int mark_source_chains(struct arpt_table_info
*newinfo
, unsigned int valid_hooks
)
422 /* No recursion; use packet counter to save back ptrs (reset
423 * to 0 as we leave), and comefrom to save source hook bitmask.
425 for (hook
= 0; hook
< NF_ARP_NUMHOOKS
; hook
++) {
426 unsigned int pos
= newinfo
->hook_entry
[hook
];
428 = (struct arpt_entry
*)(newinfo
->entries
+ pos
);
430 if (!(valid_hooks
& (1 << hook
)))
433 /* Set initial back pointer. */
434 e
->counters
.pcnt
= pos
;
437 struct arpt_standard_target
*t
438 = (void *)arpt_get_target(e
);
440 if (e
->comefrom
& (1 << NF_ARP_NUMHOOKS
)) {
441 printk("arptables: loop hook %u pos %u %08X.\n",
442 hook
, pos
, e
->comefrom
);
446 |= ((1 << hook
) | (1 << NF_ARP_NUMHOOKS
));
448 /* Unconditional return/END. */
449 if (e
->target_offset
== sizeof(struct arpt_entry
)
450 && (strcmp(t
->target
.u
.user
.name
,
451 ARPT_STANDARD_TARGET
) == 0)
453 && unconditional(&e
->arp
)) {
454 unsigned int oldpos
, size
;
456 /* Return: backtrack through the last
460 e
->comefrom
^= (1<<NF_ARP_NUMHOOKS
);
462 pos
= e
->counters
.pcnt
;
463 e
->counters
.pcnt
= 0;
465 /* We're at the start. */
469 e
= (struct arpt_entry
*)
470 (newinfo
->entries
+ pos
);
471 } while (oldpos
== pos
+ e
->next_offset
);
474 size
= e
->next_offset
;
475 e
= (struct arpt_entry
*)
476 (newinfo
->entries
+ pos
+ size
);
477 e
->counters
.pcnt
= pos
;
480 int newpos
= t
->verdict
;
482 if (strcmp(t
->target
.u
.user
.name
,
483 ARPT_STANDARD_TARGET
) == 0
485 /* This a jump; chase it. */
486 duprintf("Jump rule %u -> %u\n",
489 /* ... this is a fallthru */
490 newpos
= pos
+ e
->next_offset
;
492 e
= (struct arpt_entry
*)
493 (newinfo
->entries
+ newpos
);
494 e
->counters
.pcnt
= pos
;
499 duprintf("Finished chain %u\n", hook
);
504 static inline int standard_check(const struct arpt_entry_target
*t
,
505 unsigned int max_offset
)
507 struct arpt_standard_target
*targ
= (void *)t
;
509 /* Check standard info. */
511 != ARPT_ALIGN(sizeof(struct arpt_standard_target
))) {
512 duprintf("arpt_standard_check: target size %u != %Zu\n",
514 ARPT_ALIGN(sizeof(struct arpt_standard_target
)));
518 if (targ
->verdict
>= 0
519 && targ
->verdict
> max_offset
- sizeof(struct arpt_entry
)) {
520 duprintf("arpt_standard_check: bad verdict (%i)\n",
525 if (targ
->verdict
< -NF_MAX_VERDICT
- 1) {
526 duprintf("arpt_standard_check: bad negative verdict (%i)\n",
533 static struct arpt_target arpt_standard_target
;
535 static inline int check_entry(struct arpt_entry
*e
, const char *name
, unsigned int size
,
538 struct arpt_entry_target
*t
;
539 struct arpt_target
*target
;
542 if (!arp_checkentry(&e
->arp
)) {
543 duprintf("arp_tables: arp check failed %p %s.\n", e
, name
);
547 t
= arpt_get_target(e
);
548 target
= arpt_find_target_lock(t
->u
.user
.name
, &ret
, &arpt_mutex
);
550 duprintf("check_entry: `%s' not found\n", t
->u
.user
.name
);
553 if (!try_module_get((target
->me
))) {
557 t
->u
.kernel
.target
= target
;
560 if (t
->u
.kernel
.target
== &arpt_standard_target
) {
561 if (!standard_check(t
, size
)) {
565 } else if (t
->u
.kernel
.target
->checkentry
566 && !t
->u
.kernel
.target
->checkentry(name
, e
, t
->data
,
570 module_put(t
->u
.kernel
.target
->me
);
571 duprintf("arp_tables: check failed for `%s'.\n",
572 t
->u
.kernel
.target
->name
);
586 static inline int check_entry_size_and_hooks(struct arpt_entry
*e
,
587 struct arpt_table_info
*newinfo
,
589 unsigned char *limit
,
590 const unsigned int *hook_entries
,
591 const unsigned int *underflows
,
596 if ((unsigned long)e
% __alignof__(struct arpt_entry
) != 0
597 || (unsigned char *)e
+ sizeof(struct arpt_entry
) >= limit
) {
598 duprintf("Bad offset %p\n", e
);
603 < sizeof(struct arpt_entry
) + sizeof(struct arpt_entry_target
)) {
604 duprintf("checking: element %p size %u\n",
609 /* Check hooks & underflows */
610 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
611 if ((unsigned char *)e
- base
== hook_entries
[h
])
612 newinfo
->hook_entry
[h
] = hook_entries
[h
];
613 if ((unsigned char *)e
- base
== underflows
[h
])
614 newinfo
->underflow
[h
] = underflows
[h
];
617 /* FIXME: underflows must be unconditional, standard verdicts
618 < 0 (not ARPT_RETURN). --RR */
620 /* Clear counters and comefrom */
621 e
->counters
= ((struct arpt_counters
) { 0, 0 });
628 static inline int cleanup_entry(struct arpt_entry
*e
, unsigned int *i
)
630 struct arpt_entry_target
*t
;
632 if (i
&& (*i
)-- == 0)
635 t
= arpt_get_target(e
);
636 if (t
->u
.kernel
.target
->destroy
)
637 t
->u
.kernel
.target
->destroy(t
->data
,
638 t
->u
.target_size
- sizeof(*t
));
639 module_put(t
->u
.kernel
.target
->me
);
643 /* Checks and translates the user-supplied table segment (held in
646 static int translate_table(const char *name
,
647 unsigned int valid_hooks
,
648 struct arpt_table_info
*newinfo
,
651 const unsigned int *hook_entries
,
652 const unsigned int *underflows
)
657 newinfo
->size
= size
;
658 newinfo
->number
= number
;
660 /* Init all hooks to impossible value. */
661 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
662 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
663 newinfo
->underflow
[i
] = 0xFFFFFFFF;
666 duprintf("translate_table: size %u\n", newinfo
->size
);
669 /* Walk through entries, checking offsets. */
670 ret
= ARPT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->size
,
671 check_entry_size_and_hooks
,
674 newinfo
->entries
+ size
,
675 hook_entries
, underflows
, &i
);
676 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret
);
681 duprintf("translate_table: %u not %u entries\n",
686 /* Check hooks all assigned */
687 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
688 /* Only hooks which are valid */
689 if (!(valid_hooks
& (1 << i
)))
691 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
692 duprintf("Invalid hook entry %u %u\n",
696 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
697 duprintf("Invalid underflow %u %u\n",
703 if (!mark_source_chains(newinfo
, valid_hooks
)) {
704 duprintf("Looping hook\n");
708 /* Finally, each sanity check must pass */
710 ret
= ARPT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->size
,
711 check_entry
, name
, size
, &i
);
714 ARPT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->size
,
719 /* And one copy for every other CPU */
720 for (i
= 1; i
< num_possible_cpus(); i
++) {
721 memcpy(newinfo
->entries
+ SMP_ALIGN(newinfo
->size
)*i
,
723 SMP_ALIGN(newinfo
->size
));
729 static struct arpt_table_info
*replace_table(struct arpt_table
*table
,
730 unsigned int num_counters
,
731 struct arpt_table_info
*newinfo
,
734 struct arpt_table_info
*oldinfo
;
736 /* Do the substitution. */
737 write_lock_bh(&table
->lock
);
738 /* Check inside lock: is the old number correct? */
739 if (num_counters
!= table
->private->number
) {
740 duprintf("num_counters != table->private->number (%u/%u)\n",
741 num_counters
, table
->private->number
);
742 write_unlock_bh(&table
->lock
);
746 oldinfo
= table
->private;
747 table
->private = newinfo
;
748 newinfo
->initial_entries
= oldinfo
->initial_entries
;
749 write_unlock_bh(&table
->lock
);
755 static inline int add_entry_to_counter(const struct arpt_entry
*e
,
756 struct arpt_counters total
[],
759 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
765 static void get_counters(const struct arpt_table_info
*t
,
766 struct arpt_counters counters
[])
771 for (cpu
= 0; cpu
< num_possible_cpus(); cpu
++) {
773 ARPT_ENTRY_ITERATE(t
->entries
+ TABLE_OFFSET(t
, cpu
),
775 add_entry_to_counter
,
781 static int copy_entries_to_user(unsigned int total_size
,
782 struct arpt_table
*table
,
783 void __user
*userptr
)
785 unsigned int off
, num
, countersize
;
786 struct arpt_entry
*e
;
787 struct arpt_counters
*counters
;
790 /* We need atomic snapshot of counters: rest doesn't change
791 * (other than comefrom, which userspace doesn't care
794 countersize
= sizeof(struct arpt_counters
) * table
->private->number
;
795 counters
= vmalloc(countersize
);
797 if (counters
== NULL
)
800 /* First, sum counters... */
801 memset(counters
, 0, countersize
);
802 write_lock_bh(&table
->lock
);
803 get_counters(table
->private, counters
);
804 write_unlock_bh(&table
->lock
);
806 /* ... then copy entire thing from CPU 0... */
807 if (copy_to_user(userptr
, table
->private->entries
, total_size
) != 0) {
812 /* FIXME: use iterator macros --RR */
813 /* ... then go back and fix counters and names */
814 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
815 struct arpt_entry_target
*t
;
817 e
= (struct arpt_entry
*)(table
->private->entries
+ off
);
818 if (copy_to_user(userptr
+ off
819 + offsetof(struct arpt_entry
, counters
),
821 sizeof(counters
[num
])) != 0) {
826 t
= arpt_get_target(e
);
827 if (copy_to_user(userptr
+ off
+ e
->target_offset
828 + offsetof(struct arpt_entry_target
,
830 t
->u
.kernel
.target
->name
,
831 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
842 static int get_entries(const struct arpt_get_entries
*entries
,
843 struct arpt_get_entries __user
*uptr
)
846 struct arpt_table
*t
;
848 t
= arpt_find_table_lock(entries
->name
, &ret
, &arpt_mutex
);
850 duprintf("t->private->number = %u\n",
852 if (entries
->size
== t
->private->size
)
853 ret
= copy_entries_to_user(t
->private->size
,
854 t
, uptr
->entrytable
);
856 duprintf("get_entries: I've got %u not %u!\n",
863 duprintf("get_entries: Can't find %s!\n",
869 static int do_replace(void __user
*user
, unsigned int len
)
872 struct arpt_replace tmp
;
873 struct arpt_table
*t
;
874 struct arpt_table_info
*newinfo
, *oldinfo
;
875 struct arpt_counters
*counters
;
877 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
880 /* Hack: Causes ipchains to give correct error msg --RR */
881 if (len
!= sizeof(tmp
) + tmp
.size
)
884 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
885 if ((SMP_ALIGN(tmp
.size
) >> PAGE_SHIFT
) + 2 > num_physpages
)
888 newinfo
= vmalloc(sizeof(struct arpt_table_info
)
889 + SMP_ALIGN(tmp
.size
) * num_possible_cpus());
893 if (copy_from_user(newinfo
->entries
, user
+ sizeof(tmp
),
899 counters
= vmalloc(tmp
.num_counters
* sizeof(struct arpt_counters
));
904 memset(counters
, 0, tmp
.num_counters
* sizeof(struct arpt_counters
));
906 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
907 newinfo
, tmp
.size
, tmp
.num_entries
,
908 tmp
.hook_entry
, tmp
.underflow
);
910 goto free_newinfo_counters
;
912 duprintf("arp_tables: Translated table\n");
914 t
= arpt_find_table_lock(tmp
.name
, &ret
, &arpt_mutex
);
916 goto free_newinfo_counters_untrans
;
919 if (tmp
.valid_hooks
!= t
->valid_hooks
) {
920 duprintf("Valid hook crap: %08X vs %08X\n",
921 tmp
.valid_hooks
, t
->valid_hooks
);
923 goto free_newinfo_counters_untrans_unlock
;
926 /* Get a reference in advance, we're not allowed fail later */
927 if (!try_module_get(t
->me
)) {
929 goto free_newinfo_counters_untrans_unlock
;
932 oldinfo
= replace_table(t
, tmp
.num_counters
, newinfo
, &ret
);
936 /* Update module usage count based on number of rules */
937 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
938 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
939 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
940 (newinfo
->number
<= oldinfo
->initial_entries
))
942 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
943 (newinfo
->number
<= oldinfo
->initial_entries
))
946 /* Get the old counters. */
947 get_counters(oldinfo
, counters
);
948 /* Decrease module usage counts and free resource */
949 ARPT_ENTRY_ITERATE(oldinfo
->entries
, oldinfo
->size
, cleanup_entry
,NULL
);
951 if (copy_to_user(tmp
.counters
, counters
,
952 sizeof(struct arpt_counters
) * tmp
.num_counters
) != 0)
960 free_newinfo_counters_untrans_unlock
:
962 free_newinfo_counters_untrans
:
963 ARPT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->size
, cleanup_entry
, NULL
);
964 free_newinfo_counters
:
971 /* We're lazy, and add to the first CPU; overflow works its fey magic
972 * and everything is OK.
974 static inline int add_counter_to_entry(struct arpt_entry
*e
,
975 const struct arpt_counters addme
[],
979 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
985 static int do_add_counters(void __user
*user
, unsigned int len
)
988 struct arpt_counters_info tmp
, *paddc
;
989 struct arpt_table
*t
;
992 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
995 if (len
!= sizeof(tmp
) + tmp
.num_counters
*sizeof(struct arpt_counters
))
998 paddc
= vmalloc(len
);
1002 if (copy_from_user(paddc
, user
, len
) != 0) {
1007 t
= arpt_find_table_lock(tmp
.name
, &ret
, &arpt_mutex
);
1011 write_lock_bh(&t
->lock
);
1012 if (t
->private->number
!= paddc
->num_counters
) {
1014 goto unlock_up_free
;
1018 ARPT_ENTRY_ITERATE(t
->private->entries
,
1020 add_counter_to_entry
,
1024 write_unlock_bh(&t
->lock
);
1032 static int do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1036 if (!capable(CAP_NET_ADMIN
))
1040 case ARPT_SO_SET_REPLACE
:
1041 ret
= do_replace(user
, len
);
1044 case ARPT_SO_SET_ADD_COUNTERS
:
1045 ret
= do_add_counters(user
, len
);
1049 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
1056 static int do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1060 if (!capable(CAP_NET_ADMIN
))
1064 case ARPT_SO_GET_INFO
: {
1065 char name
[ARPT_TABLE_MAXNAMELEN
];
1066 struct arpt_table
*t
;
1068 if (*len
!= sizeof(struct arpt_getinfo
)) {
1069 duprintf("length %u != %Zu\n", *len
,
1070 sizeof(struct arpt_getinfo
));
1075 if (copy_from_user(name
, user
, sizeof(name
)) != 0) {
1079 name
[ARPT_TABLE_MAXNAMELEN
-1] = '\0';
1080 t
= arpt_find_table_lock(name
, &ret
, &arpt_mutex
);
1082 struct arpt_getinfo info
;
1084 info
.valid_hooks
= t
->valid_hooks
;
1085 memcpy(info
.hook_entry
, t
->private->hook_entry
,
1086 sizeof(info
.hook_entry
));
1087 memcpy(info
.underflow
, t
->private->underflow
,
1088 sizeof(info
.underflow
));
1089 info
.num_entries
= t
->private->number
;
1090 info
.size
= t
->private->size
;
1091 strcpy(info
.name
, name
);
1093 if (copy_to_user(user
, &info
, *len
) != 0)
1103 case ARPT_SO_GET_ENTRIES
: {
1104 struct arpt_get_entries get
;
1106 if (*len
< sizeof(get
)) {
1107 duprintf("get_entries: %u < %Zu\n", *len
, sizeof(get
));
1109 } else if (copy_from_user(&get
, user
, sizeof(get
)) != 0) {
1111 } else if (*len
!= sizeof(struct arpt_get_entries
) + get
.size
) {
1112 duprintf("get_entries: %u != %Zu\n", *len
,
1113 sizeof(struct arpt_get_entries
) + get
.size
);
1116 ret
= get_entries(&get
, user
);
1121 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd
);
1128 /* Registration hooks for targets. */
1129 int arpt_register_target(struct arpt_target
*target
)
1133 ret
= down_interruptible(&arpt_mutex
);
1137 if (!list_named_insert(&arpt_target
, target
)) {
1138 duprintf("arpt_register_target: `%s' already in list!\n",
1146 void arpt_unregister_target(struct arpt_target
*target
)
1149 LIST_DELETE(&arpt_target
, target
);
1153 int arpt_register_table(struct arpt_table
*table
,
1154 const struct arpt_replace
*repl
)
1157 struct arpt_table_info
*newinfo
;
1158 static struct arpt_table_info bootstrap
1159 = { 0, 0, 0, { 0 }, { 0 }, { } };
1161 newinfo
= vmalloc(sizeof(struct arpt_table_info
)
1162 + SMP_ALIGN(repl
->size
) * num_possible_cpus());
1167 memcpy(newinfo
->entries
, repl
->entries
, repl
->size
);
1169 ret
= translate_table(table
->name
, table
->valid_hooks
,
1170 newinfo
, repl
->size
,
1174 duprintf("arpt_register_table: translate table gives %d\n", ret
);
1180 ret
= down_interruptible(&arpt_mutex
);
1186 /* Don't autoload: we'd eat our tail... */
1187 if (list_named_find(&arpt_tables
, table
->name
)) {
1192 /* Simplifies replace_table code. */
1193 table
->private = &bootstrap
;
1194 if (!replace_table(table
, 0, newinfo
, &ret
))
1197 duprintf("table->private->number = %u\n",
1198 table
->private->number
);
1200 /* save number of initial entries */
1201 table
->private->initial_entries
= table
->private->number
;
1203 rwlock_init(&table
->lock
);
1204 list_prepend(&arpt_tables
, table
);
1215 void arpt_unregister_table(struct arpt_table
*table
)
1218 LIST_DELETE(&arpt_tables
, table
);
1221 /* Decrease module usage counts and free resources */
1222 ARPT_ENTRY_ITERATE(table
->private->entries
, table
->private->size
,
1223 cleanup_entry
, NULL
);
1224 vfree(table
->private);
1227 /* The built-in targets: standard (NULL) and error. */
1228 static struct arpt_target arpt_standard_target
= {
1229 .name
= ARPT_STANDARD_TARGET
,
1232 static struct arpt_target arpt_error_target
= {
1233 .name
= ARPT_ERROR_TARGET
,
1234 .target
= arpt_error
,
1237 static struct nf_sockopt_ops arpt_sockopts
= {
1239 .set_optmin
= ARPT_BASE_CTL
,
1240 .set_optmax
= ARPT_SO_SET_MAX
+1,
1241 .set
= do_arpt_set_ctl
,
1242 .get_optmin
= ARPT_BASE_CTL
,
1243 .get_optmax
= ARPT_SO_GET_MAX
+1,
1244 .get
= do_arpt_get_ctl
,
1247 #ifdef CONFIG_PROC_FS
1248 static inline int print_name(const struct arpt_table
*t
,
1249 off_t start_offset
, char *buffer
, int length
,
1250 off_t
*pos
, unsigned int *count
)
1252 if ((*count
)++ >= start_offset
) {
1253 unsigned int namelen
;
1255 namelen
= sprintf(buffer
+ *pos
, "%s\n", t
->name
);
1256 if (*pos
+ namelen
> length
) {
1257 /* Stop iterating */
1265 static int arpt_get_tables(char *buffer
, char **start
, off_t offset
, int length
)
1268 unsigned int count
= 0;
1270 if (down_interruptible(&arpt_mutex
) != 0)
1273 LIST_FIND(&arpt_tables
, print_name
, struct arpt_table
*,
1274 offset
, buffer
, length
, &pos
, &count
);
1278 /* `start' hack - see fs/proc/generic.c line ~105 */
1279 *start
=(char *)((unsigned long)count
-offset
);
1282 #endif /*CONFIG_PROC_FS*/
1284 static int __init
init(void)
1288 /* Noone else will be downing sem now, so we won't sleep */
1290 list_append(&arpt_target
, &arpt_standard_target
);
1291 list_append(&arpt_target
, &arpt_error_target
);
1294 /* Register setsockopt */
1295 ret
= nf_register_sockopt(&arpt_sockopts
);
1297 duprintf("Unable to register sockopts.\n");
1301 #ifdef CONFIG_PROC_FS
1303 struct proc_dir_entry
*proc
;
1305 proc
= proc_net_create("arp_tables_names", 0, arpt_get_tables
);
1307 nf_unregister_sockopt(&arpt_sockopts
);
1310 proc
->owner
= THIS_MODULE
;
1314 printk("arp_tables: (C) 2002 David S. Miller\n");
1318 static void __exit
fini(void)
1320 nf_unregister_sockopt(&arpt_sockopts
);
1321 #ifdef CONFIG_PROC_FS
1322 proc_net_remove("arp_tables_names");
module_init(init);
module_exit(fini);

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);
EXPORT_SYMBOL(arpt_register_target);
EXPORT_SYMBOL(arpt_unregister_target);