/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU	65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU	68		/* RFC 791 */
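/* Illustrative sketch (not an existing kernel helper): an MTU value
 * learned from the network would be kept within these bounds before use:
 *
 *	if (mtu < IPV4_MIN_MTU)
 *		mtu = IPV4_MIN_MTU;
 *	if (mtu > IPV4_MAX_PMTU)
 *		mtu = IPV4_MAX_PMTU;
 */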

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options */
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
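/* Since ihl counts 32-bit words, ip_hdrlen() yields the header length in
 * bytes (20 for an option-less header).  Sketch of a typical use, locating
 * the transport header behind the IP header plus any options:
 *
 *	struct tcphdr *th = (struct tcphdr *)
 *		(skb_network_header(skb) + ip_hdrlen(skb));
 */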

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}
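/* Typical sendmsg-path usage (a sketch patterned after datagram senders
 * such as udp_sendmsg): seed the cookie from socket defaults, then let
 * ancillary data override individual fields:
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (unlikely(err))
 *			return err;
 *	}
 */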

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}
/* Special input handler for packets caught by the router alert option.
   They are selected only by protocol field and then processed like
   local ones; but only if someone wants them!  Otherwise, a router
   not running rsvpd would kill RSVP.

   What user level does with them is a user level problem.  I have no
   idea how it would masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough e.g. to forward mtrace
   requests sent to a multicast group, so they reach the destination's
   designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
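/* These flags share iph->frag_off with the 13-bit fragment offset, which
 * is stored in units of 8 bytes.  A sketch of decoding a received header
 * (illustrative, not a kernel helper):
 *
 *	u16 frag_off = ntohs(iph->frag_off);
 *	bool df = frag_off & IP_DF;
 *	unsigned int offset_bytes = (frag_off & IP_OFFSET) * 8;
 */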

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime */

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
				struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec	iov[1];
	int		flags;
	__wsum		csum;
	int		csumoffset;	/* u16 offset of csum in iov[0].iov_base */
					/* -1 if not needed */
	int		bound_dev_if;
	u8		tos;
	kuid_t		uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val)	SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val)	__SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd)	__SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
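/* Example of how a /proc handler might fold all per-cpu counters (a
 * sketch patterned after net/ipv4/proc.c; snmp4_ipstats_list is assumed
 * to be a NULL-terminated {name, entry} table):
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */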

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
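/* Sketch of the intended use: ephemeral-port pickers walk the local
 * range and skip reserved ports, roughly:
 *
 *	inet_get_local_port_range(net, &low, &high);
 *	for (port = low; port <= high; port++) {
 *		if (inet_is_local_reserved_port(net, port))
 *			continue;
 *		... try to bind this port ...
 *	}
 */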

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
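/* Any packet with MF set or a nonzero offset is (part of) a fragment.
 * Sketch of a typical caller that defragments before parsing further
 * (patterned after users of ip_check_defrag()):
 *
 *	if (ip_is_fragment(ip_hdr(skb))) {
 *		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
 *		if (!skb)
 *			return 0;	// queued for reassembly
 *	}
 */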

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
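/* The checksum update above is the RFC 1141/1624 incremental technique:
 * decrementing TTL (the high byte of its 16-bit word) raises the stored
 * complemented sum by 0x0100, and the (check >= 0xFFFF) term folds the
 * end-around carry.  Sketch of the forwarding-path usage (cf. ip_forward()):
 *
 *	if (ip_hdr(skb)->ttl <= 1)
 *		goto too_many_hops;	// send ICMP_TIME_EXCEEDED
 *	...
 *	ip_decrease_ttl(ip_hdr(skb));
 */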

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
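/* Per RFC 1112, the low 23 bits of the group address go into the
 * 01:00:5e MAC prefix.  Worked example: 224.1.1.1 (0xe0010101) maps to
 * 01:00:5e:01:01:01; note that 224.129.1.1 maps to the same MAC, since
 * the top 9 bits of the host group are dropped.
 */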

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};
/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
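/* Sketch of the intended use: the conntrack ranges above are USHRT_MAX
 * wide so that a zone id can be folded into the defrag user, and
 * membership then becomes a range check, e.g.:
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		... treat as a conntrack input user ...
 */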

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

#endif /* _IP_H */