From c40bba6922b470c0fd0c7a7b8b09584527c468e9 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 6 Aug 2009 14:38:13 -0700 Subject: [PATCH] update kernel headers to 2.6.31-rc5 --- include/linux/gen_stats.h | 2 +- include/linux/if.h | 4 + include/linux/if_addr.h | 2 + include/linux/if_ether.h | 37 ++- include/linux/if_link.h | 2 + include/linux/if_tunnel.h | 21 +- include/linux/if_vlan.h | 298 ++++++++++++++++++ include/linux/inet_diag.h | 16 + include/linux/neighbour.h | 1 + include/linux/netdevice.h | 92 ++++++ include/linux/netfilter/x_tables.h | 465 +++++++++++++++++++++++++++- include/linux/netfilter/xt_tcpudp.h | 20 +- include/linux/netfilter_ipv4.h | 8 + include/linux/netlink.h | 119 +++++++ include/linux/rtnetlink.h | 165 ++++++++++ include/linux/socket.h | 40 ++- include/linux/types.h | 62 +++- include/linux/xfrm.h | 4 +- 18 files changed, 1308 insertions(+), 50 deletions(-) create mode 100644 include/linux/netdevice.h diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h index 13f4e7460..0ffa41df0 100644 --- a/include/linux/gen_stats.h +++ b/include/linux/gen_stats.h @@ -22,7 +22,7 @@ struct gnet_stats_basic { __u64 bytes; __u32 packets; -}; +} __attribute__ ((packed)); /** * struct gnet_stats_rate_est - rate estimator diff --git a/include/linux/if.h b/include/linux/if.h index 6d7bd525f..010a0d290 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -66,6 +66,10 @@ #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ #define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ +#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */ +#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to + * release skb->dst + */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_addr.h b/include/linux/if_addr.h index 08ea9800b..a60c821be 100644 --- a/include/linux/if_addr.h +++ b/include/linux/if_addr.h @@ -55,7 +55,9 @@ struct ifa_cacheinfo }; /* backwards compatibility for userspace */ +#ifndef __KERNEL__ #define IFA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifaddrmsg)))) #define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) +#endif #endif diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 977c4de9b..70fdba2bb 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -17,7 +17,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ - + #ifndef _LINUX_IF_ETHER_H #define _LINUX_IF_ETHER_H @@ -25,7 +25,7 @@ /* * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble - * and FCS/CRC (frame check sequence). + * and FCS/CRC (frame check sequence). */ #define ETH_ALEN 6 /* Octets in one ethernet addr */ @@ -78,12 +78,15 @@ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ +#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* * Non DIX types. Won't clash for 1500 types. */ - + #define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ #define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ #define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) 
*/ @@ -105,16 +108,42 @@ #define ETH_P_DSA 0x001B /* Distributed Switch Arch. */ #define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ #define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ +#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ /* * This is an Ethernet frame header. */ - + struct ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ __be16 h_proto; /* packet type ID field */ } __attribute__((packed)); +#ifdef __KERNEL__ +#include + +static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_mac_header(skb); +} + +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); + +#ifdef CONFIG_SYSCTL +extern struct ctl_table ether_table[]; +#endif + +extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); + +/* + * Display a 6 byte device address (MAC) in a readable format. + */ +extern char *print_mac(char *buf, const unsigned char *addr); +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_BUF_SIZE 18 +#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused + +#endif #endif /* _LINUX_IF_ETHER_H */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index b0b9e8a66..176c5182c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -88,8 +88,10 @@ enum #define IFLA_MAX (__IFLA_MAX - 1) /* backwards compatibility for userspace */ +#ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) +#endif /* ifi_flags. diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 2918c6fac..5eb9b0f85 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -3,6 +3,9 @@ #include +#ifdef __KERNEL__ +#include +#endif #define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0) #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) @@ -13,14 +16,14 @@ #define SIOCDELPRL (SIOCDEVPRIVATE + 6) #define SIOCCHGPRL (SIOCDEVPRIVATE + 7) -#define GRE_CSUM __constant_htons(0x8000) -#define GRE_ROUTING __constant_htons(0x4000) -#define GRE_KEY __constant_htons(0x2000) -#define GRE_SEQ __constant_htons(0x1000) -#define GRE_STRICT __constant_htons(0x0800) -#define GRE_REC __constant_htons(0x0700) -#define GRE_FLAGS __constant_htons(0x00F8) -#define GRE_VERSION __constant_htons(0x0007) +#define GRE_CSUM __cpu_to_be16(0x8000) +#define GRE_ROUTING __cpu_to_be16(0x4000) +#define GRE_KEY __cpu_to_be16(0x2000) +#define GRE_SEQ __cpu_to_be16(0x1000) +#define GRE_STRICT __cpu_to_be16(0x0800) +#define GRE_REC __cpu_to_be16(0x0700) +#define GRE_FLAGS __cpu_to_be16(0x00F8) +#define GRE_VERSION __cpu_to_be16(0x0007) struct ip_tunnel_parm { @@ -41,7 +44,7 @@ struct ip_tunnel_prl { __u16 flags; __u16 __reserved; __u32 datalen; - __u32 __reserved2; + __u32 rs_delay; /* data follows */ }; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 2dc4a57de..7ff9af1d0 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -13,6 +13,304 @@ #ifndef _LINUX_IF_VLAN_H_ #define _LINUX_IF_VLAN_H_ +#ifdef __KERNEL__ +#include +#include + +#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) + * that VLAN requires. + */ +#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */ +#define VLAN_ETH_HLEN 18 /* Total octets in header. */ +#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ + +/* + * According to 802.3ac, the packet can be 4 bytes longer. 
--Klika Jan + */ +#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ + +/* + * struct vlan_hdr - vlan header + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +/** + * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) + * @h_dest: destination ethernet address + * @h_source: source ethernet address + * @h_vlan_proto: ethernet protocol (always 0x8100) + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +#include + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb_mac_header(skb); +} + +#define VLAN_VID_MASK 0xfff + +/* found in socket.c */ +extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); + +/* if this changes, algorithm will have to be reworked because this + * depends on completely exhausting the VLAN identifier space. Thus + * it gives constant time look-up, but in many cases it wastes memory. + */ +#define VLAN_GROUP_ARRAY_LEN 4096 +#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 +#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) + +struct vlan_group { + struct net_device *real_dev; /* The ethernet(like) device + * the vlan is attached to. + */ + unsigned int nr_vlans; + struct hlist_node hlist; /* linked list */ + struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; + struct rcu_head rcu; +}; + +static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, + u16 vlan_id) +{ + struct net_device **array; + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + return array ? 
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; +} + +static inline void vlan_group_set_device(struct vlan_group *vg, + u16 vlan_id, + struct net_device *dev) +{ + struct net_device **array; + if (!vg) + return; + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; +} + +#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci) +#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci) + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); +extern u16 vlan_dev_vlan_id(const struct net_device *dev); + +extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, + u16 vlan_tci, int polling); +extern int vlan_hwaccel_do_receive(struct sk_buff *skb); +extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb); +extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci); + +#else +static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} + +static inline u16 vlan_dev_vlan_id(const struct net_device *dev) +{ + BUG(); + return 0; +} + +static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, + u16 vlan_tci, int polling) +{ + BUG(); + return NET_XMIT_SUCCESS; +} + +static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) +{ + return 0; +} + +static inline int vlan_gro_receive(struct napi_struct *napi, + struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb) +{ + return NET_RX_DROP; +} + +static inline int vlan_gro_frags(struct napi_struct *napi, + struct vlan_group *grp, unsigned int vlan_tci) +{ + return NET_RX_DROP; +} +#endif + +/** + * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration + * @skb: buffer + * @grp: vlan group + * @vlan_tci: VLAN TCI as received from the card + */ +static inline int vlan_hwaccel_rx(struct sk_buff *skb, + struct vlan_group *grp, + u16 vlan_tci) +{ + return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0); +} + +/** + * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration + * @skb: buffer + * @grp: vlan group + * @vlan_tci: VLAN TCI as received from the card + */ +static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb, + struct vlan_group *grp, + u16 vlan_tci) +{ + return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1); +} + +/** + * __vlan_put_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +{ + struct vlan_ethhdr *veth; + + if (skb_cow_head(skb, VLAN_HLEN) < 0) { + kfree_skb(skb); + return NULL; + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the beginning of the new header. 
*/ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN); + skb->mac_header -= VLAN_HLEN; + + /* first, the ethernet type */ + veth->h_vlan_proto = htons(ETH_P_8021Q); + + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); + + skb->protocol = htons(ETH_P_8021Q); + + return skb; +} + +/** + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + */ +static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ + skb->vlan_tci = vlan_tci; + return skb; +} + +#define HAVE_VLAN_PUT_TAG + +/** + * vlan_put_tag - inserts VLAN tag according to device features + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Assumes skb->dev is the target that will xmit this frame. + * Returns a VLAN tagged skb. + */ +static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +{ + if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + return __vlan_hwaccel_put_tag(skb, vlan_tci); + } else { + return __vlan_put_tag(skb, vlan_tci); + } +} + +/** + * __vlan_get_tag - get the VLAN ID that is part of the payload + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if the skb is not of VLAN type + */ +static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + + if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { + return -EINVAL; + } + + *vlan_tci = ntohs(veth->h_vlan_TCI); + return 0; +} + +/** + * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if @skb->vlan_tci is not set correctly + */ +static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, + u16 *vlan_tci) +{ + if (vlan_tx_tag_present(skb)) { + *vlan_tci = skb->vlan_tci; + return 0; + } else { + *vlan_tci = 0; + return -EINVAL; + } +} + +#define HAVE_VLAN_GET_TAG + +/** + * vlan_get_tag - get the VLAN ID from the skb + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if the skb is not VLAN tagged + */ +static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + return __vlan_hwaccel_get_tag(skb, vlan_tci); + } else { + return __vlan_get_tag(skb, vlan_tci); + } +} + +#endif /* __KERNEL__ */ /* VLAN IOCTLs are found in sockios.h */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 1107ed2b9..bc8c49022 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -120,5 +120,21 @@ struct tcpvegas_info { __u32 tcpv_minrtt; }; +#ifdef __KERNEL__ +struct sock; +struct inet_hashinfo; + +struct inet_diag_handler { + struct inet_hashinfo *idiag_hashinfo; + void (*idiag_get_info)(struct sock *sk, + struct inet_diag_msg *r, + void *info); + __u16 idiag_info_size; + __u16 idiag_type; +}; + +extern int inet_diag_register(const struct inet_diag_handler *handler); +extern void inet_diag_unregister(const struct inet_diag_handler *handler); +#endif /* __KERNEL__ */ #endif /* _INET_DIAG_H_ */ diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index 8730d5dae..12c9de138 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h @@ -31,6 +31,7 @@ enum * Neighbor Cache Entry Flags */ +#define NTF_USE 0x01 #define NTF_PROXY 0x08 /* == ATF_PUBL */ #define NTF_ROUTER 0x80 diff --git 
a/include/linux/netdevice.h b/include/linux/netdevice.h new file mode 100644 index 000000000..2faed2a97 --- /dev/null +++ b/include/linux/netdevice.h @@ -0,0 +1,92 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Interfaces handler. + * + * Version: @(#)dev.h 1.0.10 08/12/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, + * Corey Minyard + * Donald J. Becker, + * Alan Cox, + * Bjorn Ekwall. + * Pekka Riikonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Moved to /usr/include/linux for NET3 + */ +#ifndef _LINUX_NETDEVICE_H +#define _LINUX_NETDEVICE_H + +#include +#include +#include + + +#define MAX_ADDR_LEN 32 /* Largest hardware address length */ + +/* Driver transmit return codes */ +#define NETDEV_TX_OK 0 /* driver took care of packet */ +#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ +#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ + + +/* + * Network device statistics. Akin to the 2.0 ether stats but + * with byte counters. + */ + +struct net_device_stats +{ + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* no space in linux buffers */ + unsigned long tx_dropped; /* no space available in linux */ + unsigned long multicast; /* multicast packets received */ + unsigned long collisions; + + /* detailed rx_errors: */ + unsigned long rx_length_errors; + unsigned long rx_over_errors; /* receiver ring buff overflow */ + unsigned long rx_crc_errors; /* recved pkt with crc error */ + unsigned long rx_frame_errors; /* recv'd frame alignment error */ + unsigned long rx_fifo_errors; /* recv'r fifo overrun */ + unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + + /* for cslip etc */ + unsigned long rx_compressed; + unsigned long tx_compressed; +}; + + +/* Media selection options. 
*/ +enum { + IF_PORT_UNKNOWN = 0, + IF_PORT_10BASE2, + IF_PORT_10BASET, + IF_PORT_AUI, + IF_PORT_100BASET, + IF_PORT_100BASETX, + IF_PORT_100BASEFX +}; + + +#endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 89eae5ce1..1030b7593 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -1,6 +1,8 @@ #ifndef _X_TABLES_H #define _X_TABLES_H +#include + #define XT_FUNCTION_MAXNAMELEN 30 #define XT_TABLE_MAXNAMELEN 32 @@ -8,22 +10,22 @@ struct xt_entry_match { union { struct { - u_int16_t match_size; + __u16 match_size; /* Used by userspace */ char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; } user; struct { - u_int16_t match_size; + __u16 match_size; /* Used inside the kernel */ struct xt_match *match; } kernel; /* Total length */ - u_int16_t match_size; + __u16 match_size; } u; unsigned char data[0]; @@ -33,22 +35,22 @@ struct xt_entry_target { union { struct { - u_int16_t target_size; + __u16 target_size; /* Used by userspace */ char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; } user; struct { - u_int16_t target_size; + __u16 target_size; /* Used inside the kernel */ struct xt_target *target; } kernel; /* Total length */ - u_int16_t target_size; + __u16 target_size; } u; unsigned char data[0]; @@ -74,7 +76,7 @@ struct xt_get_revision { char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; }; /* CONTINUE verdict for targets */ @@ -90,10 +92,10 @@ struct xt_get_revision */ struct _xt_align { - u_int8_t u8; - u_int16_t u16; - u_int32_t u32; - u_int64_t u64; + __u8 u8; + __u16 u16; + __u32 u32; + __u64 u64; }; #define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \ @@ -109,7 +111,7 @@ struct _xt_align struct xt_counters { - u_int64_t pcnt, bcnt; /* Packet and byte counters */ + __u64 pcnt, bcnt; /* Packet and byte counters */ }; /* The argument to IPT_SO_ADD_COUNTERS. */ @@ -169,5 +171,440 @@ struct xt_counters_info #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) 
\ XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) +#ifdef __KERNEL__ + +#include + +/** + * struct xt_match_param - parameters for match extensions' match functions + * + * @in: input netdevice + * @out: output netdevice + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @fragoff: packet is a fragment, this is the data offset + * @thoff: position of transport header relative to skb->data + * @hook: hook number given packet came from + * @family: Actual NFPROTO_* through which the function is invoked + * (helpful when match->family == NFPROTO_UNSPEC) + * @hotdrop: drop packet if we had inspection problems + */ +struct xt_match_param { + const struct net_device *in, *out; + const struct xt_match *match; + const void *matchinfo; + int fragoff; + unsigned int thoff; + unsigned int hooknum; + u_int8_t family; + bool *hotdrop; +}; + +/** + * struct xt_mtchk_param - parameters for match extensions' + * checkentry functions + * + * @table: table the rule is tried to be inserted into + * @entryinfo: the family-specific rule data + * (struct ipt_ip, ip6t_ip, ebt_entry) + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @hook_mask: via which hooks the new rule is reachable + */ +struct xt_mtchk_param { + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Match destructor parameters */ +struct xt_mtdtor_param { + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +/** + * struct xt_target_param - parameters for target extensions' target functions + * + * @hooknum: hook through which this target was invoked + * @target: struct xt_target through which this function was invoked + * @targinfo: per-target data + * + * Other fields see above. + */ +struct xt_target_param { + const struct net_device *in, *out; + unsigned int hooknum; + const struct xt_target *target; + const void *targinfo; + u_int8_t family; +}; + +/** + * struct xt_tgchk_param - parameters for target extensions' + * checkentry functions + * + * @entryinfo: the family-specific rule data + * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) + * + * Other fields see above. + */ +struct xt_tgchk_param { + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Target destructor parameters */ +struct xt_tgdtor_param { + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_match +{ + struct list_head list; + + const char name[XT_FUNCTION_MAXNAMELEN-1]; + u_int8_t revision; + + /* Return true or false: return FALSE and set *hotdrop = 1 to + force immediate packet drop. */ + /* Arguments changed since 2.6.9, as this must now handle + non-linear skb, using skb_header_pointer and + skb_ip_make_writable. */ + bool (*match)(const struct sk_buff *skb, + const struct xt_match_param *); + + /* Called when user tries to insert an entry of this type. */ + bool (*checkentry)(const struct xt_mtchk_param *); + + /* Called when entry of this type deleted. 
*/ + void (*destroy)(const struct xt_mtdtor_param *); + + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, void *src); + int (*compat_to_user)(void __user *dst, void *src); + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + /* Free to use by each match */ + unsigned long data; + + const char *table; + unsigned int matchsize; + unsigned int compatsize; + unsigned int hooks; + unsigned short proto; + + unsigned short family; +}; + +/* Registration hooks for targets. */ +struct xt_target +{ + struct list_head list; + + const char name[XT_FUNCTION_MAXNAMELEN-1]; + + /* Returns verdict. Argument order changed since 2.6.9, as this + must now handle non-linear skbs, using skb_copy_bits and + skb_ip_make_writable. */ + unsigned int (*target)(struct sk_buff *skb, + const struct xt_target_param *); + + /* Called when user tries to insert an entry of this type: + hook_mask is a bitmask of hooks from which it can be + called. */ + /* Should return true or false. */ + bool (*checkentry)(const struct xt_tgchk_param *); + + /* Called when entry of this type deleted. */ + void (*destroy)(const struct xt_tgdtor_param *); + + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, void *src); + int (*compat_to_user)(void __user *dst, void *src); + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + const char *table; + unsigned int targetsize; + unsigned int compatsize; + unsigned int hooks; + unsigned short proto; + + unsigned short family; + u_int8_t revision; +}; + +/* Furniture shopping... */ +struct xt_table +{ + struct list_head list; + + /* What hooks you will enter on */ + unsigned int valid_hooks; + + /* Man behind the curtain... */ + struct xt_table_info *private; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + u_int8_t af; /* address/protocol family */ + + /* A unique name... */ + const char name[XT_TABLE_MAXNAMELEN]; +}; + +#include + +/* The table itself */ +struct xt_table_info +{ + /* Size per table */ + unsigned int size; + /* Number of entries: FIXME. --RR */ + unsigned int number; + /* Initial number of entries. 
Needed for module usage count */ + unsigned int initial_entries; + + /* Entry points and underflows */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* ipt_entry tables: one per CPU */ + /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ + void *entries[1]; +}; + +#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ + + nr_cpu_ids * sizeof(char *)) +extern int xt_register_target(struct xt_target *target); +extern void xt_unregister_target(struct xt_target *target); +extern int xt_register_targets(struct xt_target *target, unsigned int n); +extern void xt_unregister_targets(struct xt_target *target, unsigned int n); + +extern int xt_register_match(struct xt_match *target); +extern void xt_unregister_match(struct xt_match *target); +extern int xt_register_matches(struct xt_match *match, unsigned int n); +extern void xt_unregister_matches(struct xt_match *match, unsigned int n); + +extern int xt_check_match(struct xt_mtchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); +extern int xt_check_target(struct xt_tgchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); + +extern struct xt_table *xt_register_table(struct net *net, + struct xt_table *table, + struct xt_table_info *bootstrap, + struct xt_table_info *newinfo); +extern void *xt_unregister_table(struct xt_table *table); + +extern struct xt_table_info *xt_replace_table(struct xt_table *table, + unsigned int num_counters, + struct xt_table_info *newinfo, + int *error); + +extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_request_find_target(u8 af, const char *name, + u8 revision); +extern int xt_find_revision(u8 af, const char *name, u8 revision, + int target, int *err); + +extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, + const char *name); +extern void xt_table_unlock(struct xt_table *t); + +extern int xt_proto_init(struct net *net, u_int8_t af); +extern void xt_proto_fini(struct net *net, u_int8_t af); + +extern struct xt_table_info *xt_alloc_table_info(unsigned int size); +extern void xt_free_table_info(struct xt_table_info *info); + +/* + * Per-CPU spinlock associated with per-cpu table entries, and + * with a counter for the "reading" side that allows a recursive + * reader to avoid taking the lock and deadlocking. + * + * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu. + * It needs to ensure that the rules are not being changed while the packet + * is being processed. In some cases, the read lock will be acquired + * twice on the same CPU; this is okay because of the count. + * + * "writing" is used when reading counters. + * During replace any readers that are using the old tables have to complete + * before freeing the old table. This is handled by the write locking + * necessary for reading the counters. + */ +struct xt_info_lock { + spinlock_t lock; + unsigned char readers; +}; +DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); + +/* + * Note: we need to ensure that preemption is disabled before acquiring + * the per-cpu-variable, so we do it as a two step process rather than + * using "spin_lock_bh()". 
+ * + * We _also_ need to disable bottom half processing before updating our + * nesting count, to make sure that the only kind of re-entrancy is this + * code being called by itself: since the count+lock is not an atomic + * operation, we can allow no races. + * + * _Only_ that special combination of being per-cpu and never getting + * re-entered asynchronously means that the count is safe. + */ +static inline void xt_info_rdlock_bh(void) +{ + struct xt_info_lock *lock; + + local_bh_disable(); + lock = &__get_cpu_var(xt_info_locks); + if (likely(!lock->readers++)) + spin_lock(&lock->lock); +} + +static inline void xt_info_rdunlock_bh(void) +{ + struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); + + if (likely(!--lock->readers)) + spin_unlock(&lock->lock); + local_bh_enable(); +} + +/* + * The "writer" side needs to get exclusive access to the lock, + * regardless of readers. This must be called with bottom half + * processing (and thus also preemption) disabled. + */ +static inline void xt_info_wrlock(unsigned int cpu) +{ + spin_lock(&per_cpu(xt_info_locks, cpu).lock); +} + +static inline void xt_info_wrunlock(unsigned int cpu) +{ + spin_unlock(&per_cpu(xt_info_locks, cpu).lock); +} + +/* + * This helper is performance critical and must be inlined + */ +static inline unsigned long ifname_compare_aligned(const char *_a, + const char *_b, + const char *_mask) +{ + const unsigned long *a = (const unsigned long *)_a; + const unsigned long *b = (const unsigned long *)_b; + const unsigned long *mask = (const unsigned long *)_mask; + unsigned long ret; + + ret = (a[0] ^ b[0]) & mask[0]; + if (IFNAMSIZ > sizeof(unsigned long)) + ret |= (a[1] ^ b[1]) & mask[1]; + if (IFNAMSIZ > 2 * sizeof(unsigned long)) + ret |= (a[2] ^ b[2]) & mask[2]; + if (IFNAMSIZ > 3 * sizeof(unsigned long)) + ret |= (a[3] ^ b[3]) & mask[3]; + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + return ret; +} + +#ifdef CONFIG_COMPAT +#include + +struct compat_xt_entry_match +{ + union { + struct { + u_int16_t match_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t match_size; + compat_uptr_t match; + } kernel; + u_int16_t match_size; + } u; + unsigned char data[0]; +}; + +struct compat_xt_entry_target +{ + union { + struct { + u_int16_t target_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t target_size; + compat_uptr_t target; + } kernel; + u_int16_t target_size; + } u; + unsigned char data[0]; +}; + +/* FIXME: this works only on 32 bit tasks + * need to change whole approach in order to calculate align as function of + * current task alignment */ + +struct compat_xt_counters +{ +#if defined(CONFIG_X86_64) || defined(CONFIG_IA64) + u_int32_t cnt[4]; +#else + u_int64_t cnt[2]; +#endif +}; + +struct compat_xt_counters_info +{ + char name[XT_TABLE_MAXNAMELEN]; + compat_uint_t num_counters; + struct compat_xt_counters counters[0]; +}; + +#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \ + & ~(__alignof__(struct compat_xt_counters)-1)) + +extern void xt_compat_lock(u_int8_t af); +extern void xt_compat_unlock(u_int8_t af); + +extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta); +extern void xt_compat_flush_offsets(u_int8_t af); +extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset); + +extern int xt_compat_match_offset(const struct xt_match *match); +extern int xt_compat_match_from_user(struct xt_entry_match *m, + void **dstptr, unsigned int *size); 
+extern int xt_compat_match_to_user(struct xt_entry_match *m, + void __user **dstptr, unsigned int *size); + +extern int xt_compat_target_offset(const struct xt_target *target); +extern void xt_compat_target_from_user(struct xt_entry_target *t, + void **dstptr, unsigned int *size); +extern int xt_compat_target_to_user(struct xt_entry_target *t, + void __user **dstptr, unsigned int *size); + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ #endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h index 78bc65f11..a490a0bc1 100644 --- a/include/linux/netfilter/xt_tcpudp.h +++ b/include/linux/netfilter/xt_tcpudp.h @@ -1,15 +1,17 @@ #ifndef _XT_TCPUDP_H #define _XT_TCPUDP_H +#include + /* TCP matching stuff */ struct xt_tcp { - u_int16_t spts[2]; /* Source port range. */ - u_int16_t dpts[2]; /* Destination port range. */ - u_int8_t option; /* TCP Option iff non-zero*/ - u_int8_t flg_mask; /* TCP flags mask byte */ - u_int8_t flg_cmp; /* TCP flags compare byte */ - u_int8_t invflags; /* Inverse flags */ + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 option; /* TCP Option iff non-zero*/ + __u8 flg_mask; /* TCP flags mask byte */ + __u8 flg_cmp; /* TCP flags compare byte */ + __u8 invflags; /* Inverse flags */ }; /* Values for "inv" field in struct ipt_tcp. */ @@ -22,9 +24,9 @@ struct xt_tcp /* UDP matching stuff */ struct xt_udp { - u_int16_t spts[2]; /* Source port range. */ - u_int16_t dpts[2]; /* Destination port range. */ - u_int8_t invflags; /* Inverse flags */ + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 invflags; /* Inverse flags */ }; /* Values for "invflags" field in struct ipt_udp. */ diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 4d7ba3e4b..29c7727ff 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -8,6 +8,7 @@ #include /* only for userspace compatibility */ +#ifndef __KERNEL__ /* IP Cache bits. */ /* Src IP address. */ #define NFC_IP_SRC 0x0001 @@ -48,6 +49,7 @@ /* Packets about to hit the wire. */ #define NF_IP_POST_ROUTING 4 #define NF_IP_NUMHOOKS 5 +#endif /* ! __KERNEL__ */ enum nf_ip_hook_priorities { NF_IP_PRI_FIRST = INT_MIN, @@ -71,5 +73,11 @@ enum nf_ip_hook_priorities { /* 2.4 firewalling went 64 through 67. 
*/ #define SO_ORIGINAL_DST 80 +#ifdef __KERNEL__ +extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); +extern int ip_xfrm_me_harder(struct sk_buff *skb); +extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); +#endif /*__KERNEL__*/ #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5593f0558..5ba398e90 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -103,6 +103,8 @@ struct nlmsgerr #define NETLINK_ADD_MEMBERSHIP 1 #define NETLINK_DROP_MEMBERSHIP 2 #define NETLINK_PKTINFO 3 +#define NETLINK_BROADCAST_ERROR 4 +#define NETLINK_NO_ENOBUFS 5 struct nl_pktinfo { @@ -149,5 +151,122 @@ struct nlattr #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) +#ifdef __KERNEL__ + +#include +#include + +static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) +{ + return (struct nlmsghdr *)skb->data; +} + +struct netlink_skb_parms +{ + struct ucred creds; /* Skb credentials */ + __u32 pid; + __u32 dst_group; + kernel_cap_t eff_cap; + __u32 loginuid; /* Login (audit) uid */ + __u32 sessionid; /* Session id (audit) */ + __u32 sid; /* SELinux security id */ +}; + +#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) +#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) + + +extern struct sock *netlink_kernel_create(struct net *net, + int unit,unsigned int groups, + void (*input)(struct sk_buff *skb), + struct mutex *cb_mutex, + struct module *module); +extern void netlink_kernel_release(struct sock *sk); +extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); +extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); +extern int netlink_has_listeners(struct sock *sk, unsigned int group); +extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); +extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, + __u32 group, gfp_t allocation); +extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); +extern int netlink_register_notifier(struct notifier_block *nb); +extern int netlink_unregister_notifier(struct notifier_block *nb); + +/* finegrained unicast helpers: */ +struct sock *netlink_getsockbyfilp(struct file *filp); +int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + long *timeo, struct sock *ssk); +void netlink_detachskb(struct sock *sk, struct sk_buff *skb); +int netlink_sendskb(struct sock *sk, struct sk_buff *skb); + +/* + * skb should fit one page. This choice is good for headerless malloc. + * But we should limit to 8K so that userspace does not have to + * use enormous buffer sizes on recvmsg() calls just to avoid + * MSG_TRUNC when PAGE_SIZE is very large. 
+ */ +#if PAGE_SIZE < 8192UL +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE) +#else +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL) +#endif + +#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN) + + +struct netlink_callback +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); + int (*done)(struct netlink_callback *cb); + int family; + long args[6]; +}; + +struct netlink_notify +{ + struct net *net; + int pid; + int protocol; +}; + +static __inline__ struct nlmsghdr * +__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) +{ + struct nlmsghdr *nlh; + int size = NLMSG_LENGTH(len); + + nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size)); + nlh->nlmsg_type = type; + nlh->nlmsg_len = size; + nlh->nlmsg_flags = flags; + nlh->nlmsg_pid = pid; + nlh->nlmsg_seq = seq; + if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0) + memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); + return nlh; +} + +#define NLMSG_NEW(skb, pid, seq, type, len, flags) \ +({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \ + goto nlmsg_failure; \ + __nlmsg_put(skb, pid, seq, type, len, flags); }) + +#define NLMSG_PUT(skb, pid, seq, type, len) \ + NLMSG_NEW(skb, pid, seq, type, len, 0) + +extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + struct nlmsghdr *nlh, + int (*dump)(struct sk_buff *skb, struct netlink_callback*), + int (*done)(struct netlink_callback*)); + + +#define NL_NONROOT_RECV 0x1 +#define NL_NONROOT_SEND 0x2 +extern void netlink_set_nonroot(int protocol, unsigned flag); + +#endif /* __KERNEL__ */ #endif /* __LINUX_NETLINK_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 63d1c697e..ba3254ecf 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -524,6 +524,7 @@ enum #define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1) +#ifndef __KERNEL__ /* RTnetlink multicast groups - backwards compatibility for userspace */ #define RTMGRP_LINK 1 #define RTMGRP_NOTIFY 2 @@ -544,6 +545,7 @@ enum #define RTMGRP_DECnet_ROUTE 0x4000 #define RTMGRP_IPV6_PREFIX 0x20000 +#endif /* RTnetlink multicast groups */ enum rtnetlink_groups { @@ -609,6 +611,169 @@ struct tcamsg /* End of information exported to user level */ +#ifdef __KERNEL__ + +#include + +static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) +{ + int len = strlen(str) + 1; + return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len); +} + +extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); +extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); +extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); +extern void rtnl_set_sk_err(struct net *net, u32 group, int error); +extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); +extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, + u32 id, u32 ts, u32 tsage, long expires, + u32 error); + +extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data); + +#define RTA_PUT(skb, attrtype, attrlen, data) \ +({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ + goto rtattr_failure; \ + __rta_fill(skb, attrtype, attrlen, data); }) + +#define RTA_APPEND(skb, attrlen, data) \ +({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ + goto rtattr_failure; \ + memcpy(skb_put(skb, attrlen), data, attrlen); }) + +#define 
RTA_PUT_NOHDR(skb, attrlen, data) \ +({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \ + memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \ + RTA_ALIGN(attrlen) - attrlen); }) + +#define RTA_PUT_U8(skb, attrtype, value) \ +({ u8 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); }) + +#define RTA_PUT_U16(skb, attrtype, value) \ +({ u16 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); }) + +#define RTA_PUT_U32(skb, attrtype, value) \ +({ u32 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); }) + +#define RTA_PUT_U64(skb, attrtype, value) \ +({ u64 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); }) + +#define RTA_PUT_SECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, (value) / HZ) + +#define RTA_PUT_MSECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value)) + +#define RTA_PUT_STRING(skb, attrtype, value) \ + RTA_PUT(skb, attrtype, strlen(value) + 1, value) + +#define RTA_PUT_FLAG(skb, attrtype) \ + RTA_PUT(skb, attrtype, 0, NULL); + +#define RTA_NEST(skb, type) \ +({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ + RTA_PUT(skb, type, 0, NULL); \ + __start; }) + +#define RTA_NEST_END(skb, start) \ +({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + (skb)->len; }) + +#define RTA_NEST_COMPAT(skb, type, attrlen, data) \ +({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ + RTA_PUT(skb, type, attrlen, data); \ + RTA_NEST(skb, type); \ + __start; }) + +#define RTA_NEST_COMPAT_END(skb, start) \ +({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \ + (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + RTA_NEST_END(skb, __nest); \ + (skb)->len; }) + +#define RTA_NEST_CANCEL(skb, start) \ +({ if (start) \ + skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ + -1; }) + +#define RTA_GET_U8(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \ + goto rtattr_failure; \ + *(u8 *) RTA_DATA(rta); }) + +#define RTA_GET_U16(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \ + goto rtattr_failure; \ + *(u16 *) RTA_DATA(rta); }) + +#define RTA_GET_U32(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \ + goto rtattr_failure; \ + *(u32 *) RTA_DATA(rta); }) + +#define RTA_GET_U64(rta) \ +({ u64 _tmp; \ + if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \ + goto rtattr_failure; \ + memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \ + _tmp; }) + +#define RTA_GET_FLAG(rta) (!!(rta)) + +#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ) +#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta))) + +static inline struct rtattr * +__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen) +{ + struct rtattr *rta; + int size = RTA_LENGTH(attrlen); + + rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); + rta->rta_type = attrtype; + rta->rta_len = size; + memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); + return rta; +} + +#define __RTA_PUT(skb, attrtype, attrlen) \ +({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ + goto rtattr_failure; \ + __rta_reserve(skb, attrtype, attrlen); }) + +extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); + +/* RTNL is used as a global lock for all changes to network configuration */ +extern void rtnl_lock(void); +extern void rtnl_unlock(void); +extern int rtnl_trylock(void); +extern int rtnl_is_locked(void); + +extern void rtnetlink_init(void); +extern void __rtnl_unlock(void); + 
+#define ASSERT_RTNL() do { \ + if (unlikely(!rtnl_is_locked())) { \ + printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while(0) + +static inline u32 rtm_get_table(struct rtattr **rta, u8 table) +{ + return RTA_GET_U32(rta[RTA_TABLE-1]); +rtattr_failure: + return table; +} + +#endif /* __KERNEL__ */ #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index e9fa95f77..3b461dffe 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -22,12 +22,14 @@ struct __kernel_sockaddr_storage { #include /* the SIOCxxx I/O controls */ #include /* iovec support */ #include /* pid_t */ - /* */ +#include /* __user */ -#ifdef CONFIG_PROC_FS +#ifdef __KERNEL__ +# ifdef CONFIG_PROC_FS struct seq_file; extern void socket_seq_show(struct seq_file *seq); -#endif +# endif +#endif /* __KERNEL__ */ typedef unsigned short sa_family_t; @@ -102,10 +104,12 @@ struct cmsghdr { * This mess will go away with glibc */ -#if defined(__GNUC__) +#ifdef __KERNEL__ +#define __KINLINE static inline +#elif defined(__GNUC__) #define __KINLINE static __inline__ #elif defined(__cplusplus) -#define __KINLINE static __inline__ +#define __KINLINE static inline #else #define __KINLINE static #endif @@ -177,6 +181,7 @@ struct ucred { #define AF_ASH 18 /* Ash */ #define AF_ECONET 19 /* Acorn Econet */ #define AF_ATMSVC 20 /* ATM SVCs */ +#define AF_RDS 21 /* RDS sockets */ #define AF_SNA 22 /* Linux SNA Project (nutters!) */ #define AF_IRDA 23 /* IRDA sockets */ #define AF_PPPOX 24 /* PPPoX sockets */ @@ -189,7 +194,8 @@ struct ucred { #define AF_RXRPC 33 /* RxRPC sockets */ #define AF_ISDN 34 /* mISDN sockets */ #define AF_PHONET 35 /* Phonet sockets */ -#define AF_MAX 36 /* For now.. */ +#define AF_IEEE802154 36 /* IEEE802154 sockets */ +#define AF_MAX 37 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -215,6 +221,7 @@ struct ucred { #define PF_ASH AF_ASH #define PF_ECONET AF_ECONET #define PF_ATMSVC AF_ATMSVC +#define PF_RDS AF_RDS #define PF_SNA AF_SNA #define PF_IRDA AF_IRDA #define PF_PPPOX AF_PPPOX @@ -227,6 +234,7 @@ struct ucred { #define PF_RXRPC AF_RXRPC #define PF_ISDN AF_ISDN #define PF_PHONET AF_PHONET +#define PF_IEEE802154 AF_IEEE802154 #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
*/ @@ -296,9 +304,29 @@ struct ucred { #define SOL_PPPOL2TP 273 #define SOL_BLUETOOTH 274 #define SOL_PNPIPE 275 +#define SOL_RDS 276 +#define SOL_IUCV 277 /* IPX options */ #define IPX_TYPE 1 +#ifdef __KERNEL__ +extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); +extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, + int offset, int len); +extern int csum_partial_copy_fromiovecend(unsigned char *kdata, + struct iovec *iov, + int offset, + unsigned int len, __wsum *csump); + +extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); +extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); +extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, + int offset, int len); +extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen); +extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); +extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); + +#endif #endif /* not kernel and not glibc */ #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/types.h b/include/linux/types.h index eb6a9bec8..c42724f8c 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -1,11 +1,19 @@ #ifndef _LINUX_TYPES_H #define _LINUX_TYPES_H +#include + +#ifndef __ASSEMBLY__ +#ifdef __KERNEL__ + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#endif #include -#include -#ifndef __KERNEL_STRICT_NAMES +#ifdef __KERNEL__ typedef __u32 __kernel_dev_t; @@ -23,8 +31,20 @@ typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_mqd_t mqd_t; -typedef __kernel_uid_t uid_t; -typedef __kernel_gid_t gid_t; +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; +typedef __kernel_gid32_t gid_t; +typedef __kernel_uid16_t uid16_t; +typedef __kernel_gid16_t gid16_t; + +typedef unsigned long uintptr_t; + +#ifdef CONFIG_UID16 +/* This is defined by include/asm-{arch}/posix_types.h */ +typedef __kernel_old_uid_t old_uid_t; +typedef __kernel_old_gid_t old_gid_t; +#endif /* CONFIG_UID16 */ #if defined(__GNUC__) typedef __kernel_loff_t loff_t; @@ -111,7 +131,7 @@ typedef __s64 int64_t; * * blkcnt_t is the type of the inode's block count. 
*/ -#ifdef CONFIG_LBD +#ifdef CONFIG_LBDAF typedef u64 sector_t; typedef u64 blkcnt_t; #else @@ -127,7 +147,7 @@ typedef unsigned long blkcnt_t; #define pgoff_t unsigned long #endif -#endif /* __KERNEL_STRICT_NAMES */ +#endif /* __KERNEL__ */ /* * Below are truly Linux-specific types that should never collide with @@ -155,5 +175,35 @@ typedef __u64 __bitwise __be64; typedef __u16 __bitwise __sum16; typedef __u32 __bitwise __wsum; +#ifdef __KERNEL__ +typedef unsigned __bitwise__ gfp_t; +typedef unsigned __bitwise__ fmode_t; + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +typedef u64 phys_addr_t; +#else +typedef u32 phys_addr_t; +#endif + +typedef phys_addr_t resource_size_t; + +typedef struct { + volatile int counter; +} atomic_t; + +#ifdef CONFIG_64BIT +typedef struct { + volatile long counter; +} atomic64_t; +#endif + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 63dcf4a1b..2d4ec15ab 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -58,7 +58,7 @@ struct xfrm_selector __u8 prefixlen_s; __u8 proto; int ifindex; - uid_t user; + __kernel_uid32_t user; }; #define XFRM_INF (~(__u64)0) @@ -450,12 +450,14 @@ struct xfrm_user_mapping { __be16 new_sport; }; +#ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1 #define XFRMGRP_EXPIRE 2 #define XFRMGRP_SA 4 #define XFRMGRP_POLICY 8 #define XFRMGRP_REPORT 0x20 +#endif enum xfrm_nlgroups { XFRMNLGRP_NONE, -- 2.47.2
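
Note on the userspace-compat guards: the new #ifndef __KERNEL__ blocks only hide the IFLA_RTA()/IFLA_PAYLOAD() and IFA_RTA()/IFA_PAYLOAD() helpers from kernel builds; userspace code keeps using them unchanged. A minimal sketch of the usual pattern follows; the function name dump_link_attrs and the assumption that nlh points at a complete RTM_NEWLINK message already read from an rtnetlink socket are illustrative only, not taken from the patch.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

/* Walk the attributes of one RTM_NEWLINK message and print the
 * interface name, using only the userspace-visible helpers. */
static void dump_link_attrs(struct nlmsghdr *nlh)
{
	struct ifinfomsg *ifi = NLMSG_DATA(nlh);
	struct rtattr *rta = IFLA_RTA(ifi);
	int len = IFLA_PAYLOAD(nlh);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == IFLA_IFNAME)
			printf("ifindex %d: %s\n",
			       ifi->ifi_index, (char *)RTA_DATA(rta));
	}
}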
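
Similarly, the GRE_* constants now spelled with __cpu_to_be16() still expand to big-endian values, so tunnel setup code assigns them to the __be16 i_flags/o_flags members of struct ip_tunnel_parm without a further htons(). The sketch below only illustrates that point; the function name init_gre_parm, the device name "gre1" and the GRE_KEY|GRE_CSUM combination are made up for the example, and the filled structure would be passed to the SIOCADDTUNNEL ioctl as before.

#include <string.h>
#include <netinet/in.h>		/* IPPROTO_GRE */
#include <linux/if.h>		/* IFNAMSIZ */
#include <linux/ip.h>		/* struct iphdr */
#include <linux/if_tunnel.h>	/* struct ip_tunnel_parm, GRE_* */

/* Fill an ip_tunnel_parm for a keyed, checksummed GRE tunnel. */
static void init_gre_parm(struct ip_tunnel_parm *p, __be32 key,
			  __be32 local, __be32 remote)
{
	memset(p, 0, sizeof(*p));
	strncpy(p->name, "gre1", IFNAMSIZ - 1);	/* example name only */
	p->iph.version = 4;
	p->iph.ihl = 5;
	p->iph.protocol = IPPROTO_GRE;
	p->iph.saddr = local;
	p->iph.daddr = remote;
	p->i_flags = GRE_KEY | GRE_CSUM;	/* already network byte order */
	p->o_flags = GRE_KEY | GRE_CSUM;
	p->i_key = key;
	p->o_key = key;
}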