git.ipfire.org Git - thirdparty/systemd.git/commitdiff
linux: update headers
author    Yu Watanabe <watanabe.yu+github@gmail.com>
          Sun, 15 Dec 2019 11:25:25 +0000 (20:25 +0900)
committer Yu Watanabe <watanabe.yu+github@gmail.com>
          Sun, 15 Dec 2019 19:37:50 +0000 (04:37 +0900)
src/basic/linux/btrfs.h
src/basic/linux/btrfs_tree.h
src/basic/linux/can/vxcan.h
src/basic/linux/if.h
src/basic/linux/if_bridge.h
src/basic/linux/if_ether.h
src/basic/linux/if_link.h
src/basic/linux/nexthop.h
src/basic/linux/pkt_sched.h
src/basic/linux/rtnetlink.h
src/basic/linux/wireguard.h

diff --git a/src/basic/linux/btrfs.h b/src/basic/linux/btrfs.h
index b960b45d674290fdb32a2ce6ff0a46afcad973d0..4b6ee345ba71d25841d1a808b496f12fdebb6f5f 100644 (file)
@@ -270,6 +270,7 @@ struct btrfs_ioctl_fs_info_args {
 #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
 #define BTRFS_FEATURE_INCOMPAT_NO_HOLES                (1ULL << 9)
 #define BTRFS_FEATURE_INCOMPAT_METADATA_UUID   (1ULL << 10)
+#define BTRFS_FEATURE_INCOMPAT_RAID1C34                (1ULL << 11)
 
 struct btrfs_ioctl_feature_flags {
        __u64 compat_flags;
@@ -665,7 +666,12 @@ struct btrfs_ioctl_get_dev_stats {
        /* out values: */
        __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
 
-       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
+       /*
+        * This pads the struct to 1032 bytes. It was originally meant to pad to
+        * 1024 bytes, but when adding the flags field, the padding calculation
+        * was not adjusted.
+        */
+       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX];
 };
 
 #define BTRFS_QUOTA_CTL_ENABLE 1
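A quick, hedged check of the arithmetic in the comment above (not part of the patch): with BTRFS_DEV_STAT_VALUES_MAX equal to 5, as in the upstream header, the struct carries 3 + 5 + 121 = 129 __u64 fields, i.e. 1032 bytes rather than the originally intended 1024:

    #include <linux/btrfs.h>

    /* Sketch only; assumes the upstream field layout (devid, nr_items, flags,
     * values[5], unused[128 - 2 - 5]) and BTRFS_DEV_STAT_VALUES_MAX == 5. */
    _Static_assert(sizeof(struct btrfs_ioctl_get_dev_stats) == 1032,
                   "adding 'flags' grew the ioctl argument from 1024 to 1032 bytes");
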
@@ -826,7 +832,9 @@ enum btrfs_err_code {
        BTRFS_ERROR_DEV_TGT_REPLACE,
        BTRFS_ERROR_DEV_MISSING_NOT_FOUND,
        BTRFS_ERROR_DEV_ONLY_WRITABLE,
-       BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
+       BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS,
+       BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
+       BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
 };
 
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
@@ -917,10 +925,8 @@ enum btrfs_err_code {
 #define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \
                               struct btrfs_ioctl_quota_rescan_args)
 #define BTRFS_IOC_QUOTA_RESCAN_WAIT _IO(BTRFS_IOCTL_MAGIC, 46)
-#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
-                                  char[BTRFS_LABEL_SIZE])
-#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
-                                  char[BTRFS_LABEL_SIZE])
+#define BTRFS_IOC_GET_FSLABEL  FS_IOC_GETFSLABEL
+#define BTRFS_IOC_SET_FSLABEL  FS_IOC_SETFSLABEL
 #define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
                                      struct btrfs_ioctl_get_dev_stats)
 #define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
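The FSLABEL ioctls are now aliases for the filesystem-independent ones from linux/fs.h. A hedged usage sketch (not part of the patch), assuming fd is an open descriptor on a mounted btrfs filesystem:

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>     /* FS_IOC_GETFSLABEL */
    #include <linux/btrfs.h>  /* BTRFS_IOC_GET_FSLABEL, BTRFS_LABEL_SIZE */

    /* Reads the label; the same call works on any filesystem that
     * implements the generic FS_IOC_GETFSLABEL ioctl. */
    static int read_fs_label(int fd, char label[BTRFS_LABEL_SIZE]) {
            if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) < 0)
                    return -errno;
            return 0;
    }
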
diff --git a/src/basic/linux/btrfs_tree.h b/src/basic/linux/btrfs_tree.h
index 421239b98db2c79e9a9e3fe57c5013716af43603..8e322e2c7e787e337617de04a61a633647f6b777 100644 (file)
 #define BTRFS_CSUM_SIZE 32
 
 /* csum types */
-#define BTRFS_CSUM_TYPE_CRC32  0
+enum btrfs_csum_type {
+       BTRFS_CSUM_TYPE_CRC32   = 0,
+       BTRFS_CSUM_TYPE_XXHASH  = 1,
+       BTRFS_CSUM_TYPE_SHA256  = 2,
+       BTRFS_CSUM_TYPE_BLAKE2  = 3,
+};
 
 /*
  * flags definitions for directory entry item type
@@ -735,10 +740,12 @@ struct btrfs_balance_item {
        __le64 unused[4];
 } __attribute__ ((__packed__));
 
-#define BTRFS_FILE_EXTENT_INLINE 0
-#define BTRFS_FILE_EXTENT_REG 1
-#define BTRFS_FILE_EXTENT_PREALLOC 2
-#define BTRFS_FILE_EXTENT_TYPES        2
+enum {
+       BTRFS_FILE_EXTENT_INLINE   = 0,
+       BTRFS_FILE_EXTENT_REG      = 1,
+       BTRFS_FILE_EXTENT_PREALLOC = 2,
+       BTRFS_NR_FILE_EXTENT_TYPES = 3,
+};
 
 struct btrfs_file_extent_item {
        /*
@@ -806,11 +813,6 @@ struct btrfs_dev_stats_item {
 
 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS    0
 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID     1
-#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED     0
-#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED           1
-#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED         2
-#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED          3
-#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED          4
 
 struct btrfs_dev_replace_item {
        /*
@@ -839,6 +841,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID10       (1ULL << 6)
 #define BTRFS_BLOCK_GROUP_RAID5         (1ULL << 7)
 #define BTRFS_BLOCK_GROUP_RAID6         (1ULL << 8)
+#define BTRFS_BLOCK_GROUP_RAID1C3       (1ULL << 9)
+#define BTRFS_BLOCK_GROUP_RAID1C4       (1ULL << 10)
 #define BTRFS_BLOCK_GROUP_RESERVED     (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
                                         BTRFS_SPACE_INFO_GLOBAL_RSV)
 
@@ -850,6 +854,8 @@ enum btrfs_raid_types {
        BTRFS_RAID_SINGLE,
        BTRFS_RAID_RAID5,
        BTRFS_RAID_RAID6,
+       BTRFS_RAID_RAID1C3,
+       BTRFS_RAID_RAID1C4,
        BTRFS_NR_RAID_TYPES
 };
 
@@ -859,6 +865,8 @@ enum btrfs_raid_types {
 
 #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 |   \
                                         BTRFS_BLOCK_GROUP_RAID1 |   \
+                                        BTRFS_BLOCK_GROUP_RAID1C3 | \
+                                        BTRFS_BLOCK_GROUP_RAID1C4 | \
                                         BTRFS_BLOCK_GROUP_RAID5 |   \
                                         BTRFS_BLOCK_GROUP_RAID6 |   \
                                         BTRFS_BLOCK_GROUP_DUP |     \
@@ -866,6 +874,10 @@ enum btrfs_raid_types {
 #define BTRFS_BLOCK_GROUP_RAID56_MASK  (BTRFS_BLOCK_GROUP_RAID5 |   \
                                         BTRFS_BLOCK_GROUP_RAID6)
 
+#define BTRFS_BLOCK_GROUP_RAID1_MASK   (BTRFS_BLOCK_GROUP_RAID1 |   \
+                                        BTRFS_BLOCK_GROUP_RAID1C3 | \
+                                        BTRFS_BLOCK_GROUP_RAID1C4)
+
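A small, hedged sketch of what the new mask is for (not part of the patch): any of the RAID1 variants can now be detected with one test, where profile is assumed to hold a block group profile value (e.g. a chunk type with the non-profile bits already masked off):

    #include <linux/types.h>
    #include <linux/btrfs_tree.h>

    static int is_any_raid1(__u64 profile) {
            /* True for RAID1, RAID1C3 and RAID1C4 alike. */
            return (profile & BTRFS_BLOCK_GROUP_RAID1_MASK) != 0;
    }
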
 /*
  * We need a bit for restriper to be able to tell when chunks of type
  * SINGLE are available.  This "extended" profile format is used in
diff --git a/src/basic/linux/can/vxcan.h b/src/basic/linux/can/vxcan.h
index 066812d118a2cf8fc05807a79549bc78856e71c3..4fa9d8777a07b440cdd2a577bd000f9d40d44574 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 #ifndef _UAPI_CAN_VXCAN_H
 #define _UAPI_CAN_VXCAN_H
 
diff --git a/src/basic/linux/if.h b/src/basic/linux/if.h
index 4c6f030afc14e71048aa73da2b6f99c9388d6d47..a5ae898ff4c249e64efa03c34ff4585822e3fb73 100644 (file)
@@ -32,6 +32,7 @@
 #define        IFNAMSIZ        16
 #endif /* __UAPI_DEF_IF_IFNAMSIZ */
 #define        IFALIASZ        256
+#define        ALTIFNAMSIZ     128
 #include <linux/hdlc/ioctl.h>
 
 /* For glibc compatibility. An empty enum does not compile. */
diff --git a/src/basic/linux/if_bridge.h b/src/basic/linux/if_bridge.h
index 773e476a8e5465d12e662d8efe37bc06f0b51279..1b3c2b643a02e611a8e45dadc46db1ef688a5dd9 100644 (file)
@@ -237,6 +237,7 @@ struct br_mdb_entry {
 #define MDB_PERMANENT 1
        __u8 state;
 #define MDB_FLAGS_OFFLOAD      (1 << 0)
+#define MDB_FLAGS_FAST_LEAVE   (1 << 1)
        __u8 flags;
        __u16 vid;
        struct {
diff --git a/src/basic/linux/if_ether.h b/src/basic/linux/if_ether.h
index 3158ba672b72ba24b97a5e7f341389c629202c11..f6ceb2e63d1ebad812ace8f8af4deff6feb8318f 100644 (file)
@@ -91,6 +91,7 @@
 #define ETH_P_802_EX1  0x88B5          /* 802.1 Local Experimental 1.  */
 #define ETH_P_PREAUTH  0x88C7          /* 802.11 Preauthentication */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
+#define ETH_P_LLDP     0x88CC          /* Link Layer Discovery Protocol */
 #define ETH_P_MACSEC   0x88E5          /* 802.1ae MACsec */
 #define ETH_P_8021AH   0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_MVRP     0x88F5          /* 802.1Q MVRP                  */
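ETH_P_LLDP gives the LLDP ethertype (0x88CC) a symbolic name. A hedged sketch (not part of the patch) of a raw packet socket that receives only LLDP frames:

    #include <sys/socket.h>
    #include <arpa/inet.h>       /* htons() */
    #include <linux/if_ether.h>  /* ETH_P_LLDP */

    static int open_lldp_socket(void) {
            /* The kernel delivers only frames whose ethertype matches. */
            return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_LLDP));
    }
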
diff --git a/src/basic/linux/if_link.h b/src/basic/linux/if_link.h
index 5b225ff63b483145878e977cf0a1be7d1fe53cd9..8aec8769d9442d3eadbeb441cb987d35897c4882 100644 (file)
@@ -167,6 +167,8 @@ enum {
        IFLA_NEW_IFINDEX,
        IFLA_MIN_MTU,
        IFLA_MAX_MTU,
+       IFLA_PROP_LIST,
+       IFLA_ALT_IFNAME, /* Alternative ifname */
        __IFLA_MAX
 };
 
@@ -636,6 +638,7 @@ enum {
        IFLA_BOND_AD_USER_PORT_KEY,
        IFLA_BOND_AD_ACTOR_SYSTEM,
        IFLA_BOND_TLB_DYNAMIC_LB,
+       IFLA_BOND_PEER_NOTIF_DELAY,
        __IFLA_BOND_MAX,
 };
 
@@ -694,6 +697,7 @@ enum {
        IFLA_VF_IB_NODE_GUID,   /* VF Infiniband node GUID */
        IFLA_VF_IB_PORT_GUID,   /* VF Infiniband port GUID */
        IFLA_VF_VLAN_LIST,      /* nested list of vlans, option for QinQ */
+       IFLA_VF_BROADCAST,      /* VF broadcast */
        __IFLA_VF_MAX,
 };
 
@@ -704,6 +708,10 @@ struct ifla_vf_mac {
        __u8 mac[32]; /* MAX_ADDR_LEN */
 };
 
+struct ifla_vf_broadcast {
+       __u8 broadcast[32];
+};
+
 struct ifla_vf_vlan {
        __u32 vf;
        __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
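IFLA_PROP_LIST and IFLA_ALT_IFNAME carry alternative interface names, bounded by the new ALTIFNAMSIZ from if.h. A hedged sketch (not part of the patch) of copying such an attribute out of a parsed RTM_NEWLINK message, where rta is assumed to be an attribute of type IFLA_ALT_IFNAME found by the caller's attribute walk:

    #include <string.h>
    #include <linux/if.h>         /* ALTIFNAMSIZ */
    #include <linux/rtnetlink.h>  /* struct rtattr, RTA_DATA(), RTA_PAYLOAD() */

    static void copy_altname(struct rtattr *rta, char out[ALTIFNAMSIZ]) {
            size_t len = RTA_PAYLOAD(rta);

            /* Truncate defensively; the kernel limits the name to
             * ALTIFNAMSIZ - 1 characters plus the terminating NUL. */
            if (len >= ALTIFNAMSIZ)
                    len = ALTIFNAMSIZ - 1;
            memcpy(out, RTA_DATA(rta), len);
            out[len] = '\0';
    }
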
diff --git a/src/basic/linux/nexthop.h b/src/basic/linux/nexthop.h
index d51b6e1d08a2d25aa29e2a98990cfcd8d548e4cb..7b61867e98487e4d4240e59b8e9304af7d64d9b1 100644 (file)
@@ -1,56 +1,56 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_NEXTHOP_H
-#define _LINUX_NEXTHOP_H
+#ifndef _UAPI_LINUX_NEXTHOP_H
+#define _UAPI_LINUX_NEXTHOP_H
 
 #include <linux/types.h>
 
 struct nhmsg {
-        unsigned char   nh_family;
-        unsigned char   nh_scope;     /* return only */
-        unsigned char   nh_protocol;  /* Routing protocol that installed nh */
-        unsigned char   resvd;
-        unsigned int    nh_flags;     /* RTNH_F flags */
+       unsigned char   nh_family;
+       unsigned char   nh_scope;     /* return only */
+       unsigned char   nh_protocol;  /* Routing protocol that installed nh */
+       unsigned char   resvd;
+       unsigned int    nh_flags;     /* RTNH_F flags */
 };
 
 /* entry in a nexthop group */
 struct nexthop_grp {
-        __u32   id;       /* nexthop id - must exist */
-        __u8    weight;   /* weight of this nexthop */
-        __u8    resvd1;
-        __u16   resvd2;
+       __u32   id;       /* nexthop id - must exist */
+       __u8    weight;   /* weight of this nexthop */
+       __u8    resvd1;
+       __u16   resvd2;
 };
 
 enum {
-      NEXTHOP_GRP_TYPE_MPATH,  /* default type if not specified */
-      __NEXTHOP_GRP_TYPE_MAX,
+       NEXTHOP_GRP_TYPE_MPATH,  /* default type if not specified */
+       __NEXTHOP_GRP_TYPE_MAX,
 };
 
 #define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1)
 
 enum {
-      NHA_UNSPEC,
-      NHA_ID,           /* u32; id for nexthop. id == 0 means auto-assign */
+       NHA_UNSPEC,
+       NHA_ID,         /* u32; id for nexthop. id == 0 means auto-assign */
 
-      NHA_GROUP,        /* array of nexthop_grp */
-      NHA_GROUP_TYPE,   /* u16 one of NEXTHOP_GRP_TYPE */
-      /* if NHA_GROUP attribute is added, no other attributes can be set */
+       NHA_GROUP,      /* array of nexthop_grp */
+       NHA_GROUP_TYPE, /* u16 one of NEXTHOP_GRP_TYPE */
+       /* if NHA_GROUP attribute is added, no other attributes can be set */
 
-      NHA_BLACKHOLE,    /* flag; nexthop used to blackhole packets */
-      /* if NHA_BLACKHOLE is added, OIF, GATEWAY, ENCAP can not be set */
+       NHA_BLACKHOLE,  /* flag; nexthop used to blackhole packets */
+       /* if NHA_BLACKHOLE is added, OIF, GATEWAY, ENCAP can not be set */
 
-      NHA_OIF,  /* u32; nexthop device */
-      NHA_GATEWAY,      /* be32 (IPv4) or in6_addr (IPv6) gw address */
-      NHA_ENCAP_TYPE, /* u16; lwt encap type */
-      NHA_ENCAP,        /* lwt encap data */
+       NHA_OIF,        /* u32; nexthop device */
+       NHA_GATEWAY,    /* be32 (IPv4) or in6_addr (IPv6) gw address */
+       NHA_ENCAP_TYPE, /* u16; lwt encap type */
+       NHA_ENCAP,      /* lwt encap data */
 
-      /* NHA_OIF can be appended to dump request to return only
-       * nexthops using given device
-       */
-      NHA_GROUPS,       /* flag; only return nexthop groups in dump */
-      NHA_MASTER,       /* u32;  only return nexthops with given master dev */
+       /* NHA_OIF can be appended to dump request to return only
+        * nexthops using given device
+        */
+       NHA_GROUPS,     /* flag; only return nexthop groups in dump */
+       NHA_MASTER,     /* u32;  only return nexthops with given master dev */
 
-      __NHA_MAX,
+       __NHA_MAX,
 };
 
-#define NHA_MAX (__NHA_MAX - 1)
+#define NHA_MAX        (__NHA_MAX - 1)
 #endif
diff --git a/src/basic/linux/pkt_sched.h b/src/basic/linux/pkt_sched.h
index daf605760c526d7d7562cd53aeb6d3748bb77d46..9f1a728762124c59f366aa80c3e69ce78049aaf4 100644 (file)
    Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
    class, actually, as rule it will be handled with more care than
    filler or even bulk.
-*/
+ */
 
-#define TC_PRIO_BESTEFFORT              0
-#define TC_PRIO_FILLER                  1
-#define TC_PRIO_BULK                    2
-#define TC_PRIO_INTERACTIVE_BULK        4
-#define TC_PRIO_INTERACTIVE             6
-#define TC_PRIO_CONTROL                 7
+#define TC_PRIO_BESTEFFORT             0
+#define TC_PRIO_FILLER                 1
+#define TC_PRIO_BULK                   2
+#define TC_PRIO_INTERACTIVE_BULK       4
+#define TC_PRIO_INTERACTIVE            6
+#define TC_PRIO_CONTROL                        7
 
-#define TC_PRIO_MAX                     15
+#define TC_PRIO_MAX                    15
 
 /* Generic queue statistics, available for all the elements.
    Particular schedulers may have also their private records.
-*/
+ */
 
 struct tc_stats {
-        __u64   bytes;                  /* Number of enqueued bytes */
-        __u32   packets;                /* Number of enqueued packets   */
-        __u32   drops;                  /* Packets dropped because of lack of resources */
-        __u32   overlimits;             /* Number of throttle events when this
-                                         * flow goes out of allocated bandwidth */
-        __u32   bps;                    /* Current flow byte rate */
-        __u32   pps;                    /* Current flow packet rate */
-        __u32   qlen;
-        __u32   backlog;
+       __u64   bytes;                  /* Number of enqueued bytes */
+       __u32   packets;                /* Number of enqueued packets   */
+       __u32   drops;                  /* Packets dropped because of lack of resources */
+       __u32   overlimits;             /* Number of throttle events when this
+                                        * flow goes out of allocated bandwidth */
+       __u32   bps;                    /* Current flow byte rate */
+       __u32   pps;                    /* Current flow packet rate */
+       __u32   qlen;
+       __u32   backlog;
 };
 
 struct tc_estimator {
-        signed char     interval;
-        unsigned char   ewma_log;
+       signed char     interval;
+       unsigned char   ewma_log;
 };
 
 /* "Handles"
    ---------
 
-   All the traffic control objects have 32bit identifiers, or "handles".
+    All the traffic control objects have 32bit identifiers, or "handles".
 
-   They can be considered as opaque numbers from user API viewpoint,
-   but actually they always consist of two fields: major and
-   minor numbers, which are interpreted by kernel specially,
-   that may be used by applications, though not recommended.
+    They can be considered as opaque numbers from user API viewpoint,
+    but actually they always consist of two fields: major and
+    minor numbers, which are interpreted by kernel specially,
+    that may be used by applications, though not recommended.
 
-   F.e. qdisc handles always have minor number equal to zero,
-   classes (or flows) have major equal to parent qdisc major, and
-   minor uniquely identifying class inside qdisc.
+    F.e. qdisc handles always have minor number equal to zero,
+    classes (or flows) have major equal to parent qdisc major, and
+    minor uniquely identifying class inside qdisc.
 
-   Macros to manipulate handles:
-*/
+    Macros to manipulate handles:
+ */
 
 #define TC_H_MAJ_MASK (0xFFFF0000U)
 #define TC_H_MIN_MASK (0x0000FFFFU)
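As the "Handles" comment above explains, a handle packs a 16-bit major and a 16-bit minor number. A hedged sketch (not part of the patch) using the header's own macros; class_handle(1, 2) yields the handle tc(8) prints as "1:2":

    #include <linux/types.h>
    #include <linux/pkt_sched.h>

    static __u32 class_handle(__u16 major, __u16 minor) {
            /* Major lives in the upper 16 bits, minor in the lower 16;
             * TC_H_MIN() recovers the minor number again. */
            return TC_H_MAKE((__u32) major << 16, minor);
    }
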
@@ -71,50 +71,50 @@ struct tc_estimator {
 #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
 #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
 
-#define TC_H_UNSPEC     (0U)
-#define TC_H_ROOT       (0xFFFFFFFFU)
+#define TC_H_UNSPEC    (0U)
+#define TC_H_ROOT      (0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
-#define TC_H_CLSACT     TC_H_INGRESS
+#define TC_H_CLSACT    TC_H_INGRESS
 
-#define TC_H_MIN_PRIORITY       0xFFE0U
-#define TC_H_MIN_INGRESS        0xFFF2U
-#define TC_H_MIN_EGRESS         0xFFF3U
+#define TC_H_MIN_PRIORITY      0xFFE0U
+#define TC_H_MIN_INGRESS       0xFFF2U
+#define TC_H_MIN_EGRESS                0xFFF3U
 
 /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
 enum tc_link_layer {
-                    TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
-                    TC_LINKLAYER_ETHERNET,
-                    TC_LINKLAYER_ATM,
+       TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+       TC_LINKLAYER_ETHERNET,
+       TC_LINKLAYER_ATM,
 };
 #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
 
 struct tc_ratespec {
-        unsigned char   cell_log;
-        __u8            linklayer; /* lower 4 bits */
-        unsigned short  overhead;
-        short           cell_align;
-        unsigned short  mpu;
-        __u32           rate;
+       unsigned char   cell_log;
+       __u8            linklayer; /* lower 4 bits */
+       unsigned short  overhead;
+       short           cell_align;
+       unsigned short  mpu;
+       __u32           rate;
 };
 
-#define TC_RTAB_SIZE    1024
+#define TC_RTAB_SIZE   1024
 
 struct tc_sizespec {
-        unsigned char   cell_log;
-        unsigned char   size_log;
-        short           cell_align;
-        int             overhead;
-        unsigned int    linklayer;
-        unsigned int    mpu;
-        unsigned int    mtu;
-        unsigned int    tsize;
+       unsigned char   cell_log;
+       unsigned char   size_log;
+       short           cell_align;
+       int             overhead;
+       unsigned int    linklayer;
+       unsigned int    mpu;
+       unsigned int    mtu;
+       unsigned int    tsize;
 };
 
 enum {
-      TCA_STAB_UNSPEC,
-      TCA_STAB_BASE,
-      TCA_STAB_DATA,
-      __TCA_STAB_MAX
+       TCA_STAB_UNSPEC,
+       TCA_STAB_BASE,
+       TCA_STAB_DATA,
+       __TCA_STAB_MAX
 };
 
 #define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
@@ -122,7 +122,7 @@ enum {
 /* FIFO section */
 
 struct tc_fifo_qopt {
-        __u32   limit;  /* Queue length: bytes for bfifo, packets for pfifo */
+       __u32   limit;  /* Queue length: bytes for bfifo, packets for pfifo */
 };
 
 /* SKBPRIO section */
@@ -137,24 +137,24 @@ struct tc_fifo_qopt {
 #define SKBPRIO_MAX_PRIORITY 64
 
 struct tc_skbprio_qopt {
-        __u32   limit;          /* Queue length in packets. */
+       __u32   limit;          /* Queue length in packets. */
 };
 
 /* PRIO section */
 
-#define TCQ_PRIO_BANDS  16
+#define TCQ_PRIO_BANDS 16
 #define TCQ_MIN_PRIO_BANDS 2
 
 struct tc_prio_qopt {
-        int     bands;                  /* Number of bands */
-        __u8    priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+       int     bands;                  /* Number of bands */
+       __u8    priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
 };
 
 /* MULTIQ section */
 
 struct tc_multiq_qopt {
-        __u16   bands;                  /* Number of bands */
-        __u16   max_bands;              /* Maximum number of queues */
+       __u16   bands;                  /* Number of bands */
+       __u16   max_bands;              /* Maximum number of queues */
 };
 
 /* PLUG section */
@@ -165,40 +165,40 @@ struct tc_multiq_qopt {
 #define TCQ_PLUG_LIMIT                 3
 
 struct tc_plug_qopt {
-        /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
-         *  buffer any incoming packets
-         * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
-         *   to beginning of the next plug.
-         * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
-         *   Stop buffering packets until the next TCQ_PLUG_BUFFER
-         *   command is received (just act as a pass-thru queue).
-         * TCQ_PLUG_LIMIT: Increase/decrease queue size
-         */
-        int             action;
-        __u32           limit;
+       /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
+        *  buffer any incoming packets
+        * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
+        *   to beginning of the next plug.
+        * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
+        *   Stop buffering packets until the next TCQ_PLUG_BUFFER
+        *   command is received (just act as a pass-thru queue).
+        * TCQ_PLUG_LIMIT: Increase/decrease queue size
+        */
+       int             action;
+       __u32           limit;
 };
 
 /* TBF section */
 
 struct tc_tbf_qopt {
-        struct tc_ratespec rate;
-        struct tc_ratespec peakrate;
-        __u32           limit;
-        __u32           buffer;
-        __u32           mtu;
+       struct tc_ratespec rate;
+       struct tc_ratespec peakrate;
+       __u32           limit;
+       __u32           buffer;
+       __u32           mtu;
 };
 
 enum {
-      TCA_TBF_UNSPEC,
-      TCA_TBF_PARMS,
-      TCA_TBF_RTAB,
-      TCA_TBF_PTAB,
-      TCA_TBF_RATE64,
-      TCA_TBF_PRATE64,
-      TCA_TBF_BURST,
-      TCA_TBF_PBURST,
-      TCA_TBF_PAD,
-      __TCA_TBF_MAX,
+       TCA_TBF_UNSPEC,
+       TCA_TBF_PARMS,
+       TCA_TBF_RTAB,
+       TCA_TBF_PTAB,
+       TCA_TBF_RATE64,
+       TCA_TBF_PRATE64,
+       TCA_TBF_BURST,
+       TCA_TBF_PBURST,
+       TCA_TBF_PAD,
+       __TCA_TBF_MAX,
 };
 
 #define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
@@ -211,74 +211,74 @@ enum {
 /* SFQ section */
 
 struct tc_sfq_qopt {
-        unsigned        quantum;        /* Bytes per round allocated to flow */
-        int             perturb_period; /* Period of hash perturbation */
-        __u32           limit;          /* Maximal packets in queue */
-        unsigned        divisor;        /* Hash divisor  */
-        unsigned        flows;          /* Maximal number of flows  */
+       unsigned        quantum;        /* Bytes per round allocated to flow */
+       int             perturb_period; /* Period of hash perturbation */
+       __u32           limit;          /* Maximal packets in queue */
+       unsigned        divisor;        /* Hash divisor  */
+       unsigned        flows;          /* Maximal number of flows  */
 };
 
 struct tc_sfqred_stats {
-        __u32           prob_drop;      /* Early drops, below max threshold */
-        __u32           forced_drop;    /* Early drops, after max threshold */
-        __u32           prob_mark;      /* Marked packets, below max threshold */
-        __u32           forced_mark;    /* Marked packets, after max threshold */
-        __u32           prob_mark_head; /* Marked packets, below max threshold */
-        __u32           forced_mark_head;/* Marked packets, after max threshold */
+       __u32           prob_drop;      /* Early drops, below max threshold */
+       __u32           forced_drop;    /* Early drops, after max threshold */
+       __u32           prob_mark;      /* Marked packets, below max threshold */
+       __u32           forced_mark;    /* Marked packets, after max threshold */
+       __u32           prob_mark_head; /* Marked packets, below max threshold */
+       __u32           forced_mark_head;/* Marked packets, after max threshold */
 };
 
 struct tc_sfq_qopt_v1 {
-        struct tc_sfq_qopt v0;
-        unsigned int    depth;          /* max number of packets per flow */
-        unsigned int    headdrop;
-        /* SFQRED parameters */
-        __u32           limit;          /* HARD maximal flow queue length (bytes) */
-        __u32           qth_min;        /* Min average length threshold (bytes) */
-        __u32           qth_max;        /* Max average length threshold (bytes) */
-        unsigned char   Wlog;           /* log(W)               */
-        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
-        unsigned char   Scell_log;      /* cell size for idle damping */
-        unsigned char   flags;
-        __u32           max_P;          /* probability, high resolution */
-        /* SFQRED stats */
-        struct tc_sfqred_stats stats;
+       struct tc_sfq_qopt v0;
+       unsigned int    depth;          /* max number of packets per flow */
+       unsigned int    headdrop;
+/* SFQRED parameters */
+       __u32           limit;          /* HARD maximal flow queue length (bytes) */
+       __u32           qth_min;        /* Min average length threshold (bytes) */
+       __u32           qth_max;        /* Max average length threshold (bytes) */
+       unsigned char   Wlog;           /* log(W)               */
+       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
+       unsigned char   Scell_log;      /* cell size for idle damping */
+       unsigned char   flags;
+       __u32           max_P;          /* probability, high resolution */
+/* SFQRED stats */
+       struct tc_sfqred_stats stats;
 };
 
 
 struct tc_sfq_xstats {
-        __s32           allot;
+       __s32           allot;
 };
 
 /* RED section */
 
 enum {
-      TCA_RED_UNSPEC,
-      TCA_RED_PARMS,
-      TCA_RED_STAB,
-      TCA_RED_MAX_P,
-      __TCA_RED_MAX,
+       TCA_RED_UNSPEC,
+       TCA_RED_PARMS,
+       TCA_RED_STAB,
+       TCA_RED_MAX_P,
+       __TCA_RED_MAX,
 };
 
 #define TCA_RED_MAX (__TCA_RED_MAX - 1)
 
 struct tc_red_qopt {
-        __u32           limit;          /* HARD maximal queue length (bytes)    */
-        __u32           qth_min;        /* Min average length threshold (bytes) */
-        __u32           qth_max;        /* Max average length threshold (bytes) */
-        unsigned char   Wlog;           /* log(W)               */
-        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
-        unsigned char   Scell_log;      /* cell size for idle damping */
-        unsigned char   flags;
-#define TC_RED_ECN              1
-#define TC_RED_HARDDROP         2
-#define TC_RED_ADAPTATIVE       4
+       __u32           limit;          /* HARD maximal queue length (bytes)    */
+       __u32           qth_min;        /* Min average length threshold (bytes) */
+       __u32           qth_max;        /* Max average length threshold (bytes) */
+       unsigned char   Wlog;           /* log(W)               */
+       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
+       unsigned char   Scell_log;      /* cell size for idle damping */
+       unsigned char   flags;
+#define TC_RED_ECN             1
+#define TC_RED_HARDDROP                2
+#define TC_RED_ADAPTATIVE      4
 };
 
 struct tc_red_xstats {
-        __u32           early;          /* Early drops */
-        __u32           pdrop;          /* Drops due to queue limits */
-        __u32           other;          /* Drops due to drop() calls */
-        __u32           marked;         /* Marked packets */
+       __u32           early;          /* Early drops */
+       __u32           pdrop;          /* Drops due to queue limits */
+       __u32           other;          /* Drops due to drop() calls */
+       __u32           marked;         /* Marked packets */
 };
 
 /* GRED section */
@@ -286,173 +286,173 @@ struct tc_red_xstats {
 #define MAX_DPs 16
 
 enum {
-      TCA_GRED_UNSPEC,
-      TCA_GRED_PARMS,
-      TCA_GRED_STAB,
-      TCA_GRED_DPS,
-      TCA_GRED_MAX_P,
-      TCA_GRED_LIMIT,
-      TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
-      __TCA_GRED_MAX,
+       TCA_GRED_UNSPEC,
+       TCA_GRED_PARMS,
+       TCA_GRED_STAB,
+       TCA_GRED_DPS,
+       TCA_GRED_MAX_P,
+       TCA_GRED_LIMIT,
+       TCA_GRED_VQ_LIST,       /* nested TCA_GRED_VQ_ENTRY */
+       __TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
 
 enum {
-      TCA_GRED_VQ_ENTRY_UNSPEC,
-      TCA_GRED_VQ_ENTRY,        /* nested TCA_GRED_VQ_* */
-      __TCA_GRED_VQ_ENTRY_MAX,
+       TCA_GRED_VQ_ENTRY_UNSPEC,
+       TCA_GRED_VQ_ENTRY,      /* nested TCA_GRED_VQ_* */
+       __TCA_GRED_VQ_ENTRY_MAX,
 };
 #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
 
 enum {
-      TCA_GRED_VQ_UNSPEC,
-      TCA_GRED_VQ_PAD,
-      TCA_GRED_VQ_DP,                   /* u32 */
-      TCA_GRED_VQ_STAT_BYTES,           /* u64 */
-      TCA_GRED_VQ_STAT_PACKETS, /* u32 */
-      TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
-      TCA_GRED_VQ_STAT_PROB_DROP,       /* u32 */
-      TCA_GRED_VQ_STAT_PROB_MARK,       /* u32 */
-      TCA_GRED_VQ_STAT_FORCED_DROP,     /* u32 */
-      TCA_GRED_VQ_STAT_FORCED_MARK,     /* u32 */
-      TCA_GRED_VQ_STAT_PDROP,           /* u32 */
-      TCA_GRED_VQ_STAT_OTHER,           /* u32 */
-      TCA_GRED_VQ_FLAGS,                /* u32 */
-      __TCA_GRED_VQ_MAX
+       TCA_GRED_VQ_UNSPEC,
+       TCA_GRED_VQ_PAD,
+       TCA_GRED_VQ_DP,                 /* u32 */
+       TCA_GRED_VQ_STAT_BYTES,         /* u64 */
+       TCA_GRED_VQ_STAT_PACKETS,       /* u32 */
+       TCA_GRED_VQ_STAT_BACKLOG,       /* u32 */
+       TCA_GRED_VQ_STAT_PROB_DROP,     /* u32 */
+       TCA_GRED_VQ_STAT_PROB_MARK,     /* u32 */
+       TCA_GRED_VQ_STAT_FORCED_DROP,   /* u32 */
+       TCA_GRED_VQ_STAT_FORCED_MARK,   /* u32 */
+       TCA_GRED_VQ_STAT_PDROP,         /* u32 */
+       TCA_GRED_VQ_STAT_OTHER,         /* u32 */
+       TCA_GRED_VQ_FLAGS,              /* u32 */
+       __TCA_GRED_VQ_MAX
 };
 
 #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
 
 struct tc_gred_qopt {
-        __u32           limit;        /* HARD maximal queue length (bytes)    */
-        __u32           qth_min;      /* Min average length threshold (bytes) */
-        __u32           qth_max;      /* Max average length threshold (bytes) */
-        __u32           DP;           /* up to 2^32 DPs */
-        __u32           backlog;
-        __u32           qave;
-        __u32           forced;
-        __u32           early;
-        __u32           other;
-        __u32           pdrop;
-        __u8            Wlog;         /* log(W)               */
-        __u8            Plog;         /* log(P_max/(qth_max-qth_min)) */
-        __u8            Scell_log;    /* cell size for idle damping */
-        __u8            prio;         /* prio of this VQ */
-        __u32           packets;
-        __u32           bytesin;
+       __u32           limit;        /* HARD maximal queue length (bytes)    */
+       __u32           qth_min;      /* Min average length threshold (bytes) */
+       __u32           qth_max;      /* Max average length threshold (bytes) */
+       __u32           DP;           /* up to 2^32 DPs */
+       __u32           backlog;
+       __u32           qave;
+       __u32           forced;
+       __u32           early;
+       __u32           other;
+       __u32           pdrop;
+       __u8            Wlog;         /* log(W)               */
+       __u8            Plog;         /* log(P_max/(qth_max-qth_min)) */
+       __u8            Scell_log;    /* cell size for idle damping */
+       __u8            prio;         /* prio of this VQ */
+       __u32           packets;
+       __u32           bytesin;
 };
 
 /* gred setup */
 struct tc_gred_sopt {
-        __u32           DPs;
-        __u32           def_DP;
-        __u8            grio;
-        __u8            flags;
-        __u16           pad1;
+       __u32           DPs;
+       __u32           def_DP;
+       __u8            grio;
+       __u8            flags;
+       __u16           pad1;
 };
 
 /* CHOKe section */
 
 enum {
-      TCA_CHOKE_UNSPEC,
-      TCA_CHOKE_PARMS,
-      TCA_CHOKE_STAB,
-      TCA_CHOKE_MAX_P,
-      __TCA_CHOKE_MAX,
+       TCA_CHOKE_UNSPEC,
+       TCA_CHOKE_PARMS,
+       TCA_CHOKE_STAB,
+       TCA_CHOKE_MAX_P,
+       __TCA_CHOKE_MAX,
 };
 
 #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
 
 struct tc_choke_qopt {
-        __u32           limit;          /* Hard queue length (packets)  */
-        __u32           qth_min;        /* Min average threshold (packets) */
-        __u32           qth_max;        /* Max average threshold (packets) */
-        unsigned char   Wlog;           /* log(W)               */
-        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
-        unsigned char   Scell_log;      /* cell size for idle damping */
-        unsigned char   flags;          /* see RED flags */
+       __u32           limit;          /* Hard queue length (packets)  */
+       __u32           qth_min;        /* Min average threshold (packets) */
+       __u32           qth_max;        /* Max average threshold (packets) */
+       unsigned char   Wlog;           /* log(W)               */
+       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
+       unsigned char   Scell_log;      /* cell size for idle damping */
+       unsigned char   flags;          /* see RED flags */
 };
 
 struct tc_choke_xstats {
-        __u32           early;          /* Early drops */
-        __u32           pdrop;          /* Drops due to queue limits */
-        __u32           other;          /* Drops due to drop() calls */
-        __u32           marked;         /* Marked packets */
-        __u32           matched;        /* Drops due to flow match */
+       __u32           early;          /* Early drops */
+       __u32           pdrop;          /* Drops due to queue limits */
+       __u32           other;          /* Drops due to drop() calls */
+       __u32           marked;         /* Marked packets */
+       __u32           matched;        /* Drops due to flow match */
 };
 
 /* HTB section */
-#define TC_HTB_NUMPRIO          8
-#define TC_HTB_MAXDEPTH         8
-#define TC_HTB_PROTOVER         3 /* the same as HTB and TC's major */
+#define TC_HTB_NUMPRIO         8
+#define TC_HTB_MAXDEPTH                8
+#define TC_HTB_PROTOVER                3 /* the same as HTB and TC's major */
 
 struct tc_htb_opt {
-        struct tc_ratespec      rate;
-        struct tc_ratespec      ceil;
-        __u32   buffer;
-        __u32   cbuffer;
-        __u32   quantum;
-        __u32   level;          /* out only */
-        __u32   prio;
+       struct tc_ratespec      rate;
+       struct tc_ratespec      ceil;
+       __u32   buffer;
+       __u32   cbuffer;
+       __u32   quantum;
+       __u32   level;          /* out only */
+       __u32   prio;
 };
 struct tc_htb_glob {
-        __u32 version;          /* to match HTB/TC */
-        __u32 rate2quantum;     /* bps->quantum divisor */
-        __u32 defcls;           /* default class number */
-        __u32 debug;            /* debug flags */
+       __u32 version;          /* to match HTB/TC */
+       __u32 rate2quantum;     /* bps->quantum divisor */
+       __u32 defcls;           /* default class number */
+       __u32 debug;            /* debug flags */
 
-        /* stats */
-        __u32 direct_pkts; /* count of non shaped packets */
+       /* stats */
+       __u32 direct_pkts; /* count of non shaped packets */
 };
 enum {
-      TCA_HTB_UNSPEC,
-      TCA_HTB_PARMS,
-      TCA_HTB_INIT,
-      TCA_HTB_CTAB,
-      TCA_HTB_RTAB,
-      TCA_HTB_DIRECT_QLEN,
-      TCA_HTB_RATE64,
-      TCA_HTB_CEIL64,
-      TCA_HTB_PAD,
-      __TCA_HTB_MAX,
+       TCA_HTB_UNSPEC,
+       TCA_HTB_PARMS,
+       TCA_HTB_INIT,
+       TCA_HTB_CTAB,
+       TCA_HTB_RTAB,
+       TCA_HTB_DIRECT_QLEN,
+       TCA_HTB_RATE64,
+       TCA_HTB_CEIL64,
+       TCA_HTB_PAD,
+       __TCA_HTB_MAX,
 };
 
 #define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
 
 struct tc_htb_xstats {
-        __u32 lends;
-        __u32 borrows;
-        __u32 giants;   /* unused since 'Make HTB scheduler work with TSO.' */
-        __s32 tokens;
-        __s32 ctokens;
+       __u32 lends;
+       __u32 borrows;
+       __u32 giants;   /* unused since 'Make HTB scheduler work with TSO.' */
+       __s32 tokens;
+       __s32 ctokens;
 };
 
 /* HFSC section */
 
 struct tc_hfsc_qopt {
-        __u16   defcls;         /* default class */
+       __u16   defcls;         /* default class */
 };
 
 struct tc_service_curve {
-        __u32   m1;             /* slope of the first segment in bps */
-        __u32   d;              /* x-projection of the first segment in us */
-        __u32   m2;             /* slope of the second segment in bps */
+       __u32   m1;             /* slope of the first segment in bps */
+       __u32   d;              /* x-projection of the first segment in us */
+       __u32   m2;             /* slope of the second segment in bps */
 };
 
 struct tc_hfsc_stats {
-        __u64   work;           /* total work done */
-        __u64   rtwork;         /* work done by real-time criteria */
-        __u32   period;         /* current period */
-        __u32   level;          /* class level in hierarchy */
+       __u64   work;           /* total work done */
+       __u64   rtwork;         /* work done by real-time criteria */
+       __u32   period;         /* current period */
+       __u32   level;          /* class level in hierarchy */
 };
 
 enum {
-      TCA_HFSC_UNSPEC,
-      TCA_HFSC_RSC,
-      TCA_HFSC_FSC,
-      TCA_HFSC_USC,
-      __TCA_HFSC_MAX,
+       TCA_HFSC_UNSPEC,
+       TCA_HFSC_RSC,
+       TCA_HFSC_FSC,
+       TCA_HFSC_USC,
+       __TCA_HFSC_MAX,
 };
 
 #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
@@ -460,93 +460,93 @@ enum {
 
 /* CBQ section */
 
-#define TC_CBQ_MAXPRIO          8
-#define TC_CBQ_MAXLEVEL         8
-#define TC_CBQ_DEF_EWMA         5
+#define TC_CBQ_MAXPRIO         8
+#define TC_CBQ_MAXLEVEL                8
+#define TC_CBQ_DEF_EWMA                5
 
 struct tc_cbq_lssopt {
-        unsigned char   change;
-        unsigned char   flags;
-#define TCF_CBQ_LSS_BOUNDED     1
-#define TCF_CBQ_LSS_ISOLATED    2
-        unsigned char   ewma_log;
-        unsigned char   level;
-#define TCF_CBQ_LSS_FLAGS       1
-#define TCF_CBQ_LSS_EWMA        2
-#define TCF_CBQ_LSS_MAXIDLE     4
-#define TCF_CBQ_LSS_MINIDLE     8
-#define TCF_CBQ_LSS_OFFTIME     0x10
-#define TCF_CBQ_LSS_AVPKT       0x20
-        __u32           maxidle;
-        __u32           minidle;
-        __u32           offtime;
-        __u32           avpkt;
+       unsigned char   change;
+       unsigned char   flags;
+#define TCF_CBQ_LSS_BOUNDED    1
+#define TCF_CBQ_LSS_ISOLATED   2
+       unsigned char   ewma_log;
+       unsigned char   level;
+#define TCF_CBQ_LSS_FLAGS      1
+#define TCF_CBQ_LSS_EWMA       2
+#define TCF_CBQ_LSS_MAXIDLE    4
+#define TCF_CBQ_LSS_MINIDLE    8
+#define TCF_CBQ_LSS_OFFTIME    0x10
+#define TCF_CBQ_LSS_AVPKT      0x20
+       __u32           maxidle;
+       __u32           minidle;
+       __u32           offtime;
+       __u32           avpkt;
 };
 
 struct tc_cbq_wrropt {
-        unsigned char   flags;
-        unsigned char   priority;
-        unsigned char   cpriority;
-        unsigned char   __reserved;
-        __u32           allot;
-        __u32           weight;
+       unsigned char   flags;
+       unsigned char   priority;
+       unsigned char   cpriority;
+       unsigned char   __reserved;
+       __u32           allot;
+       __u32           weight;
 };
 
 struct tc_cbq_ovl {
-        unsigned char   strategy;
-#define TC_CBQ_OVL_CLASSIC      0
-#define TC_CBQ_OVL_DELAY        1
-#define TC_CBQ_OVL_LOWPRIO      2
-#define TC_CBQ_OVL_DROP         3
-#define TC_CBQ_OVL_RCLASSIC     4
-        unsigned char   priority2;
-        __u16           pad;
-        __u32           penalty;
+       unsigned char   strategy;
+#define        TC_CBQ_OVL_CLASSIC      0
+#define        TC_CBQ_OVL_DELAY        1
+#define        TC_CBQ_OVL_LOWPRIO      2
+#define        TC_CBQ_OVL_DROP         3
+#define        TC_CBQ_OVL_RCLASSIC     4
+       unsigned char   priority2;
+       __u16           pad;
+       __u32           penalty;
 };
 
 struct tc_cbq_police {
-        unsigned char   police;
-        unsigned char   __res1;
-        unsigned short  __res2;
+       unsigned char   police;
+       unsigned char   __res1;
+       unsigned short  __res2;
 };
 
 struct tc_cbq_fopt {
-        __u32           split;
-        __u32           defmap;
-        __u32           defchange;
+       __u32           split;
+       __u32           defmap;
+       __u32           defchange;
 };
 
 struct tc_cbq_xstats {
-        __u32           borrows;
-        __u32           overactions;
-        __s32           avgidle;
-        __s32           undertime;
+       __u32           borrows;
+       __u32           overactions;
+       __s32           avgidle;
+       __s32           undertime;
 };
 
 enum {
-      TCA_CBQ_UNSPEC,
-      TCA_CBQ_LSSOPT,
-      TCA_CBQ_WRROPT,
-      TCA_CBQ_FOPT,
-      TCA_CBQ_OVL_STRATEGY,
-      TCA_CBQ_RATE,
-      TCA_CBQ_RTAB,
-      TCA_CBQ_POLICE,
-      __TCA_CBQ_MAX,
+       TCA_CBQ_UNSPEC,
+       TCA_CBQ_LSSOPT,
+       TCA_CBQ_WRROPT,
+       TCA_CBQ_FOPT,
+       TCA_CBQ_OVL_STRATEGY,
+       TCA_CBQ_RATE,
+       TCA_CBQ_RTAB,
+       TCA_CBQ_POLICE,
+       __TCA_CBQ_MAX,
 };
 
-#define TCA_CBQ_MAX     (__TCA_CBQ_MAX - 1)
+#define TCA_CBQ_MAX    (__TCA_CBQ_MAX - 1)
 
 /* dsmark section */
 
 enum {
-      TCA_DSMARK_UNSPEC,
-      TCA_DSMARK_INDICES,
-      TCA_DSMARK_DEFAULT_INDEX,
-      TCA_DSMARK_SET_TC_INDEX,
-      TCA_DSMARK_MASK,
-      TCA_DSMARK_VALUE,
-      __TCA_DSMARK_MAX,
+       TCA_DSMARK_UNSPEC,
+       TCA_DSMARK_INDICES,
+       TCA_DSMARK_DEFAULT_INDEX,
+       TCA_DSMARK_SET_TC_INDEX,
+       TCA_DSMARK_MASK,
+       TCA_DSMARK_VALUE,
+       __TCA_DSMARK_MAX,
 };
 
 #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
@@ -554,121 +554,121 @@ enum {
 /* ATM  section */
 
 enum {
-      TCA_ATM_UNSPEC,
-      TCA_ATM_FD,               /* file/socket descriptor */
-      TCA_ATM_PTR,              /* pointer to descriptor - later */
-      TCA_ATM_HDR,              /* LL header */
-      TCA_ATM_EXCESS,           /* excess traffic class (0 for CLP)  */
-      TCA_ATM_ADDR,             /* PVC address (for output only) */
-      TCA_ATM_STATE,            /* VC state (ATM_VS_*; for output only) */
-      __TCA_ATM_MAX,
+       TCA_ATM_UNSPEC,
+       TCA_ATM_FD,             /* file/socket descriptor */
+       TCA_ATM_PTR,            /* pointer to descriptor - later */
+       TCA_ATM_HDR,            /* LL header */
+       TCA_ATM_EXCESS,         /* excess traffic class (0 for CLP)  */
+       TCA_ATM_ADDR,           /* PVC address (for output only) */
+       TCA_ATM_STATE,          /* VC state (ATM_VS_*; for output only) */
+       __TCA_ATM_MAX,
 };
 
-#define TCA_ATM_MAX     (__TCA_ATM_MAX - 1)
+#define TCA_ATM_MAX    (__TCA_ATM_MAX - 1)
 
 /* Network emulator */
 
 enum {
-      TCA_NETEM_UNSPEC,
-      TCA_NETEM_CORR,
-      TCA_NETEM_DELAY_DIST,
-      TCA_NETEM_REORDER,
-      TCA_NETEM_CORRUPT,
-      TCA_NETEM_LOSS,
-      TCA_NETEM_RATE,
-      TCA_NETEM_ECN,
-      TCA_NETEM_RATE64,
-      TCA_NETEM_PAD,
-      TCA_NETEM_LATENCY64,
-      TCA_NETEM_JITTER64,
-      TCA_NETEM_SLOT,
-      TCA_NETEM_SLOT_DIST,
-      __TCA_NETEM_MAX,
+       TCA_NETEM_UNSPEC,
+       TCA_NETEM_CORR,
+       TCA_NETEM_DELAY_DIST,
+       TCA_NETEM_REORDER,
+       TCA_NETEM_CORRUPT,
+       TCA_NETEM_LOSS,
+       TCA_NETEM_RATE,
+       TCA_NETEM_ECN,
+       TCA_NETEM_RATE64,
+       TCA_NETEM_PAD,
+       TCA_NETEM_LATENCY64,
+       TCA_NETEM_JITTER64,
+       TCA_NETEM_SLOT,
+       TCA_NETEM_SLOT_DIST,
+       __TCA_NETEM_MAX,
 };
 
 #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
 
 struct tc_netem_qopt {
-        __u32   latency;        /* added delay (us) */
-        __u32   limit;          /* fifo limit (packets) */
-        __u32   loss;           /* random packet loss (0=none ~0=100%) */
-        __u32   gap;            /* re-ordering gap (0 for none) */
-        __u32   duplicate;      /* random packet dup  (0=none ~0=100%) */
-        __u32   jitter;         /* random jitter in latency (us) */
+       __u32   latency;        /* added delay (us) */
+       __u32   limit;          /* fifo limit (packets) */
+       __u32   loss;           /* random packet loss (0=none ~0=100%) */
+       __u32   gap;            /* re-ordering gap (0 for none) */
+       __u32   duplicate;      /* random packet dup  (0=none ~0=100%) */
+       __u32   jitter;         /* random jitter in latency (us) */
 };
 
 struct tc_netem_corr {
-        __u32   delay_corr;     /* delay correlation */
-        __u32   loss_corr;      /* packet loss correlation */
-        __u32   dup_corr;       /* duplicate correlation  */
+       __u32   delay_corr;     /* delay correlation */
+       __u32   loss_corr;      /* packet loss correlation */
+       __u32   dup_corr;       /* duplicate correlation  */
 };
 
 struct tc_netem_reorder {
-        __u32   probability;
-        __u32   correlation;
+       __u32   probability;
+       __u32   correlation;
 };
 
 struct tc_netem_corrupt {
-        __u32   probability;
-        __u32   correlation;
+       __u32   probability;
+       __u32   correlation;
 };
 
 struct tc_netem_rate {
-        __u32   rate;   /* byte/s */
-        __s32   packet_overhead;
-        __u32   cell_size;
-        __s32   cell_overhead;
+       __u32   rate;   /* byte/s */
+       __s32   packet_overhead;
+       __u32   cell_size;
+       __s32   cell_overhead;
 };
 
 struct tc_netem_slot {
-        __s64   min_delay; /* nsec */
-        __s64   max_delay;
-        __s32   max_packets;
-        __s32   max_bytes;
-        __s64   dist_delay; /* nsec */
-        __s64   dist_jitter; /* nsec */
+       __s64   min_delay; /* nsec */
+       __s64   max_delay;
+       __s32   max_packets;
+       __s32   max_bytes;
+       __s64   dist_delay; /* nsec */
+       __s64   dist_jitter; /* nsec */
 };
 
 enum {
-      NETEM_LOSS_UNSPEC,
-      NETEM_LOSS_GI,            /* General Intuitive - 4 state model */
-      NETEM_LOSS_GE,            /* Gilbert Elliot models */
-      __NETEM_LOSS_MAX
+       NETEM_LOSS_UNSPEC,
+       NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
+       NETEM_LOSS_GE,          /* Gilbert Elliot models */
+       __NETEM_LOSS_MAX
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
 /* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
-        __u32   p13;
-        __u32   p31;
-        __u32   p32;
-        __u32   p14;
-        __u32   p23;
+       __u32   p13;
+       __u32   p31;
+       __u32   p32;
+       __u32   p14;
+       __u32   p23;
 };
 
 /* Gilbert-Elliot models */
 struct tc_netem_gemodel {
-        __u32 p;
-        __u32 r;
-        __u32 h;
-        __u32 k1;
+       __u32 p;
+       __u32 r;
+       __u32 h;
+       __u32 k1;
 };
 
-#define NETEM_DIST_SCALE        8192
-#define NETEM_DIST_MAX          16384
+#define NETEM_DIST_SCALE       8192
+#define NETEM_DIST_MAX         16384
 
 /* DRR */
 
 enum {
-      TCA_DRR_UNSPEC,
-      TCA_DRR_QUANTUM,
-      __TCA_DRR_MAX
+       TCA_DRR_UNSPEC,
+       TCA_DRR_QUANTUM,
+       __TCA_DRR_MAX
 };
 
-#define TCA_DRR_MAX     (__TCA_DRR_MAX - 1)
+#define TCA_DRR_MAX    (__TCA_DRR_MAX - 1)
 
 struct tc_drr_stats {
-        __u32   deficit;
+       __u32   deficit;
 };
 
 /* MQPRIO */
@@ -676,49 +676,49 @@ struct tc_drr_stats {
 #define TC_QOPT_MAX_QUEUE 16
 
 enum {
-      TC_MQPRIO_HW_OFFLOAD_NONE,        /* no offload requested */
-      TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
-      __TC_MQPRIO_HW_OFFLOAD_MAX
+       TC_MQPRIO_HW_OFFLOAD_NONE,      /* no offload requested */
+       TC_MQPRIO_HW_OFFLOAD_TCS,       /* offload TCs, no queue counts */
+       __TC_MQPRIO_HW_OFFLOAD_MAX
 };
 
 #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
 
 enum {
-      TC_MQPRIO_MODE_DCB,
-      TC_MQPRIO_MODE_CHANNEL,
-      __TC_MQPRIO_MODE_MAX
+       TC_MQPRIO_MODE_DCB,
+       TC_MQPRIO_MODE_CHANNEL,
+       __TC_MQPRIO_MODE_MAX
 };
 
 #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
 
 enum {
-      TC_MQPRIO_SHAPER_DCB,
-      TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
-      __TC_MQPRIO_SHAPER_MAX
+       TC_MQPRIO_SHAPER_DCB,
+       TC_MQPRIO_SHAPER_BW_RATE,       /* Add new shapers below */
+       __TC_MQPRIO_SHAPER_MAX
 };
 
 #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
 
 struct tc_mqprio_qopt {
-        __u8    num_tc;
-        __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
-        __u8    hw;
-        __u16   count[TC_QOPT_MAX_QUEUE];
-        __u16   offset[TC_QOPT_MAX_QUEUE];
+       __u8    num_tc;
+       __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
+       __u8    hw;
+       __u16   count[TC_QOPT_MAX_QUEUE];
+       __u16   offset[TC_QOPT_MAX_QUEUE];
 };
 
-#define TC_MQPRIO_F_MODE                0x1
-#define TC_MQPRIO_F_SHAPER              0x2
-#define TC_MQPRIO_F_MIN_RATE            0x4
-#define TC_MQPRIO_F_MAX_RATE            0x8
+#define TC_MQPRIO_F_MODE               0x1
+#define TC_MQPRIO_F_SHAPER             0x2
+#define TC_MQPRIO_F_MIN_RATE           0x4
+#define TC_MQPRIO_F_MAX_RATE           0x8
 
 enum {
-      TCA_MQPRIO_UNSPEC,
-      TCA_MQPRIO_MODE,
-      TCA_MQPRIO_SHAPER,
-      TCA_MQPRIO_MIN_RATE64,
-      TCA_MQPRIO_MAX_RATE64,
-      __TCA_MQPRIO_MAX,
+       TCA_MQPRIO_UNSPEC,
+       TCA_MQPRIO_MODE,
+       TCA_MQPRIO_SHAPER,
+       TCA_MQPRIO_MIN_RATE64,
+       TCA_MQPRIO_MAX_RATE64,
+       __TCA_MQPRIO_MAX,
 };
 
 #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
@@ -726,9 +726,9 @@ enum {
 /* SFB */
 
 enum {
-      TCA_SFB_UNSPEC,
-      TCA_SFB_PARMS,
-      __TCA_SFB_MAX,
+       TCA_SFB_UNSPEC,
+       TCA_SFB_PARMS,
+       __TCA_SFB_MAX,
 };
 
 #define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
@@ -737,248 +737,254 @@ enum {
  * Note: increment, decrement are Q0.16 fixed-point values.
  */
 struct tc_sfb_qopt {
-        __u32 rehash_interval;  /* delay between hash move, in ms */
-        __u32 warmup_time;      /* double buffering warmup time in ms (warmup_time < rehash_interval) */
-        __u32 max;              /* max len of qlen_min */
-        __u32 bin_size;         /* maximum queue length per bin */
-        __u32 increment;        /* probability increment, (d1 in Blue) */
-        __u32 decrement;        /* probability decrement, (d2 in Blue) */
-        __u32 limit;            /* max SFB queue length */
-        __u32 penalty_rate;     /* inelastic flows are rate limited to 'rate' pps */
-        __u32 penalty_burst;
+       __u32 rehash_interval;  /* delay between hash move, in ms */
+       __u32 warmup_time;      /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+       __u32 max;              /* max len of qlen_min */
+       __u32 bin_size;         /* maximum queue length per bin */
+       __u32 increment;        /* probability increment, (d1 in Blue) */
+       __u32 decrement;        /* probability decrement, (d2 in Blue) */
+       __u32 limit;            /* max SFB queue length */
+       __u32 penalty_rate;     /* inelastic flows are rate limited to 'rate' pps */
+       __u32 penalty_burst;
 };
 
 struct tc_sfb_xstats {
-        __u32 earlydrop;
-        __u32 penaltydrop;
-        __u32 bucketdrop;
-        __u32 queuedrop;
-        __u32 childdrop; /* drops in child qdisc */
-        __u32 marked;
-        __u32 maxqlen;
-        __u32 maxprob;
-        __u32 avgprob;
+       __u32 earlydrop;
+       __u32 penaltydrop;
+       __u32 bucketdrop;
+       __u32 queuedrop;
+       __u32 childdrop; /* drops in child qdisc */
+       __u32 marked;
+       __u32 maxqlen;
+       __u32 maxprob;
+       __u32 avgprob;
 };
 
 #define SFB_MAX_PROB 0xFFFF
 
 /* QFQ */
 enum {
-      TCA_QFQ_UNSPEC,
-      TCA_QFQ_WEIGHT,
-      TCA_QFQ_LMAX,
-      __TCA_QFQ_MAX
+       TCA_QFQ_UNSPEC,
+       TCA_QFQ_WEIGHT,
+       TCA_QFQ_LMAX,
+       __TCA_QFQ_MAX
 };
 
-#define TCA_QFQ_MAX     (__TCA_QFQ_MAX - 1)
+#define TCA_QFQ_MAX    (__TCA_QFQ_MAX - 1)
 
 struct tc_qfq_stats {
-        __u32 weight;
-        __u32 lmax;
+       __u32 weight;
+       __u32 lmax;
 };
 
 /* CODEL */
 
 enum {
-      TCA_CODEL_UNSPEC,
-      TCA_CODEL_TARGET,
-      TCA_CODEL_LIMIT,
-      TCA_CODEL_INTERVAL,
-      TCA_CODEL_ECN,
-      TCA_CODEL_CE_THRESHOLD,
-      __TCA_CODEL_MAX
+       TCA_CODEL_UNSPEC,
+       TCA_CODEL_TARGET,
+       TCA_CODEL_LIMIT,
+       TCA_CODEL_INTERVAL,
+       TCA_CODEL_ECN,
+       TCA_CODEL_CE_THRESHOLD,
+       __TCA_CODEL_MAX
 };
 
-#define TCA_CODEL_MAX   (__TCA_CODEL_MAX - 1)
+#define TCA_CODEL_MAX  (__TCA_CODEL_MAX - 1)
 
 struct tc_codel_xstats {
-        __u32   maxpacket; /* largest packet we've seen so far */
-        __u32   count;     /* how many drops we've done since the last time we
-                            * entered dropping state
-                            */
-        __u32   lastcount; /* count at entry to dropping state */
-        __u32   ldelay;    /* in-queue delay seen by most recently dequeued packet */
-        __s32   drop_next; /* time to drop next packet */
-        __u32   drop_overlimit; /* number of time max qdisc packet limit was hit */
-        __u32   ecn_mark;  /* number of packets we ECN marked instead of dropped */
-        __u32   dropping;  /* are we in dropping state ? */
-        __u32   ce_mark;   /* number of CE marked packets because of ce_threshold */
+       __u32   maxpacket; /* largest packet we've seen so far */
+       __u32   count;     /* how many drops we've done since the last time we
+                           * entered dropping state
+                           */
+       __u32   lastcount; /* count at entry to dropping state */
+       __u32   ldelay;    /* in-queue delay seen by most recently dequeued packet */
+       __s32   drop_next; /* time to drop next packet */
+       __u32   drop_overlimit; /* number of time max qdisc packet limit was hit */
+       __u32   ecn_mark;  /* number of packets we ECN marked instead of dropped */
+       __u32   dropping;  /* are we in dropping state ? */
+       __u32   ce_mark;   /* number of CE marked packets because of ce_threshold */
 };
 
 /* FQ_CODEL */
 
 enum {
-      TCA_FQ_CODEL_UNSPEC,
-      TCA_FQ_CODEL_TARGET,
-      TCA_FQ_CODEL_LIMIT,
-      TCA_FQ_CODEL_INTERVAL,
-      TCA_FQ_CODEL_ECN,
-      TCA_FQ_CODEL_FLOWS,
-      TCA_FQ_CODEL_QUANTUM,
-      TCA_FQ_CODEL_CE_THRESHOLD,
-      TCA_FQ_CODEL_DROP_BATCH_SIZE,
-      TCA_FQ_CODEL_MEMORY_LIMIT,
-      __TCA_FQ_CODEL_MAX
+       TCA_FQ_CODEL_UNSPEC,
+       TCA_FQ_CODEL_TARGET,
+       TCA_FQ_CODEL_LIMIT,
+       TCA_FQ_CODEL_INTERVAL,
+       TCA_FQ_CODEL_ECN,
+       TCA_FQ_CODEL_FLOWS,
+       TCA_FQ_CODEL_QUANTUM,
+       TCA_FQ_CODEL_CE_THRESHOLD,
+       TCA_FQ_CODEL_DROP_BATCH_SIZE,
+       TCA_FQ_CODEL_MEMORY_LIMIT,
+       __TCA_FQ_CODEL_MAX
 };
 
-#define TCA_FQ_CODEL_MAX        (__TCA_FQ_CODEL_MAX - 1)
+#define TCA_FQ_CODEL_MAX       (__TCA_FQ_CODEL_MAX - 1)
 
 enum {
-      TCA_FQ_CODEL_XSTATS_QDISC,
-      TCA_FQ_CODEL_XSTATS_CLASS,
+       TCA_FQ_CODEL_XSTATS_QDISC,
+       TCA_FQ_CODEL_XSTATS_CLASS,
 };
 
 struct tc_fq_codel_qd_stats {
-        __u32   maxpacket;      /* largest packet we've seen so far */
-        __u32   drop_overlimit; /* number of time max qdisc
-                                 * packet limit was hit
-                                 */
-        __u32   ecn_mark;       /* number of packets we ECN marked
-                                 * instead of being dropped
-                                 */
-        __u32   new_flow_count; /* number of time packets
-                                 * created a 'new flow'
-                                 */
-        __u32   new_flows_len;  /* count of flows in new list */
-        __u32   old_flows_len;  /* count of flows in old list */
-        __u32   ce_mark;        /* packets above ce_threshold */
-        __u32   memory_usage;   /* in bytes */
-        __u32   drop_overmemory;
+       __u32   maxpacket;      /* largest packet we've seen so far */
+       __u32   drop_overlimit; /* number of time max qdisc
+                                * packet limit was hit
+                                */
+       __u32   ecn_mark;       /* number of packets we ECN marked
+                                * instead of being dropped
+                                */
+       __u32   new_flow_count; /* number of time packets
+                                * created a 'new flow'
+                                */
+       __u32   new_flows_len;  /* count of flows in new list */
+       __u32   old_flows_len;  /* count of flows in old list */
+       __u32   ce_mark;        /* packets above ce_threshold */
+       __u32   memory_usage;   /* in bytes */
+       __u32   drop_overmemory;
 };
 
 struct tc_fq_codel_cl_stats {
-        __s32   deficit;
-        __u32   ldelay;         /* in-queue delay seen by most recently
-                                 * dequeued packet
-                                 */
-        __u32   count;
-        __u32   lastcount;
-        __u32   dropping;
-        __s32   drop_next;
+       __s32   deficit;
+       __u32   ldelay;         /* in-queue delay seen by most recently
+                                * dequeued packet
+                                */
+       __u32   count;
+       __u32   lastcount;
+       __u32   dropping;
+       __s32   drop_next;
 };
 
 struct tc_fq_codel_xstats {
-        __u32   type;
-        union {
-                struct tc_fq_codel_qd_stats qdisc_stats;
-                struct tc_fq_codel_cl_stats class_stats;
-        };
+       __u32   type;
+       union {
+               struct tc_fq_codel_qd_stats qdisc_stats;
+               struct tc_fq_codel_cl_stats class_stats;
+       };
 };
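
A minimal sketch of how a stats reader might consume the blob above (not part of the header; the helper name and the chosen fields are illustrative): the type field selects which union member is valid.

#include <stdio.h>
#include <linux/pkt_sched.h>

static void print_fq_codel_xstats(const struct tc_fq_codel_xstats *xst)
{
        if (xst->type == TCA_FQ_CODEL_XSTATS_QDISC)
                /* whole-qdisc statistics */
                printf("overlimit drops=%u ecn marks=%u memory=%u bytes\n",
                       xst->qdisc_stats.drop_overlimit,
                       xst->qdisc_stats.ecn_mark,
                       xst->qdisc_stats.memory_usage);
        else if (xst->type == TCA_FQ_CODEL_XSTATS_CLASS)
                /* per-flow (class) statistics */
                printf("deficit=%d ldelay=%u\n",
                       xst->class_stats.deficit,
                       xst->class_stats.ldelay);
}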
 
 /* FQ */
 
 enum {
-      TCA_FQ_UNSPEC,
+       TCA_FQ_UNSPEC,
 
-      TCA_FQ_PLIMIT,            /* limit of total number of packets in queue */
+       TCA_FQ_PLIMIT,          /* limit of total number of packets in queue */
 
-      TCA_FQ_FLOW_PLIMIT,       /* limit of packets per flow */
+       TCA_FQ_FLOW_PLIMIT,     /* limit of packets per flow */
 
-      TCA_FQ_QUANTUM,           /* RR quantum */
+       TCA_FQ_QUANTUM,         /* RR quantum */
 
-      TCA_FQ_INITIAL_QUANTUM,           /* RR quantum for new flow */
+       TCA_FQ_INITIAL_QUANTUM,         /* RR quantum for new flow */
 
-      TCA_FQ_RATE_ENABLE,       /* enable/disable rate limiting */
+       TCA_FQ_RATE_ENABLE,     /* enable/disable rate limiting */
 
-      TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
+       TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
 
-      TCA_FQ_FLOW_MAX_RATE,     /* per flow max rate */
+       TCA_FQ_FLOW_MAX_RATE,   /* per flow max rate */
 
-      TCA_FQ_BUCKETS_LOG,       /* log2(number of buckets) */
+       TCA_FQ_BUCKETS_LOG,     /* log2(number of buckets) */
 
-      TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+       TCA_FQ_FLOW_REFILL_DELAY,       /* flow credit refill delay in usec */
 
-      TCA_FQ_ORPHAN_MASK,       /* mask applied to orphaned skb hashes */
+       TCA_FQ_ORPHAN_MASK,     /* mask applied to orphaned skb hashes */
 
-      TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+       TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
 
-      TCA_FQ_CE_THRESHOLD,      /* DCTCP-like CE-marking threshold */
+       TCA_FQ_CE_THRESHOLD,    /* DCTCP-like CE-marking threshold */
 
-      __TCA_FQ_MAX
+       __TCA_FQ_MAX
 };
 
-#define TCA_FQ_MAX      (__TCA_FQ_MAX - 1)
+#define TCA_FQ_MAX     (__TCA_FQ_MAX - 1)
 
 struct tc_fq_qd_stats {
-        __u64   gc_flows;
-        __u64   highprio_packets;
-        __u64   tcp_retrans;
-        __u64   throttled;
-        __u64   flows_plimit;
-        __u64   pkts_too_long;
-        __u64   allocation_errors;
-        __s64   time_next_delayed_flow;
-        __u32   flows;
-        __u32   inactive_flows;
-        __u32   throttled_flows;
-        __u32   unthrottle_latency_ns;
-        __u64   ce_mark;                /* packets above ce_threshold */
+       __u64   gc_flows;
+       __u64   highprio_packets;
+       __u64   tcp_retrans;
+       __u64   throttled;
+       __u64   flows_plimit;
+       __u64   pkts_too_long;
+       __u64   allocation_errors;
+       __s64   time_next_delayed_flow;
+       __u32   flows;
+       __u32   inactive_flows;
+       __u32   throttled_flows;
+       __u32   unthrottle_latency_ns;
+       __u64   ce_mark;                /* packets above ce_threshold */
 };
 
 /* Heavy-Hitter Filter */
 
 enum {
-      TCA_HHF_UNSPEC,
-      TCA_HHF_BACKLOG_LIMIT,
-      TCA_HHF_QUANTUM,
-      TCA_HHF_HH_FLOWS_LIMIT,
-      TCA_HHF_RESET_TIMEOUT,
-      TCA_HHF_ADMIT_BYTES,
-      TCA_HHF_EVICT_TIMEOUT,
-      TCA_HHF_NON_HH_WEIGHT,
-      __TCA_HHF_MAX
+       TCA_HHF_UNSPEC,
+       TCA_HHF_BACKLOG_LIMIT,
+       TCA_HHF_QUANTUM,
+       TCA_HHF_HH_FLOWS_LIMIT,
+       TCA_HHF_RESET_TIMEOUT,
+       TCA_HHF_ADMIT_BYTES,
+       TCA_HHF_EVICT_TIMEOUT,
+       TCA_HHF_NON_HH_WEIGHT,
+       __TCA_HHF_MAX
 };
 
-#define TCA_HHF_MAX     (__TCA_HHF_MAX - 1)
+#define TCA_HHF_MAX    (__TCA_HHF_MAX - 1)
 
 struct tc_hhf_xstats {
-        __u32   drop_overlimit; /* number of times max qdisc packet limit
-                                 * was hit
-                                 */
-        __u32   hh_overlimit;   /* number of times max heavy-hitters was hit */
-        __u32   hh_tot_count;   /* number of captured heavy-hitters so far */
-        __u32   hh_cur_count;   /* number of current heavy-hitters */
+       __u32   drop_overlimit; /* number of times max qdisc packet limit
+                                * was hit
+                                */
+       __u32   hh_overlimit;   /* number of times the heavy-hitter limit was hit */
+       __u32   hh_tot_count;   /* number of captured heavy-hitters so far */
+       __u32   hh_cur_count;   /* number of current heavy-hitters */
 };
 
 /* PIE */
 enum {
-      TCA_PIE_UNSPEC,
-      TCA_PIE_TARGET,
-      TCA_PIE_LIMIT,
-      TCA_PIE_TUPDATE,
-      TCA_PIE_ALPHA,
-      TCA_PIE_BETA,
-      TCA_PIE_ECN,
-      TCA_PIE_BYTEMODE,
-      __TCA_PIE_MAX
+       TCA_PIE_UNSPEC,
+       TCA_PIE_TARGET,
+       TCA_PIE_LIMIT,
+       TCA_PIE_TUPDATE,
+       TCA_PIE_ALPHA,
+       TCA_PIE_BETA,
+       TCA_PIE_ECN,
+       TCA_PIE_BYTEMODE,
+       TCA_PIE_DQ_RATE_ESTIMATOR,
+       __TCA_PIE_MAX
 };
 #define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)
 
 struct tc_pie_xstats {
-        __u64 prob;             /* current probability */
-        __u32 delay;            /* current delay in ms */
-        __u32 avg_dq_rate;      /* current average dq_rate in bits/pie_time */
-        __u32 packets_in;       /* total number of packets enqueued */
-        __u32 dropped;          /* packets dropped due to pie_action */
-        __u32 overlimit;        /* dropped due to lack of space in queue */
-        __u32 maxq;             /* maximum queue size */
-        __u32 ecn_mark;         /* packets marked with ecn*/
+       __u64 prob;                     /* current probability */
+       __u32 delay;                    /* current delay in ms */
+       __u32 avg_dq_rate;              /* current average dq_rate in
+                                        * bits/pie_time
+                                        */
+       __u32 dq_rate_estimating;       /* is avg_dq_rate being calculated? */
+       __u32 packets_in;               /* total number of packets enqueued */
+       __u32 dropped;                  /* packets dropped due to pie_action */
+       __u32 overlimit;                /* dropped due to lack of space
+                                        * in queue
+                                        */
+       __u32 maxq;                     /* maximum queue size */
+       __u32 ecn_mark;                 /* packets marked with ecn */
 };
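
A short sketch of why the new dq_rate_estimating field matters when reading tc_pie_xstats (not part of the header; the helper name is illustrative): per the comments above, avg_dq_rate is only being calculated while the dequeue-rate estimator, now selectable via the new TCA_PIE_DQ_RATE_ESTIMATOR attribute, is active.

#include <stdio.h>
#include <linux/pkt_sched.h>

static void print_pie_xstats(const struct tc_pie_xstats *st)
{
        printf("prob=%llu delay=%u ms dropped=%u overlimit=%u ecn=%u\n",
               (unsigned long long) st->prob, st->delay,
               st->dropped, st->overlimit, st->ecn_mark);

        /* avg_dq_rate is only meaningful while it is being estimated */
        if (st->dq_rate_estimating)
                printf("avg_dq_rate=%u\n", st->avg_dq_rate);
}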
 
 /* CBS */
 struct tc_cbs_qopt {
-        __u8 offload;
-        __u8 _pad[3];
-        __s32 hicredit;
-        __s32 locredit;
-        __s32 idleslope;
-        __s32 sendslope;
+       __u8 offload;
+       __u8 _pad[3];
+       __s32 hicredit;
+       __s32 locredit;
+       __s32 idleslope;
+       __s32 sendslope;
 };
 
 enum {
-      TCA_CBS_UNSPEC,
-      TCA_CBS_PARMS,
-      __TCA_CBS_MAX,
+       TCA_CBS_UNSPEC,
+       TCA_CBS_PARMS,
+       __TCA_CBS_MAX,
 };
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
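
A minimal sketch of filling tc_cbs_qopt (not part of the header; all numbers are illustrative): following the 802.1Qav credit-based shaper model as used by tc(8), the slopes are given in kbit/s and the credits in bytes, with sendslope = idleslope - port transmit rate.

#include <linux/pkt_sched.h>

static struct tc_cbs_qopt example_cbs_opt(void)
{
        const __s32 port_rate = 1000000; /* kbit/s, a 1 Gbit/s link */
        const __s32 idleslope = 20000;   /* kbit/s reserved for this class */

        struct tc_cbs_qopt q = {
                .offload   = 0,          /* software mode */
                .hicredit  = 30,         /* bytes */
                .locredit  = -1470,      /* bytes */
                .idleslope = idleslope,
                .sendslope = idleslope - port_rate,
        };
        return q;
}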
@@ -986,18 +992,18 @@ enum {
 
 /* ETF */
 struct tc_etf_qopt {
-        __s32 delta;
-        __s32 clockid;
-        __u32 flags;
-#define TC_ETF_DEADLINE_MODE_ON _BITUL(0)
-#define TC_ETF_OFFLOAD_ON       _BITUL(1)
-#define TC_ETF_SKIP_SOCK_CHECK  _BITUL(2)
+       __s32 delta;
+       __s32 clockid;
+       __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON        _BITUL(0)
+#define TC_ETF_OFFLOAD_ON      _BITUL(1)
+#define TC_ETF_SKIP_SOCK_CHECK _BITUL(2)
 };
 
 enum {
-      TCA_ETF_UNSPEC,
-      TCA_ETF_PARMS,
-      __TCA_ETF_MAX,
+       TCA_ETF_UNSPEC,
+       TCA_ETF_PARMS,
+       __TCA_ETF_MAX,
 };
 
 #define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
@@ -1005,133 +1011,133 @@ enum {
 
 /* CAKE */
 enum {
-      TCA_CAKE_UNSPEC,
-      TCA_CAKE_PAD,
-      TCA_CAKE_BASE_RATE64,
-      TCA_CAKE_DIFFSERV_MODE,
-      TCA_CAKE_ATM,
-      TCA_CAKE_FLOW_MODE,
-      TCA_CAKE_OVERHEAD,
-      TCA_CAKE_RTT,
-      TCA_CAKE_TARGET,
-      TCA_CAKE_AUTORATE,
-      TCA_CAKE_MEMORY,
-      TCA_CAKE_NAT,
-      TCA_CAKE_RAW,
-      TCA_CAKE_WASH,
-      TCA_CAKE_MPU,
-      TCA_CAKE_INGRESS,
-      TCA_CAKE_ACK_FILTER,
-      TCA_CAKE_SPLIT_GSO,
-      TCA_CAKE_FWMARK,
-      __TCA_CAKE_MAX
-};
-#define TCA_CAKE_MAX    (__TCA_CAKE_MAX - 1)
+       TCA_CAKE_UNSPEC,
+       TCA_CAKE_PAD,
+       TCA_CAKE_BASE_RATE64,
+       TCA_CAKE_DIFFSERV_MODE,
+       TCA_CAKE_ATM,
+       TCA_CAKE_FLOW_MODE,
+       TCA_CAKE_OVERHEAD,
+       TCA_CAKE_RTT,
+       TCA_CAKE_TARGET,
+       TCA_CAKE_AUTORATE,
+       TCA_CAKE_MEMORY,
+       TCA_CAKE_NAT,
+       TCA_CAKE_RAW,
+       TCA_CAKE_WASH,
+       TCA_CAKE_MPU,
+       TCA_CAKE_INGRESS,
+       TCA_CAKE_ACK_FILTER,
+       TCA_CAKE_SPLIT_GSO,
+       TCA_CAKE_FWMARK,
+       __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX   (__TCA_CAKE_MAX - 1)
 
 enum {
-      __TCA_CAKE_STATS_INVALID,
-      TCA_CAKE_STATS_PAD,
-      TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
-      TCA_CAKE_STATS_MEMORY_LIMIT,
-      TCA_CAKE_STATS_MEMORY_USED,
-      TCA_CAKE_STATS_AVG_NETOFF,
-      TCA_CAKE_STATS_MIN_NETLEN,
-      TCA_CAKE_STATS_MAX_NETLEN,
-      TCA_CAKE_STATS_MIN_ADJLEN,
-      TCA_CAKE_STATS_MAX_ADJLEN,
-      TCA_CAKE_STATS_TIN_STATS,
-      TCA_CAKE_STATS_DEFICIT,
-      TCA_CAKE_STATS_COBALT_COUNT,
-      TCA_CAKE_STATS_DROPPING,
-      TCA_CAKE_STATS_DROP_NEXT_US,
-      TCA_CAKE_STATS_P_DROP,
-      TCA_CAKE_STATS_BLUE_TIMER_US,
-      __TCA_CAKE_STATS_MAX
+       __TCA_CAKE_STATS_INVALID,
+       TCA_CAKE_STATS_PAD,
+       TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+       TCA_CAKE_STATS_MEMORY_LIMIT,
+       TCA_CAKE_STATS_MEMORY_USED,
+       TCA_CAKE_STATS_AVG_NETOFF,
+       TCA_CAKE_STATS_MIN_NETLEN,
+       TCA_CAKE_STATS_MAX_NETLEN,
+       TCA_CAKE_STATS_MIN_ADJLEN,
+       TCA_CAKE_STATS_MAX_ADJLEN,
+       TCA_CAKE_STATS_TIN_STATS,
+       TCA_CAKE_STATS_DEFICIT,
+       TCA_CAKE_STATS_COBALT_COUNT,
+       TCA_CAKE_STATS_DROPPING,
+       TCA_CAKE_STATS_DROP_NEXT_US,
+       TCA_CAKE_STATS_P_DROP,
+       TCA_CAKE_STATS_BLUE_TIMER_US,
+       __TCA_CAKE_STATS_MAX
 };
 #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
 
 enum {
-      __TCA_CAKE_TIN_STATS_INVALID,
-      TCA_CAKE_TIN_STATS_PAD,
-      TCA_CAKE_TIN_STATS_SENT_PACKETS,
-      TCA_CAKE_TIN_STATS_SENT_BYTES64,
-      TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
-      TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
-      TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
-      TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
-      TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
-      TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
-      TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
-      TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
-      TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
-      TCA_CAKE_TIN_STATS_TARGET_US,
-      TCA_CAKE_TIN_STATS_INTERVAL_US,
-      TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
-      TCA_CAKE_TIN_STATS_WAY_MISSES,
-      TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
-      TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
-      TCA_CAKE_TIN_STATS_AVG_DELAY_US,
-      TCA_CAKE_TIN_STATS_BASE_DELAY_US,
-      TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
-      TCA_CAKE_TIN_STATS_BULK_FLOWS,
-      TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
-      TCA_CAKE_TIN_STATS_MAX_SKBLEN,
-      TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
-      __TCA_CAKE_TIN_STATS_MAX
+       __TCA_CAKE_TIN_STATS_INVALID,
+       TCA_CAKE_TIN_STATS_PAD,
+       TCA_CAKE_TIN_STATS_SENT_PACKETS,
+       TCA_CAKE_TIN_STATS_SENT_BYTES64,
+       TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+       TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+       TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+       TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+       TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+       TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+       TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+       TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+       TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+       TCA_CAKE_TIN_STATS_TARGET_US,
+       TCA_CAKE_TIN_STATS_INTERVAL_US,
+       TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+       TCA_CAKE_TIN_STATS_WAY_MISSES,
+       TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+       TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+       TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+       TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+       TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+       TCA_CAKE_TIN_STATS_BULK_FLOWS,
+       TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+       TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+       TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+       __TCA_CAKE_TIN_STATS_MAX
 };
 #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
 #define TC_CAKE_MAX_TINS (8)
 
 enum {
-      CAKE_FLOW_NONE = 0,
-      CAKE_FLOW_SRC_IP,
-      CAKE_FLOW_DST_IP,
-      CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
-      CAKE_FLOW_FLOWS,
-      CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
-      CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
-      CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
-      CAKE_FLOW_MAX,
+       CAKE_FLOW_NONE = 0,
+       CAKE_FLOW_SRC_IP,
+       CAKE_FLOW_DST_IP,
+       CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+       CAKE_FLOW_FLOWS,
+       CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_MAX,
 };
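
A compile-time sketch (not part of the header) spelling out the identities noted in the comments above: the composite flow modes are bitwise ORs of the basic ones.

#include <linux/pkt_sched.h>

_Static_assert(CAKE_FLOW_HOSTS    == (CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP),
               "hosts = srchost + dsthost");
_Static_assert(CAKE_FLOW_DUAL_SRC == (CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS),
               "dual-srchost = srchost + flows");
_Static_assert(CAKE_FLOW_TRIPLE   == (CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS),
               "triple-isolate = hosts + flows");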
 
 enum {
-      CAKE_DIFFSERV_DIFFSERV3 = 0,
-      CAKE_DIFFSERV_DIFFSERV4,
-      CAKE_DIFFSERV_DIFFSERV8,
-      CAKE_DIFFSERV_BESTEFFORT,
-      CAKE_DIFFSERV_PRECEDENCE,
-      CAKE_DIFFSERV_MAX
+       CAKE_DIFFSERV_DIFFSERV3 = 0,
+       CAKE_DIFFSERV_DIFFSERV4,
+       CAKE_DIFFSERV_DIFFSERV8,
+       CAKE_DIFFSERV_BESTEFFORT,
+       CAKE_DIFFSERV_PRECEDENCE,
+       CAKE_DIFFSERV_MAX
 };
 
 enum {
-      CAKE_ACK_NONE = 0,
-      CAKE_ACK_FILTER,
-      CAKE_ACK_AGGRESSIVE,
-      CAKE_ACK_MAX
+       CAKE_ACK_NONE = 0,
+       CAKE_ACK_FILTER,
+       CAKE_ACK_AGGRESSIVE,
+       CAKE_ACK_MAX
 };
 
 enum {
-      CAKE_ATM_NONE = 0,
-      CAKE_ATM_ATM,
-      CAKE_ATM_PTM,
-      CAKE_ATM_MAX
+       CAKE_ATM_NONE = 0,
+       CAKE_ATM_ATM,
+       CAKE_ATM_PTM,
+       CAKE_ATM_MAX
 };
 
 
 /* TAPRIO */
 enum {
-      TC_TAPRIO_CMD_SET_GATES = 0x00,
-      TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
-      TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+       TC_TAPRIO_CMD_SET_GATES = 0x00,
+       TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+       TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
 };
 
 enum {
-      TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
-      TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
-      TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
-      TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
-      TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
-      __TCA_TAPRIO_SCHED_ENTRY_MAX,
+       TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+       TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+       TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+       TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+       TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+       __TCA_TAPRIO_SCHED_ENTRY_MAX,
 };
 #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
 
@@ -1143,9 +1149,9 @@ enum {
  *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
  */
 enum {
-      TCA_TAPRIO_SCHED_UNSPEC,
-      TCA_TAPRIO_SCHED_ENTRY,
-      __TCA_TAPRIO_SCHED_MAX,
+       TCA_TAPRIO_SCHED_UNSPEC,
+       TCA_TAPRIO_SCHED_ENTRY,
+       __TCA_TAPRIO_SCHED_MAX,
 };
 
 #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
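
A rough sketch of building the nesting documented above (not part of the header; libmnl is an assumption, any netlink helper works, and the function name is illustrative): each schedule entry is one TCA_TAPRIO_SCHED_ENTRY nest inside TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, carrying the ENTRY_* attributes.

#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

/* Append one gate-control entry; the caller has already opened the
 * TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST nest on nlh. */
static void put_taprio_entry(struct nlmsghdr *nlh, __u32 index, __u8 cmd,
                             __u32 gate_mask, __u32 interval)
{
        struct nlattr *entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);

        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INDEX, index);
        mnl_attr_put_u8(nlh, TCA_TAPRIO_SCHED_ENTRY_CMD, cmd); /* e.g. TC_TAPRIO_CMD_SET_GATES */
        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, gate_mask);
        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, interval);
        mnl_attr_nest_end(nlh, entry);
}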
@@ -1160,23 +1166,23 @@ enum {
  *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
  */
 
-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST      BIT(0)
-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD       BIT(1)
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST     BIT(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD      BIT(1)
 
 enum {
-      TCA_TAPRIO_ATTR_UNSPEC,
-      TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
-      TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
-      TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
-      TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
-      TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
-      TCA_TAPRIO_PAD,
-      TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
-      TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
-      TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
-      TCA_TAPRIO_ATTR_FLAGS, /* u32 */
-      TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
-      __TCA_TAPRIO_ATTR_MAX,
+       TCA_TAPRIO_ATTR_UNSPEC,
+       TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+       TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested list of entries */
+       TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+       TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+       TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+       TCA_TAPRIO_PAD,
+       TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
+       TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
+       TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
+       TCA_TAPRIO_ATTR_FLAGS, /* u32 */
+       TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+       __TCA_TAPRIO_ATTR_MAX,
 };
 
 #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
index 80ad27fcc0c53360acb40d56e8b5b6aa1baac6c4..1418a8362bb7548a8cd0a25c77ae78e8d2d6a389 100644 (file)
@@ -157,13 +157,20 @@ enum {
        RTM_GETCHAIN,
 #define RTM_GETCHAIN RTM_GETCHAIN
 
-        RTM_NEWNEXTHOP = 104,
+       RTM_NEWNEXTHOP = 104,
 #define RTM_NEWNEXTHOP RTM_NEWNEXTHOP
-        RTM_DELNEXTHOP,
+       RTM_DELNEXTHOP,
 #define RTM_DELNEXTHOP RTM_DELNEXTHOP
-        RTM_GETNEXTHOP,
+       RTM_GETNEXTHOP,
 #define RTM_GETNEXTHOP RTM_GETNEXTHOP
 
+       RTM_NEWLINKPROP = 108,
+#define RTM_NEWLINKPROP        RTM_NEWLINKPROP
+       RTM_DELLINKPROP,
+#define RTM_DELLINKPROP        RTM_DELLINKPROP
+       RTM_GETLINKPROP,
+#define RTM_GETLINKPROP        RTM_GETLINKPROP
+
        __RTM_MAX,
 #define RTM_MAX                (((__RTM_MAX + 3) & ~3) - 1)
 };
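
A compile-time sketch of how the new link-property messages slot into the existing grouping (not part of the header; this assumes RTM_BASE is 16, as defined earlier in rtnetlink.h): RTM_FAM(), shown just below, maps each group of four message types to one per-family index, so RTM_NEWLINKPROP, RTM_DELLINKPROP and RTM_GETLINKPROP all share a slot.

#include <linux/rtnetlink.h>

_Static_assert(RTM_FAM(RTM_NEWLINKPROP) == RTM_FAM(RTM_GETLINKPROP),
               "NEW/DEL/GETLINKPROP are grouped into one family slot");
_Static_assert(RTM_FAM(RTM_NEWLINKPROP) == (108 - 16) >> 2, /* == 23 */
               "RTM_FAM() groups message types in fours");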
@@ -172,7 +179,7 @@ enum {
 #define RTM_NR_FAMILIES        (RTM_NR_MSGTYPES >> 2)
 #define RTM_FAM(cmd)   (((cmd) - RTM_BASE) >> 2)
 
-/*
+/* 
    Generic structure for encapsulation of optional route information.
    It is reminiscent of sockaddr, but with sa_family replaced
    with attribute type.
@@ -212,7 +219,7 @@ struct rtmsg {
 
        unsigned char           rtm_table;      /* Routing table id */
        unsigned char           rtm_protocol;   /* Routing protocol; see below  */
-        unsigned char          rtm_scope;      /* See below */
+       unsigned char           rtm_scope;      /* See below */ 
        unsigned char           rtm_type;       /* See below    */
 
        unsigned                rtm_flags;
@@ -349,7 +356,7 @@ enum rtattr_type_t {
        RTA_IP_PROTO,
        RTA_SPORT,
        RTA_DPORT,
-        RTA_NH_ID,
+       RTA_NH_ID,
        __RTA_MAX
 };
 
@@ -523,7 +530,7 @@ struct ifinfomsg {
 };
 
 /********************************************************************
- *             prefix information
+ *             prefix information 
  ****/
 
 struct prefixmsg {
@@ -537,7 +544,7 @@ struct prefixmsg {
        unsigned char   prefix_pad3;
 };
 
-enum
+enum 
 {
        PREFIX_UNSPEC,
        PREFIX_ADDRESS,
@@ -712,7 +719,7 @@ enum rtnetlink_groups {
 #define RTNLGRP_IPV4_MROUTE_R  RTNLGRP_IPV4_MROUTE_R
        RTNLGRP_IPV6_MROUTE_R,
 #define RTNLGRP_IPV6_MROUTE_R  RTNLGRP_IPV6_MROUTE_R
-        RTNLGRP_NEXTHOP,
+       RTNLGRP_NEXTHOP,
 #define RTNLGRP_NEXTHOP                RTNLGRP_NEXTHOP
        __RTNLGRP_MAX
 };
index 071ce4167f5491805cb0357b3a726dbf0ca50dba..ae88be14c9478e2f8539e2ec5abe81ff87dcce5f 100644 (file)
  * one but not both of:
  *
  *    WGDEVICE_A_IFINDEX: NLA_U32
- *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1
+ *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
  *
  * The kernel will then return several messages (NLM_F_MULTI) containing the
  * following tree of nested items:
  *
  *    WGDEVICE_A_IFINDEX: NLA_U32
- *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1
- *    WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN
- *    WGDEVICE_A_PUBLIC_KEY: len WG_KEY_LEN
+ *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ *    WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ *    WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
  *    WGDEVICE_A_LISTEN_PORT: NLA_U16
  *    WGDEVICE_A_FWMARK: NLA_U32
  *    WGDEVICE_A_PEERS: NLA_NESTED
  *        0: NLA_NESTED
- *            WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN
- *            WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN
- *            WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6
+ *            WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ *            WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ *            WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6
  *            WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16
- *            WGPEER_A_LAST_HANDSHAKE_TIME: struct __kernel_timespec
+ *            WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec
  *            WGPEER_A_RX_BYTES: NLA_U64
  *            WGPEER_A_TX_BYTES: NLA_U64
  *            WGPEER_A_ALLOWEDIPS: NLA_NESTED
  *                0: NLA_NESTED
  *                    WGALLOWEDIP_A_FAMILY: NLA_U16
- *                    WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr
+ *                    WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr
  *                    WGALLOWEDIP_A_CIDR_MASK: NLA_U8
  *                0: NLA_NESTED
  *                    ...
@@ -77,7 +77,7 @@
  * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME:
  *
  *    WGDEVICE_A_IFINDEX: NLA_U32
- *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1
+ *    WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
  *    WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current
  *                      peers should be removed prior to adding the list below.
  *    WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove
  *        0: NLA_NESTED
  *            WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN
  *            WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the
- *                            specified peer should be removed rather than
- *                            added/updated and/or WGPEER_F_REPLACE_ALLOWEDIPS
- *                            if all current allowed IPs of this peer should be
- *                            removed prior to adding the list below.
+ *                            specified peer should not exist at the end of the
+ *                            operation, rather than added/updated and/or
+ *                            WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed
+ *                            IPs of this peer should be removed prior to adding
+ *                            the list below and/or WGPEER_F_UPDATE_ONLY if the
+ *                            peer should only be set if it already exists.
  *            WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove
  *            WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6
  *            WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable
  * filling in information not contained in the prior. Note that if
  * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably
  * should not be specified in fragments that come after, so that the list
- * of peers is only cleared the first time but appened after. Likewise for
+ * of peers is only cleared the first time but appended after. Likewise for
  * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message
  * of a peer, it likely should not be specified in subsequent fragments.
  *
@@ -142,7 +144,8 @@ enum wg_cmd {
 #define WG_CMD_MAX (__WG_CMD_MAX - 1)
 
 enum wgdevice_flag {
-       WGDEVICE_F_REPLACE_PEERS = 1U << 0
+       WGDEVICE_F_REPLACE_PEERS = 1U << 0,
+       __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS
 };
 enum wgdevice_attribute {
        WGDEVICE_A_UNSPEC,
@@ -160,7 +163,10 @@ enum wgdevice_attribute {
 
 enum wgpeer_flag {
        WGPEER_F_REMOVE_ME = 1U << 0,
-       WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1
+       WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1,
+       WGPEER_F_UPDATE_ONLY = 1U << 2,
+       __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS |
+                        WGPEER_F_UPDATE_ONLY
 };
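
The new __WGPEER_F_ALL mask gives configuration tools a cheap validity check before building a WG_CMD_SET_DEVICE request; a minimal sketch (not part of the header; the function name is illustrative, and the header is assumed to be installed as <linux/wireguard.h>):

#include <errno.h>
#include <linux/wireguard.h>

static int validate_wgpeer_flags(unsigned int flags)
{
        /* Reject any flag bits this header does not define. */
        if (flags & ~(unsigned int) __WGPEER_F_ALL)
                return -EINVAL;
        return 0;
}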
 enum wgpeer_attribute {
        WGPEER_A_UNSPEC,