net: ibmveth: Indented struct ibmveth_adapter correctly
author     Dave Marquardt <davemarq@linux.ibm.com>
           Thu, 1 May 2025 19:49:42 +0000 (14:49 -0500)
committer  Paolo Abeni <pabeni@redhat.com>
           Tue, 6 May 2025 08:24:37 +0000 (10:24 +0200)
Made struct ibmveth_adapter follow indentation rules

Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250501194944.283729-2-davemarq@linux.ibm.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/ibm/ibmveth.h

index 8468e2c59d7a4c6729c369af357017ad2d2b54a3..0f72ce54e7cfbc818a03849529162505424acc94 100644 (file)
@@ -134,38 +134,38 @@ struct ibmveth_rx_q {
 };
 
 struct ibmveth_adapter {
-    struct vio_dev *vdev;
-    struct net_device *netdev;
-    struct napi_struct napi;
-    unsigned int mcastFilterSize;
-    void * buffer_list_addr;
-    void * filter_list_addr;
-    void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
-    unsigned int tx_ltb_size;
-    dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
-    dma_addr_t buffer_list_dma;
-    dma_addr_t filter_list_dma;
-    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
-    struct ibmveth_rx_q rx_queue;
-    int rx_csum;
-    int large_send;
-    bool is_active_trunk;
-
-    u64 fw_ipv6_csum_support;
-    u64 fw_ipv4_csum_support;
-    u64 fw_large_send_support;
-    /* adapter specific stats */
-    u64 replenish_task_cycles;
-    u64 replenish_no_mem;
-    u64 replenish_add_buff_failure;
-    u64 replenish_add_buff_success;
-    u64 rx_invalid_buffer;
-    u64 rx_no_buffer;
-    u64 tx_map_failed;
-    u64 tx_send_failed;
-    u64 tx_large_packets;
-    u64 rx_large_packets;
-    /* Ethtool settings */
+       struct vio_dev *vdev;
+       struct net_device *netdev;
+       struct napi_struct napi;
+       unsigned int mcastFilterSize;
+       void *buffer_list_addr;
+       void *filter_list_addr;
+       void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+       unsigned int tx_ltb_size;
+       dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+       dma_addr_t buffer_list_dma;
+       dma_addr_t filter_list_dma;
+       struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+       struct ibmveth_rx_q rx_queue;
+       int rx_csum;
+       int large_send;
+       bool is_active_trunk;
+
+       u64 fw_ipv6_csum_support;
+       u64 fw_ipv4_csum_support;
+       u64 fw_large_send_support;
+       /* adapter specific stats */
+       u64 replenish_task_cycles;
+       u64 replenish_no_mem;
+       u64 replenish_add_buff_failure;
+       u64 replenish_add_buff_success;
+       u64 rx_invalid_buffer;
+       u64 rx_no_buffer;
+       u64 tx_map_failed;
+       u64 tx_send_failed;
+       u64 tx_large_packets;
+       u64 rx_large_packets;
+       /* Ethtool settings */
        u8 duplex;
        u32 speed;
 };
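
For reference, the kernel coding style (Documentation/process/coding-style.rst) indents with tabs, which render as 8 columns, and writes pointer declarations with no space between the '*' and the name; the removed lines above used 4-space indents and "void * " spacing, which scripts/checkpatch.pl flags. A minimal sketch of the convention, using made-up names rather than anything from the ibmveth driver:

	/* illustrative only: a struct laid out per kernel coding style */
	struct example_adapter {
		struct net_device *netdev;	/* one tab per indent level */
		void *ring_base;		/* '*' binds to the name, no space after it */
		u64 rx_dropped;
	};

Whitespace-only cleanups like this keep later functional patches free of unrelated indentation noise and quiet checkpatch warnings such as "code indent should use tabs where possible".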