HINIC3_EVENT_SRV_NIC = 1
};
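+/* Identifiers used when logging fault events such as TX timeout. */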
+enum hinic3_fault_err_level {
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR = 3,
+};
+
+enum hinic3_fault_source_type {
+ HINIC3_FAULT_SRC_HW_PHY_FAULT = 9,
+ HINIC3_FAULT_SRC_TX_TIMEOUT = 22,
+};
+
#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
/* driver-specific data of pci_dev */
hinic3_free_txqs(netdev);
}
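+/* Report fault events that were flagged in atomic context (currently only
+ * TX timeout) and re-arm; runs roughly once per second.
+ */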
+static void hinic3_periodic_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = container_of(delay, struct hinic3_nic_dev, periodic_work);
+ if (test_and_clear_bit(HINIC3_EVENT_WORK_TX_TIMEOUT,
+ &nic_dev->event_flag))
+ dev_info(nic_dev->hwdev->dev,
+ "Fault event report, src: %u, level: %u\n",
+ HINIC3_FAULT_SRC_TX_TIMEOUT,
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR);
+
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
+}
+
static int hinic3_init_nic_dev(struct net_device *netdev,
struct hinic3_hwdev *hwdev)
{
nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
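+ /* Dedicated workqueue for the self re-arming periodic work. */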
+ nic_dev->workq = create_singlethread_workqueue(HINIC3_NIC_DEV_WQ_NAME);
+ if (!nic_dev->workq) {
+ dev_err(hwdev->dev, "Failed to create nic workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&nic_dev->periodic_work,
+ hinic3_periodic_work_handler);
+
return 0;
}
}
}
+static void hinic3_free_nic_dev(struct hinic3_nic_dev *nic_dev)
+{
+ destroy_workqueue(nic_dev->workq);
+}
+
static int hinic3_nic_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
err = hinic3_init_nic_io(nic_dev);
if (err)
- goto err_free_netdev;
+ goto err_free_nic_dev;
err = hinic3_sw_init(netdev);
if (err)
if (err)
goto err_uninit_sw;
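+ /* Kick off the self re-arming periodic fault-reporting work. */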
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
netif_carrier_off(netdev);
err = register_netdev(netdev);
return 0;
err_uninit_nic_feature:
+ disable_delayed_work_sync(&nic_dev->periodic_work);
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
err_free_nic_io:
hinic3_free_nic_io(nic_dev);
-
+err_free_nic_dev:
+ hinic3_free_nic_dev(nic_dev);
err_free_netdev:
free_netdev(netdev);
netdev = nic_dev->netdev;
unregister_netdev(netdev);
+ disable_delayed_work_sync(&nic_dev->periodic_work);
+ hinic3_free_nic_dev(nic_dev);
+
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
hinic3_sw_uninit(netdev);
return 0;
}
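+/* Called from the netdev watchdog in atomic context: snapshot the queue
+ * state and set a flag; the periodic work does the actual fault report
+ * from process context.
+ */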
+static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_io_queue *sq;
+ u16 sw_pi, hw_ci;
+
+ sq = nic_dev->txqs[txqueue].sq;
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+ netdev_dbg(netdev,
+ "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
+ txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
+ nic_dev->q_params.irq_cfg[txqueue].napi.state);
+
+ if (sw_pi != hw_ci)
+ set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
+}
+
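+/* Fold the per-queue SW counters into the netdev stats. The
+ * u64_stats_fetch_*() retry loops make the 64-bit reads tear-free on
+ * 32-bit architectures.
+ */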
+static void hinic3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u64 bytes, packets, dropped, errors;
+ struct hinic3_txq_stats *txq_stats;
+ struct hinic3_rxq_stats *rxq_stats;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+ unsigned int start;
+ int i;
+
+ bytes = 0;
+ packets = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->txqs)
+ break;
+
+ txq = &nic_dev->txqs[i];
+ txq_stats = &txq->txq_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ bytes += txq_stats->bytes;
+ packets += txq_stats->packets;
+ dropped += txq_stats->dropped;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ }
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = dropped;
+
+ bytes = 0;
+ packets = 0;
+ errors = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ rxq = &nic_dev->rxqs[i];
+ rxq_stats = &rxq->rxq_stats;
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ bytes += rxq_stats->bytes;
+ packets += rxq_stats->packets;
+ errors += rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ dropped += rxq_stats->dropped;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ }
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->rx_errors = errors;
+ stats->rx_dropped = dropped;
+}
+
static const struct net_device_ops hinic3_netdev_ops = {
.ndo_open = hinic3_open,
.ndo_stop = hinic3_close,
.ndo_change_mtu = hinic3_change_mtu,
.ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_tx_timeout = hinic3_tx_timeout,
+ .ndo_get_stats64 = hinic3_get_stats64,
.ndo_start_xmit = hinic3_xmit_frame,
};
HINIC3_RSS_ENABLE,
};
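+/* Bits stored in nic_dev->event_flag for deferred event handling. */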
+enum hinic3_event_work_flags {
+ HINIC3_EVENT_WORK_TX_TIMEOUT,
+};
+
enum hinic3_rss_hash_type {
HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
struct hinic3_intr_coal_info *intr_coalesce;
+ struct workqueue_struct *workq;
+ struct delayed_work periodic_work;
/* lock for enable/disable port */
struct mutex port_state_mutex;
+ /* flag bits defined by hinic3_event_work_flags */
+ unsigned long event_flag;
bool link_status_up;
};
HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
HINIC3_LRO_PKT_HDR_LEN_IPV4)
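+/* Zero the counters inside a u64_stats write section so concurrent readers
+ * on 32-bit systems retry rather than observe torn values.
+ */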
+static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
+{
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->bytes = 0;
+ rxq_stats->packets = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
+ rxq_stats->dropped = 0;
+ rxq_stats->rx_buf_empty = 0;
+
+ rxq_stats->alloc_skb_err = 0;
+ rxq_stats->alloc_rx_buf_err = 0;
+ rxq_stats->restore_drop_sge = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
+}
+
+static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
+{
+ struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+ u64_stats_init(&rxq_stats->syncp);
+ hinic3_rxq_clean_stats(rxq_stats);
+}
+
int hinic3_alloc_rxqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
rxq->q_depth = nic_dev->q_params.rq_depth;
rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+
+ hinic3_rxq_stats_init(rxq);
}
return 0;
#define RQ_CQE_STATUS_GET(val, member) \
FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
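+/* Software-maintained per-RX-queue counters, protected by syncp. */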
+struct hinic3_rxq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 dropped;
+ u64 rx_buf_empty;
+ u64 alloc_skb_err;
+ u64 alloc_rx_buf_err;
+ u64 restore_drop_sge;
+ struct u64_stats_sync syncp;
+};
+
/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
__le32 status;
u16 buf_len;
u32 buf_len_shift;
+ struct hinic3_rxq_stats rxq_stats;
u32 cons_idx;
u32 delta;
#define MIN_SKB_LEN 32
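+/* Zero all TX counters within a single u64_stats write section. */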
+static void hinic3_txq_clean_stats(struct hinic3_txq_stats *txq_stats)
+{
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->bytes = 0;
+ txq_stats->packets = 0;
+ txq_stats->busy = 0;
+ txq_stats->dropped = 0;
+
+ txq_stats->skb_pad_err = 0;
+ txq_stats->frag_len_overflow = 0;
+ txq_stats->offload_cow_skb_err = 0;
+ txq_stats->map_frag_err = 0;
+ txq_stats->unknown_tunnel_pkt = 0;
+ txq_stats->frag_size_err = 0;
+ u64_stats_update_end(&txq_stats->syncp);
+}
+
+static void hinic3_txq_stats_init(struct hinic3_txq *txq)
+{
+ struct hinic3_txq_stats *txq_stats = &txq->txq_stats;
+
+ u64_stats_init(&txq_stats->syncp);
+ hinic3_txq_clean_stats(txq_stats);
+}
+
int hinic3_alloc_txqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
txq->q_depth = nic_dev->q_params.sq_depth;
txq->q_mask = nic_dev->q_params.sq_depth - 1;
txq->dev = &pdev->dev;
+
+ hinic3_txq_stats_init(txq);
}
return 0;
u32 task_type;
};
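+/* Software-maintained per-TX-queue counters, protected by syncp. */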
+struct hinic3_txq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 busy;
+ u64 dropped;
+ u64 skb_pad_err;
+ u64 frag_len_overflow;
+ u64 offload_cow_skb_err;
+ u64 map_frag_err;
+ u64 unknown_tunnel_pkt;
+ u64 frag_size_err;
+ struct u64_stats_sync syncp;
+};
+
struct hinic3_dma_info {
dma_addr_t dma;
u32 len;
struct hinic3_tx_info *tx_info;
struct hinic3_io_queue *sq;
+
+ struct hinic3_txq_stats txq_stats;
} ____cacheline_aligned;
struct hinic3_dyna_txq_res {