}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
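+ /* Transmit frames from the XSK TX ring of queue @q_idx. Called from
+ * the TX completion (NAPI) path; returns the number of frames pushed
+ * to the hardware TX channel.
+ */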
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+ unsigned int q_idx)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+ struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *host_desc;
+ dma_addr_t dma_desc, dma_buf;
+ struct prueth_swdata *swdata;
+ struct xdp_desc xdp_desc;
+ int num_tx = 0, pkt_len;
+ int descs_avail, ret;
+ u32 *epib;
+ int i;
+
+ descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+ /* Never let XDP fill the TX ring completely: keep MAX_SKB_FRAGS
+ * descriptors in reserve for the normal TX path, which stops the
+ * queue itself when it runs short.
+ */
+ if (descs_avail <= MAX_SKB_FRAGS)
+ return 0;
+
+ descs_avail -= MAX_SKB_FRAGS;
+
+ for (i = 0; i < descs_avail; i++) {
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
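+ /* UMEM buffers are pre-mapped by the pool: look up the DMA
+ * address and sync the payload for the device.
+ */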
+ dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ pkt_len = xdp_desc.len;
+ xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc))
+ break;
+
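+ /* Build the host descriptor: EPIB present, extended info words
+ * cleared, packet length and the port/queue routing tag set.
+ */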
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, 0);
+ epib = host_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+ (emac->port_id | (q_idx << 8)));
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+ pkt_len);
+
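+ /* Tag the descriptor as XSK so TX completion accounts it via
+ * xsk_tx_completed() and skips the buffer unmap.
+ */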
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->type = PRUETH_SWDATA_XSK;
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ host_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+ host_desc, dma_desc);
+ if (ret) {
+ ndev->stats.tx_errors++;
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ break;
+ }
+
+ num_tx++;
+ }
+
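+ /* Publish the consumed TX ring entries back to userspace and
+ * wake any writers waiting for ring space.
+ */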
+ xsk_tx_release(pool);
+ return num_tx;
+}
+
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(first_desc);
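+ /* XSK buffers are owned and unmapped by the xsk_buff_pool;
+ * only the descriptor itself needs to be freed here.
+ */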
+ if (swdata->type == PRUETH_SWDATA_XSK)
+ goto free_pool;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_pool:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ int xsk_frames_done = 0;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
total_bytes += xdpf->len;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
+ pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ xsk_frames_done++;
+ break;
default:
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
__netif_tx_unlock(netif_txq);
}
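+ /* Report completed XSK frames, rearm the need_wakeup hint and
+ * kick zero-copy TX for any descriptors userspace has queued.
+ */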
+ if (tx_chn->xsk_pool) {
+ if (xsk_frames_done)
+ xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ txq_trans_cond_update(netif_txq);
+ emac_xsk_xmit_zc(emac, chn);
+ }
+
return num_tx;
}
struct prueth_tx_chn *tx_chns =
container_of(timer, struct prueth_tx_chn, tx_hrtimer);
- enable_irq(tx_chns->irq);
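+ /* NAPI may have been scheduled without the hard IRQ firing
+ * (e.g. from an XSK wakeup), so only re-enable the line if the
+ * IRQ handler actually masked it.
+ */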
+ if (tx_chns->irq_disabled) {
+ tx_chns->irq_disabled = false;
+ enable_irq(tx_chns->irq);
+ }
return HRTIMER_NORESTART;
}
ns_to_ktime(tx_chn->tx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(tx_chn->irq);
+ if (tx_chn->irq_disabled) {
+ tx_chn->irq_disabled = false;
+ enable_irq(tx_chn->irq);
+ }
}
}
{
struct prueth_tx_chn *tx_chn = dev_id;
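+ /* Record the disable so completion paths re-enable exactly once */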
+ tx_chn->irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&tx_chn->napi_tx);
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
struct prueth_swdata *swdata;
struct xdp_frame *xdpf;
struct sk_buff *skb;
xdpf = swdata->data.xdpf;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
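+ /* Descriptor drained at teardown: report the frame as completed
+ * so the pool's completion ring stays consistent.
+ */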
+ xsk_tx_completed(tx_chn->xsk_pool, 1);
+ break;
default:
break;
}