[ Upstream commit 7f04bd109d4c358a12b125bc79a6f0eac2e915ec ]
xdp_do_flush_map() is deprecated and new code should use xdp_do_flush()
instead.
Replace xdp_do_flush_map() with xdp_do_flush().
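
For illustration only (not part of the upstream change): a minimal sketch of the
pattern every driver touched below already follows, where the NAPI poll loop
records that at least one frame was redirected and flushes the queued redirects
once at the end of the poll. The function and flag names (example_rx_poll(),
EXAMPLE_XDP_REDIR) are hypothetical; only the xdp_do_redirect()/xdp_do_flush()
calls are real kernel API.

/* Illustrative sketch only; not taken from any driver in this patch. */
#include <linux/bits.h>
#include <linux/filter.h>
#include <linux/netdevice.h>

#define EXAMPLE_XDP_REDIR	BIT(0)	/* hypothetical per-poll flag */

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	unsigned int xdp_flags = 0;
	int work_done = 0;

	while (work_done < budget) {
		/*
		 * Run the XDP program on the next buffer; on XDP_REDIRECT a
		 * real driver calls xdp_do_redirect() and marks the poll:
		 *
		 *	if (!xdp_do_redirect(napi->dev, &xdp, prog))
		 *		xdp_flags |= EXAMPLE_XDP_REDIR;
		 */
		work_done++;
		break;	/* placeholder: no real Rx ring in this sketch */
	}

	/* One flush per poll covers every redirect queued above. */
	if (xdp_flags & EXAMPLE_XDP_REDIR)
		xdp_do_flush();

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}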
Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Cc: Clark Wang <xiaoning.wang@nxp.com>
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: David Arinzon <darinzon@amazon.com>
Cc: Edward Cree <ecree.xilinx@gmail.com>
Cc: Felix Fietkau <nbd@nbd.name>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Jassi Brar <jaswinder.singh@linaro.org>
Cc: Jesse Brandeburg <jesse.brandeburg@intel.com>
Cc: John Crispin <john@phrozen.org>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: Louis Peens <louis.peens@corigine.com>
Cc: Marcin Wojtas <mw@semihalf.com>
Cc: Mark Lee <Mark-MC.Lee@mediatek.com>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Cc: NXP Linux Team <linux-imx@nxp.com>
Cc: Noam Dagan <ndagan@amazon.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Saeed Bishara <saeedb@amazon.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: Sean Wang <sean.wang@mediatek.com>
Cc: Shay Agroskin <shayagr@amazon.com>
Cc: Shenwei Wang <shenwei.wang@nxp.com>
Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Cc: Tony Nguyen <anthony.l.nguyen@intel.com>
Cc: Vladimir Oltean <vladimir.oltean@nxp.com>
Cc: Wei Fang <wei.fang@nxp.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Arthur Kiyanovski <akiyano@amazon.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Link: https://lore.kernel.org/r/20230908143215.869913-2-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: 50bd33f6b392 ("net: enetc: fix the deadlock of enetc_mdio_lock")
Signed-off-by: Sasha Levin <sashal@kernel.org>
}
if (xdp_flags & ENA_XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
return work_done;
rx_ring->stats.bytes += rx_byte_cnt;
if (xdp_redirect_frm_cnt)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
rxq->bd.cur = bdp;
if (xdp_result & FEC_ENET_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
return pkt_received;
}
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
if (xdp_res & I40E_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
if (xdp_res & ICE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
}
if (xdp_ret & MVPP2_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets) {
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
net_dim(&eth->rx_dim, dim_sample);
if (xdp_flush)
- xdp_do_flush_map();
+ xdp_do_flush();
return done;
}
mlx5e_xmit_xdp_doorbell(xdpsq);
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
- xdp_do_flush_map();
+ xdp_do_flush();
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
if (xdp_redir)
- xdp_do_flush_map();
+ xdp_do_flush();
if (tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring);
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
u16 pkts)
{
if (xdp_res & NETSEC_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & NETSEC_XDP_TX)
netsec_xdp_ring_tx_db(priv, pkts);
* particular hardware is sharing a common queue, so the
* incoming device might change per packet.
*/
- xdp_do_flush_map();
+ xdp_do_flush();
break;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);