return min_page_shift ? : 12;
}
-u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo)
{
- u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+ u8 req_page_shift;
+
+ req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
}
enum mlx5e_mpwrq_umr_mode
-mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo)
{
/* Different memory management schemes use different mechanisms to map
* user-mode memory. The stricter guarantees we have, the faster
* 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
* mappings can have different sizes.
*/
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
bool unaligned = xsk ? xsk->unaligned : false;
bool oversized = false;
}
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
u16 headroom;
- if (xsk)
- return xsk->headroom;
+ if (mlx5e_rqo_xsk_param(rqo))
+ return rqo->xsk->headroom;
headroom = NET_IP_ALIGN;
if (params->xdp_prog)
static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
bool mpwqe)
{
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
* an arbitrary order from random locations in the UMEM.
*/
- if (xsk)
- return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
+ if (xsk)
+ return mpwqe ? BIT(mlx5e_mpwrq_page_shift(mdev, rqo)) :
+ PAGE_SIZE;
- no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+ no_head_tail_room = params->xdp_prog && mpwqe &&
+ !mlx5e_rx_is_linear_skb(mdev, params, rqo);
/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
* no_head_tail_room should be set in the case of XDP with Striding RQ
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u32 linear_stride_sz =
+ mlx5e_rx_get_linear_stride_sz(mdev, params, rqo, true);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
order_base_2(linear_stride_sz);
bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
- if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
+ if (mlx5e_rx_get_linear_sz_skb(params, !!xsk) > PAGE_SIZE)
return false;
/* XSK frames must be big enough to hold the packet data. */
bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
- u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 log_wqe_num_of_strides =
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo);
+ u8 log_wqe_stride_size =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
log_wqe_num_of_strides,
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_num_strides;
u8 log_stride_sz;
u8 log_wqe_sz;
- if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
+ if (!mlx5e_rx_is_linear_skb(mdev, params, rqo))
return false;
- log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params,
+ rqo, true));
log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
if (log_wqe_sz < log_stride_sz)
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
- log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
- page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, rqo);
+ page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
/* Numbers are unsigned, don't subtract to avoid underflow. */
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
- return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo))
+ return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params,
+ rqo, true));
/* XDP in mlx5e doesn't support multiple packets per page. */
if (params->xdp_prog)
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_wqe_size, log_stride_size;
log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
- log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
WARN(log_wqe_size < log_stride_size,
"Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
- log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+ log_wqe_size, log_stride_size, page_shift, umr_mode,
+ !!mlx5e_rqo_xsk_param(rqo));
return log_wqe_size - log_stride_size;
}
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, rqo);
if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
return linear_headroom;
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo))
return linear_headroom;
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
}
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u16 max_mtu_pkts;
if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
return -EOPNOTSUPP;
}
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo)) {
mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
return -EINVAL;
}
mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
if (params->log_rq_mtu_frames > max_mtu_pkts) {
mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
1 << params->log_rq_mtu_frames, xsk->chunk_size);
return -EINVAL;
}
static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_frags_info *info,
u32 *xdp_frag_size)
{
int max_mtu;
int i;
- if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+ if (mlx5e_rx_is_linear_skb(mdev, params, rqo)) {
int frag_stride;
- frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
+ frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, rqo,
+ false);
info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
goto out;
}
- headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ headroom = mlx5e_get_linear_rq_headroom(params, rqo);
first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params,
+ rqo));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
- int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo));
int wqe_size = BIT(log_stride_sz) * num_strides;
int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_cq_param *param)
{
bool hw_stridx = false;
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
- log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
+ log_cq_size =
+ mlx5e_shampo_get_log_cq_size(mdev, params, rqo);
else
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ log_cq_size =
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params,
+ rqo);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
static int mlx5e_mpwqe_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *rq_param)
{
- u8 log_rq_sz = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_rq_sz = mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_wqe_num_of_strides, log_wqe_stride_size;
enum mlx5e_mpwrq_umr_mode umr_mode;
void *rqc = rq_param->rqc;
u32 lro_timeout;
void *wq;
- log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params,
- xsk);
- log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params,
- xsk);
- umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ log_wqe_num_of_strides =
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo);
+ log_wqe_stride_size =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *rq_param)
{
void *rqc = rq_param->rqc;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- err = mlx5e_mpwqe_build_rq_param(mdev, params, xsk, rq_param);
+ err = mlx5e_mpwqe_build_rq_param(mdev, params, rqo, rq_param);
if (err)
return err;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- err = mlx5e_build_rq_frags_info(mdev, params, xsk,
+ err = mlx5e_build_rq_frags_info(mdev, params, rqo,
&rq_param->frags_info,
&rq_param->xdp_frag_size);
if (err)
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(mdev, params, xsk, &rq_param->cqp);
+ mlx5e_build_rx_cq_param(mdev, params, rqo, &rq_param->cqp);
return 0;
}
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 umr_wqebbs;
umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
- return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+ return umr_wqebbs *
+ (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo));
}
static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
+ struct mlx5e_rq_opt_param rqo = {0};
struct mlx5e_xsk_param xsk = {0};
u32 max_xsk_wqebbs = 0;
u8 frame_shift;
if (!params->xdp_prog)
return 0;
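+ /* The same wrapper is reused for all the probes below; only the xsk
+  * fields change between iterations.
+  */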
+ rqo.xsk = &xsk;
+
/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
* both regular RQ and XSK RQ.
/* XSK aligned mode. */
xsk.chunk_size = 1 << frame_shift;
xsk.unaligned = false;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
/* XSK unaligned mode, frame size is a power of two. */
xsk.unaligned = true;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
/* XSK unaligned mode, frame size is not equal to stride
* size.
*/
xsk.chunk_size -= 1;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
/* XSK unaligned mode, frame size is a triple power of two. */
xsk.chunk_size = (1 << frame_shift) / 4 * 3;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
}
struct mlx5e_xsk_param *xsk,
struct mlx5e_channel_param *cparam)
{
- cparam->xsk = xsk;
- mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
+ cparam->rq_opt.xsk = xsk;
+ mlx5e_build_rq_param(mdev, params, &cparam->rq_opt, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
bool unaligned;
};
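+/* Optional parameters for RQ creation and validation. A NULL pointer or a
+ * zero-initialized struct (xsk == NULL) describes a regular RQ; setting
+ * ->xsk describes an XSK RQ.
+ */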
+struct mlx5e_rq_opt_param {
+ struct mlx5e_xsk_param *xsk;
+};
+
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
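+ /* Optional RQ parameters (currently only XSK) for this channel. */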
+ struct mlx5e_rq_opt_param rq_opt;
struct mlx5e_sq_param txq_sq;
struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_sq_param async_icosq;
- struct mlx5e_xsk_param *xsk;
};
struct mlx5e_create_sq_param {
/* Striding RQ dynamic parameters */
-u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo);
enum mlx5e_mpwrq_umr_mode
-mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
/* Build queue parameters */
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_rq_param *param);
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
"enhanced" : "basic");
};
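+/* NULL-safe accessor: returns the XSK parameters carried by rqo, or NULL
+ * when none were provided (regular RQ).
+ */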
+static inline struct mlx5e_xsk_param *
+mlx5e_rqo_xsk_param(struct mlx5e_rq_opt_param *rqo)
+{
+ return rqo ? rqo->xsk : NULL;
+}
+
#endif /* __MLX5_EN_PARAMS_H__ */
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rq_param,
+ struct mlx5e_rq_opt_param *rqo,
int node, struct mlx5e_rq *rq)
{
void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, rqo);
pool_size = 1 << params->log_rq_mtu_frames;
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
- rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
+ rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
rq->mpwqe.pages_per_wqe =
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode);
rq->mpwqe.umr_mode);
pool_size = rq->mpwqe.pages_per_wqe <<
- mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
+ params->xdp_prog)
pool_size *= 2; /* additional page per packet for the linear part */
- rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ rq->mpwqe.log_stride_sz =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
rq->mpwqe.num_strides =
- BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo));
rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
goto err_rq_wq_destroy;
}
- if (xsk) {
+ if (mlx5e_rqo_xsk_param(rqo)) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
if (err)
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
- struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
+ struct mlx5e_rq_opt_param *rqo, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
- err = mlx5e_alloc_rq(params, xsk, rq_param, node, rq);
+ err = mlx5e_alloc_rq(params, rq_param, rqo, node, rq);
if (err)
return err;
for (ix = 0; ix < chs->params.num_channels; ix++) {
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
+ struct mlx5e_rq_opt_param rqo = {0};
struct mlx5e_xsk_param xsk;
int max_xdp_mtu;
continue;
mlx5e_build_xsk_param(xsk_pool, &xsk);
- max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
+ rqo.xsk = &xsk;
+ max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &rqo);
/* Validate XSK params and XDP MTU in advance */
- if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+ if (!mlx5e_validate_xsk_param(new_params, &rqo, mdev) ||
new_params->sw_mtu > max_xdp_mtu) {
- u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
+ u32 hr = mlx5e_get_linear_rq_headroom(new_params, &rqo);
int max_mtu_frame, max_mtu_page, max_mtu;
/* Two criteria must be met: