/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/clock.h"
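
/* Unwind DMA mappings after a failed WQE build: pop the last num_dma
 * entries off the DMA fifo and unmap each one.
 */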
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
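
/* Map the packet's DSCP code point to a user priority through the
 * device's DCBX dscp2prio table.
 */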
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv,
				    struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
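
/* ndo_select_queue: normalize the stack's pick to a channel index, then
 * return the real txq that serves this channel at the packet's priority.
 */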
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	int txq_ix = netdev_pick_tx(dev, skb, NULL);
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;
	int ch_ix;

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * so we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
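
/* Smallest useful inline size: inline at least up to the network header,
 * and never less than a full Ethernet header plus a VLAN tag.
 */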
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}
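
/* Number of header bytes to copy into the WQE for the SQ's configured
 * inline mode, capped at the skb's linear data length.
 */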
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}
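
/* Build an 802.1Q header in the WQE's inline area: copy the MAC addresses,
 * insert the VLAN proto/TCI pair, then copy the remaining headers.
 */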
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
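
/* Set checksum offload flags in the Ethernet segment; encapsulated packets
 * additionally request inner L3/L4 checksums.
 */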
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
	} else
		sq->stats->csum_none++;
}
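
/* Inline header size for a GSO packet: all headers up to and including the
 * (inner) transport header, replicated by the device for every segment.
 */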
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}
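
/* DMA-map the linear part that follows the inlined headers and every page
 * fragment, filling one data segment per mapping. Returns the number of
 * mappings pushed, or -ENOMEM after unwinding on a mapping failure.
 */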
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}
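
/* Finalize a WQE: record accounting in the wqe_info, build the control
 * segment, advance the producer counter, stop the queue when full and ring
 * the doorbell unless more packets are pending (xmit_more).
 */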
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = num_wqebbs;
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
					       xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
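
/* Build and post one send WQE: compute the inline header size and data
 * segment count, handle work-queue wrap-around, then fill the segments and
 * complete the WQE.
 */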
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode    = MLX5_OPCODE_LSO;
		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);

		opcode    = MLX5_OPCODE_SEND;
		mss       = 0;
		ihs       = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes     += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
		struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
		wqe->eth = cur_eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		wqe->ctrl = cur_ctrl;
#endif
	}

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
#endif
	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		if (skb_vlan_tag_present(skb)) {
			ihs -= VLAN_HLEN;
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
			stats->added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb->data, ihs);
		}
		dseg += ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
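
/* ndo_start_xmit: resolve the SQ from the selected queue, fetch a WQE, let
 * the TX accel offloads process the skb (may update wqe and pi), and send.
 */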
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	/* might send skbs and update wqe and pi */
	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
	if (unlikely(!skb))
		return NETDEV_TX_OK;

	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
				 struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &sq->cq.wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(sq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   sq->cq.mcq.cqn, ci, sq->sqn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}
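
/* NAPI TX completion: drain up to MLX5E_TX_CQ_POLL_BUDGET CQEs, unmapping
 * DMA, delivering hardware timestamps and freeing skbs; wake the txq once
 * room is available again. Returns true if the budget was exhausted.
 */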
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct sk_buff *skb;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) {
				mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
				sqcc += wi->num_wqebbs;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->channel->priv->wq,
					   &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
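
/* Free all descriptors still posted on the SQ at teardown: unmap their DMA
 * and drop the skbs without waiting for completions.
 */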
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u32 dma_fifo_cc;
	u16 sqcc;
	u16 ci;
	int i;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) {
			mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
			sqcc += wi->num_wqebbs;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sqcc += wi->num_wqebbs;
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;
}
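
/* IPoIB transmit path: same WQE construction as the Ethernet path, plus a
 * datagram segment carrying the UD address vector.
 */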
#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey,
			  bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, pi, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode    = MLX5_OPCODE_LSO;
		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);

		opcode    = MLX5_OPCODE_SEND;
		mss       = 0;
		ihs       = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes     += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	mlx5i_sq_fetch_wqe(sq, &wqe, pi);

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		dseg += ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
#endif