// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>

struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};
struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};
struct mlx5e_ptp_port_ts_cqe_tracker {
	u8 metadata_id;
	bool inuse : 1;
	struct list_head entry;
};
struct mlx5e_ptp_port_ts_cqe_list {
	struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
	struct list_head tracker_list_head;
	/* Sync list operations in xmit and napi_poll contexts */
	spinlock_t tracker_list_lock;
};
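
/* Each outstanding port-timestamp CQE is tracked by the node whose index
 * equals its WQE's metadata id. The add/remove helpers below toggle a
 * node's inuse flag and keep the tracker list in FIFO order, so the oldest
 * pending entry sits at the head when stale entries are aged out.
 */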
static void
mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(tracker->inuse);
	tracker->inuse = true;
	spin_lock(&list->tracker_list_lock);
	list_add_tail(&tracker->entry, &list->tracker_list_head);
	spin_unlock(&list->tracker_list_lock);
}
static void
mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(!tracker->inuse);
	tracker->inuse = false;
	spin_lock(&list->tracker_list_lock);
	list_del(&tracker->entry);
	spin_unlock(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
{
	mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
}
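
/* A PTP skb is reported to the stack only after both of its completions
 * have arrived: the CQE (DMA) timestamp and the port timestamp. Both are
 * stashed in skb->cb in the meantime; the BUILD_BUG_ON below guarantees
 * the struct fits there.
 */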
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* If both CQEs have arrived, check and report the port tstamp, and
	 * clear the skb cb, as the skb is about to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
static struct sk_buff *
mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	return map->data[metadata];
}
static struct sk_buff *
mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	struct sk_buff *skb;

	skb = map->data[metadata];
	map->data[metadata] = NULL;

	return skb;
}
static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
	/* Consider the map to be entering an unhealthy state once more than
	 * 15/16 of its capacity (size * 15 / 2^4) cannot be reclaimed.
	 */
	return map->undelivered_counter > (map->capacity >> 4) * 15;
}
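
/* Walk the pending list in FIFO order and mark entries undelivered once the
 * port clock has advanced more than the timeout past their DMA timestamp.
 * The walk stops at the first entry still inside the timeout window, since
 * every entry after it is newer.
 */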
static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
						 ktime_t port_tstamp)
{
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;

	spin_lock(&cqe_list->tracker_list_lock);
	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
		struct sk_buff *skb =
			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;

		if (!dma_tstamp ||
		    ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
			break;

		metadata_map->undelivered_counter++;
		WARN_ON_ONCE(!pos->inuse);
		pos->inuse = false;
		list_del(&pos->entry);
	}
	spin_unlock(&cqe_list->tracker_list_lock);
}
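
/* The wqe_counter field of a port-timestamp CQE echoes the metadata id that
 * was attached to the WQE at xmit time; masking it with ts_cqe_ctr_mask
 * recovers the index into both the tracker nodes and the skb metadata map.
 */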
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
		mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
	} else {
		/* Reclaim space in the unlikely event CQE was delivered after
		 * marking it late.
		 */
		ptpsq->metadata_map.undelivered_counter--;
		ptpsq->cq_stats->late_cqe++;
	}

	skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);

	if (unlikely(is_err_cqe)) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
	napi_consume_skb(skb, budget);
	mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
	    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	mlx5e_txqsq_wake(&ptpsq->txqsq);

	return work_done == budget;
}
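
/* The PTP channel runs its own NAPI context: per TC it polls a TX CQ and a
 * port-timestamp CQ, plus an optional RX CQ for PTP traffic steered to this
 * channel.
 */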
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev      = c->pdev;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
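
/* The traffic db (tracker nodes, metadata freelist and skb map) is sized to
 * the smaller of the SQ size and the id range the device can echo back in
 * the CQE wqe_counter, per the ts_cqe_metadata_size2wqe_counter capability,
 * so every in-flight WQE maps to a unique metadata id.
 */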
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
	int db_sz;
	int md;

	cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
	if (!cqe_list)
		return -ENOMEM;
	ptpsq->ts_cqe_pending_list = cqe_list;

	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
					  ts_cqe_metadata_size2wqe_counter));
	ptpsq->ts_cqe_ctr_mask = db_sz - 1;

	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
					GFP_KERNEL, numa);
	if (!cqe_list->nodes)
		goto free_cqe_list;
	INIT_LIST_HEAD(&cqe_list->tracker_list_head);
	spin_lock_init(&cqe_list->tracker_list_lock);

	metadata_freelist->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
			      GFP_KERNEL, numa);
	if (!metadata_freelist->data)
		goto free_cqe_list_nodes;
	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;

	for (md = 0; md < db_sz; ++md) {
		cqe_list->nodes[md].metadata_id = md;
		metadata_freelist->data[md] = md;
	}
	metadata_freelist->pc = db_sz;

	metadata_map->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
			      GFP_KERNEL, numa);
	if (!metadata_map->data)
		goto free_metadata_freelist;
	metadata_map->capacity = db_sz;

	return 0;

free_metadata_freelist:
	kvfree(metadata_freelist->data);
free_cqe_list_nodes:
	kvfree(cqe_list->nodes);
free_cqe_list:
	kvfree(cqe_list);
	return -ENOMEM;
}
static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
	int idx;

	for (idx = 0; idx < map->capacity; ++idx) {
		struct sk_buff *skb = map->data[idx];

		dev_kfree_skb_any(skb);
	}
}
static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
	kvfree(ptpsq->metadata_map.data);
	kvfree(ptpsq->metadata_freelist.data);
	kvfree(ptpsq->ts_cqe_pending_list->nodes);
	kvfree(ptpsq->ts_cqe_pending_list);
}
static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
	struct mlx5e_ptpsq *ptpsq =
		container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);

	mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = txqsq->cq.mcq.cqn;
	csp.wq_ctrl         = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	if (current_work() != &ptpsq->report_unhealthy_work)
		cancel_work_sync(&ptpsq->report_unhealthy_work);
	mlx5e_ptp_free_traffic_db(ptpsq);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
}
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}
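
/* The PTP SQ log size is clamped so metadata ids cannot wrap past the range
 * the device reports in ts_cqe_metadata_size2wqe_counter, and it never
 * exceeds the regular channels' SQ size.
 */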
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size =
			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}

	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = priv->netdev;
	rq->priv         = priv;
	rq->clock        = &mdev->clock;
	rq->tstamp       = &priv->tstamp;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats        = &c->priv->ptp_stats.rq;
	rq->ix           = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	int err;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}
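
/* TX state follows the tx_port_ts private flag and RX state follows
 * params->ptp_rx; opening a PTP channel with neither enabled is an error.
 */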
static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
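
/* PTP RX steering: UDP v4/v6 rules redirect PTP event packets (UDP port
 * 319) and an L2 rule redirects ETH_P_1588 frames to the PTP TIR, so that
 * they land on the PTP channel's RQ.
 */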
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		return err;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	return err;
}
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc   = mlx5e_get_dcb_num_tc(params);
	c->stats    = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
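
/* Activation enables NAPI first, installs the RX steering rules before the
 * RQ is activated, and finally triggers a NAPI run to process anything that
 * arrived in between.
 */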
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
	}
	mlx5e_trigger_napi_sched(&c->napi);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;

	mlx5e_fs_set_ptp(fs, ptp_fs);
	return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(fs);
	kfree(ptp_fs);
}
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}