am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
skb_put(skb, pkt_len);
if (port->rx_ts_enabled)
- am65_cpts_rx_timestamp(common->cpts, skb);
+ am65_cpts_rx_timestamp(common->cpts, port_id, skb);
skb_mark_for_recycle(skb);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
/* SKB TX timestamp */
if (port->tx_ts_enabled)
- am65_cpts_prep_tx_timestamp(common->cpts, skb);
+ am65_cpts_prep_tx_timestamp(common->cpts, port->port_id, skb);
q_idx = skb_get_queue_mapping(skb);
dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
bool found = false;
u32 mtype_seqid;
- mtype_seqid = event->event1 &
- (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
- AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
- AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
-
+ mtype_seqid = event->event1;
__skb_queue_head_init(&txq_list);
spin_lock_irqsave(&cpts->txq.lock, flags);
struct list_head *this, *next;
struct am65_cpts_event *event;
unsigned long flags;
- u32 mtype_seqid;
u64 ns = 0;
spin_lock_irqsave(&cpts->lock, flags);
continue;
}
- mtype_seqid = event->event1 &
- (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
- AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
- AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
-
- if (mtype_seqid == skb_mtype_seqid) {
+ if (event->event1 == skb_mtype_seqid) {
ns = event->timestamp;
list_move(&event->list, &cpts->pool);
break;
return ns;
}
-void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, unsigned int port_id,
+ struct sk_buff *skb)
{
struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
struct skb_shared_hwtstamps *ssh;
if (!ret)
return; /* if not PTP class packet */
+ skb_cb->skb_mtype_seqid |= port_id << AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
/**
* am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
* @cpts: cpts handle
+ * @port_id: The port on which the skb will be sent
* @skb: packet
*
 * This function should be called from .xmit().
 * It checks if the packet can be timestamped, fills internal cpts data
 * in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
*/
-void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, unsigned int port_id,
+ struct sk_buff *skb)
{
struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
int ret;
ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return;
+ skb_cb->skb_mtype_seqid |= port_id << AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
-void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, unsigned int port_id,
+ struct sk_buff *skb);
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
-void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, unsigned int port_id,
+ struct sk_buff *skb);
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
}
static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts,
+ unsigned int port_id,
struct sk_buff *skb)
{
}
}
static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ unsigned int port_id,
struct sk_buff *skb)
{
}