 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+#define GVE_DQO_RX_HWTSTAMP_VALID 0x1
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
 * Note that this means if the time delta between packet reception and the last
 * clock read is greater than ~2 seconds, this will provide invalid results.
 */
-static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+				const struct gve_rx_compl_desc_dqo *desc)
{
	u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
	struct sk_buff *skb = rx->ctx.skb_head;
-	u32 low = (u32)last_read;
-	s32 diff = hwts - low;
-
-	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+	u32 ts, low;
+	s32 diff;
+
+	if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) {
+		ts = le32_to_cpu(desc->ts);
+		low = (u32)last_read;
+		diff = ts - low;
+		skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+	}
}
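
The reconstruction above is the usual 32-bit counter-extension trick: the
descriptor carries only the low 32 bits of the NIC's nanosecond clock, and the
driver re-extends them against a recent 64-bit reading by letting the signed
32-bit difference wrap, which is what bounds the validity window to roughly
2 seconds either side of the last clock read. Below is a standalone sketch of
the same arithmetic, outside the driver; extend_ts and the input values are
illustrative only, not driver code:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the 32-bit timestamp extension used by
 * gve_rx_skb_hwtstamp(): last_read is a recent 64-bit nanosecond
 * reading of the NIC clock, hw_ts is the truncated 32-bit value from
 * the descriptor. The signed difference wraps correctly as long as the
 * two readings are within ~2.1s (half the 32-bit wrap period).
 */
static uint64_t extend_ts(uint64_t last_read, uint32_t hw_ts)
{
	int32_t diff = (int32_t)(hw_ts - (uint32_t)last_read);

	return last_read + (int64_t)diff;
}

int main(void)
{
	/* Hypothetical values: the low 32 bits wrap between the last
	 * clock read and the packet's timestamp; the extension still
	 * recovers the full 64-bit value.
	 */
	uint64_t last_read = 0x200000000ULL - 100;	/* just before a wrap */
	uint32_t hw_ts = 50;				/* just after it */

	printf("%llu\n", (unsigned long long)extend_ts(last_read, hw_ts));
	/* prints 8589934642, i.e. 0x200000000 + 50 */
	return 0;
}
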
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
	gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);

	if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
-		gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+		gve_rx_skb_hwtstamp(rx, desc);

	/* RSC packets must set gso_size otherwise the TCP stack will complain
	 * that packets are larger than MTU.