        const u32 now = tcp_jiffies32;
        enum tcp_chrono old = tp->chrono_type;

+       /* Following WRITE_ONCE()s pair with READ_ONCE()s in
+        * tcp_get_info_chrono_stats().
+        */
        if (old > TCP_CHRONO_UNSPEC)
-               tp->chrono_stat[old - 1] += now - tp->chrono_start;
-       tp->chrono_start = now;
-       tp->chrono_type = new;
+               WRITE_ONCE(tp->chrono_stat[old - 1],
+                          tp->chrono_stat[old - 1] + now - tp->chrono_start);
+       WRITE_ONCE(tp->chrono_start, now);
+       WRITE_ONCE(tp->chrono_type, new);
}

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
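
A minimal userspace sketch of the pattern this hunk applies: a writer serialized by a lock publishes with WRITE_ONCE() while a lockless reader samples with READ_ONCE(). The macros below are simplified stand-ins for the kernel's real definitions in include/asm-generic/rwonce.h, and the writer()/reader()/stat names are illustrative only, not code from this patch:

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros; the real definitions in
 * include/asm-generic/rwonce.h handle more cases.
 */
#define READ_ONCE(x)            (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)      (*(volatile __typeof__(x) *)&(x) = (val))

static unsigned long stat;      /* analogous to tp->chrono_stat[] */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *writer(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                pthread_mutex_lock(&lock);
                /* The lock serializes writers; the WRITE_ONCE() annotation
                 * is for the lockless reader below, as in tcp_chrono_set().
                 * The plain read of 'stat' in the argument is safe because
                 * writers hold the lock.
                 */
                WRITE_ONCE(stat, stat + 1);
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

static void *reader(void *arg)
{
        /* Best-effort lockless read, as in tcp_get_info_chrono_stats():
         * the value may be stale, but on common architectures an aligned
         * word access is a single load, so it is not torn.
         */
        printf("stat ~= %lu\n", READ_ONCE(stat));
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}
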
@@ ... @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
                                       struct tcp_info *info)
{
        u64 stats[__TCP_CHRONO_MAX], total = 0;
-       enum tcp_chrono i;
+       enum tcp_chrono i, cur;

+       /* Following READ_ONCE()s pair with WRITE_ONCE()s in tcp_chrono_set().
+        * This is because the socket lock might not be owned by us at this
+        * point. This is best effort, tcp_get_timestamping_opt_stats() can
+        * see wrong values. A real fix would be too costly for the TCP fast
+        * path.
+        */
+       cur = READ_ONCE(tp->chrono_type);
        for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
-               stats[i] = tp->chrono_stat[i - 1];
-               if (i == tp->chrono_type)
-                       stats[i] += tcp_jiffies32 - tp->chrono_start;
+               stats[i] = READ_ONCE(tp->chrono_stat[i - 1]);
+               if (i == cur)
+                       stats[i] += tcp_jiffies32 - READ_ONCE(tp->chrono_start);
                stats[i] *= USEC_PER_SEC / HZ;
                total += stats[i];
        }
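
For context, the loop above converts each chrono counter from jiffies to microseconds (USEC_PER_SEC / HZ) before it reaches userspace through getsockopt(TCP_INFO). A minimal sketch of that consumer side, assuming a connected TCP socket fd; print_chrono_stats() is a hypothetical helper, while the tcpi_* fields are the real ones from <linux/tcp.h>:

#include <linux/tcp.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Hypothetical helper: dump the three chrono counters filled in by
 * tcp_get_info_chrono_stats(), already converted to microseconds.
 */
static void print_chrono_stats(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) != 0) {
                perror("getsockopt(TCP_INFO)");
                return;
        }
        printf("busy:           %llu us\n",
               (unsigned long long)info.tcpi_busy_time);
        printf("rwnd limited:   %llu us\n",
               (unsigned long long)info.tcpi_rwnd_limited);
        printf("sndbuf limited: %llu us\n",
               (unsigned long long)info.tcpi_sndbuf_limited);
}

Per the comment in the second hunk, a reader racing with tcp_chrono_set() may still observe slightly stale values; the annotations make that lockless read explicit and tear-free rather than eliminating the race.
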