/* RFC says return as much as we can without exceeding 576 bytes. */
- room = dst_mtu(&rt->dst);
+ room = dst4_mtu(&rt->dst);
if (room > 576)
room = 576;
room -= sizeof(struct iphdr) + icmp_param->replyopts.opt.optlen;
return -EFAULT;
cork->fragsize = ip_sk_use_pmtu(sk) ?
- dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+ dst4_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
if (!inetdev_valid_mtu(cork->fragsize))
return -ENETUNREACH;
pmtudisc = READ_ONCE(inet->pmtudisc);
if (pmtudisc == IP_PMTUDISC_DO ||
pmtudisc == IP_PMTUDISC_PROBE ||
- (skb->len <= dst_mtu(&rt->dst) &&
+ (skb->len <= dst4_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
val = 0;
dst = sk_dst_get(sk);
if (dst) {
- val = dst_mtu(dst);
+ val = dst4_mtu(dst);
dst_release(dst);
}
if (!val)
return -1;
}
- if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
+ if (skb->len+encap > dst4_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
* allow to send ICMP, so that packets will disappear
* to blackhole.
goto free_nskb;
/* "Never happens" */
- if (nskb->len > dst_mtu(skb_dst(nskb)))
+ if (nskb->len > dst4_mtu(skb_dst(nskb)))
goto free_nskb;
nf_ct_attach(nskb, oldskb);
{
struct inet_sock *inet = inet_sk(sk);
struct dst_entry *dst;
- u32 mtu;
+ u32 mtu, dmtu;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
/* Something is about to go wrong... Remember the soft error
 * in case this connection will not be able to recover.
*/
- if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
+ dmtu = dst4_mtu(dst);
+ if (mtu < dmtu && ip_dont_fragment(sk, dst))
WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
- mtu = dst_mtu(dst);
-
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
ip_sk_accept_pmtu(sk) &&
- inet_csk(sk)->icsk_pmtu_cookie > mtu) {
- tcp_sync_mss(sk, mtu);
+ inet_csk(sk)->icsk_pmtu_cookie > dmtu) {
+ tcp_sync_mss(sk, dmtu);
/* Resend the TCP packet because it's
* clear that the old packet has been
tcp_ca_openreq_child(newsk, dst);
- tcp_sync_mss(newsk, dst_mtu(dst));
+ tcp_sync_mss(newsk, dst4_mtu(dst));
newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
tcp_initialize_rcv_mss(newsk);
}
if (df) {
- mtu = dst_mtu(&rt->dst) - t_hlen;
+ mtu = dst4_mtu(&rt->dst) - t_hlen;
if (mtu < IPV4_MIN_MTU) {
DEV_STATS_INC(dev, collisions);