1 From: Peter Zijlstra <a.p.zijlstra@chello.nl>
2 Subject: net: sk_allocation() - concentrate socket related allocations
4 References: FATE#303834
6 Introduce sk_allocation(); this function allows injecting sock-specific
7 flags into each sock-related allocation.
9 Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
10 Acked-by: Neil Brown <neilb@suse.de>
11 Acked-by: Suresh Jayaraman <sjayaraman@suse.de>
14 include/net/sock.h | 5 +++++
15 net/ipv4/tcp.c | 3 ++-
16 net/ipv4/tcp_output.c | 12 +++++++-----
17 net/ipv6/tcp_ipv6.c | 17 ++++++++++++-----
18 4 files changed, 26 insertions(+), 11 deletions(-)
20 --- a/include/net/sock.h
21 +++ b/include/net/sock.h
22 @@ -435,6 +435,11 @@ static inline int sock_flag(struct sock
23 return test_bit(flag, &sk->sk_flags);
26 +static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask)
31 static inline void sk_acceptq_removed(struct sock *sk)
36 @@ -643,7 +643,8 @@ struct sk_buff *sk_stream_alloc_skb(stru
37 /* The TCP header must be at least 32-bit aligned. */
38 size = ALIGN(size, 4);
40 - skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
41 + skb = alloc_skb_fclone(size + sk->sk_prot->max_header,
42 + sk_allocation(sk, gfp));
44 if (sk_wmem_schedule(sk, skb->truesize)) {
46 --- a/net/ipv4/tcp_output.c
47 +++ b/net/ipv4/tcp_output.c
48 @@ -2159,7 +2159,8 @@ void tcp_send_fin(struct sock *sk)
50 /* Socket is locked, keep trying until memory is available. */
52 - skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
53 + skb = alloc_skb_fclone(MAX_TCP_HEADER,
54 + sk_allocation(sk, GFP_KERNEL));
58 @@ -2185,7 +2186,7 @@ void tcp_send_active_reset(struct sock *
61 /* NOTE: No TCP options attached and we never retransmit this. */
62 - skb = alloc_skb(MAX_TCP_HEADER, priority);
63 + skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, priority));
65 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
67 @@ -2254,7 +2255,8 @@ struct sk_buff *tcp_make_synack(struct s
68 __u8 *md5_hash_location;
71 - skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
72 + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1,
73 + sk_allocation(sk, GFP_ATOMIC));
77 @@ -2500,7 +2502,7 @@ void tcp_send_ack(struct sock *sk)
78 * tcp_transmit_skb() will set the ownership to this
81 - buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
82 + buff = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
84 inet_csk_schedule_ack(sk);
85 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
86 @@ -2535,7 +2537,7 @@ static int tcp_xmit_probe_skb(struct soc
89 /* We don't queue it, tcp_transmit_skb() sets ownership. */
90 - skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
91 + skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
95 --- a/net/ipv6/tcp_ipv6.c
96 +++ b/net/ipv6/tcp_ipv6.c
97 @@ -582,7 +582,8 @@ static int tcp_v6_md5_do_add(struct sock
99 /* reallocate new list if current one is full. */
100 if (!tp->md5sig_info) {
101 - tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
102 + tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
103 + sk_allocation(sk, GFP_ATOMIC));
104 if (!tp->md5sig_info) {
107 @@ -595,7 +596,8 @@ static int tcp_v6_md5_do_add(struct sock
109 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
110 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
111 - (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
112 + (tp->md5sig_info->entries6 + 1)),
113 + sk_allocation(sk, GFP_ATOMIC));
116 tcp_free_md5sig_pool();
117 @@ -719,7 +721,8 @@ static int tcp_v6_parse_md5_keys (struct
118 struct tcp_sock *tp = tcp_sk(sk);
119 struct tcp_md5sig_info *p;
121 - p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
122 + p = kzalloc(sizeof(struct tcp_md5sig_info),
123 + sk_allocation(sk, GFP_KERNEL));
127 @@ -952,6 +955,7 @@ static void tcp_v6_send_reset(struct soc
128 #ifdef CONFIG_TCP_MD5SIG
129 struct tcp_md5sig_key *key;
131 + gfp_t gfp_mask = GFP_ATOMIC;
135 @@ -969,13 +973,16 @@ static void tcp_v6_send_reset(struct soc
136 tot_len += TCPOLEN_MD5SIG_ALIGNED;
140 + gfp_mask = sk_allocation(sk, gfp_mask);
143 * We need to grab some memory, and put together an RST,
144 * and then put it into the queue to be sent.
147 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
153 @@ -1063,7 +1070,7 @@ static void tcp_v6_send_ack(struct sk_bu
156 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
158 + sk_allocation(ctl_sk, GFP_ATOMIC));