From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: net: sk_allocation() - concentrate socket related allocations
Patch-mainline: No
References: FATE#303834

Introduce sk_allocation(); this function makes it possible to inject
sock-specific flags into each sock-related allocation.
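
As introduced here the hook is a deliberate no-op that returns gfp_mask
unchanged; its value is that every socket-related allocation now funnels
through a single point. A minimal sketch of how a later patch in the
series might use the hook, assuming the SOCK_MEMALLOC socket flag and
the __GFP_MEMALLOC allocation flag added elsewhere in the SoN series
(neither is part of this patch):

	static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask)
	{
		/* Sketch only: SOCK_MEMALLOC and __GFP_MEMALLOC come from
		 * other patches in this series, not from this one.
		 */
		if (sock_flag(sk, SOCK_MEMALLOC))
			gfp_mask |= __GFP_MEMALLOC;	/* may use emergency reserves */
		return gfp_mask;
	}

With the call sites converted below, changing allocation policy for a
socket then only requires flipping a flag on the sock, not touching
every allocation site.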

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Suresh Jayaraman <sjayaraman@suse.de>

---
 include/net/sock.h    |    5 +++++
 net/ipv4/tcp.c        |    3 ++-
 net/ipv4/tcp_output.c |   12 +++++++-----
 net/ipv6/tcp_ipv6.c   |   17 ++++++++++++-----
 4 files changed, 26 insertions(+), 11 deletions(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -435,6 +435,11 @@ static inline int sock_flag(struct sock
 	return test_bit(flag, &sk->sk_flags);
 }
 
+static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask)
+{
+	return gfp_mask;
+}
+
 static inline void sk_acceptq_removed(struct sock *sk)
 {
 	sk->sk_ack_backlog--;
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -643,7 +643,8 @@ struct sk_buff *sk_stream_alloc_skb(stru
 	/* The TCP header must be at least 32-bit aligned. */
 	size = ALIGN(size, 4);
 
-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+	skb = alloc_skb_fclone(size + sk->sk_prot->max_header,
+			       sk_allocation(sk, gfp));
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
 			/*
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2159,7 +2159,8 @@ void tcp_send_fin(struct sock *sk)
 	} else {
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
-			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
+			skb = alloc_skb_fclone(MAX_TCP_HEADER,
+					       sk_allocation(sk, GFP_KERNEL));
 			if (skb)
 				break;
 			yield();
@@ -2185,7 +2186,7 @@ void tcp_send_active_reset(struct sock *
 	struct sk_buff *skb;
 
 	/* NOTE: No TCP options attached and we never retransmit this. */
-	skb = alloc_skb(MAX_TCP_HEADER, priority);
+	skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, priority));
 	if (!skb) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
 		return;
@@ -2254,7 +2255,8 @@ struct sk_buff *tcp_make_synack(struct s
 	__u8 *md5_hash_location;
 	int mss;
 
-	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1,
+			   sk_allocation(sk, GFP_ATOMIC));
 	if (skb == NULL)
 		return NULL;
 
@@ -2500,7 +2502,7 @@ void tcp_send_ack(struct sock *sk)
 		 * tcp_transmit_skb() will set the ownership to this
 		 * sock.
 		 */
-		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+		buff = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
 		if (buff == NULL) {
 			inet_csk_schedule_ack(sk);
 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -2535,7 +2537,7 @@ static int tcp_xmit_probe_skb(struct soc
 	struct sk_buff *skb;
 
 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
-	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
 	if (skb == NULL)
 		return -1;
 
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -582,7 +582,8 @@ static int tcp_v6_md5_do_add(struct sock
 	} else {
 		/* reallocate new list if current one is full. */
 		if (!tp->md5sig_info) {
-			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
+						  sk_allocation(sk, GFP_ATOMIC));
 			if (!tp->md5sig_info) {
 				kfree(newkey);
 				return -ENOMEM;
@@ -595,7 +596,8 @@ static int tcp_v6_md5_do_add(struct sock
 		}
 		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
 			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
-				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
+				       (tp->md5sig_info->entries6 + 1)),
+				       sk_allocation(sk, GFP_ATOMIC));
 
 			if (!keys) {
 				tcp_free_md5sig_pool();
@@ -719,7 +721,8 @@ static int tcp_v6_parse_md5_keys (struct
 		struct tcp_sock *tp = tcp_sk(sk);
 		struct tcp_md5sig_info *p;
 
-		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+		p = kzalloc(sizeof(struct tcp_md5sig_info),
+			    sk_allocation(sk, GFP_KERNEL));
 		if (!p)
 			return -ENOMEM;
 
@@ -952,6 +955,7 @@ static void tcp_v6_send_reset(struct soc
 #ifdef CONFIG_TCP_MD5SIG
 	struct tcp_md5sig_key *key;
 #endif
+	gfp_t gfp_mask = GFP_ATOMIC;
 
 	if (th->rst)
 		return;
@@ -969,13 +973,16 @@ static void tcp_v6_send_reset(struct soc
 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
+	if (sk)
+		gfp_mask = sk_allocation(sk, gfp_mask);
+
 	/*
 	 * We need to grab some memory, and put together an RST,
 	 * and then put it into the queue to be sent.
 	 */
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
-			 GFP_ATOMIC);
+			 gfp_mask);
 	if (buff == NULL)
 		return;
 
@@ -1063,7 +1070,7 @@ static void tcp_v6_send_ack(struct sk_bu
 #endif
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
-			 GFP_ATOMIC);
+			 sk_allocation(ctl_sk, GFP_ATOMIC));
 	if (buff == NULL)
 		return;
 