1 // SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #include <linux/module.h>
10 #include <net/af_rxrpc.h>
11 #include "ar-internal.h"
/*
 * Human-readable name for each rxrpc connection state, indexed by the
 * RXRPC_CONN_* enum and sized by RXRPC_CONN__NR_STATES.  Used by
 * rxrpc_connection_seq_show() below to render the State column of
 * /proc/net/rxrpc_conns; the strings appear to be padded to a fixed
 * width for column alignment — TODO confirm exact padding against the
 * original file (this extraction may have collapsed runs of spaces).
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this extraction.
 */
13 static const char *const rxrpc_conn_states
[RXRPC_CONN__NR_STATES
] = {
14 [RXRPC_CONN_UNUSED
] = "Unused ",
15 [RXRPC_CONN_CLIENT
] = "Client ",
16 [RXRPC_CONN_SERVICE_PREALLOC
] = "SvPrealc",
17 [RXRPC_CONN_SERVICE_UNSECURED
] = "SvUnsec ",
18 [RXRPC_CONN_SERVICE_CHALLENGING
] = "SvChall ",
19 [RXRPC_CONN_SERVICE
] = "SvSecure",
20 [RXRPC_CONN_REMOTELY_ABORTED
] = "RmtAbort",
21 [RXRPC_CONN_LOCALLY_ABORTED
] = "LocAbort",
/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
/*
 * seq_file .start op for /proc/net/rxrpc_calls: position the iterator at
 * *_pos within the per-netns rxnet->calls list, using the RCU list-head
 * helper (returns the list head itself at position 0 so .show can emit a
 * header line).
 * NOTE(review): lines are missing from this extraction — the function's
 * braces and, presumably, the rcu_read_lock() that pairs with the _rcu
 * list walk and with rxrpc_call_seq_stop() are not visible; confirm
 * against the original file.
 */
27 static void *rxrpc_call_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
30 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
33 return seq_list_start_head_rcu(&rxnet
->calls
, *_pos
);
/*
 * seq_file .next op for /proc/net/rxrpc_calls: advance the cursor v to the
 * following entry of rxnet->calls (RCU list walk) and update *pos.
 * NOTE(review): the function's braces are not visible in this extraction.
 */
36 static void *rxrpc_call_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
38 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
40 return seq_list_next_rcu(v
, &rxnet
->calls
, pos
);
/*
 * seq_file .stop op for /proc/net/rxrpc_calls.
 * NOTE(review): the body is entirely missing from this extraction —
 * presumably it drops the RCU read lock taken by rxrpc_call_seq_start()
 * (the _rcu iteration helpers above imply one); verify against the
 * original file.
 */
43 static void rxrpc_call_seq_stop(struct seq_file
*seq
, void *v
)
/*
 * seq_file .show op for /proc/net/rxrpc_calls: render one call per line,
 * or the column-header line when v is the list head (&rxnet->calls).
 * lbuff/rbuff receive the local and remote transport addresses formatted
 * with the %pISpc sockaddr specifier, or placeholder strings when the
 * socket/local/peer is absent.
 * NOTE(review): many interior lines (braces, seq_puts/seq_printf call
 * sites, the peer lookup, the return statements) are missing from this
 * extraction; the fragments below are kept byte-identical.
 */
49 static int rxrpc_call_seq_show(struct seq_file
*seq
, void *v
)
51 struct rxrpc_local
*local
;
52 struct rxrpc_sock
*rx
;
53 struct rxrpc_peer
*peer
;
54 struct rxrpc_call
*call
;
55 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
56 unsigned long timeout
= 0;
57 rxrpc_seq_t tx_hard_ack
, rx_hard_ack
;
58 char lbuff
[50], rbuff
[50];
/* List head marks iteration position 0: emit the table header instead of
 * a call line (header emission call site truncated in this extraction). */
60 if (v
== &rxnet
->calls
) {
64 " SvID ConnID CallID End Use State Abort "
65 " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n");
69 call
= list_entry(v
, struct rxrpc_call
, link
);
/* call->socket is RCU-protected; the local endpoint hangs off it. */
71 rx
= rcu_dereference(call
->socket
);
73 local
= READ_ONCE(rx
->local
);
75 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
77 strcpy(lbuff
, "no_local");
79 strcpy(lbuff
, "no_socket");
84 sprintf(rbuff
, "%pISpc", &peer
->srx
.transport
);
86 strcpy(rbuff
, "no_connection");
/* Preallocated service calls carry no expect_rx_by timeout; timeout was
 * zero-initialised above for that case. */
88 if (call
->state
!= RXRPC_CALL_SERVER_PREALLOC
) {
89 timeout
= READ_ONCE(call
->expect_rx_by
);
/* Tx/Rx window widths are displayed as top - hard_ack below. */
93 tx_hard_ack
= READ_ONCE(call
->tx_hard_ack
);
94 rx_hard_ack
= READ_ONCE(call
->rx_hard_ack
);
96 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
97 " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
103 rxrpc_is_service_call(call
) ? "Svc" : "Clt",
104 refcount_read(&call
->ref
),
105 rxrpc_call_states
[call
->state
],
108 tx_hard_ack
, READ_ONCE(call
->tx_top
) - tx_hard_ack
,
109 rx_hard_ack
, READ_ONCE(call
->rx_top
) - rx_hard_ack
,
/*
 * seq_file operation table for /proc/net/rxrpc_calls.  Non-static: it is
 * referenced from wherever the proc entry is registered (outside this
 * view).  NOTE(review): the closing "};" is not visible in this
 * extraction.
 */
116 const struct seq_operations rxrpc_call_seq_ops
= {
117 .start
= rxrpc_call_seq_start
,
118 .next
= rxrpc_call_seq_next
,
119 .stop
= rxrpc_call_seq_stop
,
120 .show
= rxrpc_call_seq_show
,
/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
/*
 * seq_file .start op for /proc/net/rxrpc_conns: take rxnet->conn_lock for
 * reading (held across the whole iteration; dropped in
 * rxrpc_connection_seq_stop(), per the __acquires/__releases annotations)
 * and position the cursor at *_pos in rxnet->conn_proc_list.
 * NOTE(review): the function's braces are not visible in this extraction.
 */
126 static void *rxrpc_connection_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
127 __acquires(rxnet
->conn_lock
)
129 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
131 read_lock(&rxnet
->conn_lock
);
132 return seq_list_start_head(&rxnet
->conn_proc_list
, *_pos
);
/*
 * seq_file .next op for /proc/net/rxrpc_conns: advance v along
 * rxnet->conn_proc_list (plain list walk — conn_lock is already held by
 * .start) and update *pos.
 * NOTE(review): the tail of the parameter list (the loff_t *pos
 * declaration) and the function's braces are missing from this
 * extraction.
 */
135 static void *rxrpc_connection_seq_next(struct seq_file
*seq
, void *v
,
138 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
140 return seq_list_next(v
, &rxnet
->conn_proc_list
, pos
);
/*
 * seq_file .stop op for /proc/net/rxrpc_conns: release the conn_lock read
 * lock taken by rxrpc_connection_seq_start().
 * NOTE(review): the function's braces are not visible in this extraction.
 */
143 static void rxrpc_connection_seq_stop(struct seq_file
*seq
, void *v
)
144 __releases(rxnet
->conn_lock
)
146 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
148 read_unlock(&rxnet
->conn_lock
);
/*
 * seq_file .show op for /proc/net/rxrpc_conns: render one connection per
 * line, or the column-header line when v is the list head
 * (&rxnet->conn_proc_list).  For RXRPC_CONN_SERVICE_PREALLOC connections
 * the address buffers get placeholder strings (no local/peer is attached
 * yet); otherwise both transport addresses are formatted with %pISpc.
 * NOTE(review): interior lines (braces, seq_puts/seq_printf call sites,
 * several format arguments, the return statements) are missing from this
 * extraction; the fragments below are kept byte-identical.
 */
151 static int rxrpc_connection_seq_show(struct seq_file
*seq
, void *v
)
153 struct rxrpc_connection
*conn
;
154 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
155 char lbuff
[50], rbuff
[50];
157 if (v
== &rxnet
->conn_proc_list
) {
161 " SvID ConnID End Use State Key "
162 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n"
167 conn
= list_entry(v
, struct rxrpc_connection
, proc_link
);
/* Preallocated service conns have no addresses to show yet. */
168 if (conn
->state
== RXRPC_CONN_SERVICE_PREALLOC
) {
169 strcpy(lbuff
, "no_local");
170 strcpy(rbuff
, "no_connection");
174 sprintf(lbuff
, "%pISpc", &conn
->params
.local
->srx
.transport
);
176 sprintf(rbuff
, "%pISpc", &conn
->params
.peer
->srx
.transport
);
179 "UDP %-47.47s %-47.47s %4x %08x %s %3u"
180 " %s %08x %08x %08x %08x %08x %08x %08x\n",
185 rxrpc_conn_is_service(conn
) ? "Svc" : "Clt",
186 refcount_read(&conn
->ref
),
187 rxrpc_conn_states
[conn
->state
],
188 key_serial(conn
->params
.key
),
189 atomic_read(&conn
->serial
),
/* One call_id per connection channel (four channels per conn). */
191 conn
->channels
[0].call_id
,
192 conn
->channels
[1].call_id
,
193 conn
->channels
[2].call_id
,
194 conn
->channels
[3].call_id
);
/*
 * seq_file operation table for /proc/net/rxrpc_conns.  Non-static:
 * referenced from the proc registration site outside this view.
 * NOTE(review): the closing "};" is not visible in this extraction.
 */
199 const struct seq_operations rxrpc_connection_seq_ops
= {
200 .start
= rxrpc_connection_seq_start
,
201 .next
= rxrpc_connection_seq_next
,
202 .stop
= rxrpc_connection_seq_stop
,
203 .show
= rxrpc_connection_seq_show
,
/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
/*
 * seq_file .show op for /proc/net/rxrpc/peers: render one peer per line,
 * or the column-header line when v is SEQ_START_TOKEN.  Local and remote
 * transport addresses are formatted with %pISpc; "LastUse" is derived
 * from the current ktime_get_seconds() minus peer->last_tx_at, and the
 * retransmission timeout from jiffies_to_usecs(peer->rto_j).
 * NOTE(review): interior lines (braces, the declaration of 'now', the
 * seq_puts/seq_printf call sites, several format arguments such as the
 * congestion window and MTU, and the return statements) are missing from
 * this extraction; the fragments below are kept byte-identical.
 */
209 static int rxrpc_peer_seq_show(struct seq_file
*seq
, void *v
)
211 struct rxrpc_peer
*peer
;
213 char lbuff
[50], rbuff
[50];
215 if (v
== SEQ_START_TOKEN
) {
219 " Use CW MTU LastUse RTT RTO\n"
224 peer
= list_entry(v
, struct rxrpc_peer
, hash_link
);
226 sprintf(lbuff
, "%pISpc", &peer
->local
->srx
.transport
);
228 sprintf(rbuff
, "%pISpc", &peer
->srx
.transport
);
230 now
= ktime_get_seconds();
232 "UDP %-47.47s %-47.47s %3u"
233 " %3u %5u %6llus %8u %8u\n",
236 refcount_read(&peer
->ref
),
239 now
- peer
->last_tx_at
,
241 jiffies_to_usecs(peer
->rto_j
));
/*
 * seq_file .start op for /proc/net/rxrpc/peers.  The peers live in the
 * rxnet->peer_hash hash table, so the loff_t position is packed as
 * (bucket << shift) | index-within-bucket, where shift = 32 -
 * HASH_BITS(peer_hash).  Position 0 yields SEQ_START_TOKEN (header line);
 * *_pos >= UINT_MAX marks end of iteration; otherwise the cursor is
 * placed at entry n-1 of the selected bucket via seq_hlist_start_rcu()
 * (n is 1-based within a bucket so that 0 can mean "start of bucket").
 * NOTE(review): interior lines (braces, the declaration of 'p', the
 * bucket-advance loop, early returns, and presumably an rcu_read_lock()
 * pairing with the _rcu helper) are missing from this extraction;
 * verify against the original file.
 */
246 static void *rxrpc_peer_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
249 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
250 unsigned int bucket
, n
;
251 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
256 if (*_pos
>= UINT_MAX
)
259 n
= *_pos
& ((1U << shift
) - 1);
260 bucket
= *_pos
>> shift
;
262 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
268 return SEQ_START_TOKEN
;
273 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
278 *_pos
= (bucket
<< shift
) | n
;
/*
 * seq_file .next op for /proc/net/rxrpc/peers: advance within the current
 * hash bucket via seq_hlist_next_rcu(); when the bucket is exhausted,
 * step to the next bucket (re-packing *_pos as (bucket << shift) | n) and
 * restart there with seq_hlist_start_rcu().  Buckets past
 * HASH_SIZE(peer_hash) end the iteration.
 * NOTE(review): interior lines (braces, the declaration of 'p', the loop
 * around empty buckets, the end-of-iteration *_pos = UINT_MAX handling
 * implied by the guard below, and return statements) are missing from
 * this extraction; verify against the original file.
 */
282 static void *rxrpc_peer_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
284 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
285 unsigned int bucket
, n
;
286 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
289 if (*_pos
>= UINT_MAX
)
292 bucket
= *_pos
>> shift
;
294 p
= seq_hlist_next_rcu(v
, &rxnet
->peer_hash
[bucket
], _pos
);
301 *_pos
= (bucket
<< shift
) | n
;
303 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
312 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
/*
 * seq_file .stop op for /proc/net/rxrpc/peers.
 * NOTE(review): the body is entirely missing from this extraction —
 * presumably it drops the RCU read lock implied by the _rcu hlist
 * iteration helpers above; verify against the original file.
 */
318 static void rxrpc_peer_seq_stop(struct seq_file
*seq
, void *v
)
/*
 * seq_file operation table for /proc/net/rxrpc/peers.  Non-static:
 * referenced from the proc registration site outside this view.
 * NOTE(review): the closing "};" is not visible in this extraction.
 */
325 const struct seq_operations rxrpc_peer_seq_ops
= {
326 .start
= rxrpc_peer_seq_start
,
327 .next
= rxrpc_peer_seq_next
,
328 .stop
= rxrpc_peer_seq_stop
,
329 .show
= rxrpc_peer_seq_show
,
/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
/*
 * seq_file .show op for /proc/net/rxrpc/locals: render one local endpoint
 * per line (transport address via %pISpc, reference count, active-user
 * count), or the column-header line when v is SEQ_START_TOKEN.
 * NOTE(review): interior lines (braces, the lbuff declaration, the
 * seq_puts/seq_printf call sites and the return statements) are missing
 * from this extraction; the fragments below are kept byte-identical.
 */
335 static int rxrpc_local_seq_show(struct seq_file
*seq
, void *v
)
337 struct rxrpc_local
*local
;
340 if (v
== SEQ_START_TOKEN
) {
347 local
= hlist_entry(v
, struct rxrpc_local
, link
);
349 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
352 "UDP %-47.47s %3u %3u\n",
354 refcount_read(&local
->ref
),
355 atomic_read(&local
->active_users
));
/*
 * seq_file .start op for /proc/net/rxrpc/locals: position the iterator in
 * the rxnet->local_endpoints hlist.  Position 0 yields SEQ_START_TOKEN
 * (header line); *_pos >= UINT_MAX ends the iteration; otherwise the
 * cursor is placed at entry n-1 via seq_hlist_start_rcu() (n is the
 * 1-based position so that 0 can denote the header).
 * NOTE(review): interior lines (braces, the declaration/derivation of
 * 'n', early returns, and presumably an rcu_read_lock() pairing with the
 * _rcu helper) are missing from this extraction; verify against the
 * original file.
 */
360 static void *rxrpc_local_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
363 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
368 if (*_pos
>= UINT_MAX
)
373 return SEQ_START_TOKEN
;
375 return seq_hlist_start_rcu(&rxnet
->local_endpoints
, n
- 1);
/*
 * seq_file .next op for /proc/net/rxrpc/locals: advance v along
 * rxnet->local_endpoints via seq_hlist_next_rcu(), updating *_pos; the
 * UINT_MAX guard short-circuits once iteration has ended.
 * NOTE(review): the function's braces and the guard's consequent are
 * missing from this extraction.
 */
378 static void *rxrpc_local_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
380 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
382 if (*_pos
>= UINT_MAX
)
385 return seq_hlist_next_rcu(v
, &rxnet
->local_endpoints
, _pos
);
/*
 * seq_file .stop op for /proc/net/rxrpc/locals.
 * NOTE(review): the body is entirely missing from this extraction —
 * presumably it drops the RCU read lock implied by the _rcu hlist
 * iteration helpers above; verify against the original file.
 */
388 static void rxrpc_local_seq_stop(struct seq_file
*seq
, void *v
)
/*
 * seq_file operation table for /proc/net/rxrpc/locals.  Non-static:
 * referenced from the proc registration site outside this view.
 * NOTE(review): the closing "};" is not visible in this extraction.
 */
394 const struct seq_operations rxrpc_local_seq_ops
= {
395 .start
= rxrpc_local_seq_start
,
396 .next
= rxrpc_local_seq_next
,
397 .stop
= rxrpc_local_seq_stop
,
398 .show
= rxrpc_local_seq_show
,