/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " UserID           TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
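	/* TW and RW in the line below are the distances from the Tx and Rx
	 * hard-ack points to the corresponding buffer tops (tx_top/rx_top),
	 * i.e. packets sent but not yet hard-acked and packets received but
	 * not yet consumed.
	 */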
106 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
107 " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
113 rxrpc_is_service_call(call
) ? "Svc" : "Clt",
114 atomic_read(&call
->usage
),
115 rxrpc_call_states
[call
->state
],
118 tx_hard_ack
, READ_ONCE(call
->tx_top
) - tx_hard_ack
,
119 rx_hard_ack
, READ_ONCE(call
->rx_top
) - rx_hard_ack
,
126 const struct seq_operations rxrpc_call_seq_ops
= {
127 .start
= rxrpc_call_seq_start
,
128 .next
= rxrpc_call_seq_next
,
129 .stop
= rxrpc_call_seq_stop
,
130 .show
= rxrpc_call_seq_show
,
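
/*
 * The handlers above follow the usual seq_file pattern: ->start() takes the
 * locks and returns either the list head (rendered as the column header) or
 * the element at the requested position, ->show() emits one line per
 * element, ->next() advances, and ->stop() drops the locks once the read
 * buffer is full or the list is exhausted.
 */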

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use CW  MTU   LastUse          RTT Rc\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %12llu %2u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->rtt,
		   peer->rtt_cursor);

	return 0;
}
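
/*
 * The peer table is walked under RCU.  The file position doubles as a
 * cursor: the top HASH_BITS(peer_hash) bits of the 32-bit position select
 * the hash bucket and the low bits hold the index within that bucket plus
 * one, with position zero reserved for the header line (SEQ_START_TOKEN).
 */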
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
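
/*
 * Illustrative sketch, not part of this file: the seq_operations tables
 * above are consumed by procfs.  The registration is expected to live in
 * the rxrpc per-netns init code and to look roughly like the following
 * (names and the exact location should be checked against the rxrpc
 * net-namespace setup code rather than taken from this comment):
 *
 *	proc_create_net("calls", 0444, rxnet->proc_net,
 *			&rxrpc_call_seq_ops, sizeof(struct seq_net_private));
 *	proc_create_net("conns", 0444, rxnet->proc_net,
 *			&rxrpc_connection_seq_ops, sizeof(struct seq_net_private));
 *	proc_create_net("peers", 0444, rxnet->proc_net,
 *			&rxrpc_peer_seq_ops, sizeof(struct seq_net_private));
 *
 * Passing sizeof(struct seq_net_private) is what lets seq_file_net() in the
 * handlers above recover the struct net that the /proc file was opened in.
 */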