// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	/* The RCU read lock covers the rcu_dereference() of call->socket in
	 * ->show(); the call list itself is guarded by call_lock.
	 */
	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " UserID           TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->user_call_ID,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start = rxrpc_call_seq_start,
	.next  = rxrpc_call_seq_next,
	.stop  = rxrpc_call_seq_stop,
	.show  = rxrpc_call_seq_show,
};
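
/*
 * These ops are not wired up here: registration is expected to happen in the
 * per-netns init code.  A sketch, assuming the usual proc_create_net()
 * pattern rather than anything defined in this file:
 *
 *	proc_create_net("calls", 0444, rxnet->proc_net,
 *			&rxrpc_call_seq_ops, sizeof(struct seq_net_private));
 */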

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start = rxrpc_connection_seq_start,
	.next  = rxrpc_connection_seq_next,
	.stop  = rxrpc_connection_seq_stop,
	.show  = rxrpc_connection_seq_show,
};
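
/*
 * The connection table is expected to be registered in the same way as the
 * call table above (e.g. as "conns" via proc_create_net()).  Note that the
 * read_lock taken in ->start is only held across one batch of ->show()
 * calls and is dropped again in ->stop, so a very large table may change
 * between batches of output.
 */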

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use CW  MTU   LastUse          RTT Rc\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %12llu %2u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->rtt,
		   peer->rtt_cursor);

	return 0;
}
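
/*
 * The iterators below pack their position into *_pos: the top
 * HASH_BITS(rxnet->peer_hash) bits hold the hash bucket and the low bits a
 * one-based index into that bucket's chain.  A *_pos of 0 yields
 * SEQ_START_TOKEN for the header line, and UINT_MAX marks the end of the
 * table.
 */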
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
	.start = rxrpc_peer_seq_start,
	.next  = rxrpc_peer_seq_next,
	.stop  = rxrpc_peer_seq_stop,
	.show  = rxrpc_peer_seq_show,
};
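
/*
 * Unlike the call and connection tables, the peer table is walked under the
 * RCU read lock alone, so peers added or removed while /proc/net/rxrpc/peers
 * is being read may be missed.  As above, this ops table is expected to be
 * registered by the netns init code (e.g. as "peers" via proc_create_net()).
 */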