1 // SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
13 static const char *const rxrpc_conn_states
[RXRPC_CONN__NR_STATES
] = {
14 [RXRPC_CONN_UNUSED
] = "Unused ",
15 [RXRPC_CONN_CLIENT_UNSECURED
] = "ClUnsec ",
16 [RXRPC_CONN_CLIENT
] = "Client ",
17 [RXRPC_CONN_SERVICE_PREALLOC
] = "SvPrealc",
18 [RXRPC_CONN_SERVICE_UNSECURED
] = "SvUnsec ",
19 [RXRPC_CONN_SERVICE_CHALLENGING
] = "SvChall ",
20 [RXRPC_CONN_SERVICE
] = "SvSecure",
21 [RXRPC_CONN_ABORTED
] = "Aborted ",
/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
27 static void *rxrpc_call_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
30 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
33 return seq_list_start_head_rcu(&rxnet
->calls
, *_pos
);
36 static void *rxrpc_call_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
38 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
40 return seq_list_next_rcu(v
, &rxnet
->calls
, pos
);
43 static void rxrpc_call_seq_stop(struct seq_file
*seq
, void *v
)
49 static int rxrpc_call_seq_show(struct seq_file
*seq
, void *v
)
51 struct rxrpc_local
*local
;
52 struct rxrpc_call
*call
;
53 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
54 enum rxrpc_call_state state
;
55 rxrpc_seq_t acks_hard_ack
;
56 char lbuff
[50], rbuff
[50];
59 if (v
== &rxnet
->calls
) {
63 " SvID ConnID CallID End Use State Abort "
64 " DebugId TxSeq TW RxSeq RW RxSerial CW RxTimo\n");
68 call
= list_entry(v
, struct rxrpc_call
, link
);
72 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
74 strcpy(lbuff
, "no_local");
76 sprintf(rbuff
, "%pISpc", &call
->dest_srx
.transport
);
78 state
= rxrpc_call_state(call
);
79 if (state
!= RXRPC_CALL_SERVER_PREALLOC
)
80 timeout
= ktime_ms_delta(READ_ONCE(call
->expect_rx_by
), ktime_get_real());
82 acks_hard_ack
= READ_ONCE(call
->acks_hard_ack
);
84 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
85 " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
88 call
->dest_srx
.srx_service
,
91 rxrpc_is_service_call(call
) ? "Svc" : "Clt",
92 refcount_read(&call
->ref
),
93 rxrpc_call_states
[state
],
96 acks_hard_ack
, READ_ONCE(call
->tx_top
) - acks_hard_ack
,
97 call
->ackr_window
, call
->ackr_wtop
- call
->ackr_window
,
105 const struct seq_operations rxrpc_call_seq_ops
= {
106 .start
= rxrpc_call_seq_start
,
107 .next
= rxrpc_call_seq_next
,
108 .stop
= rxrpc_call_seq_stop
,
109 .show
= rxrpc_call_seq_show
,
/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
115 static void *rxrpc_connection_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
116 __acquires(rxnet
->conn_lock
)
118 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
120 read_lock(&rxnet
->conn_lock
);
121 return seq_list_start_head(&rxnet
->conn_proc_list
, *_pos
);
124 static void *rxrpc_connection_seq_next(struct seq_file
*seq
, void *v
,
127 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
129 return seq_list_next(v
, &rxnet
->conn_proc_list
, pos
);
132 static void rxrpc_connection_seq_stop(struct seq_file
*seq
, void *v
)
133 __releases(rxnet
->conn_lock
)
135 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
137 read_unlock(&rxnet
->conn_lock
);
140 static int rxrpc_connection_seq_show(struct seq_file
*seq
, void *v
)
142 struct rxrpc_connection
*conn
;
143 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
145 char lbuff
[50], rbuff
[50];
147 if (v
== &rxnet
->conn_proc_list
) {
151 " SvID ConnID End Ref Act State Key "
152 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n"
157 conn
= list_entry(v
, struct rxrpc_connection
, proc_link
);
158 if (conn
->state
== RXRPC_CONN_SERVICE_PREALLOC
) {
159 strcpy(lbuff
, "no_local");
160 strcpy(rbuff
, "no_connection");
164 sprintf(lbuff
, "%pISpc", &conn
->local
->srx
.transport
);
165 sprintf(rbuff
, "%pISpc", &conn
->peer
->srx
.transport
);
167 state
= rxrpc_is_conn_aborted(conn
) ?
168 rxrpc_call_completions
[conn
->completion
] :
169 rxrpc_conn_states
[conn
->state
];
171 "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d"
172 " %s %08x %08x %08x %08x %08x %08x %08x\n",
177 rxrpc_conn_is_service(conn
) ? "Svc" : "Clt",
178 refcount_read(&conn
->ref
),
179 atomic_read(&conn
->active
),
181 key_serial(conn
->key
),
184 conn
->channels
[0].call_id
,
185 conn
->channels
[1].call_id
,
186 conn
->channels
[2].call_id
,
187 conn
->channels
[3].call_id
);
192 const struct seq_operations rxrpc_connection_seq_ops
= {
193 .start
= rxrpc_connection_seq_start
,
194 .next
= rxrpc_connection_seq_next
,
195 .stop
= rxrpc_connection_seq_stop
,
196 .show
= rxrpc_connection_seq_show
,
/*
 * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles
 */
202 static void *rxrpc_bundle_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
203 __acquires(rxnet
->conn_lock
)
205 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
207 read_lock(&rxnet
->conn_lock
);
208 return seq_list_start_head(&rxnet
->bundle_proc_list
, *_pos
);
211 static void *rxrpc_bundle_seq_next(struct seq_file
*seq
, void *v
,
214 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
216 return seq_list_next(v
, &rxnet
->bundle_proc_list
, pos
);
219 static void rxrpc_bundle_seq_stop(struct seq_file
*seq
, void *v
)
220 __releases(rxnet
->conn_lock
)
222 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
224 read_unlock(&rxnet
->conn_lock
);
227 static int rxrpc_bundle_seq_show(struct seq_file
*seq
, void *v
)
229 struct rxrpc_bundle
*bundle
;
230 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
231 char lbuff
[50], rbuff
[50];
233 if (v
== &rxnet
->bundle_proc_list
) {
237 " SvID Ref Act Flg Key |"
238 " Bundle Conn_0 Conn_1 Conn_2 Conn_3\n"
243 bundle
= list_entry(v
, struct rxrpc_bundle
, proc_link
);
245 sprintf(lbuff
, "%pISpc", &bundle
->local
->srx
.transport
);
246 sprintf(rbuff
, "%pISpc", &bundle
->peer
->srx
.transport
);
248 "UDP %-47.47s %-47.47s %4x %3u %3d"
249 " %c%c%c %08x | %08x %08x %08x %08x %08x\n",
253 refcount_read(&bundle
->ref
),
254 atomic_read(&bundle
->active
),
255 bundle
->try_upgrade
? 'U' : '-',
256 bundle
->exclusive
? 'e' : '-',
257 bundle
->upgrade
? 'u' : '-',
258 key_serial(bundle
->key
),
263 bundle
->conn_ids
[3]);
268 const struct seq_operations rxrpc_bundle_seq_ops
= {
269 .start
= rxrpc_bundle_seq_start
,
270 .next
= rxrpc_bundle_seq_next
,
271 .stop
= rxrpc_bundle_seq_stop
,
272 .show
= rxrpc_bundle_seq_show
,
/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
278 static int rxrpc_peer_seq_show(struct seq_file
*seq
, void *v
)
280 struct rxrpc_peer
*peer
;
282 char lbuff
[50], rbuff
[50];
284 if (v
== SEQ_START_TOKEN
) {
288 " Use SST MTU LastUse RTT RTO\n"
293 peer
= list_entry(v
, struct rxrpc_peer
, hash_link
);
295 sprintf(lbuff
, "%pISpc", &peer
->local
->srx
.transport
);
297 sprintf(rbuff
, "%pISpc", &peer
->srx
.transport
);
299 now
= ktime_get_seconds();
301 "UDP %-47.47s %-47.47s %3u"
302 " %3u %5u %6llus %8u %8u\n",
305 refcount_read(&peer
->ref
),
308 now
- peer
->last_tx_at
,
315 static void *rxrpc_peer_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
318 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
319 unsigned int bucket
, n
;
320 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
325 if (*_pos
>= UINT_MAX
)
328 n
= *_pos
& ((1U << shift
) - 1);
329 bucket
= *_pos
>> shift
;
331 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
337 return SEQ_START_TOKEN
;
342 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
347 *_pos
= (bucket
<< shift
) | n
;
351 static void *rxrpc_peer_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
353 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
354 unsigned int bucket
, n
;
355 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
358 if (*_pos
>= UINT_MAX
)
361 bucket
= *_pos
>> shift
;
363 p
= seq_hlist_next_rcu(v
, &rxnet
->peer_hash
[bucket
], _pos
);
370 *_pos
= (bucket
<< shift
) | n
;
372 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
381 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
387 static void rxrpc_peer_seq_stop(struct seq_file
*seq
, void *v
)
394 const struct seq_operations rxrpc_peer_seq_ops
= {
395 .start
= rxrpc_peer_seq_start
,
396 .next
= rxrpc_peer_seq_next
,
397 .stop
= rxrpc_peer_seq_stop
,
398 .show
= rxrpc_peer_seq_show
,
/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
404 static int rxrpc_local_seq_show(struct seq_file
*seq
, void *v
)
406 struct rxrpc_local
*local
;
409 if (v
== SEQ_START_TOKEN
) {
416 local
= hlist_entry(v
, struct rxrpc_local
, link
);
418 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
421 "UDP %-47.47s %3u %3u %3u\n",
423 refcount_read(&local
->ref
),
424 atomic_read(&local
->active_users
),
425 local
->rx_queue
.qlen
);
430 static void *rxrpc_local_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
433 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
438 if (*_pos
>= UINT_MAX
)
443 return SEQ_START_TOKEN
;
445 return seq_hlist_start_rcu(&rxnet
->local_endpoints
, n
- 1);
448 static void *rxrpc_local_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
450 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
452 if (*_pos
>= UINT_MAX
)
455 return seq_hlist_next_rcu(v
, &rxnet
->local_endpoints
, _pos
);
458 static void rxrpc_local_seq_stop(struct seq_file
*seq
, void *v
)
464 const struct seq_operations rxrpc_local_seq_ops
= {
465 .start
= rxrpc_local_seq_start
,
466 .next
= rxrpc_local_seq_next
,
467 .stop
= rxrpc_local_seq_stop
,
468 .show
= rxrpc_local_seq_show
,
/*
 * Display stats in /proc/net/rxrpc/stats
 */
474 int rxrpc_stats_show(struct seq_file
*seq
, void *v
)
476 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_single_net(seq
));
479 "Data : send=%u sendf=%u fail=%u\n",
480 atomic_read(&rxnet
->stat_tx_data_send
),
481 atomic_read(&rxnet
->stat_tx_data_send_frag
),
482 atomic_read(&rxnet
->stat_tx_data_send_fail
));
484 "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n",
485 atomic_read(&rxnet
->stat_tx_data
),
486 atomic_read(&rxnet
->stat_tx_data_retrans
),
487 atomic_read(&rxnet
->stat_tx_data_underflow
),
488 atomic_read(&rxnet
->stat_tx_data_cwnd_reset
));
490 "Data-Rx : nr=%u reqack=%u jumbo=%u\n",
491 atomic_read(&rxnet
->stat_rx_data
),
492 atomic_read(&rxnet
->stat_rx_data_reqack
),
493 atomic_read(&rxnet
->stat_rx_data_jumbo
));
495 "Ack : fill=%u send=%u skip=%u\n",
496 atomic_read(&rxnet
->stat_tx_ack_fill
),
497 atomic_read(&rxnet
->stat_tx_ack_send
),
498 atomic_read(&rxnet
->stat_tx_ack_skip
));
500 "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
501 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_REQUESTED
]),
502 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_DUPLICATE
]),
503 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_OUT_OF_SEQUENCE
]),
504 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_EXCEEDS_WINDOW
]),
505 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_NOSPACE
]),
506 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_PING
]),
507 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_PING_RESPONSE
]),
508 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_DELAY
]),
509 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_IDLE
]));
511 "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
512 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_REQUESTED
]),
513 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_DUPLICATE
]),
514 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_OUT_OF_SEQUENCE
]),
515 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_EXCEEDS_WINDOW
]),
516 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_NOSPACE
]),
517 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_PING
]),
518 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_PING_RESPONSE
]),
519 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_DELAY
]),
520 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_IDLE
]));
522 "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
523 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_ack_lost
]),
524 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_already_on
]),
525 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_more_rtt
]),
526 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_old_rtt
]));
528 "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
529 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_no_srv_last
]),
530 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_retrans
]),
531 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_slow_start
]),
532 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_small_txwin
]));
534 "Buffers : txb=%u rxb=%u\n",
535 atomic_read(&rxrpc_nr_txbuf
),
536 atomic_read(&rxrpc_n_rx_skbs
));
538 "IO-thread: loops=%u\n",
539 atomic_read(&rxnet
->stat_io_loop
));
/*
 * Clear stats if /proc/net/rxrpc/stats is written to.
 */
546 int rxrpc_stats_clear(struct file
*file
, char *buf
, size_t size
)
548 struct seq_file
*m
= file
->private_data
;
549 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_single_net(m
));
551 if (size
> 1 || (size
== 1 && buf
[0] != '\n'))
554 atomic_set(&rxnet
->stat_tx_data
, 0);
555 atomic_set(&rxnet
->stat_tx_data_retrans
, 0);
556 atomic_set(&rxnet
->stat_tx_data_underflow
, 0);
557 atomic_set(&rxnet
->stat_tx_data_cwnd_reset
, 0);
558 atomic_set(&rxnet
->stat_tx_data_send
, 0);
559 atomic_set(&rxnet
->stat_tx_data_send_frag
, 0);
560 atomic_set(&rxnet
->stat_tx_data_send_fail
, 0);
561 atomic_set(&rxnet
->stat_rx_data
, 0);
562 atomic_set(&rxnet
->stat_rx_data_reqack
, 0);
563 atomic_set(&rxnet
->stat_rx_data_jumbo
, 0);
565 atomic_set(&rxnet
->stat_tx_ack_fill
, 0);
566 atomic_set(&rxnet
->stat_tx_ack_send
, 0);
567 atomic_set(&rxnet
->stat_tx_ack_skip
, 0);
568 memset(&rxnet
->stat_tx_acks
, 0, sizeof(rxnet
->stat_tx_acks
));
569 memset(&rxnet
->stat_rx_acks
, 0, sizeof(rxnet
->stat_rx_acks
));
571 memset(&rxnet
->stat_why_req_ack
, 0, sizeof(rxnet
->stat_why_req_ack
));
573 atomic_set(&rxnet
->stat_io_loop
, 0);