// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #include <linux/module.h>
10 #include <net/af_rxrpc.h>
11 #include "ar-internal.h"
13 static const char *const rxrpc_conn_states
[RXRPC_CONN__NR_STATES
] = {
14 [RXRPC_CONN_UNUSED
] = "Unused ",
15 [RXRPC_CONN_CLIENT_UNSECURED
] = "ClUnsec ",
16 [RXRPC_CONN_CLIENT
] = "Client ",
17 [RXRPC_CONN_SERVICE_PREALLOC
] = "SvPrealc",
18 [RXRPC_CONN_SERVICE_UNSECURED
] = "SvUnsec ",
19 [RXRPC_CONN_SERVICE_CHALLENGING
] = "SvChall ",
20 [RXRPC_CONN_SERVICE
] = "SvSecure",
21 [RXRPC_CONN_ABORTED
] = "Aborted ",
/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
27 static void *rxrpc_call_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
30 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
33 return seq_list_start_head_rcu(&rxnet
->calls
, *_pos
);
36 static void *rxrpc_call_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
38 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
40 return seq_list_next_rcu(v
, &rxnet
->calls
, pos
);
43 static void rxrpc_call_seq_stop(struct seq_file
*seq
, void *v
)
49 static int rxrpc_call_seq_show(struct seq_file
*seq
, void *v
)
51 struct rxrpc_local
*local
;
52 struct rxrpc_call
*call
;
53 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
54 enum rxrpc_call_state state
;
55 rxrpc_seq_t tx_bottom
;
56 char lbuff
[50], rbuff
[50];
59 if (v
== &rxnet
->calls
) {
63 " SvID ConnID CallID End Use State Abort "
64 " DebugId TxSeq TW RxSeq RW RxSerial CW RxTimo\n");
68 call
= list_entry(v
, struct rxrpc_call
, link
);
72 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
74 strcpy(lbuff
, "no_local");
76 sprintf(rbuff
, "%pISpc", &call
->dest_srx
.transport
);
78 state
= rxrpc_call_state(call
);
79 if (state
!= RXRPC_CALL_SERVER_PREALLOC
)
80 timeout
= ktime_ms_delta(READ_ONCE(call
->expect_rx_by
), ktime_get_real());
82 tx_bottom
= READ_ONCE(call
->tx_bottom
);
84 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
85 " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
88 call
->dest_srx
.srx_service
,
91 rxrpc_is_service_call(call
) ? "Svc" : "Clt",
92 refcount_read(&call
->ref
),
93 rxrpc_call_states
[state
],
96 tx_bottom
, READ_ONCE(call
->tx_top
) - tx_bottom
,
97 call
->ackr_window
, call
->ackr_wtop
- call
->ackr_window
,
105 const struct seq_operations rxrpc_call_seq_ops
= {
106 .start
= rxrpc_call_seq_start
,
107 .next
= rxrpc_call_seq_next
,
108 .stop
= rxrpc_call_seq_stop
,
109 .show
= rxrpc_call_seq_show
,
/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
115 static void *rxrpc_connection_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
116 __acquires(rxnet
->conn_lock
)
118 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
120 read_lock(&rxnet
->conn_lock
);
121 return seq_list_start_head(&rxnet
->conn_proc_list
, *_pos
);
124 static void *rxrpc_connection_seq_next(struct seq_file
*seq
, void *v
,
127 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
129 return seq_list_next(v
, &rxnet
->conn_proc_list
, pos
);
132 static void rxrpc_connection_seq_stop(struct seq_file
*seq
, void *v
)
133 __releases(rxnet
->conn_lock
)
135 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
137 read_unlock(&rxnet
->conn_lock
);
140 static int rxrpc_connection_seq_show(struct seq_file
*seq
, void *v
)
142 struct rxrpc_connection
*conn
;
143 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
145 char lbuff
[50], rbuff
[50];
147 if (v
== &rxnet
->conn_proc_list
) {
151 " SvID ConnID End Ref Act State Key "
152 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n"
157 conn
= list_entry(v
, struct rxrpc_connection
, proc_link
);
158 if (conn
->state
== RXRPC_CONN_SERVICE_PREALLOC
) {
159 strcpy(lbuff
, "no_local");
160 strcpy(rbuff
, "no_connection");
164 sprintf(lbuff
, "%pISpc", &conn
->local
->srx
.transport
);
165 sprintf(rbuff
, "%pISpc", &conn
->peer
->srx
.transport
);
167 state
= rxrpc_is_conn_aborted(conn
) ?
168 rxrpc_call_completions
[conn
->completion
] :
169 rxrpc_conn_states
[conn
->state
];
171 "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d"
172 " %s %08x %08x %08x %08x %08x %08x %08x\n",
177 rxrpc_conn_is_service(conn
) ? "Svc" : "Clt",
178 refcount_read(&conn
->ref
),
179 atomic_read(&conn
->active
),
181 key_serial(conn
->key
),
184 conn
->channels
[0].call_id
,
185 conn
->channels
[1].call_id
,
186 conn
->channels
[2].call_id
,
187 conn
->channels
[3].call_id
);
192 const struct seq_operations rxrpc_connection_seq_ops
= {
193 .start
= rxrpc_connection_seq_start
,
194 .next
= rxrpc_connection_seq_next
,
195 .stop
= rxrpc_connection_seq_stop
,
196 .show
= rxrpc_connection_seq_show
,
/*
 * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles
 */
202 static void *rxrpc_bundle_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
203 __acquires(rxnet
->conn_lock
)
205 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
207 read_lock(&rxnet
->conn_lock
);
208 return seq_list_start_head(&rxnet
->bundle_proc_list
, *_pos
);
211 static void *rxrpc_bundle_seq_next(struct seq_file
*seq
, void *v
,
214 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
216 return seq_list_next(v
, &rxnet
->bundle_proc_list
, pos
);
219 static void rxrpc_bundle_seq_stop(struct seq_file
*seq
, void *v
)
220 __releases(rxnet
->conn_lock
)
222 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
224 read_unlock(&rxnet
->conn_lock
);
227 static int rxrpc_bundle_seq_show(struct seq_file
*seq
, void *v
)
229 struct rxrpc_bundle
*bundle
;
230 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
231 char lbuff
[50], rbuff
[50];
233 if (v
== &rxnet
->bundle_proc_list
) {
237 " SvID Ref Act Flg Key |"
238 " Bundle Conn_0 Conn_1 Conn_2 Conn_3\n"
243 bundle
= list_entry(v
, struct rxrpc_bundle
, proc_link
);
245 sprintf(lbuff
, "%pISpc", &bundle
->local
->srx
.transport
);
246 sprintf(rbuff
, "%pISpc", &bundle
->peer
->srx
.transport
);
248 "UDP %-47.47s %-47.47s %4x %3u %3d"
249 " %c%c%c %08x | %08x %08x %08x %08x %08x\n",
253 refcount_read(&bundle
->ref
),
254 atomic_read(&bundle
->active
),
255 bundle
->try_upgrade
? 'U' : '-',
256 bundle
->exclusive
? 'e' : '-',
257 bundle
->upgrade
? 'u' : '-',
258 key_serial(bundle
->key
),
263 bundle
->conn_ids
[3]);
268 const struct seq_operations rxrpc_bundle_seq_ops
= {
269 .start
= rxrpc_bundle_seq_start
,
270 .next
= rxrpc_bundle_seq_next
,
271 .stop
= rxrpc_bundle_seq_stop
,
272 .show
= rxrpc_bundle_seq_show
,
/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
278 static int rxrpc_peer_seq_show(struct seq_file
*seq
, void *v
)
280 struct rxrpc_peer
*peer
;
282 char lbuff
[50], rbuff
[50];
284 if (v
== SEQ_START_TOKEN
) {
286 "Proto Local Remote Use SST Maxd LastUse RTT RTO\n"
291 peer
= list_entry(v
, struct rxrpc_peer
, hash_link
);
293 sprintf(lbuff
, "%pISpc", &peer
->local
->srx
.transport
);
295 sprintf(rbuff
, "%pISpc", &peer
->srx
.transport
);
297 now
= ktime_get_seconds();
299 "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n",
302 refcount_read(&peer
->ref
),
305 now
- peer
->last_tx_at
,
306 READ_ONCE(peer
->recent_srtt_us
),
307 READ_ONCE(peer
->recent_rto_us
));
312 static void *rxrpc_peer_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
315 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
316 unsigned int bucket
, n
;
317 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
322 if (*_pos
>= UINT_MAX
)
325 n
= *_pos
& ((1U << shift
) - 1);
326 bucket
= *_pos
>> shift
;
328 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
334 return SEQ_START_TOKEN
;
339 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
344 *_pos
= (bucket
<< shift
) | n
;
348 static void *rxrpc_peer_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
350 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
351 unsigned int bucket
, n
;
352 unsigned int shift
= 32 - HASH_BITS(rxnet
->peer_hash
);
355 if (*_pos
>= UINT_MAX
)
358 bucket
= *_pos
>> shift
;
360 p
= seq_hlist_next_rcu(v
, &rxnet
->peer_hash
[bucket
], _pos
);
367 *_pos
= (bucket
<< shift
) | n
;
369 if (bucket
>= HASH_SIZE(rxnet
->peer_hash
)) {
378 p
= seq_hlist_start_rcu(&rxnet
->peer_hash
[bucket
], n
- 1);
384 static void rxrpc_peer_seq_stop(struct seq_file
*seq
, void *v
)
391 const struct seq_operations rxrpc_peer_seq_ops
= {
392 .start
= rxrpc_peer_seq_start
,
393 .next
= rxrpc_peer_seq_next
,
394 .stop
= rxrpc_peer_seq_stop
,
395 .show
= rxrpc_peer_seq_show
,
/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
401 static int rxrpc_local_seq_show(struct seq_file
*seq
, void *v
)
403 struct rxrpc_local
*local
;
406 if (v
== SEQ_START_TOKEN
) {
413 local
= hlist_entry(v
, struct rxrpc_local
, link
);
415 sprintf(lbuff
, "%pISpc", &local
->srx
.transport
);
418 "UDP %-47.47s %3u %3u %3u\n",
420 refcount_read(&local
->ref
),
421 atomic_read(&local
->active_users
),
422 local
->rx_queue
.qlen
);
427 static void *rxrpc_local_seq_start(struct seq_file
*seq
, loff_t
*_pos
)
430 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
435 if (*_pos
>= UINT_MAX
)
440 return SEQ_START_TOKEN
;
442 return seq_hlist_start_rcu(&rxnet
->local_endpoints
, n
- 1);
445 static void *rxrpc_local_seq_next(struct seq_file
*seq
, void *v
, loff_t
*_pos
)
447 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_net(seq
));
449 if (*_pos
>= UINT_MAX
)
452 return seq_hlist_next_rcu(v
, &rxnet
->local_endpoints
, _pos
);
455 static void rxrpc_local_seq_stop(struct seq_file
*seq
, void *v
)
461 const struct seq_operations rxrpc_local_seq_ops
= {
462 .start
= rxrpc_local_seq_start
,
463 .next
= rxrpc_local_seq_next
,
464 .stop
= rxrpc_local_seq_stop
,
465 .show
= rxrpc_local_seq_show
,
/*
 * Display stats in /proc/net/rxrpc/stats
 */
471 int rxrpc_stats_show(struct seq_file
*seq
, void *v
)
473 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_single_net(seq
));
476 "Data : send=%u sendf=%u fail=%u emsz=%u\n",
477 atomic_read(&rxnet
->stat_tx_data_send
),
478 atomic_read(&rxnet
->stat_tx_data_send_frag
),
479 atomic_read(&rxnet
->stat_tx_data_send_fail
),
480 atomic_read(&rxnet
->stat_tx_data_send_msgsize
));
482 "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n",
483 atomic_read(&rxnet
->stat_tx_data
),
484 atomic_read(&rxnet
->stat_tx_data_retrans
),
485 atomic_read(&rxnet
->stat_tx_data_underflow
),
486 atomic_read(&rxnet
->stat_tx_data_cwnd_reset
));
488 "Data-Rx : nr=%u reqack=%u jumbo=%u\n",
489 atomic_read(&rxnet
->stat_rx_data
),
490 atomic_read(&rxnet
->stat_rx_data_reqack
),
491 atomic_read(&rxnet
->stat_rx_data_jumbo
));
493 "Ack : fill=%u send=%u skip=%u\n",
494 atomic_read(&rxnet
->stat_tx_ack_fill
),
495 atomic_read(&rxnet
->stat_tx_ack_send
),
496 atomic_read(&rxnet
->stat_tx_ack_skip
));
498 "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
499 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_REQUESTED
]),
500 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_DUPLICATE
]),
501 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_OUT_OF_SEQUENCE
]),
502 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_EXCEEDS_WINDOW
]),
503 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_NOSPACE
]),
504 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_PING
]),
505 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_PING_RESPONSE
]),
506 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_DELAY
]),
507 atomic_read(&rxnet
->stat_tx_acks
[RXRPC_ACK_IDLE
]));
509 "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u z=%u\n",
510 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_REQUESTED
]),
511 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_DUPLICATE
]),
512 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_OUT_OF_SEQUENCE
]),
513 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_EXCEEDS_WINDOW
]),
514 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_NOSPACE
]),
515 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_PING
]),
516 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_PING_RESPONSE
]),
517 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_DELAY
]),
518 atomic_read(&rxnet
->stat_rx_acks
[RXRPC_ACK_IDLE
]),
519 atomic_read(&rxnet
->stat_rx_acks
[0]));
521 "Why-Req-A: acklost=%u mrtt=%u ortt=%u stall=%u\n",
522 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_ack_lost
]),
523 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_more_rtt
]),
524 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_old_rtt
]),
525 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_app_stall
]));
527 "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
528 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_no_srv_last
]),
529 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_retrans
]),
530 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_slow_start
]),
531 atomic_read(&rxnet
->stat_why_req_ack
[rxrpc_reqack_small_txwin
]));
533 "Jumbo-Tx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n",
534 atomic_read(&rxnet
->stat_tx_jumbo
[0]),
535 atomic_read(&rxnet
->stat_tx_jumbo
[1]),
536 atomic_read(&rxnet
->stat_tx_jumbo
[2]),
537 atomic_read(&rxnet
->stat_tx_jumbo
[3]),
538 atomic_read(&rxnet
->stat_tx_jumbo
[4]),
539 atomic_read(&rxnet
->stat_tx_jumbo
[5]),
540 atomic_read(&rxnet
->stat_tx_jumbo
[6]),
541 atomic_read(&rxnet
->stat_tx_jumbo
[7]),
542 atomic_read(&rxnet
->stat_tx_jumbo
[8]),
543 atomic_read(&rxnet
->stat_tx_jumbo
[9]));
545 "Jumbo-Rx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n",
546 atomic_read(&rxnet
->stat_rx_jumbo
[0]),
547 atomic_read(&rxnet
->stat_rx_jumbo
[1]),
548 atomic_read(&rxnet
->stat_rx_jumbo
[2]),
549 atomic_read(&rxnet
->stat_rx_jumbo
[3]),
550 atomic_read(&rxnet
->stat_rx_jumbo
[4]),
551 atomic_read(&rxnet
->stat_rx_jumbo
[5]),
552 atomic_read(&rxnet
->stat_rx_jumbo
[6]),
553 atomic_read(&rxnet
->stat_rx_jumbo
[7]),
554 atomic_read(&rxnet
->stat_rx_jumbo
[8]),
555 atomic_read(&rxnet
->stat_rx_jumbo
[9]));
557 "Buffers : txb=%u rxb=%u\n",
558 atomic_read(&rxrpc_nr_txbuf
),
559 atomic_read(&rxrpc_n_rx_skbs
));
561 "IO-thread: loops=%u\n",
562 atomic_read(&rxnet
->stat_io_loop
));
/*
 * Clear stats if /proc/net/rxrpc/stats is written to.
 */
569 int rxrpc_stats_clear(struct file
*file
, char *buf
, size_t size
)
571 struct seq_file
*m
= file
->private_data
;
572 struct rxrpc_net
*rxnet
= rxrpc_net(seq_file_single_net(m
));
574 if (size
> 1 || (size
== 1 && buf
[0] != '\n'))
577 atomic_set(&rxnet
->stat_tx_data
, 0);
578 atomic_set(&rxnet
->stat_tx_data_retrans
, 0);
579 atomic_set(&rxnet
->stat_tx_data_underflow
, 0);
580 atomic_set(&rxnet
->stat_tx_data_cwnd_reset
, 0);
581 atomic_set(&rxnet
->stat_tx_data_send
, 0);
582 atomic_set(&rxnet
->stat_tx_data_send_frag
, 0);
583 atomic_set(&rxnet
->stat_tx_data_send_fail
, 0);
584 atomic_set(&rxnet
->stat_rx_data
, 0);
585 atomic_set(&rxnet
->stat_rx_data_reqack
, 0);
586 atomic_set(&rxnet
->stat_rx_data_jumbo
, 0);
588 atomic_set(&rxnet
->stat_tx_ack_fill
, 0);
589 atomic_set(&rxnet
->stat_tx_ack_send
, 0);
590 atomic_set(&rxnet
->stat_tx_ack_skip
, 0);
591 memset(&rxnet
->stat_tx_acks
, 0, sizeof(rxnet
->stat_tx_acks
));
592 memset(&rxnet
->stat_rx_acks
, 0, sizeof(rxnet
->stat_rx_acks
));
593 memset(&rxnet
->stat_tx_jumbo
, 0, sizeof(rxnet
->stat_tx_jumbo
));
594 memset(&rxnet
->stat_rx_jumbo
, 0, sizeof(rxnet
->stat_rx_jumbo
));
596 memset(&rxnet
->stat_why_req_ack
, 0, sizeof(rxnet
->stat_why_req_ack
));
598 atomic_set(&rxnet
->stat_io_loop
, 0);