// SPDX-License-Identifier: GPL-2.0-or-later
/* In-kernel rxperf server for testing purposes.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #define pr_fmt(fmt) "rxperf: " fmt
9 #include <linux/module.h>
10 #include <linux/slab.h>
12 #include <net/af_rxrpc.h>
13 #define RXRPC_TRACE_ONLY_DEFINE_ENUMS
14 #include <trace/events/rxrpc.h>
16 MODULE_DESCRIPTION("rxperf test server (afs)");
17 MODULE_AUTHOR("Red Hat, Inc.");
18 MODULE_LICENSE("GPL");
20 #define RXPERF_PORT 7009
21 #define RX_PERF_SERVICE 147
22 #define RX_PERF_VERSION 3
23 #define RX_PERF_SEND 0
24 #define RX_PERF_RECV 1
26 #define RX_PERF_FILE 4
27 #define RX_PERF_MAGIC_COOKIE 0x4711
/*
 * On-the-wire parameter block sent by the rxperf client.
 * NOTE(review): the members of this struct are missing from this extract —
 * ntohl() is applied to params.version and params.type later in this file,
 * so at least those big-endian fields exist; confirm against the original.
 */
29 struct rxperf_proto_params
{
/* Trailer appended after the reply data; see rxperf_process_call(). */
36 static const u8 rxperf_magic_cookie
[] = { 0x00, 0x00, 0x47, 0x11 };
/* Fixed test-only secret; presumably the payload for the server security
 * key built in rxperf_add_key() — confirm (the call's arguments are missing
 * from this extract).
 */
37 static const u8 secret
[8] = { 0xa7, 0x83, 0x8a, 0xcb, 0xc7, 0x83, 0xec, 0x94 };
/*
 * State machine for a server-side rxperf call.
 * NOTE(review): the closing "};" of this enum is missing from this
 * extract — confirm against the original file.
 */
39 enum rxperf_call_state
{
40 RXPERF_CALL_SV_AWAIT_PARAMS
, /* Server: Awaiting parameter block */
41 RXPERF_CALL_SV_AWAIT_REQUEST
, /* Server: Awaiting request data */
42 RXPERF_CALL_SV_REPLYING
, /* Server: Replying */
43 RXPERF_CALL_SV_AWAIT_ACK
, /* Server: Awaiting final ACK */
44 RXPERF_CALL_COMPLETE
, /* Completed or failed */
/*
 * Members of struct rxperf_call — one active server call.
 * NOTE(review): the "struct rxperf_call {" opening, the closing brace, and
 * several members referenced elsewhere in this file (type, kvec, iter, tmp,
 * service_id, abort_code, error) are missing from this extract — confirm
 * against the original file.
 */
48 struct rxrpc_call
*rxcall
;
/* Work item via which the call is serviced on rxperf_workqueue. */
51 struct work_struct work
;
54 size_t req_len
; /* Size of request blob */
55 size_t reply_len
; /* Size of reply blob */
56 unsigned int debug_id
;
57 unsigned int operation_id
;
/* Parameter block received from the client (holds version and type). */
58 struct rxperf_proto_params params
;
61 enum rxperf_call_state state
;
/* Stage selector switched on in rxperf_deliver_request(). */
63 unsigned short unmarshal
;
/* Data-delivery hook; advanced as the call progresses. */
65 int (*deliver
)(struct rxperf_call
*call
);
/* NOTE(review): not referenced anywhere in this extract. */
66 void (*processor
)(struct work_struct
*work
);
/* The server's AF_RXRPC listening socket. */
69 static struct socket
*rxperf_socket
;
70 static struct key
*rxperf_sec_keyring
; /* Ring of security/crypto keys */
/* Workqueue on which call servicing and preallocation work runs. */
71 static struct workqueue_struct
*rxperf_workqueue
;
/* Forward declarations for the call-servicing state machine. */
static void rxperf_deliver_to_call(struct work_struct *work);
static int rxperf_deliver_param_block(struct rxperf_call *call);
static int rxperf_deliver_request(struct rxperf_call *call);
static int rxperf_process_call(struct rxperf_call *call);
static void rxperf_charge_preallocation(struct work_struct *work);

/* Work item used to replenish the pool of preallocated incoming calls. */
static DECLARE_WORK(rxperf_charge_preallocation_work,
		    rxperf_charge_preallocation);
82 static inline void rxperf_set_call_state(struct rxperf_call
*call
,
83 enum rxperf_call_state to
)
88 static inline void rxperf_set_call_complete(struct rxperf_call
*call
,
89 int error
, s32 remote_abort
)
91 if (call
->state
!= RXPERF_CALL_COMPLETE
) {
92 call
->abort_code
= remote_abort
;
94 call
->state
= RXPERF_CALL_COMPLETE
;
/*
 * rxrpc callback: a preallocated incoming call is being discarded before it
 * was ever attached; free the rxperf_call we allocated for it.  The user
 * call ID is the rxperf_call pointer itself.
 *
 * (Braces restored — they were missing from this garbled extract.)
 */
static void rxperf_rx_discard_new_call(struct rxrpc_call *rxcall,
				       unsigned long user_call_ID)
{
	kfree((struct rxperf_call *)user_call_ID);
}
104 static void rxperf_rx_new_call(struct sock
*sk
, struct rxrpc_call
*rxcall
,
105 unsigned long user_call_ID
)
107 queue_work(rxperf_workqueue
, &rxperf_charge_preallocation_work
);
110 static void rxperf_queue_call_work(struct rxperf_call
*call
)
112 queue_work(rxperf_workqueue
, &call
->work
);
115 static void rxperf_notify_rx(struct sock
*sk
, struct rxrpc_call
*rxcall
,
116 unsigned long call_user_ID
)
118 struct rxperf_call
*call
= (struct rxperf_call
*)call_user_ID
;
120 if (call
->state
!= RXPERF_CALL_COMPLETE
)
121 rxperf_queue_call_work(call
);
124 static void rxperf_rx_attach(struct rxrpc_call
*rxcall
, unsigned long user_call_ID
)
126 struct rxperf_call
*call
= (struct rxperf_call
*)user_call_ID
;
128 call
->rxcall
= rxcall
;
131 static void rxperf_notify_end_reply_tx(struct sock
*sock
,
132 struct rxrpc_call
*rxcall
,
133 unsigned long call_user_ID
)
135 rxperf_set_call_state((struct rxperf_call
*)call_user_ID
,
136 RXPERF_CALL_SV_AWAIT_ACK
);
/*
 * Charge the incoming call preallocation: allocate rxperf_call records,
 * initialise each to expect the client's parameter block first, and hand
 * them to rxrpc to be attached to future incoming calls.
 *
 * NOTE(review): garbled extract — the allocation-failure check, the
 * remaining arguments of rxrpc_kernel_charge_accept(), the loop structure
 * and the end of the function are missing; confirm against the original
 * file before relying on this text.
 */
142 static void rxperf_charge_preallocation(struct work_struct
*work
)
144 struct rxperf_call
*call
;
147 call
= kzalloc(sizeof(*call
), GFP_KERNEL
);
/* Arrange for the parameter block to land in call->params via the
 * single-element kvec, and for delivery to start with the param parser.
 */
151 call
->type
= "unset";
152 call
->debug_id
= atomic_inc_return(&rxrpc_debug_id
);
153 call
->deliver
= rxperf_deliver_param_block
;
154 call
->state
= RXPERF_CALL_SV_AWAIT_PARAMS
;
155 call
->service_id
= RX_PERF_SERVICE
;
156 call
->iov_len
= sizeof(call
->params
);
157 call
->kvec
[0].iov_len
= sizeof(call
->params
);
158 call
->kvec
[0].iov_base
= &call
->params
;
159 iov_iter_kvec(&call
->iter
, READ
, call
->kvec
, 1, call
->iov_len
);
160 INIT_WORK(&call
->work
, rxperf_deliver_to_call
);
/* Offer the call to rxrpc; the user call ID is the rxperf_call pointer
 * (the remaining arguments of this call are missing from the extract).
 */
162 if (rxrpc_kernel_charge_accept(rxperf_socket
,
/*
 * Open an rxrpc socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 *
 * NOTE(review): garbled extract — the declaration of "ret", the error checks
 * after each call, the trailing arguments of sock_create_kern(), the success
 * return and the error-label structure are missing; confirm against the
 * original file.
 */
179 static int rxperf_open_socket(void)
181 struct sockaddr_rxrpc srx
;
182 struct socket
*socket
;
185 ret
= sock_create_kern(&init_net
, AF_RXRPC
, SOCK_DGRAM
, PF_INET6
,
/* Keep socket allocations from recursing into the filesystem. */
190 socket
->sk
->sk_allocation
= GFP_NOFS
;
192 /* bind the callback manager's address to make this a server socket */
193 memset(&srx
, 0, sizeof(srx
));
194 srx
.srx_family
= AF_RXRPC
;
195 srx
.srx_service
= RX_PERF_SERVICE
;
196 srx
.transport_type
= SOCK_DGRAM
;
197 srx
.transport_len
= sizeof(srx
.transport
.sin6
);
198 srx
.transport
.sin6
.sin6_family
= AF_INET6
;
199 srx
.transport
.sin6
.sin6_port
= htons(RXPERF_PORT
);
/* Require at least encrypted calls and install the server keyring. */
201 ret
= rxrpc_sock_set_min_security_level(socket
->sk
,
202 RXRPC_SECURITY_ENCRYPT
);
206 ret
= rxrpc_sock_set_security_keyring(socket
->sk
, rxperf_sec_keyring
);
208 ret
= kernel_bind(socket
, (struct sockaddr
*)&srx
, sizeof(srx
));
212 rxrpc_kernel_new_call_notification(socket
, rxperf_rx_new_call
,
213 rxperf_rx_discard_new_call
);
215 ret
= kernel_listen(socket
, INT_MAX
);
/* Publish the socket and prime the incoming-call pool synchronously. */
219 rxperf_socket
= socket
;
220 rxperf_charge_preallocation(&rxperf_charge_preallocation_work
);
224 sock_release(socket
);
226 pr_err("Can't set up rxperf socket: %d\n", ret
);
231 * close the rxrpc socket rxperf was using
233 static void rxperf_close_socket(void)
235 kernel_listen(rxperf_socket
, 0);
236 kernel_sock_shutdown(rxperf_socket
, SHUT_RDWR
);
237 flush_workqueue(rxperf_workqueue
);
238 sock_release(rxperf_socket
);
/*
 * Log remote abort codes that indicate that we have a protocol disagreement
 * with the peer.
 *
 * NOTE(review): garbled extract — the declaration of "msg", the default
 * switch case and the function's braces are missing; confirm against the
 * original file.
 */
245 static void rxperf_log_error(struct rxperf_call
*call
, s32 remote_abort
)
251 switch (remote_abort
) {
252 case RX_EOF
: msg
= "unexpected EOF"; break;
253 case RXGEN_CC_MARSHAL
: msg
= "client marshalling"; break;
254 case RXGEN_CC_UNMARSHAL
: msg
= "client unmarshalling"; break;
255 case RXGEN_SS_MARSHAL
: msg
= "server marshalling"; break;
256 case RXGEN_SS_UNMARSHAL
: msg
= "server unmarshalling"; break;
257 case RXGEN_DECODE
: msg
= "opcode decode"; break;
258 case RXGEN_SS_XDRFREE
: msg
= "server XDR cleanup"; break;
259 case RXGEN_CC_XDRFREE
: msg
= "client XDR cleanup"; break;
260 case -32: msg
= "insufficient data"; break;
268 pr_info("Peer reported %s failure on %s\n", msg
, call
->type
);
/*
 * Deliver messages to a call: drive the call's state machine, pulling in
 * received data via call->deliver() and generating the reply via
 * rxperf_process_call(); on error, abort the call with an appropriate code
 * and then mark it complete and drop our references.
 *
 * NOTE(review): garbled extract — the declaration of "ret", the loop and
 * switch braces, several abort-code arguments, the goto labels and parts of
 * the completion tail are missing; confirm against the original file.
 */
275 static void rxperf_deliver_to_call(struct work_struct
*work
)
277 struct rxperf_call
*call
= container_of(work
, struct rxperf_call
, work
);
278 enum rxperf_call_state state
;
279 u32 abort_code
, remote_abort
= 0;
/* Nothing to do if the call already completed. */
282 if (call
->state
== RXPERF_CALL_COMPLETE
)
285 while (state
= call
->state
,
286 state
== RXPERF_CALL_SV_AWAIT_PARAMS
||
287 state
== RXPERF_CALL_SV_AWAIT_REQUEST
||
288 state
== RXPERF_CALL_SV_AWAIT_ACK
290 if (state
== RXPERF_CALL_SV_AWAIT_ACK
) {
291 if (!rxrpc_kernel_check_life(rxperf_socket
, call
->rxcall
))
296 ret
= call
->deliver(call
);
298 ret
= rxperf_process_call(call
);
/* Report protocol disagreements signalled by the peer. */
307 rxperf_log_error(call
, call
->abort_code
);
310 abort_code
= RXGEN_OPCODE
;
311 rxrpc_kernel_abort_call(rxperf_socket
, call
->rxcall
,
313 rxperf_abort_op_not_supported
);
316 abort_code
= RX_USER_ABORT
;
317 rxrpc_kernel_abort_call(rxperf_socket
, call
->rxcall
,
319 rxperf_abort_op_not_supported
);
322 pr_err("Call %u in bad state %u\n",
323 call
->debug_id
, call
->state
);
330 rxrpc_kernel_abort_call(rxperf_socket
, call
->rxcall
,
331 RXGEN_SS_UNMARSHAL
, ret
,
332 rxperf_abort_unmarshal_error
);
335 rxrpc_kernel_abort_call(rxperf_socket
, call
->rxcall
,
337 rxperf_abort_general_error
);
/* Completion tail: record the outcome and release the rxrpc call. */
343 rxperf_set_call_complete(call
, ret
, remote_abort
);
344 /* The call may have been requeued */
345 rxrpc_kernel_shutdown_call(rxperf_socket
, call
->rxcall
);
346 rxrpc_kernel_put_call(rxperf_socket
, call
->rxcall
);
347 cancel_work(&call
->work
);
/*
 * Extract a piece of data from the received data socket buffers into
 * call->iter; want_more says whether further data is expected after this
 * piece.
 *
 * NOTE(review): garbled extract — the declaration of "ret", the final
 * argument of rxrpc_kernel_recv_data(), the returns and parts of the
 * end-of-data/abort handling are missing; confirm against the original
 * file.
 */
354 static int rxperf_extract_data(struct rxperf_call
*call
, bool want_more
)
356 u32 remote_abort
= 0;
359 ret
= rxrpc_kernel_recv_data(rxperf_socket
, call
->rxcall
, &call
->iter
,
360 &call
->iov_len
, want_more
, &remote_abort
,
362 pr_debug("Extract i=%zu l=%zu m=%u ret=%d\n",
363 iov_iter_count(&call
->iter
), call
->iov_len
, want_more
, ret
);
364 if (ret
== 0 || ret
== -EAGAIN
)
/* End of data: advance the state machine or flag early completion. */
368 switch (call
->state
) {
369 case RXPERF_CALL_SV_AWAIT_REQUEST
:
370 rxperf_set_call_state(call
, RXPERF_CALL_SV_REPLYING
);
372 case RXPERF_CALL_COMPLETE
:
373 pr_debug("premature completion %d", call
->error
);
381 rxperf_set_call_complete(call
, ret
, remote_abort
);
/*
 * Grab the operation ID from an incoming manager call and work out how many
 * further parameter bytes to expect for that operation, then hand delivery
 * on to rxperf_deliver_request().
 *
 * NOTE(review): garbled extract — the declarations of "version" and "ret",
 * the error returns, and the case labels of the operation switch are
 * missing; confirm against the original file.
 */
388 static int rxperf_deliver_param_block(struct rxperf_call
*call
)
393 /* Extract the parameter block */
394 ret
= rxperf_extract_data(call
, true);
398 version
= ntohl(call
->params
.version
);
399 call
->operation_id
= ntohl(call
->params
.type
);
400 call
->deliver
= rxperf_deliver_request
;
402 if (version
!= RX_PERF_VERSION
) {
403 pr_info("Version mismatch %x\n", version
);
/* Per-operation sizing of the next chunk to unmarshal. */
407 switch (call
->operation_id
) {
411 call
->iov_len
= 4; /* Expect req size */
416 call
->iov_len
= 4; /* Expect reply size */
420 call
->iov_len
= 8; /* Expect req size and reply size */
429 rxperf_set_call_state(call
, RXPERF_CALL_SV_AWAIT_REQUEST
);
430 return call
->deliver(call
);
/*
 * Deliver the request data: unmarshal the per-operation size words into
 * call->tmp, record the request/reply sizes, then discard the bulk request
 * payload.
 *
 * NOTE(review): garbled extract — the case labels of both switches, the
 * unmarshal-stage transitions and the returns are missing; confirm against
 * the original file.
 */
436 static int rxperf_deliver_request(struct rxperf_call
*call
)
440 switch (call
->unmarshal
) {
/* Point the iterator at call->tmp to receive the size words. */
442 call
->kvec
[0].iov_len
= call
->iov_len
;
443 call
->kvec
[0].iov_base
= call
->tmp
;
444 iov_iter_kvec(&call
->iter
, READ
, call
->kvec
, 1, call
->iov_len
);
448 ret
= rxperf_extract_data(call
, true);
452 switch (call
->operation_id
) {
455 call
->req_len
= ntohl(call
->tmp
[0]);
461 call
->reply_len
= ntohl(call
->tmp
[0]);
465 call
->req_len
= ntohl(call
->tmp
[0]);
466 call
->reply_len
= ntohl(call
->tmp
[1]);
469 pr_info("Can't parse extra params\n");
473 pr_debug("CALL op=%s rq=%zx rp=%zx\n",
474 call
->type
, call
->req_len
, call
->reply_len
);
/* Skip over the bulk request body without copying it anywhere. */
476 call
->iov_len
= call
->req_len
;
477 iov_iter_discard(&call
->iter
, READ
, call
->req_len
);
481 ret
= rxperf_extract_data(call
, false);
/*
 * Process a call for which we've received the request: stream reply_len
 * bytes of zeroes (from the zero page) back to the client, followed by the
 * magic cookie trailer.
 *
 * NOTE(review): garbled extract — the declarations of "bv", "iov" and "n",
 * the checks on the send results, the loop/brace structure and the abort
 * path's final argument are missing; confirm against the original file.
 */
494 static int rxperf_process_call(struct rxperf_call
*call
)
496 struct msghdr msg
= {};
500 size_t reply_len
= call
->reply_len
, len
;
/* Pre-declare the total reply size: payload plus trailing cookie. */
502 rxrpc_kernel_set_tx_length(rxperf_socket
, call
->rxcall
,
503 reply_len
+ sizeof(rxperf_magic_cookie
));
505 while (reply_len
> 0) {
506 len
= min_t(size_t, reply_len
, PAGE_SIZE
);
507 bvec_set_page(&bv
, ZERO_PAGE(0), len
, 0);
508 iov_iter_bvec(&msg
.msg_iter
, WRITE
, &bv
, 1, len
);
509 msg
.msg_flags
= MSG_MORE
;
510 n
= rxrpc_kernel_send_data(rxperf_socket
, call
->rxcall
, &msg
,
511 len
, rxperf_notify_end_reply_tx
);
/* Finish with the magic cookie (no MSG_MORE on the last send). */
519 len
= sizeof(rxperf_magic_cookie
);
520 iov
[0].iov_base
= (void *)rxperf_magic_cookie
;
521 iov
[0].iov_len
= len
;
522 iov_iter_kvec(&msg
.msg_iter
, WRITE
, iov
, 1, len
);
524 n
= rxrpc_kernel_send_data(rxperf_socket
, call
->rxcall
, &msg
, len
,
525 rxperf_notify_end_reply_tx
);
527 return 0; /* Success */
530 rxrpc_kernel_abort_call(rxperf_socket
, call
->rxcall
,
531 RXGEN_SS_MARSHAL
, -ENOMEM
,
/*
 * Add a key to the security keyring so that secured incoming calls can be
 * processed.
 *
 * NOTE(review): garbled extract — the declarations of "kref" and "ret",
 * several key_create_or_update() arguments (presumably including the
 * "secret" payload defined above — confirm) and the returns are missing;
 * confirm against the original file.
 */
539 static int rxperf_add_key(struct key
*keyring
)
544 kref
= key_create_or_update(make_key_ref(keyring
, true),
546 __stringify(RX_PERF_SERVICE
) ":2",
549 KEY_POS_VIEW
| KEY_POS_READ
| KEY_POS_SEARCH
551 KEY_ALLOC_NOT_IN_QUOTA
);
554 pr_err("Can't allocate rxperf server key: %ld\n", PTR_ERR(kref
));
555 return PTR_ERR(kref
);
558 ret
= key_link(keyring
, key_ref_to_ptr(kref
));
560 pr_err("Can't link rxperf server key: %d\n", ret
);
/*
 * Initialise the rxperf server: create the workqueue, allocate the security
 * keyring and server key, then open and bind the listening socket.
 *
 * NOTE(review): garbled extract — the declarations of "ret" and "keyring",
 * the error checks after each step, the success return and much of the
 * error-label unwind are missing; confirm against the original file.
 */
568 static int __init
rxperf_init(void)
573 pr_info("Server registering\n");
575 rxperf_workqueue
= alloc_workqueue("rxperf", 0, 0);
576 if (!rxperf_workqueue
)
577 goto error_workqueue
;
579 keyring
= keyring_alloc("rxperf_server",
580 GLOBAL_ROOT_UID
, GLOBAL_ROOT_GID
, current_cred(),
581 KEY_POS_VIEW
| KEY_POS_READ
| KEY_POS_SEARCH
|
583 KEY_USR_VIEW
| KEY_USR_READ
| KEY_USR_SEARCH
|
585 KEY_OTH_VIEW
| KEY_OTH_READ
| KEY_OTH_SEARCH
,
586 KEY_ALLOC_NOT_IN_QUOTA
,
588 if (IS_ERR(keyring
)) {
589 pr_err("Can't allocate rxperf server keyring: %ld\n",
593 rxperf_sec_keyring
= keyring
;
594 ret
= rxperf_add_key(keyring
);
598 ret
= rxperf_open_socket();
/* Error unwind: drop the keyring and workqueue acquired above. */
605 key_put(rxperf_sec_keyring
);
607 destroy_workqueue(rxperf_workqueue
);
610 pr_err("Failed to register: %d\n", ret
);
/* Must be called after net/ to create socket */
late_initcall(rxperf_init);
615 static void __exit
rxperf_exit(void)
617 pr_info("Server unregistering.\n");
619 rxperf_close_socket();
620 key_put(rxperf_sec_keyring
);
621 destroy_workqueue(rxperf_workqueue
);
module_exit(rxperf_exit);