// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - serial transport
 * binding. This driver is an implementation of the DMTF specification
 * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport
 * Binding", available at:
 *
 *  https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf
 *
 * This driver provides DSP0253-type MCTP-over-serial transport using a Linux
 * tty device, by setting the N_MCTP line discipline on the tty.
 *
 * Copyright (c) 2021 Code Construct
 */
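
/*
 * Example (userspace, not part of this driver): a minimal sketch of attaching
 * the N_MCTP line discipline to a serial port with the TIOCSETD ioctl, which
 * causes this driver to create a mctpserial%d network interface for that
 * port. The device path below is hypothetical, and util-linux ldattach can
 * typically do the same job; N_MCTP (28) comes from the <linux/tty.h> UAPI
 * header.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int ldisc = 28;	// N_MCTP
 *		int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);
 *
 *		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc)) {
 *			perror("attach N_MCTP");
 *			return 1;
 *		}
 *
 *		// Keep the fd open; closing it detaches the line
 *		// discipline and removes the network interface.
 *		pause();
 *		return 0;
 *	}
 */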

#include <linux/idr.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/workqueue.h>
#include <linux/crc-ccitt.h>

#include <linux/mctp.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>

#define MCTP_SERIAL_MTU          68 /* base mtu (64) + mctp header */
#define MCTP_SERIAL_FRAME_MTU    (MCTP_SERIAL_MTU + 6) /* + serial framing */

#define MCTP_SERIAL_VERSION      0x1 /* DSP0253 defines a single version: 1 */

#define BUFSIZE                  MCTP_SERIAL_FRAME_MTU

#define BYTE_FRAME               0x7e
#define BYTE_ESC                 0x7d

#define FCS_INIT                 0xffff
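
/*
 * DSP0253 frame layout, as produced and consumed by the state machines below:
 *
 *   0x7e | version (0x01) | byte count | MCTP packet bytes | FCS | 0x7e
 *
 * The FCS is a 16-bit CRC-CCITT over the version, byte count and unescaped
 * packet bytes, sent high byte first. Within the MCTP packet bytes, 0x7e and
 * 0x7d are byte-stuffed as 0x7d followed by the original byte with bit 5
 * cleared (0x7e -> 0x7d 0x5e, 0x7d -> 0x7d 0x5d); the receiver restores the
 * byte by setting bit 5 again.
 */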

static DEFINE_IDA(mctp_serial_ida);

enum mctp_serial_state {
        STATE_IDLE,
        STATE_START,
        STATE_HEADER,
        STATE_DATA,
        STATE_ESCAPE,
        STATE_TRAILER,
        STATE_DONE,
        STATE_ERR,
};

struct mctp_serial {
        struct net_device       *netdev;
        struct tty_struct       *tty;

        int                     idx;

        /* protects our rx & tx state machines; held during both paths */
        spinlock_t              lock;

        struct work_struct      tx_work;
        enum mctp_serial_state  txstate, rxstate;
        u16                     txfcs, rxfcs, rxfcs_rcvd;
        unsigned int            txlen, rxlen;
        unsigned int            txpos, rxpos;
        u8                      txbuf[BUFSIZE],
                                rxbuf[BUFSIZE];
};

static bool needs_escape(u8 c)
{
        return c == BYTE_ESC || c == BYTE_FRAME;
}

static unsigned int next_chunk_len(struct mctp_serial *dev)
{
        unsigned int i;

        /* either we have no bytes to send ... */
        if (dev->txpos == dev->txlen)
                return 0;

        /* ... or the next byte to send is an escaped byte; requiring a
         * single-byte chunk...
         */
        if (needs_escape(dev->txbuf[dev->txpos]))
                return 1;

        /* ... or we have one or more bytes up to the next escape - this chunk
         * will be those non-escaped bytes, and does not include the escaped
         * byte.
         */
        for (i = 1; i + dev->txpos < dev->txlen; i++) {
                if (needs_escape(dev->txbuf[dev->txpos + i]))
                        break;
        }

        return i;
}
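
/*
 * Worked example: with txbuf = { 0x01, 0x7e, 0x02, 0x03 } and txlen = 4,
 * successive calls to next_chunk_len() (advancing txpos by the returned
 * length each time) yield 1 (0x01), 1 (the escaped 0x7e), 2 (0x02 0x03) and
 * finally 0. The parameterised KUnit cases at the end of this file exercise
 * the same behaviour.
 */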

static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
{
        return dev->tty->ops->write(dev->tty, buf, len);
}
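
/*
 * Transmit path overview: mctp_serial_tx() copies the outgoing packet into
 * txbuf, stops the netdev queue and schedules tx_work. The worker below then
 * writes as much of the frame (header, escaped data, trailer) as the tty
 * accepts, tracking progress in txstate/txpos. If a write is short, the
 * tty's write_wakeup callback reschedules the work until the trailer has
 * been sent, at which point the queue is woken again.
 */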

static void mctp_serial_tx_work(struct work_struct *work)
{
        struct mctp_serial *dev = container_of(work, struct mctp_serial,
                                               tx_work);
        unsigned long flags;
        ssize_t txlen;
        unsigned int len;
        u8 c, buf[3];

        spin_lock_irqsave(&dev->lock, flags);

        /* txstate represents the next thing to send */
        switch (dev->txstate) {
        case STATE_START:
                dev->txpos = 0;
                fallthrough;
        case STATE_HEADER:
                buf[0] = BYTE_FRAME;
                buf[1] = MCTP_SERIAL_VERSION;
                buf[2] = dev->txlen;

                if (!dev->txpos)
                        dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2);

                txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
                if (txlen <= 0) {
                        dev->txstate = STATE_ERR;
                } else {
                        dev->txpos += txlen;
                        if (dev->txpos == 3) {
                                dev->txstate = STATE_DATA;
                                dev->txpos = 0;
                        }
                }
                break;

        case STATE_ESCAPE:
                buf[0] = dev->txbuf[dev->txpos] & ~0x20;
                txlen = write_chunk(dev, buf, 1);
                if (txlen <= 0) {
                        dev->txstate = STATE_ERR;
                } else {
                        dev->txpos += txlen;
                        if (dev->txpos == dev->txlen) {
                                dev->txstate = STATE_TRAILER;
                                dev->txpos = 0;
                        }
                }

                break;

        case STATE_DATA:
                len = next_chunk_len(dev);
                if (len) {
                        c = dev->txbuf[dev->txpos];
                        if (len == 1 && needs_escape(c)) {
                                buf[0] = BYTE_ESC;
                                buf[1] = c & ~0x20;
                                dev->txfcs = crc_ccitt_byte(dev->txfcs, c);
                                txlen = write_chunk(dev, buf, 2);
                                if (txlen == 2)
                                        dev->txpos++;
                                else if (txlen == 1)
                                        dev->txstate = STATE_ESCAPE;
                                else
                                        dev->txstate = STATE_ERR;
                        } else {
                                txlen = write_chunk(dev,
                                                    dev->txbuf + dev->txpos,
                                                    len);
                                if (txlen <= 0) {
                                        dev->txstate = STATE_ERR;
                                } else {
                                        dev->txfcs = crc_ccitt(dev->txfcs,
                                                               dev->txbuf +
                                                               dev->txpos,
                                                               txlen);
                                        dev->txpos += txlen;
                                }
                        }
                        if (dev->txstate == STATE_DATA &&
                            dev->txpos == dev->txlen) {
                                dev->txstate = STATE_TRAILER;
                                dev->txpos = 0;
                        }
                        break;
                }
                dev->txstate = STATE_TRAILER;
                dev->txpos = 0;
                fallthrough;

        case STATE_TRAILER:
                buf[0] = dev->txfcs >> 8;
                buf[1] = dev->txfcs & 0xff;
                buf[2] = BYTE_FRAME;
                txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
                if (txlen <= 0) {
                        dev->txstate = STATE_ERR;
                } else {
                        dev->txpos += txlen;
                        if (dev->txpos == 3) {
                                dev->txstate = STATE_DONE;
                                dev->txpos = 0;
                        }
                }
                break;
        default:
                netdev_err_once(dev->netdev, "invalid tx state %d\n",
                                dev->txstate);
        }

        if (dev->txstate == STATE_DONE) {
                dev->netdev->stats.tx_packets++;
                dev->netdev->stats.tx_bytes += dev->txlen;
                dev->txlen = 0;
                dev->txpos = 0;
                clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
                dev->txstate = STATE_IDLE;
                spin_unlock_irqrestore(&dev->lock, flags);

                netif_wake_queue(dev->netdev);
        } else {
                spin_unlock_irqrestore(&dev->lock, flags);
        }
}

static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev)
{
        struct mctp_serial *dev = netdev_priv(ndev);
        unsigned long flags;

        WARN_ON(dev->txstate != STATE_IDLE);

        if (skb->len > MCTP_SERIAL_MTU) {
                dev->netdev->stats.tx_dropped++;
                goto out;
        }

        spin_lock_irqsave(&dev->lock, flags);
        netif_stop_queue(dev->netdev);
        skb_copy_bits(skb, 0, dev->txbuf, skb->len);
        dev->txpos = 0;
        dev->txlen = skb->len;
        dev->txstate = STATE_START;
        spin_unlock_irqrestore(&dev->lock, flags);

        set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
        schedule_work(&dev->tx_work);

out:
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static void mctp_serial_tty_write_wakeup(struct tty_struct *tty)
{
        struct mctp_serial *dev = tty->disc_data;

        schedule_work(&dev->tx_work);
}
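
/*
 * Receive path overview: the tty layer delivers raw bytes through
 * mctp_serial_tty_receive_buf(), which feeds them one at a time into the rx
 * state machine (mctp_serial_push() and its helpers below). Once a complete
 * frame has been accumulated in rxbuf, mctp_serial_rx() compares the received
 * FCS with the locally computed one and, on a match, hands the packet to the
 * MCTP stack as an skb.
 */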

static void mctp_serial_rx(struct mctp_serial *dev)
{
        struct mctp_skb_cb *cb;
        struct sk_buff *skb;

        if (dev->rxfcs != dev->rxfcs_rcvd) {
                dev->netdev->stats.rx_dropped++;
                dev->netdev->stats.rx_crc_errors++;
                return;
        }

        skb = netdev_alloc_skb(dev->netdev, dev->rxlen);
        if (!skb) {
                dev->netdev->stats.rx_dropped++;
                return;
        }

        skb->protocol = htons(ETH_P_MCTP);
        skb_put_data(skb, dev->rxbuf, dev->rxlen);
        skb_reset_network_header(skb);

        cb = __mctp_cb(skb);
        cb->halen = 0;

        netif_rx(skb);
        dev->netdev->stats.rx_packets++;
        dev->netdev->stats.rx_bytes += dev->rxlen;
}

static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
{
        switch (dev->rxpos) {
        case 0:
                if (c == BYTE_FRAME)
                        dev->rxpos++;
                else
                        dev->rxstate = STATE_ERR;
                break;
        case 1:
                if (c == MCTP_SERIAL_VERSION) {
                        dev->rxpos++;
                        dev->rxfcs = crc_ccitt_byte(FCS_INIT, c);
                } else {
                        dev->rxstate = STATE_ERR;
                }
                break;
        case 2:
                if (c > MCTP_SERIAL_FRAME_MTU) {
                        dev->rxstate = STATE_ERR;
                } else {
                        dev->rxlen = c;
                        dev->rxpos = 0;
                        dev->rxstate = STATE_DATA;
                        dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
                }
                break;
        }
}

static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
{
        switch (dev->rxpos) {
        case 0:
                dev->rxfcs_rcvd = c << 8;
                dev->rxpos++;
                break;
        case 1:
                dev->rxfcs_rcvd |= c;
                dev->rxpos++;
                break;
        case 2:
                if (c != BYTE_FRAME) {
                        dev->rxstate = STATE_ERR;
                } else {
                        mctp_serial_rx(dev);
                        dev->rxlen = 0;
                        dev->rxpos = 0;
                        dev->rxstate = STATE_IDLE;
                }
                break;
        }
}

static void mctp_serial_push(struct mctp_serial *dev, u8 c)
{
        switch (dev->rxstate) {
        case STATE_IDLE:
                dev->rxstate = STATE_HEADER;
                fallthrough;
        case STATE_HEADER:
                mctp_serial_push_header(dev, c);
                break;

        case STATE_ESCAPE:
                c |= 0x20;
                fallthrough;
        case STATE_DATA:
                if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) {
                        dev->rxstate = STATE_ESCAPE;
                } else {
                        dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
                        dev->rxbuf[dev->rxpos] = c;
                        dev->rxpos++;
                        dev->rxstate = STATE_DATA;
                        if (dev->rxpos == dev->rxlen) {
                                dev->rxpos = 0;
                                dev->rxstate = STATE_TRAILER;
                        }
                }
                break;

        case STATE_TRAILER:
                mctp_serial_push_trailer(dev, c);
                break;

        case STATE_ERR:
                if (c == BYTE_FRAME)
                        dev->rxstate = STATE_IDLE;
                break;

        default:
                netdev_err_once(dev->netdev, "invalid rx state %d\n",
                                dev->rxstate);
        }
}

static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
                                        const u8 *f, size_t len)
{
        struct mctp_serial *dev = tty->disc_data;
        size_t i;

        if (!netif_running(dev->netdev))
                return;

        /* we don't (currently) use the flag bytes, just data. */
        for (i = 0; i < len; i++)
                mctp_serial_push(dev, c[i]);
}

static void mctp_serial_uninit(struct net_device *ndev)
{
        struct mctp_serial *dev = netdev_priv(ndev);

        cancel_work_sync(&dev->tx_work);
}

static const struct net_device_ops mctp_serial_netdev_ops = {
        .ndo_start_xmit = mctp_serial_tx,
        .ndo_uninit = mctp_serial_uninit,
};

static void mctp_serial_setup(struct net_device *ndev)
{
        ndev->type = ARPHRD_MCTP;

        /* we limit at the fixed MTU, which is also the MCTP-standard
         * baseline MTU, so is also our minimum
         */
        ndev->mtu = MCTP_SERIAL_MTU;
        ndev->max_mtu = MCTP_SERIAL_MTU;
        ndev->min_mtu = MCTP_SERIAL_MTU;

        ndev->hard_header_len = 0;
        ndev->addr_len = 0;
        ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
        ndev->flags = IFF_NOARP;
        ndev->netdev_ops = &mctp_serial_netdev_ops;
        ndev->needs_free_netdev = true;
}

static int mctp_serial_open(struct tty_struct *tty)
{
        struct mctp_serial *dev;
        struct net_device *ndev;
        char name[32];
        int idx, rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!tty->ops->write)
                return -EOPNOTSUPP;

        idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL);
        if (idx < 0)
                return idx;

        snprintf(name, sizeof(name), "mctpserial%d", idx);
        ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM,
                            mctp_serial_setup);
        if (!ndev) {
                rc = -ENOMEM;
                goto free_ida;
        }

        dev = netdev_priv(ndev);
        dev->idx = idx;
        dev->tty = tty;
        dev->netdev = ndev;
        dev->txstate = STATE_IDLE;
        dev->rxstate = STATE_IDLE;
        spin_lock_init(&dev->lock);
        INIT_WORK(&dev->tx_work, mctp_serial_tx_work);

        rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_SERIAL);
        if (rc)
                goto free_netdev;

        tty->receive_room = 64 * 1024;
        tty->disc_data = dev;

        return 0;

free_netdev:
        free_netdev(ndev);

free_ida:
        ida_free(&mctp_serial_ida, idx);
        return rc;
}

static void mctp_serial_close(struct tty_struct *tty)
{
        struct mctp_serial *dev = tty->disc_data;
        int idx = dev->idx;

        mctp_unregister_netdev(dev->netdev);
        ida_free(&mctp_serial_ida, idx);
}

static struct tty_ldisc_ops mctp_ldisc = {
        .owner = THIS_MODULE,
        .num = N_MCTP,
        .name = "mctp",
        .open = mctp_serial_open,
        .close = mctp_serial_close,
        .receive_buf = mctp_serial_tty_receive_buf,
        .write_wakeup = mctp_serial_tty_write_wakeup,
};

static int __init mctp_serial_init(void)
{
        return tty_register_ldisc(&mctp_ldisc);
}

static void __exit mctp_serial_exit(void)
{
        tty_unregister_ldisc(&mctp_ldisc);
}

module_init(mctp_serial_init);
module_exit(mctp_serial_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_DESCRIPTION("MCTP Serial transport");
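
/*
 * The KUnit tests below are only compiled when CONFIG_MCTP_SERIAL_TEST is
 * enabled. Assuming a KUnit-capable config with CONFIG_MCTP, CONFIG_MCTP_SERIAL
 * and CONFIG_MCTP_SERIAL_TEST set, they can typically be run with the standard
 * tooling, e.g. tools/testing/kunit/kunit.py.
 */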

#if IS_ENABLED(CONFIG_MCTP_SERIAL_TEST)
#include <kunit/test.h>

#define MAX_CHUNKS 6
struct test_chunk_tx {
        u8 input_len;
        u8 input[MCTP_SERIAL_MTU];
        u8 chunks[MAX_CHUNKS];
};

static void test_next_chunk_len(struct kunit *test)
{
        struct mctp_serial devx;
        struct mctp_serial *dev = &devx;
        int next;

        const struct test_chunk_tx *params = test->param_value;

        memset(dev, 0x0, sizeof(*dev));
        memcpy(dev->txbuf, params->input, params->input_len);
        dev->txlen = params->input_len;

        for (size_t i = 0; i < MAX_CHUNKS; i++) {
                next = next_chunk_len(dev);
                dev->txpos += next;
                KUNIT_EXPECT_EQ(test, next, params->chunks[i]);

                if (next == 0) {
                        KUNIT_EXPECT_EQ(test, dev->txpos, dev->txlen);
                        return;
                }
        }

        KUNIT_FAIL_AND_ABORT(test, "Ran out of chunks");
}

static struct test_chunk_tx chunk_tx_tests[] = {
        {
                .input_len = 5,
                .input = { 0x00, 0x11, 0x22, 0x7e, 0x80 },
                .chunks = { 3, 1, 1, 0},
        },
        {
                .input_len = 5,
                .input = { 0x00, 0x11, 0x22, 0x7e, 0x7d },
                .chunks = { 3, 1, 1, 0},
        },
        {
                .input_len = 3,
                .input = { 0x7e, 0x11, 0x22, },
                .chunks = { 1, 2, 0},
        },
        {
                .input_len = 3,
                .input = { 0x7e, 0x7e, 0x7d, },
                .chunks = { 1, 1, 1, 0},
        },
        {
                .input_len = 4,
                .input = { 0x7e, 0x7e, 0x00, 0x7d, },
                .chunks = { 1, 1, 1, 1, 0},
        },
        {
                .input_len = 6,
                .input = { 0x7e, 0x7e, 0x00, 0x7d, 0x10, 0x10},
                .chunks = { 1, 1, 1, 1, 2, 0},
        },
        {
                .input_len = 1,
                .input = { 0x7e },
                .chunks = { 1, 0 },
        },
        {
                .input_len = 1,
                .input = { 0x80 },
                .chunks = { 1, 0 },
        },
        {
                .input_len = 3,
                .input = { 0x80, 0x80, 0x00 },
                .chunks = { 3, 0 },
        },
        {
                .input_len = 7,
                .input = { 0x01, 0x00, 0x08, 0xc8, 0x00, 0x80, 0x02 },
                .chunks = { 7, 0 },
        },
        {
                .input_len = 7,
                .input = { 0x01, 0x00, 0x08, 0xc8, 0x7e, 0x80, 0x02 },
                .chunks = { 4, 1, 2, 0 },
        },
};

KUNIT_ARRAY_PARAM(chunk_tx, chunk_tx_tests, NULL);

static struct kunit_case mctp_serial_test_cases[] = {
        KUNIT_CASE_PARAM(test_next_chunk_len, chunk_tx_gen_params),
        {}
};

static struct kunit_suite mctp_serial_test_suite = {
        .name = "mctp_serial",
        .test_cases = mctp_serial_test_cases,
};

kunit_test_suite(mctp_serial_test_suite);

#endif /* CONFIG_MCTP_SERIAL_TEST */