// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale QMC HDLC Device Driver
 *
 * Copyright 2023 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */
#include <linux/array_size.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/framer/framer.h>
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <soc/fsl/qe/qmc.h>
struct qmc_hdlc_desc {
	struct net_device *netdev;
	struct sk_buff *skb; /* NULL if the descriptor is not in use */
	dma_addr_t dma_addr;
	size_t dma_size;
};

struct qmc_hdlc {
	struct device *dev;
	struct qmc_chan *qmc_chan;
	struct net_device *netdev;
	struct framer *framer;
	struct mutex carrier_lock; /* Protect carrier detection */
	struct notifier_block nb;
	bool is_crc32;
	spinlock_t tx_lock; /* Protect tx descriptors */
	struct qmc_hdlc_desc tx_descs[8];
	unsigned int tx_out;
	struct qmc_hdlc_desc rx_descs[4];
	u32 slot_map;
};
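/*
 * Descriptor bookkeeping: tx_descs[] is used as a ring indexed by tx_out,
 * and a descriptor whose skb pointer is non-NULL is in flight. rx_descs[]
 * entries are submitted to the QMC channel and re-queued from the receive
 * completion handler, so no index is needed on the receive side.
 */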
static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
{
	return dev_to_hdlc(netdev)->priv;
}
static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	guard(mutex)(&qmc_hdlc->carrier_lock);

	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		return ret;
	}
	if (framer_status.link_is_on)
		netif_carrier_on(qmc_hdlc->netdev);
	else
		netif_carrier_off(qmc_hdlc->netdev);

	return 0;
}
static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
				    void *data)
{
	struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
	int ret;

	if (action != FRAMER_EVENT_STATUS)
		return NOTIFY_DONE;

	ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
	return ret ? NOTIFY_DONE : NOTIFY_OK;
}
static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_power_on(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
		return ret;
	}

	/* Be sure that get_status is supported */
	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		goto framer_power_off;
	}

	qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
	ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
		goto framer_power_off;
	}

	return 0;

framer_power_off:
	framer_power_off(qmc_hdlc->framer);
	return ret;
}
static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
	framer_power_off(qmc_hdlc->framer);
}
static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
				     const te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (if_iface) {
	case IF_IFACE_E1:
		config.iface = FRAMER_IFACE_E1;
		break;
	case IF_IFACE_T1:
		config.iface = FRAMER_IFACE_T1;
		break;
	default:
		return -EINVAL;
	}

	switch (te1->clock_type) {
	case CLOCK_DEFAULT:
		/* Keep current value */
		break;
	case CLOCK_EXT:
		config.clock_type = FRAMER_CLOCK_EXT;
		break;
	case CLOCK_INT:
		config.clock_type = FRAMER_CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	config.line_clock_rate = te1->clock_rate;

	return framer_set_config(qmc_hdlc->framer, &config);
}
static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer) {
		*if_iface = IF_IFACE_E1;
		return 0;
	}

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (config.iface) {
	case FRAMER_IFACE_E1:
		*if_iface = IF_IFACE_E1;
		break;
	case FRAMER_IFACE_T1:
		*if_iface = IF_IFACE_T1;
		break;
	}

	if (!te1)
		return 0; /* Only iface type requested */

	switch (config.clock_type) {
	case FRAMER_CLOCK_EXT:
		te1->clock_type = CLOCK_EXT;
		break;
	case FRAMER_CLOCK_INT:
		te1->clock_type = CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	te1->clock_rate = config.line_clock_rate;

	return 0;
}
static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
{
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_init(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
		return ret;
	}

	return 0;
}
static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_exit(qmc_hdlc->framer);
}
static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);

#define QMC_HDLC_RX_ERROR_FLAGS				\
	(QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |	\
	 QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)
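/*
 * Receive completion runs in the QMC channel completion context: it unmaps
 * the DMA buffer, accounts errors reported through the QMC flags, strips the
 * trailing CRC (2 or 4 bytes depending on the negotiated parity), hands the
 * frame to the HDLC stack, then re-queues the same descriptor so the channel
 * never runs out of receive buffers.
 */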
static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int flags)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	size_t crc_size;
	int ret;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);

	if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
		netdev->stats.rx_errors++;
		if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
			netdev->stats.rx_over_errors++;
		if (flags & QMC_RX_FLAG_HDLC_UNA) /* bits received not multiple of 8 */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
			netdev->stats.rx_crc_errors++;
		kfree_skb(desc->skb);
		goto re_queue;
	}

	/* Discard the CRC */
	crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
	if (length < crc_size) {
		netdev->stats.rx_length_errors++;
		kfree_skb(desc->skb);
		goto re_queue;
	}
	length -= crc_size;

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += length;

	skb_put(desc->skb, length);
	desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
	netif_rx(desc->skb);

re_queue:
	/* Re-queue a transfer using the same descriptor */
	ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
	if (ret) {
		dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
		netdev->stats.rx_errors++;
	}
}
static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
{
	int ret;

	desc->skb = dev_alloc_skb(size);
	if (!desc->skb)
		return -ENOMEM;

	desc->dma_size = size;
	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_FROM_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret)
		goto free_skb;

	ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				   qmc_hcld_recv_complete, desc);
	if (ret)
		goto dma_unmap;

	return 0;

dma_unmap:
	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
free_skb:
	kfree_skb(desc->skb);
	desc->skb = NULL;
	return ret;
}
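/*
 * Transmit completion: the descriptor is released (skb set back to NULL)
 * under tx_lock so that it is seen consistently against qmc_hdlc_xmit(),
 * and the netdev queue is woken if a previous xmit had stopped it because
 * the ring was full.
 */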
static void qmc_hdlc_xmit_complete(void *context)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	struct sk_buff *skb;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		skb = desc->skb;
		desc->skb = NULL; /* Release the descriptor */
		if (netif_queue_stopped(netdev))
			netif_wake_queue(netdev);
	}

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);
}
static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
{
	int ret;

	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to map skb\n");
		return ret;
	}

	ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				    qmc_hdlc_xmit_complete, desc);
	if (ret) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
		return ret;
	}

	return 0;
}
static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int err;

	guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);

	desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
	if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
		/* Should never happen.
		 * Previous xmit should have already stopped the queue.
		 */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	desc->netdev = netdev;
	desc->dma_size = skb->len;
	desc->skb = skb;
	err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
	if (err) {
		desc->skb = NULL; /* Release the descriptor */
		if (err == -EBUSY) {
			netif_stop_queue(netdev);
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);

	if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
		netif_stop_queue(netdev);

	return NETDEV_TX_OK;
}
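/*
 * Timeslot translation: the WAN ioctl interface exposes a dense 32-bit
 * slot_map, while the QMC channel works with 64-bit masks restricted to the
 * timeslots available on the TDM bus. bitmap_scatter() spreads the dense map
 * over the available positions and bitmap_gather() performs the reverse
 * mapping. Illustrative example (values not taken from the driver): with
 * ts_mask_avail = 0xff00 and slot_map = 0b0111, scatter yields
 * ts_mask = 0x0700, i.e. the first three available timeslots.
 */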
static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
				   u32 slot_map, struct qmc_chan_ts_info *ts_info)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);

	/* Tx and Rx available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(map, slot_map);
	bitmap_scatter(ts_mask, map, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
			map, ts_mask_avail, ts_mask);
		return -EINVAL;
	}

	bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
	ts_info->rx_ts_mask = ts_info->tx_ts_mask;

	return 0;
}
static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
				  const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);
	u32 slot_array[2];

	/* Tx and Rx masks and available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}
	if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
		dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask, ts_info->tx_ts_mask);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
	bitmap_gather(map, ts_mask, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	bitmap_to_arr32(slot_array, map, 64);
	if (slot_array[1]) {
		dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	*slot_map = slot_array[0];

	return 0;
}
static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
{
	struct qmc_chan_ts_info ts_info;
	int ret;

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
		return ret;
	}
	ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
	if (ret)
		return ret;

	ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
		return ret;
	}

	qmc_hdlc->slot_map = te1->slot_map;

	ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret);
		return ret;
	}

	return 0;
}
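/*
 * SIOCWANDEV handling: IF_GET_IFACE reports the current interface (and the
 * full te1_settings when the caller's buffer is large enough), IF_IFACE_E1
 * and IF_IFACE_T1 reconfigure the interface, and anything else falls through
 * to the generic hdlc_ioctl().
 */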
static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	te1_settings te1;
	int ret;

	switch (ifs->type) {
	case IF_GET_IFACE:
		if (ifs->size < sizeof(te1)) {
			/* Retrieve type only */
			ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
			if (ret)
				return ret;

			if (!ifs->size)
				return 0; /* only type requested */

			ifs->size = sizeof(te1); /* data size wanted */
			return -ENOBUFS;
		}

		memset(&te1, 0, sizeof(te1));

		/* Retrieve info from framer */
		ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
		if (ret)
			return ret;

		/* Update slot_map */
		te1.slot_map = qmc_hdlc->slot_map;

		if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
			return -EFAULT;
		return 0;

	case IF_IFACE_E1:
	case IF_IFACE_T1:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (netdev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
			return -EFAULT;

		return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);

	default:
		return hdlc_ioctl(netdev, ifs);
	}
}
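/*
 * Open sequence: power the framer, open the generic HDLC layer, program the
 * QMC channel parameters, prime the receive ring, then start the channel.
 * The error path unwinds these steps in reverse order.
 */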
static int qmc_hdlc_open(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_chan_param chan_param;
	struct qmc_hdlc_desc *desc;
	int ret;
	int i;

	ret = qmc_hdlc_framer_start(qmc_hdlc);
	if (ret)
		return ret;

	ret = hdlc_open(netdev);
	if (ret)
		goto framer_stop;

	/* Update carrier */
	qmc_hdlc_framer_set_carrier(qmc_hdlc);

	chan_param.mode = QMC_HDLC;
	/* HDLC_MAX_MRU + 4 for the CRC
	 * HDLC_MAX_MRU + 4 + 8 for the CRC and some extra space needed by the QMC
	 */
	chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
	chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
	chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
	ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
		goto hdlc_close;
	}

	/* Queue as many recv descriptors as possible */
	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];

		desc->netdev = netdev;
		ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
		if (ret == -EBUSY && i != 0)
			break; /* We use all the QMC chan capability */
		if (ret)
			goto free_desc;
	}

	ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	if (ret) {
		dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
		goto free_desc;
	}

	netif_start_queue(netdev);

	return 0;

free_desc:
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	while (i--) {
		desc = &qmc_hdlc->rx_descs[i];
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}
hdlc_close:
	hdlc_close(netdev);
framer_stop:
	qmc_hdlc_framer_stop(qmc_hdlc);
	return ret;
}
static int qmc_hdlc_close(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int i;

	qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);

	netif_stop_queue(netdev);

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
		desc = &qmc_hdlc->tx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_TO_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	hdlc_close(netdev);
	qmc_hdlc_framer_stop(qmc_hdlc);

	return 0;
}
static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
			   unsigned short parity)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch (parity) {
	case PARITY_CRC16_PR1_CCITT:
		qmc_hdlc->is_crc32 = false;
		break;
	case PARITY_CRC32_PR1_CCITT:
		qmc_hdlc->is_crc32 = true;
		break;
	default:
		dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
		return -EINVAL;
	}

	return 0;
}
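/*
 * Note: .ndo_start_xmit is the generic hdlc_start_xmit() from the HDLC
 * framework; it dispatches to the attached protocol's xmit or, by default,
 * to the hdlc->xmit hook, which probe() points at qmc_hdlc_xmit().
 */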
static const struct net_device_ops qmc_hdlc_netdev_ops = {
	.ndo_open       = qmc_hdlc_open,
	.ndo_stop       = qmc_hdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = qmc_hdlc_ioctl,
};
static int qmc_hdlc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qmc_chan_ts_info ts_info;
	struct qmc_hdlc *qmc_hdlc;
	struct qmc_chan_info info;
	hdlc_device *hdlc;
	int ret;

	qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
	if (!qmc_hdlc)
		return -ENOMEM;

	qmc_hdlc->dev = dev;
	spin_lock_init(&qmc_hdlc->tx_lock);
	mutex_init(&qmc_hdlc->carrier_lock);

	qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
	if (IS_ERR(qmc_hdlc->qmc_chan))
		return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
				     "get QMC channel failed\n");

	ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel info failed\n");

	if (info.mode != QMC_HDLC)
		return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
				     info.mode);

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");

	ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
	if (ret)
		return ret;

	qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
	if (IS_ERR(qmc_hdlc->framer))
		return PTR_ERR(qmc_hdlc->framer);

	ret = qmc_hdlc_framer_init(qmc_hdlc);
	if (ret)
		return ret;

	qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
	if (!qmc_hdlc->netdev) {
		ret = -ENOMEM;
		goto framer_exit;
	}

	hdlc = dev_to_hdlc(qmc_hdlc->netdev);
	hdlc->attach = qmc_hdlc_attach;
	hdlc->xmit = qmc_hdlc_xmit;
	SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
	qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
	qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
	ret = register_hdlc_device(qmc_hdlc->netdev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to register hdlc device\n");
		goto free_netdev;
	}

	platform_set_drvdata(pdev, qmc_hdlc);

	return 0;

free_netdev:
	free_netdev(qmc_hdlc->netdev);
framer_exit:
	qmc_hdlc_framer_exit(qmc_hdlc);
	return ret;
}
static void qmc_hdlc_remove(struct platform_device *pdev)
{
	struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);

	unregister_hdlc_device(qmc_hdlc->netdev);
	free_netdev(qmc_hdlc->netdev);
	qmc_hdlc_framer_exit(qmc_hdlc);
}
static const struct of_device_id qmc_hdlc_id_table[] = {
	{ .compatible = "fsl,qmc-hdlc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
static struct platform_driver qmc_hdlc_driver = {
	.driver = {
		.name = "fsl-qmc-hdlc",
		.of_match_table = qmc_hdlc_id_table,
	},
	.probe = qmc_hdlc_probe,
	.remove = qmc_hdlc_remove,
};
module_platform_driver(qmc_hdlc_driver);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("QMC HDLC driver");
MODULE_LICENSE("GPL");