// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dln2.h>
#include <linux/rculist.h>

struct dln2_header {
	__le16 size;
	__le16 id;
	__le16 echo;
	__le16 handle;
};

struct dln2_response {
	struct dln2_header hdr;
	__le16 result;
};

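/*
 * Wire format note: every message exchanged with the adapter starts with a
 * struct dln2_header (size, id, echo and handle, all little-endian) followed
 * by the module-specific payload. Responses additionally carry a 16-bit
 * result code right after the header, so response payload data starts at
 * rsp + 1 (see _dln2_transfer() below).
 */
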
#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512

enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLE_ADC,
	DLN2_HANDLES
};

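/*
 * Apart from DLN2_HANDLE_EVENT, each handle identifies one module of the
 * adapter: it indexes dln2_dev.mod_rx_slots below and matches the handle
 * carried in the dln2_platform_data handed to the corresponding MFD cell.
 */
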
/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. While sending a request the transfer routine will look for a free
 * receive context and use it to wait for a response and to receive the URB and
 * thus the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};

/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
	spinlock_t lock;
};

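/*
 * For example, a response arriving with hdr->handle == DLN2_HANDLE_I2C and
 * hdr->echo == 3 is delivered to dln2->mod_rx_slots[DLN2_HANDLE_I2C].slots[3],
 * waking up the transfer that allocated RX slot 3 for that request.
 */
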
struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;
	u8 ep_out;

	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};

struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};

int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i, *entry;
	unsigned long flags;
	int ret = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	/* only one callback may be registered per event id */
	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (ret)
		kfree(entry);

	return ret;
}
EXPORT_SYMBOL(dln2_register_event_cb);

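/*
 * Sketch of how a cell driver might use the event API (the event id and
 * callback below are hypothetical, not part of this driver):
 *
 *	ret = dln2_register_event_cb(pdev, DLN2_SOME_EVENT_ID, my_event_cb);
 *	if (ret)
 *		return ret;
 *	...
 *	dln2_unregister_event_cb(pdev, DLN2_SOME_EVENT_ID);
 *
 * The callback is invoked from the URB completion path with event_cb_lock
 * held (see dln2_rx()), so it must not sleep.
 */
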
void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			list_del_rcu(&i->list);
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(i);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);

/*
 * Returns true if a valid transfer slot is found. In this case the URB must
 * not be resubmitted immediately in dln2_rx as we need the data when
 * dln2_transfer is woken up. It will be resubmitted there.
 */
static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
				   u16 handle, u16 rx_slot)
{
	struct device *dev = &dln2->interface->dev;
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	struct dln2_rx_context *rxc;
	unsigned long flags;
	bool valid_slot = false;

	if (rx_slot >= DLN2_MAX_RX_SLOTS)
		goto out;

	rxc = &rxs->slots[rx_slot];

	spin_lock_irqsave(&rxs->lock, flags);
	if (rxc->in_use && !rxc->urb) {
		rxc->urb = urb;
		complete(&rxc->done);
		valid_slot = true;
	}
	spin_unlock_irqrestore(&rxs->lock, flags);

out:
	if (!valid_slot)
		dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);

	return valid_slot;
}

static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
				     void *data, int len)
{
	struct dln2_event_cb_entry *i;

	rcu_read_lock();

	list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			i->callback(i->pdev, echo, data, len);
			break;
		}
	}

	rcu_read_unlock();
}

static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		unsigned long flags;

		spin_lock_irqsave(&dln2->event_cb_lock, flags);
		dln2_run_event_callbacks(dln2, id, echo, data, len);
		spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}

static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	int len;
	void *buf;
	struct dln2_header *hdr;

	len = *obuf_len + sizeof(*hdr);
	buf = kmalloc(len, gfp);
	if (!buf)
		return NULL;

	hdr = (struct dln2_header *)buf;
	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(len);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	memcpy(buf + sizeof(*hdr), obuf, *obuf_len);

	*obuf_len = len;

	return buf;
}

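/*
 * The buffer built above is laid out as:
 *
 *	| size | id | echo | handle | payload (*obuf_len bytes) |
 *
 * with *obuf_len updated to the total length so that dln2_send_wait() can
 * pass it straight to usb_bulk_msg().
 */
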
static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	int ret = 0;
	int len = obuf_len;
	void *buf;
	int actual;

	buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   buf, len, &actual, DLN2_USB_TIMEOUT);

	kfree(buf);

	return ret;
}

static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
{
	struct dln2_mod_rx_slots *rxs;
	unsigned long flags;

	if (dln2->disconnect) {
		*slot = -ENODEV;
		return true;
	}

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);

	if (*slot < DLN2_MAX_RX_SLOTS) {
		struct dln2_rx_context *rxc = &rxs->slots[*slot];

		set_bit(*slot, rxs->bmap);
		rxc->in_use = true;
	}

	spin_unlock_irqrestore(&rxs->lock, flags);

	return *slot < DLN2_MAX_RX_SLOTS;
}

static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int ret;
	int slot;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));
	if (ret < 0)
		return ret;

	return slot;
}

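/*
 * The slot number returned here doubles as the echo value of the request:
 * _dln2_transfer() sends it in the header and dln2_rx() uses the echoed
 * value to locate this slot again when the response arrives.
 */
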
static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}

static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}

int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_platform_data *dln2_pdata;
	struct dln2_dev *dln2;
	u16 handle;

	dln2 = dev_get_drvdata(pdev->dev.parent);
	dln2_pdata = dev_get_platdata(&pdev->dev);
	handle = dln2_pdata->handle;

	return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
			      ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);

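/*
 * Sketch of a call from a cell driver (the command and buffer types below
 * are hypothetical; the real users are the dln2-* cell drivers registered
 * further down):
 *
 *	struct tx_args tx = { ... };
 *	struct rx_args rx;
 *	unsigned rx_len = sizeof(rx);
 *
 *	ret = dln2_transfer(pdev, DLN2_SOME_CMD, &tx, sizeof(tx),
 *			    &rx, &rx_len);
 *
 * On success rx_len is clamped to the number of payload bytes actually
 * returned by the device.
 */
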
static int dln2_check_hw(struct dln2_dev *dln2)
{
	int ret;
	__le32 hw_type;
	int len = sizeof(hw_type);

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
			     NULL, 0, &hw_type, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(hw_type))
		return -EREMOTEIO;

	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
			le32_to_cpu(hw_type));
		return -ENODEV;
	}

	return 0;
}

static int dln2_print_serialno(struct dln2_dev *dln2)
{
	int ret;
	__le32 serial_no;
	int len = sizeof(serial_no);
	struct device *dev = &dln2->interface->dev;

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
			     &serial_no, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(serial_no))
		return -EREMOTEIO;

	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));

	return 0;
}

static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret;

	ret = dln2_check_hw(dln2);
	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}

static void dln2_free_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		usb_free_urb(dln2->rx_urb[i]);
		kfree(dln2->rx_buf[i]);
	}
}

static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++)
		usb_kill_urb(dln2->rx_urb[i]);
}

static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}

static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
			      struct usb_host_interface *hostif)
{
	int i;
	const int rx_max_size = DLN2_RX_BUF_SIZE;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
		if (!dln2->rx_buf[i])
			return -ENOMEM;

		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dln2->rx_urb[i])
			return -ENOMEM;

		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
	}

	return 0;
}

static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int ret;
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

enum {
	DLN2_ACPI_MATCH_GPIO	= 0,
	DLN2_ACPI_MATCH_I2C	= 1,
	DLN2_ACPI_MATCH_SPI	= 2,
	DLN2_ACPI_MATCH_ADC	= 3,
};

static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
	.adr = DLN2_ACPI_MATCH_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
	.adr = DLN2_ACPI_MATCH_I2C,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
	.adr = DLN2_ACPI_MATCH_SPI,
};

/* Only one ADC port supported */
static struct dln2_platform_data dln2_pdata_adc = {
	.handle = DLN2_HANDLE_ADC,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_adc = {
	.adr = DLN2_ACPI_MATCH_ADC,
};

static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.acpi_match = &dln2_acpi_match_gpio,
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.acpi_match = &dln2_acpi_match_i2c,
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.acpi_match = &dln2_acpi_match_spi,
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-adc",
		.acpi_match = &dln2_acpi_match_adc,
		.platform_data = &dln2_pdata_adc,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};

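/*
 * These cells are instantiated as child platform devices by
 * mfd_add_hotplug_devices() in dln2_probe(). Each child reaches the shared
 * struct dln2_dev via dev_get_drvdata(pdev->dev.parent) and learns its
 * module handle from the dln2_platform_data attached here.
 */
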
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}

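/*
 * Once dln2_stop() returns, no new transfers can start (disconnect is set),
 * all response waiters have been completed, in-flight transfers have drained
 * and the RX URBs are killed, so its callers (dln2_disconnect() and
 * dln2_suspend()) can safely tear down or quiesce the device state.
 */
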
static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}

static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct usb_endpoint_descriptor *epin;
	struct usb_endpoint_descriptor *epout;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	if (hostif->desc.bInterfaceNumber != 0)
		return -ENODEV;

	ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
	if (ret)
		return ret;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = epout->bEndpointAddress;
	dln2->ep_in = epin->bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}

static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}

static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}

static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);

static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");