// SPDX-License-Identifier: GPL-2.0
/*
 * usb.c - Hardware dependent module for USB
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/most.h>
#define USB_MTU			512
#define NO_ISOCHRONOUS_URB	0
#define AV_PACKETS_PER_XACT	2
#define BUF_CHAIN_SIZE		0xFFFF
#define MAX_NUM_ENDPOINTS	30
#define MAX_SUFFIX_LEN		10
#define MAX_STRING_LEN		80
#define MAX_BUF_SIZE		0xFFFF

#define USB_VENDOR_ID_SMSC	0x0424	/* VID: SMSC */
#define USB_DEV_ID_BRDG		0xC001	/* PID: USB Bridge */
#define USB_DEV_ID_OS81118	0xCF18	/* PID: USB OS81118 */
#define USB_DEV_ID_OS81119	0xCF19	/* PID: USB OS81119 */
#define USB_DEV_ID_OS81210	0xCF30	/* PID: USB OS81210 */
/* DRCI Addresses */
#define DRCI_REG_NI_STATE	0x0100
#define DRCI_REG_PACKET_BW	0x0101
#define DRCI_REG_NODE_ADDR	0x0102
#define DRCI_REG_NODE_POS	0x0103
#define DRCI_REG_MEP_FILTER	0x0140
#define DRCI_REG_HASH_TBL0	0x0141
#define DRCI_REG_HASH_TBL1	0x0142
#define DRCI_REG_HASH_TBL2	0x0143
#define DRCI_REG_HASH_TBL3	0x0144
#define DRCI_REG_HW_ADDR_HI	0x0145
#define DRCI_REG_HW_ADDR_MI	0x0146
#define DRCI_REG_HW_ADDR_LO	0x0147
#define DRCI_REG_BASE		0x1100
#define DRCI_COMMAND		0x02
#define DRCI_READ_REQ		0xA0
#define DRCI_WRITE_REQ		0xA1
/**
 * struct most_dci_obj - Direct Communication Interface
 * @dev: position in sysfs
 * @usb_device: pointer to the usb device
 * @reg_addr: register address for arbitrary DCI access
 */
struct most_dci_obj {
	struct device dev;
	struct usb_device *usb_device;
	u16 reg_addr;
};

#define to_dci_obj(p) container_of(p, struct most_dci_obj, dev)
struct most_dev;

struct clear_hold_work {
	struct work_struct ws;
	struct most_dev *mdev;
	unsigned int channel;
	int pipe;
};

#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
/**
 * struct most_dev - holds all usb interface specific stuff
 * @dev: device structure
 * @usb_device: pointer to usb device
 * @iface: hardware interface
 * @cap: channel capabilities
 * @conf: channel configuration
 * @dci: direct communication interface of hardware
 * @ep_address: endpoint address table
 * @description: device description
 * @suffix: suffix for channel name
 * @channel_lock: synchronize channel access
 * @padding_active: indicates channel uses padding
 * @is_channel_healthy: health status table of each channel
 * @clear_work: work objects used to clear halted endpoints
 * @busy_urbs: list of anchored items
 * @io_mutex: synchronize I/O with disconnect
 * @link_stat_timer: timer for link status reports
 * @poll_work_obj: work for polling link status
 * @on_netinfo: callback used to report link state and hardware address
 */
struct most_dev {
	struct device dev;
	struct usb_device *usb_device;
	struct most_interface iface;
	struct most_channel_capability *cap;
	struct most_channel_config *conf;
	struct most_dci_obj *dci;
	u8 *ep_address;
	char description[MAX_STRING_LEN];
	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
	spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
	bool padding_active[MAX_NUM_ENDPOINTS];
	bool is_channel_healthy[MAX_NUM_ENDPOINTS];
	struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
	struct usb_anchor *busy_urbs;
	struct mutex io_mutex;
	struct timer_list link_stat_timer;
	struct work_struct poll_work_obj;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
};

#define to_mdev(d) container_of(d, struct most_dev, iface)
#define to_mdev_from_dev(d) container_of(d, struct most_dev, dev)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
static void wq_clear_halt(struct work_struct *wq_obj);
static void wq_netinfo(struct work_struct *wq_obj);

/**
 * drci_rd_reg - read a DCI register
 * @dev: usb device
 * @reg: register address
 * @buf: buffer to store data
 *
 * This reads data from the INIC's direct register communication interface
 */
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
{
	int retval;
	__le16 *dma_buf;
	u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				 DRCI_READ_REQ, req_type,
				 0x0000,
				 reg, dma_buf, sizeof(*dma_buf), 5 * HZ);
	*buf = le16_to_cpu(*dma_buf);
	kfree(dma_buf);

	if (retval < 0)
		return retval;
	return 0;
}
/**
 * drci_wr_reg - write a DCI register
 * @dev: usb device
 * @reg: register address
 * @data: data to write
 *
 * This writes data to the INIC's direct register communication interface
 */
static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
{
	return usb_control_msg(dev,
			       usb_sndctrlpipe(dev, 0),
			       DRCI_WRITE_REQ,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       data,
			       reg,
			       NULL,
			       0,
			       5 * HZ);
}

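/*
 * start_sync_ep - start streaming on a synchronous endpoint by writing the
 * value 1 to that endpoint's DRCI command register
 * (DRCI_REG_BASE + DRCI_COMMAND + ep * 16).
 */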
static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
{
	return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
}
/**
 * get_stream_frame_size - calculate frame size of current configuration
 * @dev: device structure
 * @cfg: channel configuration
 */
static unsigned int get_stream_frame_size(struct device *dev,
					  struct most_channel_config *cfg)
{
	unsigned int frame_size;
	unsigned int sub_size = cfg->subbuffer_size;

	if (!sub_size) {
		dev_warn(dev, "Misconfig: Subbuffer size zero.\n");
		return 0;
	}
	switch (cfg->data_type) {
	case MOST_CH_ISOC:
		frame_size = AV_PACKETS_PER_XACT * sub_size;
		break;
	case MOST_CH_SYNC:
		if (cfg->packets_per_xact == 0) {
			dev_warn(dev, "Misconfig: Packets per XACT zero\n");
			frame_size = 0;
		} else if (cfg->packets_per_xact == 0xFF) {
			frame_size = (USB_MTU / sub_size) * sub_size;
		} else {
			frame_size = cfg->packets_per_xact * sub_size;
		}
		break;
	default:
		dev_warn(dev, "Query frame size of non-streaming channel\n");
		frame_size = 0;
		break;
	}
	return frame_size;
}
/**
 * hdm_poison_channel - mark buffers of this channel as invalid
 * @iface: pointer to the interface
 * @channel: channel ID
 *
 * This unlinks all URBs submitted to the HCD,
 * calls the associated completion function of the core and removes
 * them from the list.
 *
 * Returns 0 on success or error code otherwise.
 */
static int hdm_poison_channel(struct most_interface *iface, int channel)
{
	struct most_dev *mdev = to_mdev(iface);
	unsigned long flags;
	spinlock_t *lock; /* temp. lock */

	if (channel < 0 || channel >= iface->num_channels) {
		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
		return -ECHRNG;
	}

	lock = mdev->channel_lock + channel;
	spin_lock_irqsave(lock, flags);
	mdev->is_channel_healthy[channel] = false;
	spin_unlock_irqrestore(lock, flags);

	cancel_work_sync(&mdev->clear_work[channel].ws);

	mutex_lock(&mdev->io_mutex);
	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
	if (mdev->padding_active[channel])
		mdev->padding_active[channel] = false;

	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
		del_timer_sync(&mdev->link_stat_timer);
		cancel_work_sync(&mdev->poll_work_obj);
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;
}
/**
 * hdm_add_padding - add padding bytes
 * @mdev: most device
 * @channel: channel ID
 * @mbo: buffer object
 *
 * This inserts the INIC hardware specific padding bytes into a streaming
 * channel's buffer
 */
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
	struct most_channel_config *conf = &mdev->conf[channel];
	unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
	unsigned int j, num_frames;

	if (!frame_size)
		return -EINVAL;
	num_frames = mbo->buffer_length / frame_size;

	if (num_frames < 1) {
		dev_err(&mdev->usb_device->dev,
			"Missed minimal transfer unit.\n");
		return -EINVAL;
	}

	for (j = num_frames - 1; j > 0; j--)
		memmove(mbo->virt_address + j * USB_MTU,
			mbo->virt_address + j * frame_size,
			frame_size);
	mbo->buffer_length = num_frames * USB_MTU;
	return 0;
}
/**
 * hdm_remove_padding - remove padding bytes
 * @mdev: most device
 * @channel: channel ID
 * @mbo: buffer object
 *
 * This takes the INIC hardware specific padding bytes off a streaming
 * channel's buffer.
 */
static int hdm_remove_padding(struct most_dev *mdev, int channel,
			      struct mbo *mbo)
{
	struct most_channel_config *const conf = &mdev->conf[channel];
	unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
	unsigned int j, num_frames;

	if (!frame_size)
		return -EINVAL;
	num_frames = mbo->processed_length / USB_MTU;

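	/*
	 * Compact the buffer: copy each frame's payload from its
	 * USB_MTU-aligned slot down to the packed position j * frame_size,
	 * front to back, then shrink processed_length accordingly.
	 */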
	for (j = 1; j < num_frames; j++)
		memmove(mbo->virt_address + frame_size * j,
			mbo->virt_address + USB_MTU * j,
			frame_size);

	mbo->processed_length = frame_size * num_frames;
	return 0;
}
/**
 * hdm_write_completion - completion function for submitted Tx URBs
 * @urb: the URB that has been completed
 *
 * This checks the status of the completed URB. In case the URB has been
 * unlinked before, it is immediately freed. On any other error the MBO
 * transfer flag is set. On success it frees allocated resources and calls
 * the completion function.
 *
 * Context: interrupt!
 */
static void hdm_write_completion(struct urb *urb)
{
	struct mbo *mbo = urb->context;
	struct most_dev *mdev = to_mdev(mbo->ifp);
	unsigned int channel = mbo->hdm_channel_id;
	spinlock_t *lock = mdev->channel_lock + channel;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	mbo->processed_length = 0;
	mbo->status = MBO_E_INVAL;
	if (likely(mdev->is_channel_healthy[channel])) {
		switch (urb->status) {
		case 0:
		case -ESHUTDOWN:
			mbo->processed_length = urb->actual_length;
			mbo->status = MBO_SUCCESS;
			break;
		case -EPIPE:
			dev_warn(&mdev->usb_device->dev,
				 "Broken pipe on ep%02x\n",
				 mdev->ep_address[channel]);
			mdev->is_channel_healthy[channel] = false;
			mdev->clear_work[channel].pipe = urb->pipe;
			schedule_work(&mdev->clear_work[channel].ws);
			break;
		case -ENODEV:
		case -EPROTO:
			mbo->status = MBO_E_CLOSE;
			break;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	if (likely(mbo->complete))
		mbo->complete(mbo);
	usb_free_urb(urb);
}
/**
 * hdm_read_completion - completion function for submitted Rx URBs
 * @urb: the URB that has been completed
 *
 * This checks the status of the completed URB. In case the URB has been
 * unlinked before it is immediately freed. On any other error the MBO transfer
 * flag is set. On success it frees allocated resources, removes
 * padding bytes -if necessary- and calls the completion function.
 *
 * Context: interrupt!
 */
static void hdm_read_completion(struct urb *urb)
{
	struct mbo *mbo = urb->context;
	struct most_dev *mdev = to_mdev(mbo->ifp);
	unsigned int channel = mbo->hdm_channel_id;
	struct device *dev = &mdev->usb_device->dev;
	spinlock_t *lock = mdev->channel_lock + channel;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	mbo->processed_length = 0;
	mbo->status = MBO_E_INVAL;
	if (likely(mdev->is_channel_healthy[channel])) {
		switch (urb->status) {
		case 0:
		case -ESHUTDOWN:
			mbo->processed_length = urb->actual_length;
			mbo->status = MBO_SUCCESS;
			if (mdev->padding_active[channel] &&
			    hdm_remove_padding(mdev, channel, mbo)) {
				mbo->processed_length = 0;
				mbo->status = MBO_E_INVAL;
			}
			break;
		case -EPIPE:
			dev_warn(dev, "Broken pipe on ep%02x\n",
				 mdev->ep_address[channel]);
			mdev->is_channel_healthy[channel] = false;
			mdev->clear_work[channel].pipe = urb->pipe;
			schedule_work(&mdev->clear_work[channel].ws);
			break;
		case -ENODEV:
		case -EPROTO:
			mbo->status = MBO_E_CLOSE;
			break;
		case -EOVERFLOW:
			dev_warn(dev, "Babble on ep%02x\n",
				 mdev->ep_address[channel]);
			break;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	if (likely(mbo->complete))
		mbo->complete(mbo);
	usb_free_urb(urb);
}
/**
 * hdm_enqueue - receive a buffer to be used for data transfer
 * @iface: interface to enqueue to
 * @channel: ID of the channel
 * @mbo: pointer to the buffer object
 *
 * This allocates a new URB and fills it according to the channel
 * that is being used for transmission of data. Before the URB is
 * submitted it is stored in the private anchor list.
 *
 * Returns 0 on success. On any error the URB is freed and an error code
 * is returned.
 *
 * Context: Could in _some_ cases be interrupt!
 */
static int hdm_enqueue(struct most_interface *iface, int channel,
		       struct mbo *mbo)
{
	struct most_dev *mdev = to_mdev(iface);
	struct most_channel_config *conf;
	int retval = 0;
	struct urb *urb;
	unsigned long length;
	void *virt_address;

	if (!mbo)
		return -EINVAL;
	if (iface->num_channels <= channel || channel < 0)
		return -ECHRNG;

	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	conf = &mdev->conf[channel];

	mutex_lock(&mdev->io_mutex);
	if (!mdev->usb_device) {
		retval = -ENODEV;
		goto err_free_urb;
	}

	if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
	    hdm_add_padding(mdev, channel, mbo)) {
		retval = -EINVAL;
		goto err_free_urb;
	}

	urb->transfer_dma = mbo->bus_address;
	virt_address = mbo->virt_address;
	length = mbo->buffer_length;

	if (conf->direction & MOST_CH_TX) {
		usb_fill_bulk_urb(urb, mdev->usb_device,
				  usb_sndbulkpipe(mdev->usb_device,
						  mdev->ep_address[channel]),
				  virt_address,
				  length,
				  hdm_write_completion,
				  mbo);
		if (conf->data_type != MOST_CH_ISOC &&
		    conf->data_type != MOST_CH_SYNC)
			urb->transfer_flags |= URB_ZERO_PACKET;
	} else {
		usb_fill_bulk_urb(urb, mdev->usb_device,
				  usb_rcvbulkpipe(mdev->usb_device,
						  mdev->ep_address[channel]),
				  virt_address,
				  length + conf->extra_len,
				  hdm_read_completion,
				  mbo);
	}
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &mdev->busy_urbs[channel]);

	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval) {
		dev_err(&mdev->usb_device->dev,
			"URB submit failed with error %d.\n", retval);
		goto err_unanchor_urb;
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;

err_unanchor_urb:
	usb_unanchor_urb(urb);
err_free_urb:
	usb_free_urb(urb);
	mutex_unlock(&mdev->io_mutex);
	return retval;
}

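/*
 * hdm_dma_alloc - allocate a DMA-coherent buffer for an MBO via the USB
 * core; the bus address is stored in mbo->bus_address.
 */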
static void *hdm_dma_alloc(struct mbo *mbo, u32 size)
{
	struct most_dev *mdev = to_mdev(mbo->ifp);

	return usb_alloc_coherent(mdev->usb_device, size, GFP_KERNEL,
				  &mbo->bus_address);
}

static void hdm_dma_free(struct mbo *mbo, u32 size)
{
	struct most_dev *mdev = to_mdev(mbo->ifp);

	usb_free_coherent(mdev->usb_device, size, mbo->virt_address,
			  mbo->bus_address);
}
/**
 * hdm_configure_channel - receive channel configuration from core
 * @iface: interface
 * @channel: channel ID
 * @conf: structure that holds the configuration information
 *
 * The attached network interface controller (NIC) supports a padding mode
 * to avoid short packets on USB, hence increasing the performance due to a
 * lower interrupt load. This mode is default for synchronous data and can
 * be switched on for isochronous data. In case padding is active the
 * driver needs to know the frame size of the payload in order to calculate
 * the number of bytes it needs to pad when transmitting or to cut off when
 * receiving data.
 */
static int hdm_configure_channel(struct most_interface *iface, int channel,
				 struct most_channel_config *conf)
{
	unsigned int num_frames;
	unsigned int frame_size;
	struct most_dev *mdev = to_mdev(iface);
	struct device *dev = &mdev->usb_device->dev;

	if (!conf) {
		dev_err(dev, "Bad config pointer.\n");
		return -EINVAL;
	}
	if (channel < 0 || channel >= iface->num_channels) {
		dev_err(dev, "Channel ID out of range.\n");
		return -EINVAL;
	}

	mdev->is_channel_healthy[channel] = true;
	mdev->clear_work[channel].channel = channel;
	mdev->clear_work[channel].mdev = mdev;
	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);

	if (!conf->num_buffers || !conf->buffer_size) {
		dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
		return -EINVAL;
	}

	if (conf->data_type != MOST_CH_SYNC &&
	    !(conf->data_type == MOST_CH_ISOC &&
	      conf->packets_per_xact != 0xFF)) {
		mdev->padding_active[channel] = false;
		/*
		 * Since the NIC's padding mode is not going to be
		 * used, we can skip the frame size calculations and
		 * move directly on to exit.
		 */
		goto exit;
	}

	mdev->padding_active[channel] = true;

	frame_size = get_stream_frame_size(&mdev->dev, conf);
	if (frame_size == 0 || frame_size > USB_MTU) {
		dev_warn(dev, "Misconfig: frame size wrong\n");
		return -EINVAL;
	}

	num_frames = conf->buffer_size / frame_size;

	if (conf->buffer_size % frame_size) {
		u16 old_size = conf->buffer_size;

		conf->buffer_size = num_frames * frame_size;
		dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
			 mdev->suffix[channel], old_size, conf->buffer_size);
	}

	/* calculate extra length to comply w/ HW padding */
	conf->extra_len = num_frames * (USB_MTU - frame_size);

exit:
	mdev->conf[channel] = *conf;
	if (conf->data_type == MOST_CH_ASYNC) {
		u16 ep = mdev->ep_address[channel];

		if (start_sync_ep(mdev->usb_device, ep) < 0)
			dev_warn(dev, "sync for ep%02x failed", ep);
	}
	return 0;
}
/**
 * hdm_request_netinfo - request network information
 * @iface: pointer to interface
 * @channel: channel ID
 * @on_netinfo: callback used to deliver link state and hardware address
 *
 * This is used as trigger to set up the link status timer that
 * polls for the NI state of the INIC every 2 seconds.
 */
static void hdm_request_netinfo(struct most_interface *iface, int channel,
				void (*on_netinfo)(struct most_interface *,
						   unsigned char,
						   unsigned char *))
{
	struct most_dev *mdev = to_mdev(iface);

	mdev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	mdev->link_stat_timer.expires = jiffies + HZ;
	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
}
/**
 * link_stat_timer_handler - schedule work obtaining mac address and link status
 * @t: pointer to the timer embedded in the most_dev instance
 *
 * The handler runs in interrupt context. That's why we need to defer the
 * tasks to a work queue.
 */
static void link_stat_timer_handler(struct timer_list *t)
{
	struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);

	schedule_work(&mdev->poll_work_obj);
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
	add_timer(&mdev->link_stat_timer);
}
/**
 * wq_netinfo - work queue function to deliver latest networking information
 * @wq_obj: object that holds data for our deferred work to do
 *
 * This retrieves the network interface status of the USB INIC
 */
static void wq_netinfo(struct work_struct *wq_obj)
{
	struct most_dev *mdev = to_mdev_from_work(wq_obj);
	struct usb_device *usb_device = mdev->usb_device;
	struct device *dev = &usb_device->dev;
	u16 hi, mi, lo, link;
	u8 hw_addr[6];

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi)) {
		dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi)) {
		dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo)) {
		dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link)) {
		dev_err(dev, "Vendor request 'link status' failed\n");
		return;
	}

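	/* Assemble the hardware address from the three 16-bit DCI
	 * registers, most significant byte first.
	 */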
	hw_addr[0] = hi >> 8;
	hw_addr[1] = hi;
	hw_addr[2] = mi >> 8;
	hw_addr[3] = mi;
	hw_addr[4] = lo >> 8;
	hw_addr[5] = lo;

	if (mdev->on_netinfo)
		mdev->on_netinfo(&mdev->iface, link, hw_addr);
}
/**
 * wq_clear_halt - work queue function
 * @wq_obj: work_struct object to execute
 *
 * This sends a clear_halt to the given USB pipe.
 */
static void wq_clear_halt(struct work_struct *wq_obj)
{
	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
	struct most_dev *mdev = clear_work->mdev;
	unsigned int channel = clear_work->channel;
	int pipe = clear_work->pipe;
	int snd_pipe;
	int peer;

	mutex_lock(&mdev->io_mutex);
	most_stop_enqueue(&mdev->iface, channel);
	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
	if (usb_clear_halt(mdev->usb_device, pipe))
		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");

	/* If the functional stall condition has been set on an
	 * asynchronous rx channel, we need to clear the tx channel
	 * too, since the hardware runs its clean-up sequence on both
	 * channels, as they are physically one on the network.
	 *
	 * The USB interface that exposes the asynchronous channels
	 * always contains exactly two endpoints.
	 */
	if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
	    mdev->conf[channel].direction == MOST_CH_RX) {
		if (channel == 0)
			peer = 1;
		else
			peer = 0;
		snd_pipe = usb_sndbulkpipe(mdev->usb_device,
					   mdev->ep_address[peer]);
		usb_clear_halt(mdev->usb_device, snd_pipe);
	}
	mdev->is_channel_healthy[channel] = true;
	most_resume_enqueue(&mdev->iface, channel);
	mutex_unlock(&mdev->io_mutex);
}
/**
 * hdm_usb_fops - file operation table for USB driver
 */
static const struct file_operations hdm_usb_fops = {
	.owner = THIS_MODULE,
};

/**
 * usb_device_id - ID table for HCD device probing
 */
static const struct usb_device_id usbid[] = {
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
	{ } /* Terminating entry */
};
struct regs {
	const char *name;
	u16 reg;
};

static const struct regs ro_regs[] = {
	{ "ni_state", DRCI_REG_NI_STATE },
	{ "packet_bandwidth", DRCI_REG_PACKET_BW },
	{ "node_address", DRCI_REG_NODE_ADDR },
	{ "node_position", DRCI_REG_NODE_POS },
};

static const struct regs rw_regs[] = {
	{ "mep_filter", DRCI_REG_MEP_FILTER },
	{ "mep_hash0", DRCI_REG_HASH_TBL0 },
	{ "mep_hash1", DRCI_REG_HASH_TBL1 },
	{ "mep_hash2", DRCI_REG_HASH_TBL2 },
	{ "mep_hash3", DRCI_REG_HASH_TBL3 },
	{ "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
	{ "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
	{ "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
};

static int get_stat_reg_addr(const struct regs *regs, int size,
			     const char *name, u16 *reg_addr)
{
	int i;

	for (i = 0; i < size; i++) {
		if (sysfs_streq(name, regs[i].name)) {
			*reg_addr = regs[i].reg;
			return 0;
		}
	}
	return -EINVAL;
}

#define get_static_reg_addr(regs, name, reg_addr) \
	get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)

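/*
 * value_show - sysfs read handler for the DCI attributes; the attribute
 * name selects the register: "arb_address" shows the stored arbitrary
 * address, "arb_value" reads from that address, and any other name is
 * looked up in the ro_regs/rw_regs tables before the register is read.
 */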
static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	const char *name = attr->attr.name;
	struct most_dci_obj *dci_obj = to_dci_obj(dev);
	u16 val;
	u16 reg_addr;
	int err;

	if (sysfs_streq(name, "arb_address"))
		return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);

	if (sysfs_streq(name, "arb_value"))
		reg_addr = dci_obj->reg_addr;
	else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
		 get_static_reg_addr(rw_regs, name, &reg_addr))
		return -EINVAL;

	err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
	if (err < 0)
		return err;

	return snprintf(buf, PAGE_SIZE, "%04x\n", val);
}

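/*
 * value_store - sysfs write handler for the DCI attributes; the input is
 * parsed as a 16-bit hex value and, depending on the attribute name,
 * stored as the arbitrary address, written to a DCI register or used to
 * start a synchronous endpoint.
 */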
static ssize_t value_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u16 val;
	u16 reg_addr;
	const char *name = attr->attr.name;
	struct most_dci_obj *dci_obj = to_dci_obj(dev);
	struct usb_device *usb_dev = dci_obj->usb_device;
	int err;

	err = kstrtou16(buf, 16, &val);
	if (err)
		return err;

	if (sysfs_streq(name, "arb_address")) {
		dci_obj->reg_addr = val;
		return count;
	}

	if (sysfs_streq(name, "arb_value"))
		err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
	else if (sysfs_streq(name, "sync_ep"))
		err = start_sync_ep(usb_dev, val);
	else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
		err = drci_wr_reg(usb_dev, reg_addr, val);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	return count;
}
static DEVICE_ATTR(ni_state, 0444, value_show, NULL);
static DEVICE_ATTR(packet_bandwidth, 0444, value_show, NULL);
static DEVICE_ATTR(node_address, 0444, value_show, NULL);
static DEVICE_ATTR(node_position, 0444, value_show, NULL);
static DEVICE_ATTR(sync_ep, 0200, NULL, value_store);
static DEVICE_ATTR(mep_filter, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash0, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash1, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash2, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash3, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_hi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_mi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_lo, 0644, value_show, value_store);
static DEVICE_ATTR(arb_address, 0644, value_show, value_store);
static DEVICE_ATTR(arb_value, 0644, value_show, value_store);

static struct attribute *dci_attrs[] = {
	&dev_attr_ni_state.attr,
	&dev_attr_packet_bandwidth.attr,
	&dev_attr_node_address.attr,
	&dev_attr_node_position.attr,
	&dev_attr_sync_ep.attr,
	&dev_attr_mep_filter.attr,
	&dev_attr_mep_hash0.attr,
	&dev_attr_mep_hash1.attr,
	&dev_attr_mep_hash2.attr,
	&dev_attr_mep_hash3.attr,
	&dev_attr_mep_eui48_hi.attr,
	&dev_attr_mep_eui48_mi.attr,
	&dev_attr_mep_eui48_lo.attr,
	&dev_attr_arb_address.attr,
	&dev_attr_arb_value.attr,
	NULL,
};

ATTRIBUTE_GROUPS(dci);

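/*
 * release_dci - device release callback for the DCI object; drops the
 * reference taken on the parent interface device and frees the object.
 */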
static void release_dci(struct device *dev)
{
	struct most_dci_obj *dci = to_dci_obj(dev);

	put_device(dev->parent);
	kfree(dci);
}

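/*
 * release_mdev - device release callback that frees the most_dev instance
 * once its embedded struct device is no longer referenced.
 */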
static void release_mdev(struct device *dev)
{
	struct most_dev *mdev = to_mdev_from_dev(dev);

	kfree(mdev);
}
/**
 * hdm_probe - probe function of USB device driver
 * @interface: Interface of the attached USB device
 * @id: Pointer to the USB ID table.
 *
 * This allocates and initializes the device instance, adds the new
 * entry to the internal list, scans the USB descriptors and registers
 * the interface with the core.
 * Additionally, the DCI objects are created and the hardware is sync'd.
 *
 * Return 0 on success. In case of an error a negative number is returned.
 */
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
	struct usb_device *usb_dev = interface_to_usbdev(interface);
	struct device *dev = &usb_dev->dev;
	struct most_dev *mdev;
	unsigned int i;
	unsigned int num_endpoints;
	struct most_channel_capability *tmp_cap;
	struct usb_endpoint_descriptor *ep_desc;
	int ret = -ENOMEM;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	usb_set_intfdata(interface, mdev);
	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
	if (num_endpoints > MAX_NUM_ENDPOINTS) {
		kfree(mdev);
		return -EINVAL;
	}
	mutex_init(&mdev->io_mutex);
	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
	timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);

	mdev->usb_device = usb_dev;
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);

	mdev->iface.mod = hdm_usb_fops.owner;
	mdev->iface.dev = &mdev->dev;
	mdev->iface.driver_dev = &interface->dev;
	mdev->iface.interface = ITYPE_USB;
	mdev->iface.configure = hdm_configure_channel;
	mdev->iface.request_netinfo = hdm_request_netinfo;
	mdev->iface.enqueue = hdm_enqueue;
	mdev->iface.poison_channel = hdm_poison_channel;
	mdev->iface.dma_alloc = hdm_dma_alloc;
	mdev->iface.dma_free = hdm_dma_free;
	mdev->iface.description = mdev->description;
	mdev->iface.num_channels = num_endpoints;

	snprintf(mdev->description, sizeof(mdev->description),
		 "%d-%s:%d.%d",
		 usb_dev->bus->busnum,
		 usb_dev->devpath,
		 usb_dev->config->desc.bConfigurationValue,
		 usb_iface_desc->desc.bInterfaceNumber);

	mdev->dev.init_name = mdev->description;
	mdev->dev.parent = &interface->dev;
	mdev->dev.release = release_mdev;
	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
	if (!mdev->conf)
		goto err_free_mdev;

	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
	if (!mdev->cap)
		goto err_free_conf;

	mdev->iface.channel_vector = mdev->cap;
	mdev->ep_address =
		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
	if (!mdev->ep_address)
		goto err_free_cap;

	mdev->busy_urbs =
		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
	if (!mdev->busy_urbs)
		goto err_free_ep_address;

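	/* Walk the interface's endpoint descriptors and expose every
	 * endpoint as a MOST channel: record its address, build the
	 * "ep%02x" name suffix, advertise generic buffer capabilities and
	 * derive the channel direction from the endpoint direction.
	 */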
	tmp_cap = mdev->cap;
	for (i = 0; i < num_endpoints; i++) {
		ep_desc = &usb_iface_desc->endpoint[i].desc;
		mdev->ep_address[i] = ep_desc->bEndpointAddress;
		mdev->padding_active[i] = false;
		mdev->is_channel_healthy[i] = true;

		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
			 mdev->ep_address[i]);

		tmp_cap->name_suffix = &mdev->suffix[i][0];
		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				     MOST_CH_ISOC | MOST_CH_SYNC;
		if (usb_endpoint_dir_in(ep_desc))
			tmp_cap->direction = MOST_CH_RX;
		else
			tmp_cap->direction = MOST_CH_TX;
		tmp_cap++;
		init_usb_anchor(&mdev->busy_urbs[i]);
		spin_lock_init(&mdev->channel_lock[i]);
	}
	dev_dbg(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
		le16_to_cpu(usb_dev->descriptor.idVendor),
		le16_to_cpu(usb_dev->descriptor.idProduct),
		usb_dev->bus->busnum,
		usb_dev->devnum);

	dev_dbg(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
		usb_dev->bus->busnum,
		usb_dev->devpath,
		usb_dev->config->desc.bConfigurationValue,
		usb_iface_desc->desc.bInterfaceNumber);

	ret = most_register_interface(&mdev->iface);
	if (ret)
		goto err_free_busy_urbs;

	mutex_lock(&mdev->io_mutex);
	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
		mdev->dci = kzalloc(sizeof(*mdev->dci), GFP_KERNEL);
		if (!mdev->dci) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto err_free_busy_urbs;
		}

		mdev->dci->dev.init_name = "dci";
		mdev->dci->dev.parent = get_device(mdev->iface.dev);
		mdev->dci->dev.groups = dci_groups;
		mdev->dci->dev.release = release_dci;
		if (device_register(&mdev->dci->dev)) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto err_free_dci;
		}
		mdev->dci->usb_device = mdev->usb_device;
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;
err_free_dci:
	put_device(&mdev->dci->dev);
err_free_busy_urbs:
	kfree(mdev->busy_urbs);
err_free_ep_address:
	kfree(mdev->ep_address);
err_free_cap:
	kfree(mdev->cap);
err_free_conf:
	kfree(mdev->conf);
err_free_mdev:
	put_device(&mdev->dev);
	return ret;
}
/**
 * hdm_disconnect - disconnect function of USB device driver
 * @interface: Interface of the attached USB device
 *
 * This deregisters the interface with the core, removes the kernel timer
 * and frees resources.
 *
 * Context: hub kernel thread
 */
static void hdm_disconnect(struct usb_interface *interface)
{
	struct most_dev *mdev = usb_get_intfdata(interface);

	mutex_lock(&mdev->io_mutex);
	usb_set_intfdata(interface, NULL);
	mdev->usb_device = NULL;
	mutex_unlock(&mdev->io_mutex);

	del_timer_sync(&mdev->link_stat_timer);
	cancel_work_sync(&mdev->poll_work_obj);

	if (mdev->dci)
		device_unregister(&mdev->dci->dev);
	most_deregister_interface(&mdev->iface);

	kfree(mdev->busy_urbs);
	kfree(mdev->cap);
	kfree(mdev->conf);
	kfree(mdev->ep_address);
	put_device(&mdev->dci->dev);
	put_device(&mdev->dev);
}

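/*
 * hdm_suspend - stop enqueueing and kill all pending URBs on every channel
 * before the interface is put to sleep.
 */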
static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
{
	struct most_dev *mdev = usb_get_intfdata(interface);
	int i;

	mutex_lock(&mdev->io_mutex);
	for (i = 0; i < mdev->iface.num_channels; i++) {
		most_stop_enqueue(&mdev->iface, i);
		usb_kill_anchored_urbs(&mdev->busy_urbs[i]);
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;
}

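/*
 * hdm_resume - re-enable enqueueing on every channel after the interface
 * wakes up again.
 */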
static int hdm_resume(struct usb_interface *interface)
{
	struct most_dev *mdev = usb_get_intfdata(interface);
	int i;

	mutex_lock(&mdev->io_mutex);
	for (i = 0; i < mdev->iface.num_channels; i++)
		most_resume_enqueue(&mdev->iface, i);
	mutex_unlock(&mdev->io_mutex);
	return 0;
}

static struct usb_driver hdm_usb = {
	.name = "hdm_usb",
	.id_table = usbid,
	.probe = hdm_probe,
	.disconnect = hdm_disconnect,
	.resume = hdm_resume,
	.suspend = hdm_suspend,
};

module_usb_driver(hdm_usb);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("HDM_4_USB");