/*
 * Source file: drivers/usb/gadget/bcm63xx_udc.c
 * (web-export page header removed; see license block below)
 */
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kconfig.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/platform_device.h>
30 #include <linux/sched.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 #include <linux/timer.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/workqueue.h>
38 #include <bcm63xx_cpu.h>
39 #include <bcm63xx_iudma.h>
40 #include <bcm63xx_dev_usb_usbd.h>
41 #include <bcm63xx_io.h>
42 #include <bcm63xx_regs.h>
44 #define DRV_MODULE_NAME "bcm63xx_udc"
46 static const char bcm63xx_ep0name[] = "ep0";
47 static const char *const bcm63xx_ep_name[] = {
48 bcm63xx_ep0name,
49 "ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
52 static bool use_fullspeed;
53 module_param(use_fullspeed, bool, S_IRUGO);
54 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
57 * RX IRQ coalescing options:
59 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
60 * driver is able to pass the "testusb" suite and recover from conditions like:
62 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
63 * 2) Host sends 512 bytes of data
64 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
65 * 4) Device shuts down the endpoint and cancels the RX transaction
67 * true - one IRQ per transfer, for transfers <= 2048B. Generates
68 * considerably fewer IRQs, but error recovery is less robust. Does not
69 * reliably pass "testusb".
71 * TX always uses coalescing, because we can cancel partially complete TX
72 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
73 * this on RX.
75 static bool irq_coalesce;
76 module_param(irq_coalesce, bool, S_IRUGO);
77 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
/* Endpoint / DMA channel counts are fixed by the hardware design. */
#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

/* ep0 is bidirectional and owns the first two IUDMA channels. */
#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

/* Hardware endpoint type encodings. */
#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

/* Hardware endpoint direction encodings. */
#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

/* USBD_STRAPS speed strap values. */
#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

/* Offsets of the DMAC/DMAS register banks within the IUDMA block. */
#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400
/* States of the ep0 control-transfer state machine. */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
117 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
118 "REQUEUE",
119 "IDLE",
120 "IN_DATA_PHASE_SETUP",
121 "IN_DATA_PHASE_COMPLETE",
122 "OUT_DATA_PHASE_SETUP",
123 "OUT_DATA_PHASE_COMPLETE",
124 "OUT_STATUS_PHASE",
125 "IN_FAKE_STATUS_PHASE",
126 "SHUTDOWN",
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int			ep_num;
	int			n_bds;
	int			ep_type;
	int			dir;
	int			n_fifo_slots;
	int			max_pkt_hs;
	int			max_pkt_fs;
};
149 static const struct iudma_ch_cfg iudma_defaults[] = {
151 /* This controller was designed to support a CDC/RNDIS application.
152 It may be possible to reconfigure some of the endpoints, but
153 the hardware limitations (FIFO sizing and number of DMA channels)
154 may significantly impact flexibility and/or stability. Change
155 these values at your own risk.
157 ep_num ep_type n_fifo_slots max_pkt_fs
158 idx | n_bds | dir | max_pkt_hs |
159 | | | | | | | | */
160 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
161 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
162 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
163 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
164 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
165 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
168 struct bcm63xx_udc;
171 * struct iudma_ch - Represents the current state of a single IUDMA channel.
172 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
173 * @ep_num: USB endpoint number. -1 for ep0 RX.
174 * @enabled: Whether bcm63xx_ep_enable() has been called.
175 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
176 * @is_tx: true for TX, false for RX.
177 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
178 * @udc: Reference to the device controller.
179 * @read_bd: Next buffer descriptor to reap from the hardware.
180 * @write_bd: Next BD available for a new packet.
181 * @end_bd: Points to the final BD in the ring.
182 * @n_bds_used: Number of BD entries currently occupied.
183 * @bd_ring: Base pointer to the BD ring.
184 * @bd_ring_dma: Physical (DMA) address of bd_ring.
185 * @n_bds: Total number of BDs in the ring.
187 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
188 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
189 * only.
191 * Each bulk/intr endpoint has a single IUDMA channel and a single
192 * struct usb_ep.
194 struct iudma_ch {
195 unsigned int ch_idx;
196 int ep_num;
197 bool enabled;
198 int max_pkt;
199 bool is_tx;
200 struct bcm63xx_ep *bep;
201 struct bcm63xx_udc *udc;
203 struct bcm_enet_desc *read_bd;
204 struct bcm_enet_desc *write_bd;
205 struct bcm_enet_desc *end_bd;
206 int n_bds_used;
208 struct bcm_enet_desc *bd_ring;
209 dma_addr_t bd_ring_dma;
210 unsigned int n_bds;
214 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
215 * @ep_num: USB endpoint number.
216 * @iudma: Pointer to IUDMA channel state.
217 * @ep: USB gadget layer representation of the EP.
218 * @udc: Reference to the device controller.
219 * @queue: Linked list of outstanding requests for this EP.
220 * @halted: 1 if the EP is stalled; 0 otherwise.
222 struct bcm63xx_ep {
223 unsigned int ep_num;
224 struct iudma_ch *iudma;
225 struct usb_ep ep;
226 struct bcm63xx_udc *udc;
227 struct list_head queue;
228 unsigned halted:1;
232 * struct bcm63xx_req - Internal (driver) state of a single request.
233 * @queue: Links back to the EP's request list.
234 * @req: USB gadget layer representation of the request.
235 * @offset: Current byte offset into the data buffer (next byte to queue).
236 * @bd_bytes: Number of data bytes in outstanding BD entries.
237 * @iudma: IUDMA channel used for the request.
239 struct bcm63xx_req {
240 struct list_head queue; /* ep's requests */
241 struct usb_request req;
242 unsigned int offset;
243 unsigned int bd_bytes;
244 struct iudma_ch *iudma;
248 * struct bcm63xx_udc - Driver/hardware private context.
249 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
250 * @dev: Generic Linux device structure.
251 * @pd: Platform data (board/port info).
252 * @usbd_clk: Clock descriptor for the USB device block.
253 * @usbh_clk: Clock descriptor for the USB host block.
254 * @gadget: USB slave device.
255 * @driver: Driver for USB slave devices.
256 * @usbd_regs: Base address of the USBD/USB20D block.
257 * @iudma_regs: Base address of the USBD's associated IUDMA block.
258 * @bep: Array of endpoints, including ep0.
259 * @iudma: Array of all IUDMA channels used by this controller.
260 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
261 * @iface: USB interface number, from SET_INTERFACE wIndex.
262 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
263 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
264 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
265 * @ep0state: Current state of the ep0 state machine.
266 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
267 * @wedgemap: Bitmap of wedged endpoints.
268 * @ep0_req_reset: USB reset is pending.
269 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
270 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
271 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
272 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
273 * @ep0_reply: Pending reply from gadget driver.
274 * @ep0_request: Outstanding ep0 request.
275 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
276 * @debugfs_usbd: debugfs file "usbd" for controller state.
277 * @debugfs_iudma: debugfs file "usbd" for IUDMA state.
279 struct bcm63xx_udc {
280 spinlock_t lock;
282 struct device *dev;
283 struct bcm63xx_usbd_platform_data *pd;
284 struct clk *usbd_clk;
285 struct clk *usbh_clk;
287 struct usb_gadget gadget;
288 struct usb_gadget_driver *driver;
290 void __iomem *usbd_regs;
291 void __iomem *iudma_regs;
293 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
294 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
296 int cfg;
297 int iface;
298 int alt_iface;
300 struct bcm63xx_req ep0_ctrl_req;
301 u8 *ep0_ctrl_buf;
303 int ep0state;
304 struct work_struct ep0_wq;
306 unsigned long wedgemap;
308 unsigned ep0_req_reset:1;
309 unsigned ep0_req_set_cfg:1;
310 unsigned ep0_req_set_iface:1;
311 unsigned ep0_req_shutdown:1;
313 unsigned ep0_req_completed:1;
314 struct usb_request *ep0_reply;
315 struct usb_request *ep0_request;
317 struct dentry *debugfs_root;
318 struct dentry *debugfs_usbd;
319 struct dentry *debugfs_iudma;
322 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
324 /***********************************************************************
325 * Convenience functions
326 ***********************************************************************/
328 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
330 return container_of(g, struct bcm63xx_udc, gadget);
333 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
335 return container_of(ep, struct bcm63xx_ep, ep);
338 static inline struct bcm63xx_req *our_req(struct usb_request *req)
340 return container_of(req, struct bcm63xx_req, req);
343 static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
345 return bcm_readl(udc->usbd_regs + off);
348 static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
350 bcm_writel(val, udc->usbd_regs + off);
353 static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
355 return bcm_readl(udc->iudma_regs + off);
358 static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
360 bcm_writel(val, udc->iudma_regs + off);
363 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
365 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
368 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
370 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
373 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
375 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
378 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
380 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
383 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
385 if (is_enabled) {
386 clk_enable(udc->usbh_clk);
387 clk_enable(udc->usbd_clk);
388 udelay(10);
389 } else {
390 clk_disable(udc->usbd_clk);
391 clk_disable(udc->usbh_clk);
395 /***********************************************************************
396 * Low-level IUDMA / FIFO operations
397 ***********************************************************************/
400 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
401 * @udc: Reference to the device controller.
402 * @idx: Desired init_sel value.
404 * The "init_sel" signal is used as a selection index for both endpoints
405 * and IUDMA channels. Since these do not map 1:1, the use of this signal
406 * depends on the context.
408 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
410 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
412 val &= ~USBD_CONTROL_INIT_SEL_MASK;
413 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
414 usbd_writel(udc, val, USBD_CONTROL_REG);
418 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
419 * @udc: Reference to the device controller.
420 * @bep: Endpoint on which to operate.
421 * @is_stalled: true to enable stall, false to disable.
423 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
424 * halt/stall conditions.
426 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
427 bool is_stalled)
429 u32 val;
431 val = USBD_STALL_UPDATE_MASK |
432 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
433 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
434 usbd_writel(udc, val, USBD_STALL_REG);
438 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
439 * @udc: Reference to the device controller.
441 * These parameters depend on the USB link speed. Settings are
442 * per-IUDMA-channel-pair.
444 static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
446 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
447 u32 i, val, rx_fifo_slot, tx_fifo_slot;
449 /* set up FIFO boundaries and packet sizes; this is done in pairs */
450 rx_fifo_slot = tx_fifo_slot = 0;
451 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
452 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
453 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
455 bcm63xx_ep_dma_select(udc, i >> 1);
457 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
458 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
459 USBD_RXFIFO_CONFIG_END_SHIFT);
460 rx_fifo_slot += rx_cfg->n_fifo_slots;
461 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
462 usbd_writel(udc,
463 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
464 USBD_RXFIFO_EPSIZE_REG);
466 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
467 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
468 USBD_TXFIFO_CONFIG_END_SHIFT);
469 tx_fifo_slot += tx_cfg->n_fifo_slots;
470 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
471 usbd_writel(udc,
472 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
473 USBD_TXFIFO_EPSIZE_REG);
475 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
480 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
481 * @udc: Reference to the device controller.
482 * @ep_num: Endpoint number.
484 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
486 u32 val;
488 bcm63xx_ep_dma_select(udc, ep_num);
490 val = usbd_readl(udc, USBD_CONTROL_REG);
491 val |= USBD_CONTROL_FIFO_RESET_MASK;
492 usbd_writel(udc, val, USBD_CONTROL_REG);
493 usbd_readl(udc, USBD_CONTROL_REG);
497 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
498 * @udc: Reference to the device controller.
500 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
502 int i;
504 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
505 bcm63xx_fifo_reset_ep(udc, i);
509 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
510 * @udc: Reference to the device controller.
512 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
514 u32 i, val;
516 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
517 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
519 if (cfg->ep_num < 0)
520 continue;
522 bcm63xx_ep_dma_select(udc, cfg->ep_num);
523 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
524 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
525 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
530 * bcm63xx_ep_setup - Configure per-endpoint settings.
531 * @udc: Reference to the device controller.
533 * This needs to be rerun if the speed/cfg/intf/altintf changes.
535 static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
537 u32 val, i;
539 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
541 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
542 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
543 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
544 cfg->max_pkt_hs : cfg->max_pkt_fs;
545 int idx = cfg->ep_num;
547 udc->iudma[i].max_pkt = max_pkt;
549 if (idx < 0)
550 continue;
551 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
553 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
554 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
555 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
556 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
557 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
558 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
559 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
560 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
565 * iudma_write - Queue a single IUDMA transaction.
566 * @udc: Reference to the device controller.
567 * @iudma: IUDMA channel to use.
568 * @breq: Request containing the transaction data.
570 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
571 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
572 * So iudma_write() may be called several times to fulfill a single
573 * usb_request.
575 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
577 static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
578 struct bcm63xx_req *breq)
580 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
581 unsigned int bytes_left = breq->req.length - breq->offset;
582 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
583 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
585 iudma->n_bds_used = 0;
586 breq->bd_bytes = 0;
587 breq->iudma = iudma;
589 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
590 extra_zero_pkt = 1;
592 do {
593 struct bcm_enet_desc *d = iudma->write_bd;
594 u32 dmaflags = 0;
595 unsigned int n_bytes;
597 if (d == iudma->end_bd) {
598 dmaflags |= DMADESC_WRAP_MASK;
599 iudma->write_bd = iudma->bd_ring;
600 } else {
601 iudma->write_bd++;
603 iudma->n_bds_used++;
605 n_bytes = min_t(int, bytes_left, max_bd_bytes);
606 if (n_bytes)
607 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
608 else
609 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
610 DMADESC_USB_ZERO_MASK;
612 dmaflags |= DMADESC_OWNER_MASK;
613 if (first_bd) {
614 dmaflags |= DMADESC_SOP_MASK;
615 first_bd = 0;
619 * extra_zero_pkt forces one more iteration through the loop
620 * after all data is queued up, to send the zero packet
622 if (extra_zero_pkt && !bytes_left)
623 extra_zero_pkt = 0;
625 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
626 (n_bytes == bytes_left && !extra_zero_pkt)) {
627 last_bd = 1;
628 dmaflags |= DMADESC_EOP_MASK;
631 d->address = breq->req.dma + breq->offset;
632 mb();
633 d->len_stat = dmaflags;
635 breq->offset += n_bytes;
636 breq->bd_bytes += n_bytes;
637 bytes_left -= n_bytes;
638 } while (!last_bd);
640 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
641 ENETDMAC_CHANCFG_REG(iudma->ch_idx));
645 * iudma_read - Check for IUDMA buffer completion.
646 * @udc: Reference to the device controller.
647 * @iudma: IUDMA channel to use.
649 * This checks to see if ALL of the outstanding BDs on the DMA channel
650 * have been filled. If so, it returns the actual transfer length;
651 * otherwise it returns -EBUSY.
653 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
655 int i, actual_len = 0;
656 struct bcm_enet_desc *d = iudma->read_bd;
658 if (!iudma->n_bds_used)
659 return -EINVAL;
661 for (i = 0; i < iudma->n_bds_used; i++) {
662 u32 dmaflags;
664 dmaflags = d->len_stat;
666 if (dmaflags & DMADESC_OWNER_MASK)
667 return -EBUSY;
669 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
670 DMADESC_LENGTH_SHIFT;
671 if (d == iudma->end_bd)
672 d = iudma->bd_ring;
673 else
674 d++;
677 iudma->read_bd = d;
678 iudma->n_bds_used = 0;
679 return actual_len;
683 * iudma_reset_channel - Stop DMA on a single channel.
684 * @udc: Reference to the device controller.
685 * @iudma: IUDMA channel to reset.
687 static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
689 int timeout = IUDMA_RESET_TIMEOUT_US;
690 struct bcm_enet_desc *d;
691 int ch_idx = iudma->ch_idx;
693 if (!iudma->is_tx)
694 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
696 /* stop DMA, then wait for the hardware to wrap up */
697 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
699 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
700 ENETDMAC_CHANCFG_EN_MASK) {
701 udelay(1);
703 /* repeatedly flush the FIFO data until the BD completes */
704 if (iudma->is_tx && iudma->ep_num >= 0)
705 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
707 if (!timeout--) {
708 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
709 ch_idx);
710 break;
712 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
713 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
714 ch_idx);
715 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
716 ENETDMAC_CHANCFG_REG(ch_idx));
719 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
721 /* don't leave "live" HW-owned entries for the next guy to step on */
722 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
723 d->len_stat = 0;
724 mb();
726 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
727 iudma->n_bds_used = 0;
729 /* set up IRQs, UBUS burst size, and BD base for this channel */
730 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
731 ENETDMAC_IRMASK_REG(ch_idx));
732 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
734 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
735 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
739 * iudma_init_channel - One-time IUDMA channel initialization.
740 * @udc: Reference to the device controller.
741 * @ch_idx: Channel to initialize.
743 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
745 struct iudma_ch *iudma = &udc->iudma[ch_idx];
746 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
747 unsigned int n_bds = cfg->n_bds;
748 struct bcm63xx_ep *bep = NULL;
750 iudma->ep_num = cfg->ep_num;
751 iudma->ch_idx = ch_idx;
752 iudma->is_tx = !!(ch_idx & 0x01);
753 if (iudma->ep_num >= 0) {
754 bep = &udc->bep[iudma->ep_num];
755 bep->iudma = iudma;
756 INIT_LIST_HEAD(&bep->queue);
759 iudma->bep = bep;
760 iudma->udc = udc;
762 /* ep0 is always active; others are controlled by the gadget driver */
763 if (iudma->ep_num <= 0)
764 iudma->enabled = true;
766 iudma->n_bds = n_bds;
767 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
768 n_bds * sizeof(struct bcm_enet_desc),
769 &iudma->bd_ring_dma, GFP_KERNEL);
770 if (!iudma->bd_ring)
771 return -ENOMEM;
772 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
774 return 0;
778 * iudma_init - One-time initialization of all IUDMA channels.
779 * @udc: Reference to the device controller.
781 * Enable DMA, flush channels, and enable global IUDMA IRQs.
783 static int iudma_init(struct bcm63xx_udc *udc)
785 int i, rc;
787 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
789 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
790 rc = iudma_init_channel(udc, i);
791 if (rc)
792 return rc;
793 iudma_reset_channel(udc, &udc->iudma[i]);
796 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
797 return 0;
801 * iudma_uninit - Uninitialize IUDMA channels.
802 * @udc: Reference to the device controller.
804 * Kill global IUDMA IRQs, flush channels, and kill DMA.
806 static void iudma_uninit(struct bcm63xx_udc *udc)
808 int i;
810 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
812 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
813 iudma_reset_channel(udc, &udc->iudma[i]);
815 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
818 /***********************************************************************
819 * Other low-level USBD operations
820 ***********************************************************************/
823 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
824 * @udc: Reference to the device controller.
825 * @enable_irqs: true to enable, false to disable.
827 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
829 u32 val;
831 usbd_writel(udc, 0, USBD_STATUS_REG);
833 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
834 BIT(USBD_EVENT_IRQ_SETUP) |
835 BIT(USBD_EVENT_IRQ_SETCFG) |
836 BIT(USBD_EVENT_IRQ_SETINTF) |
837 BIT(USBD_EVENT_IRQ_USB_LINK);
838 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
839 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
843 * bcm63xx_select_phy_mode - Select between USB device and host mode.
844 * @udc: Reference to the device controller.
845 * @is_device: true for device, false for host.
847 * This should probably be reworked to use the drivers/usb/otg
848 * infrastructure.
850 * By default, the AFE/pullups are disabled in device mode, until
851 * bcm63xx_select_pullup() is called.
853 static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
855 u32 val, portmask = BIT(udc->pd->port_no);
857 if (BCMCPU_IS_6328()) {
858 /* configure pinmux to sense VBUS signal */
859 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
860 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
861 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
862 GPIO_PINMUX_OTHR_6328_USB_HOST;
863 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
866 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
867 if (is_device) {
868 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
869 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
870 } else {
871 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
872 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
874 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
876 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
877 if (is_device)
878 val |= USBH_PRIV_SWAP_USBD_MASK;
879 else
880 val &= ~USBH_PRIV_SWAP_USBD_MASK;
881 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
885 * bcm63xx_select_pullup - Enable/disable the pullup on D+
886 * @udc: Reference to the device controller.
887 * @is_on: true to enable the pullup, false to disable.
889 * If the pullup is active, the host will sense a FS/HS device connected to
890 * the port. If the pullup is inactive, the host will think the USB
891 * device has been disconnected.
893 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
895 u32 val, portmask = BIT(udc->pd->port_no);
897 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
898 if (is_on)
899 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
900 else
901 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
902 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
906 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
907 * @udc: Reference to the device controller.
909 * This just masks the IUDMA IRQs and releases the clocks. It is assumed
910 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
912 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
914 set_clocks(udc, true);
915 iudma_uninit(udc);
916 set_clocks(udc, false);
918 clk_put(udc->usbd_clk);
919 clk_put(udc->usbh_clk);
923 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
924 * @udc: Reference to the device controller.
926 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
928 int i, rc = 0;
929 u32 val;
931 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
932 GFP_KERNEL);
933 if (!udc->ep0_ctrl_buf)
934 return -ENOMEM;
936 INIT_LIST_HEAD(&udc->gadget.ep_list);
937 for (i = 0; i < BCM63XX_NUM_EP; i++) {
938 struct bcm63xx_ep *bep = &udc->bep[i];
940 bep->ep.name = bcm63xx_ep_name[i];
941 bep->ep_num = i;
942 bep->ep.ops = &bcm63xx_udc_ep_ops;
943 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
944 bep->halted = 0;
945 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
946 bep->udc = udc;
947 bep->ep.desc = NULL;
948 INIT_LIST_HEAD(&bep->queue);
951 udc->gadget.ep0 = &udc->bep[0].ep;
952 list_del(&udc->bep[0].ep.ep_list);
954 udc->gadget.speed = USB_SPEED_UNKNOWN;
955 udc->ep0state = EP0_SHUTDOWN;
957 udc->usbh_clk = clk_get(udc->dev, "usbh");
958 if (IS_ERR(udc->usbh_clk))
959 return -EIO;
961 udc->usbd_clk = clk_get(udc->dev, "usbd");
962 if (IS_ERR(udc->usbd_clk)) {
963 clk_put(udc->usbh_clk);
964 return -EIO;
967 set_clocks(udc, true);
969 val = USBD_CONTROL_AUTO_CSRS_MASK |
970 USBD_CONTROL_DONE_CSRS_MASK |
971 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
972 usbd_writel(udc, val, USBD_CONTROL_REG);
974 val = USBD_STRAPS_APP_SELF_PWR_MASK |
975 USBD_STRAPS_APP_RAM_IF_MASK |
976 USBD_STRAPS_APP_CSRPRGSUP_MASK |
977 USBD_STRAPS_APP_8BITPHY_MASK |
978 USBD_STRAPS_APP_RMTWKUP_MASK;
980 if (udc->gadget.max_speed == USB_SPEED_HIGH)
981 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
982 else
983 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
984 usbd_writel(udc, val, USBD_STRAPS_REG);
986 bcm63xx_set_ctrl_irqs(udc, false);
988 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
990 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
991 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
992 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
994 rc = iudma_init(udc);
995 set_clocks(udc, false);
996 if (rc)
997 bcm63xx_uninit_udc_hw(udc);
999 return 0;
1002 /***********************************************************************
1003 * Standard EP gadget operations
1004 ***********************************************************************/
1007 * bcm63xx_ep_enable - Enable one endpoint.
1008 * @ep: Endpoint to enable.
1009 * @desc: Contains max packet, direction, etc.
1011 * Most of the endpoint parameters are fixed in this controller, so there
1012 * isn't much for this function to do.
1014 static int bcm63xx_ep_enable(struct usb_ep *ep,
1015 const struct usb_endpoint_descriptor *desc)
1017 struct bcm63xx_ep *bep = our_ep(ep);
1018 struct bcm63xx_udc *udc = bep->udc;
1019 struct iudma_ch *iudma = bep->iudma;
1020 unsigned long flags;
1022 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1023 return -EINVAL;
1025 if (!udc->driver)
1026 return -ESHUTDOWN;
1028 spin_lock_irqsave(&udc->lock, flags);
1029 if (iudma->enabled) {
1030 spin_unlock_irqrestore(&udc->lock, flags);
1031 return -EINVAL;
1034 iudma->enabled = true;
1035 BUG_ON(!list_empty(&bep->queue));
1037 iudma_reset_channel(udc, iudma);
1039 bep->halted = 0;
1040 bcm63xx_set_stall(udc, bep, false);
1041 clear_bit(bep->ep_num, &udc->wedgemap);
1043 ep->desc = desc;
1044 ep->maxpacket = usb_endpoint_maxp(desc);
1046 spin_unlock_irqrestore(&udc->lock, flags);
1047 return 0;
/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 *
 * Any requests still queued on the endpoint are completed with
 * -ESHUTDOWN.  The spinlock is dropped around each ->complete()
 * callback because gadget completion handlers may resubmit or sleep.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		/* _safe variant: each iteration deletes the current entry */
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/* drop the lock while calling into the gadget driver */
			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1096 * bcm63xx_udc_alloc_request - Allocate a new request.
1097 * @ep: Endpoint associated with the request.
1098 * @mem_flags: Flags to pass to kzalloc().
1100 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1101 gfp_t mem_flags)
1103 struct bcm63xx_req *breq;
1105 breq = kzalloc(sizeof(*breq), mem_flags);
1106 if (!breq)
1107 return NULL;
1108 return &breq->req;
/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	/* the usb_request is embedded in our bcm63xx_req container */
	kfree(our_req(req));
}
/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		/*
		 * NOTE(review): ep0_reply is tested/set here without
		 * udc->lock; presumably serialized against the ep0 worker
		 * by the gadget-driver call pattern -- TODO confirm.
		 */
		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		/* if it's the only entry, kick off the DMA immediately */
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 *
 * NOTE(review): ->complete() is invoked with -ESHUTDOWN even when the
 * queue was empty and -EINVAL is returned -- looks intentional (caller
 * always gets a completion) but worth confirming.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* head of queue: abort the in-flight DMA, start successor */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/* complete outside the lock; gadget callbacks may re-queue */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1232 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1233 * @ep: Endpoint to halt.
1234 * @value: Zero to clear halt; nonzero to set halt.
1236 * See comments in bcm63xx_update_wedge().
1238 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1240 struct bcm63xx_ep *bep = our_ep(ep);
1241 struct bcm63xx_udc *udc = bep->udc;
1242 unsigned long flags;
1244 spin_lock_irqsave(&udc->lock, flags);
1245 bcm63xx_set_stall(udc, bep, !!value);
1246 bep->halted = value;
1247 spin_unlock_irqrestore(&udc->lock, flags);
1249 return 0;
1253 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1254 * @ep: Endpoint to wedge.
1256 * See comments in bcm63xx_update_wedge().
1258 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1260 struct bcm63xx_ep *bep = our_ep(ep);
1261 struct bcm63xx_udc *udc = bep->udc;
1262 unsigned long flags;
1264 spin_lock_irqsave(&udc->lock, flags);
1265 set_bit(bep->ep_num, &udc->wedgemap);
1266 bcm63xx_set_stall(udc, bep, true);
1267 spin_unlock_irqrestore(&udc->lock, flags);
1269 return 0;
/* Endpoint operations exported to the gadget core. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
1286 /***********************************************************************
1287 * EP0 handling
1288 ***********************************************************************/
/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 *
 * Callers hold udc->lock; the gadget driver's ->setup() must be called
 * without it.  Returns whatever ->setup() returns.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
1307 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1308 * @udc: Reference to the device controller.
1310 * Many standard requests are handled automatically in the hardware, but
1311 * we still need to pass them to the gadget driver so that it can
1312 * reconfigure the interfaces/endpoints if necessary.
1314 * Unfortunately we are not able to send a STALL response if the host
1315 * requests an invalid configuration. If this happens, we'll have to be
1316 * content with printing a warning.
1318 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1320 struct usb_ctrlrequest ctrl;
1321 int rc;
1323 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1324 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1325 ctrl.wValue = cpu_to_le16(udc->cfg);
1326 ctrl.wIndex = 0;
1327 ctrl.wLength = 0;
1329 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1330 if (rc < 0) {
1331 dev_warn_ratelimited(udc->dev,
1332 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1333 udc->cfg);
1335 return rc;
1339 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1340 * @udc: Reference to the device controller.
1342 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1344 struct usb_ctrlrequest ctrl;
1345 int rc;
1347 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1348 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1349 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1350 ctrl.wIndex = cpu_to_le16(udc->iface);
1351 ctrl.wLength = 0;
1353 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1354 if (rc < 0) {
1355 dev_warn_ratelimited(udc->dev,
1356 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1357 udc->iface, udc->alt_iface);
1359 return rc;
/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 *
 * Only one ep0 request may be in flight at a time (hence the BUG_ON);
 * the request is tracked in udc->ep0_request until completion.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 *
 * Caller holds udc->lock; it is dropped around the gadget driver's
 * ->complete() callback.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	/* on error, report that nothing was transferred */
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 *
 * Clears ep0_reply (and ep0_request if it refers to the same request),
 * then completes the request with -ESHUTDOWN.
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
1422 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1423 * transfer len.
1424 * @udc: Reference to the device controller.
1426 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1428 struct usb_request *req = udc->ep0_request;
1430 udc->ep0_req_completed = 0;
1431 udc->ep0_request = NULL;
1433 return req->actual;
1437 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1438 * @udc: Reference to the device controller.
1439 * @ch_idx: IUDMA channel number.
1440 * @length: Number of bytes to TX/RX.
1442 * Used for simple transfers performed by the ep0 worker. This will always
1443 * use ep0_ctrl_req / ep0_ctrl_buf.
1445 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1446 int length)
1448 struct usb_request *req = &udc->ep0_ctrl_req.req;
1450 req->buf = udc->ep0_ctrl_buf;
1451 req->length = length;
1452 req->complete = NULL;
1454 bcm63xx_ep0_map_write(udc, ch_idx, req);
/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	/* rc = number of bytes received into ep0_ctrl_buf */
	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		/* gadget driver rejected it; stall ep0 */
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		/* USB reset: just clear the flag; state stays IDLE */
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		/* a real SETUP packet arrived on the RX channel */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	/* no state transition means no work was done this round */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
1704 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1705 * @w: Workqueue struct.
1707 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1708 * is used to synchronize ep0 events and ensure that both HW and SW events
1709 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1710 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1711 * by the USBD hardware.
1713 * The worker function will continue iterating around the state machine
1714 * until there is nothing left to do. Usually "nothing left to do" means
1715 * that we're waiting for a new event from the hardware.
1717 static void bcm63xx_ep0_process(struct work_struct *w)
1719 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1720 spin_lock_irq(&udc->lock);
1721 while (bcm63xx_ep0_one_round(udc) == 0)
1723 spin_unlock_irq(&udc->lock);
1726 /***********************************************************************
1727 * Standard UDC gadget operations
1728 ***********************************************************************/
1731 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1732 * @gadget: USB slave device.
1734 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1736 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1738 return (usbd_readl(udc, USBD_STATUS_REG) &
1739 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 *
 * Returns 0 on success; -EINVAL if the request doesn't match the
 * current ep0 state (e.g. enabling when already enabled).
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller out of shutdown */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		/*
		 * Poll until the ep0 worker observes the shutdown flag and
		 * transitions to EP0_SHUTDOWN (see mb() in
		 * bcm63xx_ep0_do_idle()).
		 */
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 *
 * Returns 0 on success; -EINVAL for an unsuitable driver (this
 * controller requires a high-speed-capable gadget driver); -ENODEV
 * if no controller; -EBUSY if a driver is already bound.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1829 * bcm63xx_udc_stop - Shut down the controller.
1830 * @gadget: USB slave device.
1831 * @driver: Driver for USB slave devices.
1833 static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1834 struct usb_gadget_driver *driver)
1836 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1837 unsigned long flags;
1839 spin_lock_irqsave(&udc->lock, flags);
1841 udc->driver = NULL;
1844 * If we switch the PHY too abruptly after dropping D+, the host
1845 * will often complain:
1847 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1849 msleep(100);
1851 bcm63xx_select_phy_mode(udc, false);
1852 set_clocks(udc, false);
1854 spin_unlock_irqrestore(&udc->lock, flags);
1856 return 0;
/* Gadget operations exported to the UDC core. */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
1866 /***********************************************************************
1867 * IRQ handling
1868 ***********************************************************************/
1871 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1872 * @udc: Reference to the device controller.
1874 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1875 * The driver never sees the raw control packets coming in on the ep0
1876 * IUDMA channel, but at least we get an interrupt event to tell us that
1877 * new values are waiting in the USBD_STATUS register.
1879 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1881 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1883 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1884 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1885 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1886 USBD_STATUS_ALTINTF_SHIFT;
1887 bcm63xx_ep_setup(udc);
1891 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1892 * @udc: Reference to the device controller.
1894 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1895 * speed has changed, so that the caller can update the endpoint settings.
1897 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1899 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1900 enum usb_device_speed oldspeed = udc->gadget.speed;
1902 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1903 case BCM63XX_SPD_HIGH:
1904 udc->gadget.speed = USB_SPEED_HIGH;
1905 break;
1906 case BCM63XX_SPD_FULL:
1907 udc->gadget.speed = USB_SPEED_FULL;
1908 break;
1909 default:
1910 /* this should never happen */
1911 udc->gadget.speed = USB_SPEED_UNKNOWN;
1912 dev_err(udc->dev,
1913 "received SETUP packet with invalid link speed\n");
1914 return 0;
1917 if (udc->gadget.speed != oldspeed) {
1918 dev_info(udc->dev, "link up, %s-speed mode\n",
1919 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1920 return 1;
1921 } else {
1922 return 0;
1927 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1928 * @udc: Reference to the device controller.
1929 * @new_status: true to "refresh" wedge status; false to clear it.
1931 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1932 * because the controller hardware is designed to automatically clear
1933 * stalls in response to a CLEAR_FEATURE request from the host.
1935 * On a RESET interrupt, we do want to restore all wedged endpoints.
1937 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1939 int i;
1941 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1942 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1943 if (!new_status)
1944 clear_bit(i, &udc->wedgemap);
/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false;

	/* only act on events that are both raised and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack them all up front */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* reset really does clear the wedges */
		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		/* re-assert stalls the HW may have auto-cleared */
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* call into the gadget driver only after dropping the lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return IRQ_HANDLED;
}
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the BUFDONE interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG(iudma->ch_idx));
	bep = iudma->bep;
	/* rc = bytes transferred by the completed descriptor(s) */
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			/* short BD (bd_bytes > rc) also ends the request */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* completion callback runs without the lock held */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2100 /***********************************************************************
2101 * Debug filesystem
2102 ***********************************************************************/
2105 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2106 * @s: seq_file to which the information will be written.
2107 * @p: Unused.
2109 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2111 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2113 struct bcm63xx_udc *udc = s->private;
2115 if (!udc->driver)
2116 return -ENODEV;
2118 seq_printf(s, "ep0 state: %s\n",
2119 bcm63xx_ep0_state_names[udc->ep0state]);
2120 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2121 udc->ep0_req_reset ? "reset " : "",
2122 udc->ep0_req_set_cfg ? "set_cfg " : "",
2123 udc->ep0_req_set_iface ? "set_iface " : "",
2124 udc->ep0_req_shutdown ? "shutdown " : "",
2125 udc->ep0_request ? "pending " : "",
2126 udc->ep0_req_completed ? "completed " : "",
2127 udc->ep0_reply ? "reply " : "");
2128 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2129 udc->cfg, udc->iface, udc->alt_iface);
2130 seq_printf(s, "regs:\n");
2131 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2132 usbd_readl(udc, USBD_CONTROL_REG),
2133 usbd_readl(udc, USBD_STRAPS_REG),
2134 usbd_readl(udc, USBD_STATUS_REG));
2135 seq_printf(s, " events: %08x; stall: %08x\n",
2136 usbd_readl(udc, USBD_EVENTS_REG),
2137 usbd_readl(udc, USBD_STALL_REG));
2139 return 0;
2143 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2144 * @s: seq_file to which the information will be written.
2145 * @p: Unused.
2147 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2149 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2151 struct bcm63xx_udc *udc = s->private;
2152 int ch_idx, i;
2153 u32 sram2, sram3;
2155 if (!udc->driver)
2156 return -ENODEV;
2158 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2159 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2160 struct list_head *pos;
2162 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2163 switch (iudma_defaults[ch_idx].ep_type) {
2164 case BCMEP_CTRL:
2165 seq_printf(s, "control");
2166 break;
2167 case BCMEP_BULK:
2168 seq_printf(s, "bulk");
2169 break;
2170 case BCMEP_INTR:
2171 seq_printf(s, "interrupt");
2172 break;
2174 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2175 seq_printf(s, " [ep%d]:\n",
2176 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2177 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2178 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2179 usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2180 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2181 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2183 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2184 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2185 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2186 usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2187 sram2 >> 16, sram2 & 0xffff,
2188 sram3 >> 16, sram3 & 0xffff,
2189 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2190 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2191 iudma->n_bds);
2193 if (iudma->bep) {
2194 i = 0;
2195 list_for_each(pos, &iudma->bep->queue)
2196 i++;
2197 seq_printf(s, "; %d queued\n", i);
2198 } else {
2199 seq_printf(s, "\n");
2202 for (i = 0; i < iudma->n_bds; i++) {
2203 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2205 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2206 i * sizeof(*d), i,
2207 d->len_stat >> 16, d->len_stat & 0xffff,
2208 d->address);
2209 if (d == iudma->read_bd)
2210 seq_printf(s, " <<RD");
2211 if (d == iudma->write_bd)
2212 seq_printf(s, " <<WR");
2213 seq_printf(s, "\n");
2216 seq_printf(s, "\n");
2219 return 0;
2222 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2224 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2227 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2229 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2232 static const struct file_operations usbd_dbg_fops = {
2233 .owner = THIS_MODULE,
2234 .open = bcm63xx_usbd_dbg_open,
2235 .llseek = seq_lseek,
2236 .read = seq_read,
2237 .release = single_release,
2240 static const struct file_operations iudma_dbg_fops = {
2241 .owner = THIS_MODULE,
2242 .open = bcm63xx_iudma_dbg_open,
2243 .llseek = seq_lseek,
2244 .read = seq_read,
2245 .release = single_release,
2250 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2251 * @udc: Reference to the device controller.
2253 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2255 struct dentry *root, *usbd, *iudma;
2257 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2258 return;
2260 root = debugfs_create_dir(udc->gadget.name, NULL);
2261 if (IS_ERR(root) || !root)
2262 goto err_root;
2264 usbd = debugfs_create_file("usbd", 0400, root, udc,
2265 &usbd_dbg_fops);
2266 if (!usbd)
2267 goto err_usbd;
2268 iudma = debugfs_create_file("iudma", 0400, root, udc,
2269 &iudma_dbg_fops);
2270 if (!iudma)
2271 goto err_iudma;
2273 udc->debugfs_root = root;
2274 udc->debugfs_usbd = usbd;
2275 udc->debugfs_iudma = iudma;
2276 return;
2277 err_iudma:
2278 debugfs_remove(usbd);
2279 err_usbd:
2280 debugfs_remove(root);
2281 err_root:
2282 dev_err(udc->dev, "debugfs is not available\n");
2286 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2287 * @udc: Reference to the device controller.
2289 * debugfs_remove() is safe to call with a NULL argument.
2291 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2293 debugfs_remove(udc->debugfs_iudma);
2294 debugfs_remove(udc->debugfs_usbd);
2295 debugfs_remove(udc->debugfs_root);
2296 udc->debugfs_iudma = NULL;
2297 udc->debugfs_usbd = NULL;
2298 udc->debugfs_root = NULL;
2301 /***********************************************************************
2302 * Driver init/exit
2303 ***********************************************************************/
2306 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2307 * @pdev: Platform device struct from the bcm63xx BSP code.
2309 * Note that platform data is required, because pd.port_no varies from chip
2310 * to chip and is used to switch the correct USB port to device mode.
2312 static int bcm63xx_udc_probe(struct platform_device *pdev)
2314 struct device *dev = &pdev->dev;
2315 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2316 struct bcm63xx_udc *udc;
2317 struct resource *res;
2318 int rc = -ENOMEM, i, irq;
2320 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2321 if (!udc) {
2322 dev_err(dev, "cannot allocate memory\n");
2323 return -ENOMEM;
2326 platform_set_drvdata(pdev, udc);
2327 udc->dev = dev;
2328 udc->pd = pd;
2330 if (!pd) {
2331 dev_err(dev, "missing platform data\n");
2332 return -EINVAL;
2335 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2336 udc->usbd_regs = devm_ioremap_resource(dev, res);
2337 if (IS_ERR(udc->usbd_regs))
2338 return PTR_ERR(udc->usbd_regs);
2340 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2341 udc->iudma_regs = devm_ioremap_resource(dev, res);
2342 if (IS_ERR(udc->iudma_regs))
2343 return PTR_ERR(udc->iudma_regs);
2345 spin_lock_init(&udc->lock);
2346 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2348 udc->gadget.ops = &bcm63xx_udc_ops;
2349 udc->gadget.name = dev_name(dev);
2351 if (!pd->use_fullspeed && !use_fullspeed)
2352 udc->gadget.max_speed = USB_SPEED_HIGH;
2353 else
2354 udc->gadget.max_speed = USB_SPEED_FULL;
2356 /* request clocks, allocate buffers, and clear any pending IRQs */
2357 rc = bcm63xx_init_udc_hw(udc);
2358 if (rc)
2359 return rc;
2361 rc = -ENXIO;
2363 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2364 irq = platform_get_irq(pdev, 0);
2365 if (irq < 0) {
2366 dev_err(dev, "missing IRQ resource #0\n");
2367 goto out_uninit;
2369 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2370 dev_name(dev), udc) < 0) {
2371 dev_err(dev, "error requesting IRQ #%d\n", irq);
2372 goto out_uninit;
2375 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2376 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2377 irq = platform_get_irq(pdev, i + 1);
2378 if (irq < 0) {
2379 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2380 goto out_uninit;
2382 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2383 dev_name(dev), &udc->iudma[i]) < 0) {
2384 dev_err(dev, "error requesting IRQ #%d\n", irq);
2385 goto out_uninit;
2389 bcm63xx_udc_init_debugfs(udc);
2390 rc = usb_add_gadget_udc(dev, &udc->gadget);
2391 if (!rc)
2392 return 0;
2394 bcm63xx_udc_cleanup_debugfs(udc);
2395 out_uninit:
2396 bcm63xx_uninit_udc_hw(udc);
2397 return rc;
2401 * bcm63xx_udc_remove - Remove the device from the system.
2402 * @pdev: Platform device struct from the bcm63xx BSP code.
2404 static int bcm63xx_udc_remove(struct platform_device *pdev)
2406 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2408 bcm63xx_udc_cleanup_debugfs(udc);
2409 usb_del_gadget_udc(&udc->gadget);
2410 BUG_ON(udc->driver);
2412 bcm63xx_uninit_udc_hw(udc);
2414 return 0;
2417 static struct platform_driver bcm63xx_udc_driver = {
2418 .probe = bcm63xx_udc_probe,
2419 .remove = bcm63xx_udc_remove,
2420 .driver = {
2421 .name = DRV_MODULE_NAME,
2422 .owner = THIS_MODULE,
2425 module_platform_driver(bcm63xx_udc_driver);
/* Module metadata consumed by modpost/modinfo; the alias lets udev
 * autoload this driver when the BSP registers the platform device. */
MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);