/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
        const char *name;
        const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
        { \
                .name = _name, \
                .caps = _caps, \
        }

        EP_INFO(bcm63xx_ep0name,
                USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
        EP_INFO("ep1in-bulk",
                USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
        EP_INFO("ep2out-bulk",
                USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
        EP_INFO("ep3in-int",
                USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
        EP_INFO("ep4out-int",
                USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400
enum bcm63xx_ep0_state {
        EP0_REQUEUE,
        EP0_IDLE,
        EP0_IN_DATA_PHASE_SETUP,
        EP0_IN_DATA_PHASE_COMPLETE,
        EP0_OUT_DATA_PHASE_SETUP,
        EP0_OUT_DATA_PHASE_COMPLETE,
        EP0_OUT_STATUS_PHASE,
        EP0_IN_FAKE_STATUS_PHASE,
        EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
        "REQUEUE",
        "IDLE",
        "IN_DATA_PHASE_SETUP",
        "IN_DATA_PHASE_COMPLETE",
        "OUT_DATA_PHASE_SETUP",
        "OUT_DATA_PHASE_COMPLETE",
        "OUT_STATUS_PHASE",
        "IN_FAKE_STATUS_PHASE",
        "SHUTDOWN",
};
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
        int				ep_num;
        int				n_bds;
        int				ep_type;
        int				dir;
        int				n_fifo_slots;
        int				max_pkt_hs;
        int				max_pkt_fs;
};
static const struct iudma_ch_cfg iudma_defaults[] = {

        /* This controller was designed to support a CDC/RNDIS application.
           It may be possible to reconfigure some of the endpoints, but
           the hardware limitations (FIFO sizing and number of DMA channels)
           may significantly impact flexibility and/or stability.  Change
           these values at your own risk.

              ep_num       ep_type           n_fifo_slots    max_pkt_fs
        idx |         n_bds     |    dir           |  max_pkt_hs  |       */
        [0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
        [1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
        [2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
        [3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
        [4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
        [5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
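/*
 * For reference (derived from the table above and from iudma_init_channel(),
 * which sets is_tx = ch_idx & 1): even-numbered channels carry OUT/RX
 * traffic and odd-numbered channels carry IN/TX traffic; channels 0 and 1
 * together serve the bidirectional ep0.
 */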
/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * transactions.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
        unsigned int			ch_idx;
        int				ep_num;
        bool				enabled;
        int				max_pkt;
        bool				is_tx;
        struct bcm63xx_ep		*bep;
        struct bcm63xx_udc		*udc;

        struct bcm_enet_desc		*read_bd;
        struct bcm_enet_desc		*write_bd;
        struct bcm_enet_desc		*end_bd;
        int				n_bds_used;

        struct bcm_enet_desc		*bd_ring;
        dma_addr_t			bd_ring_dma;
        unsigned int			n_bds;
};
/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
        unsigned int			ep_num;
        struct iudma_ch			*iudma;
        struct usb_ep			ep;
        struct bcm63xx_udc		*udc;
        struct list_head		queue;
        unsigned			halted:1;
};
/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
        struct list_head		queue;		/* ep's requests */
        struct usb_request		req;
        unsigned int			offset;
        unsigned int			bd_bytes;
        struct iudma_ch			*iudma;
};
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
        spinlock_t			lock;

        struct device			*dev;
        struct bcm63xx_usbd_platform_data *pd;
        struct clk			*usbd_clk;
        struct clk			*usbh_clk;

        struct usb_gadget		gadget;
        struct usb_gadget_driver	*driver;

        void __iomem			*usbd_regs;
        void __iomem			*iudma_regs;

        struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
        struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

        int				cfg;
        int				iface;
        int				alt_iface;

        struct bcm63xx_req		ep0_ctrl_req;
        u8				*ep0_ctrl_buf;

        int				ep0state;
        struct work_struct		ep0_wq;

        unsigned long			wedgemap;

        unsigned			ep0_req_reset:1;
        unsigned			ep0_req_set_cfg:1;
        unsigned			ep0_req_set_iface:1;
        unsigned			ep0_req_shutdown:1;

        unsigned			ep0_req_completed:1;
        struct usb_request		*ep0_reply;
        struct usb_request		*ep0_request;

        struct dentry			*debugfs_root;
        struct dentry			*debugfs_usbd;
        struct dentry			*debugfs_iudma;
};
static const struct usb_ep_ops bcm63xx_udc_ep_ops;
/***********************************************************************
 * Convenience functions
 ***********************************************************************/
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
        return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
        return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
        return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
        return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
                        (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
                                   int chan)
{
        bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
                        (ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
        return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
                        (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
                                   int chan)
{
        bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
                        (ENETDMA_CHAN_WIDTH * chan));
}
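/*
 * For a given channel, the per-channel register address computed by the
 * DMAC/DMAS accessors above is:
 *
 *   iudma_regs + IUDMA_DMAC_OFFSET (or IUDMA_DMAS_OFFSET) + off +
 *       chan * ENETDMA_CHAN_WIDTH
 *
 * i.e. the per-channel register blocks are laid out back to back,
 * ENETDMA_CHAN_WIDTH bytes apart.
 */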
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
        if (is_enabled) {
                clk_enable(udc->usbh_clk);
                clk_enable(udc->usbd_clk);
        } else {
                clk_disable(udc->usbd_clk);
                clk_disable(udc->usbh_clk);
        }
}
/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/
/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
        u32 val = usbd_readl(udc, USBD_CONTROL_REG);

        val &= ~USBD_CONTROL_INIT_SEL_MASK;
        val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
        usbd_writel(udc, val, USBD_CONTROL_REG);
}
/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
        bool is_stalled)
{
        u32 val;

        val = USBD_STALL_UPDATE_MASK |
              (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
              (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
        usbd_writel(udc, val, USBD_STALL_REG);
}
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
        int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
        u32 i, val, rx_fifo_slot, tx_fifo_slot;

        /* set up FIFO boundaries and packet sizes; this is done in pairs */
        rx_fifo_slot = tx_fifo_slot = 0;
        for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
                const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
                const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

                bcm63xx_ep_dma_select(udc, i >> 1);

                val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
                      ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
                       USBD_RXFIFO_CONFIG_END_SHIFT);
                rx_fifo_slot += rx_cfg->n_fifo_slots;
                usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
                usbd_writel(udc,
                            is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
                            USBD_RXFIFO_EPSIZE_REG);

                val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
                      ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
                       USBD_TXFIFO_CONFIG_END_SHIFT);
                tx_fifo_slot += tx_cfg->n_fifo_slots;
                usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
                usbd_writel(udc,
                            is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
                            USBD_TXFIFO_EPSIZE_REG);

                usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
        }
}
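/*
 * Worked example, using the n_fifo_slots values from iudma_defaults[] above:
 * pair 0 (IUDMA 0/1, ep0) gets RX/TX slots 0..31, pair 1 (IUDMA 2/3, bulk)
 * gets slots 32..159, and pair 2 (IUDMA 4/5, interrupt) gets slots 160..191.
 * The EPSIZE registers are then programmed with 64/512/64 in high speed mode
 * or 64/64/64 in full speed mode.
 */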
/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
        u32 val;

        bcm63xx_ep_dma_select(udc, ep_num);

        val = usbd_readl(udc, USBD_CONTROL_REG);
        val |= USBD_CONTROL_FIFO_RESET_MASK;
        usbd_writel(udc, val, USBD_CONTROL_REG);
        usbd_readl(udc, USBD_CONTROL_REG);
}
/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
        int i;

        for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
                bcm63xx_fifo_reset_ep(udc, i);
}
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
        u32 i, val;

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

                if (cfg->ep_num < 0)
                        continue;

                bcm63xx_ep_dma_select(udc, cfg->ep_num);
                val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
                      ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
                usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
        }
}
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
        u32 val, i;

        usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
                int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
                              cfg->max_pkt_hs : cfg->max_pkt_fs;
                int idx = cfg->ep_num;

                udc->iudma[i].max_pkt = max_pkt;

                if (idx < 0)
                        continue;
                usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

                val = (idx << USBD_CSR_EP_LOG_SHIFT) |
                      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
                      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
                      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
                      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
                      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
                      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
                usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
        }
}
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
        struct bcm63xx_req *breq)
{
        int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
        unsigned int bytes_left = breq->req.length - breq->offset;
        const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
                                 iudma->max_pkt : IUDMA_MAX_FRAGMENT;

        iudma->n_bds_used = 0;
        breq->bd_bytes = 0;
        breq->iudma = iudma;

        if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
                extra_zero_pkt = 1;

        do {
                struct bcm_enet_desc *d = iudma->write_bd;
                u32 dmaflags = 0;
                unsigned int n_bytes;

                if (d == iudma->end_bd) {
                        dmaflags |= DMADESC_WRAP_MASK;
                        iudma->write_bd = iudma->bd_ring;
                } else {
                        iudma->write_bd++;
                }
                iudma->n_bds_used++;

                n_bytes = min_t(int, bytes_left, max_bd_bytes);
                if (n_bytes)
                        dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
                else
                        dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
                                    DMADESC_USB_ZERO_MASK;

                dmaflags |= DMADESC_OWNER_MASK;
                if (first_bd) {
                        dmaflags |= DMADESC_SOP_MASK;
                        first_bd = 0;
                }

                /*
                 * extra_zero_pkt forces one more iteration through the loop
                 * after all data is queued up, to send the zero packet
                 */
                if (extra_zero_pkt && !bytes_left)
                        extra_zero_pkt = 0;

                if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
                    (n_bytes == bytes_left && !extra_zero_pkt)) {
                        last_bd = 1;
                        dmaflags |= DMADESC_EOP_MASK;
                }

                d->address = breq->req.dma + breq->offset;
                mb();
                d->len_stat = dmaflags;

                breq->offset += n_bytes;
                breq->bd_bytes += n_bytes;
                bytes_left -= n_bytes;
        } while (!last_bd);

        usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
                        ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
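/*
 * Example of the chunking performed above: a 3000-byte TX request is split
 * into two BDs of 2048 + 952 bytes (IUDMA_MAX_FRAGMENT), with SOP on the
 * first and EOP on the last.  For RX with irq_coalesce=0, max_bd_bytes is
 * the endpoint's max packet size and only one BD is queued per call (EOP is
 * set immediately because !iudma->is_tx); the data ISR re-invokes
 * iudma_write() to queue the next packet of the same request.
 */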
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
        int i, actual_len = 0;
        struct bcm_enet_desc *d = iudma->read_bd;

        if (!iudma->n_bds_used)
                return -EINVAL;

        for (i = 0; i < iudma->n_bds_used; i++) {
                u32 dmaflags;

                dmaflags = d->len_stat;

                if (dmaflags & DMADESC_OWNER_MASK)
                        return -EBUSY;

                actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
                              DMADESC_LENGTH_SHIFT;
                if (d == iudma->end_bd)
                        d = iudma->bd_ring;
                else
                        d++;
        }

        iudma->read_bd = d;
        iudma->n_bds_used = 0;
        return actual_len;
}
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
        int timeout = IUDMA_RESET_TIMEOUT_US;
        struct bcm_enet_desc *d;
        int ch_idx = iudma->ch_idx;

        if (!iudma->is_tx)
                bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

        /* stop DMA, then wait for the hardware to wrap up */
        usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

        while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
               ENETDMAC_CHANCFG_EN_MASK) {
                udelay(1);

                /* repeatedly flush the FIFO data until the BD completes */
                if (iudma->is_tx && iudma->ep_num >= 0)
                        bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

                if (!timeout--) {
                        dev_err(udc->dev, "can't reset IUDMA channel %d\n",
                                ch_idx);
                        break;
                }
                if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
                        dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
                                 ch_idx);
                        usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
                                        ENETDMAC_CHANCFG_REG, ch_idx);
                }
        }
        usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

        /* don't leave "live" HW-owned entries for the next guy to step on */
        for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
                d->len_stat = 0;

        iudma->read_bd = iudma->write_bd = iudma->bd_ring;
        iudma->n_bds_used = 0;

        /* set up IRQs, UBUS burst size, and BD base for this channel */
        usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
                        ENETDMAC_IRMASK_REG, ch_idx);
        usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

        usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
        usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
        struct iudma_ch *iudma = &udc->iudma[ch_idx];
        const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
        unsigned int n_bds = cfg->n_bds;
        struct bcm63xx_ep *bep = NULL;

        iudma->ep_num = cfg->ep_num;
        iudma->ch_idx = ch_idx;
        iudma->is_tx = !!(ch_idx & 0x01);
        if (iudma->ep_num >= 0) {
                bep = &udc->bep[iudma->ep_num];
                bep->iudma = iudma;
                INIT_LIST_HEAD(&bep->queue);
        }

        iudma->bep = bep;
        iudma->udc = udc;

        /* ep0 is always active; others are controlled by the gadget driver */
        if (iudma->ep_num <= 0)
                iudma->enabled = true;

        iudma->n_bds = n_bds;
        iudma->bd_ring = dmam_alloc_coherent(udc->dev,
                n_bds * sizeof(struct bcm_enet_desc),
                &iudma->bd_ring_dma, GFP_KERNEL);
        if (!iudma->bd_ring)
                return -ENOMEM;
        iudma->end_bd = &iudma->bd_ring[n_bds - 1];

        return 0;
}
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
        int i, rc;

        usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                rc = iudma_init_channel(udc, i);
                if (rc)
                        return rc;
                iudma_reset_channel(udc, &udc->iudma[i]);
        }

        usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
        return 0;
}
/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
        int i;

        usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
                iudma_reset_channel(udc, &udc->iudma[i]);

        usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
        u32 val;

        usbd_writel(udc, 0, USBD_STATUS_REG);

        val = BIT(USBD_EVENT_IRQ_USB_RESET) |
              BIT(USBD_EVENT_IRQ_SETUP) |
              BIT(USBD_EVENT_IRQ_SETCFG) |
              BIT(USBD_EVENT_IRQ_SETINTF) |
              BIT(USBD_EVENT_IRQ_USB_LINK);
        usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
        usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
        u32 val, portmask = BIT(udc->pd->port_no);

        if (BCMCPU_IS_6328()) {
                /* configure pinmux to sense VBUS signal */
                val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
                val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
                val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
                       GPIO_PINMUX_OTHR_6328_USB_HOST;
                bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
        }

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
        if (is_device) {
                val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
                val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        } else {
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        }
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
        if (is_device)
                val |= USBH_PRIV_SWAP_USBD_MASK;
        else
                val &= ~USBH_PRIV_SWAP_USBD_MASK;
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
        u32 val, portmask = BIT(udc->pd->port_no);

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
        if (is_on)
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        else
                val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
        set_clocks(udc, true);
        iudma_uninit(udc);
        set_clocks(udc, false);

        clk_put(udc->usbd_clk);
        clk_put(udc->usbh_clk);
}
/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
        int i, rc = 0;
        u32 val;

        udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
                                         GFP_KERNEL);
        if (!udc->ep0_ctrl_buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&udc->gadget.ep_list);
        for (i = 0; i < BCM63XX_NUM_EP; i++) {
                struct bcm63xx_ep *bep = &udc->bep[i];

                bep->ep.name = bcm63xx_ep_info[i].name;
                bep->ep.caps = bcm63xx_ep_info[i].caps;
                bep->ep_num = i;
                bep->ep.ops = &bcm63xx_udc_ep_ops;
                list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
                bep->halted = 0;
                usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
                bep->udc = udc;
                bep->ep.desc = NULL;
                INIT_LIST_HEAD(&bep->queue);
        }

        udc->gadget.ep0 = &udc->bep[0].ep;
        list_del(&udc->bep[0].ep.ep_list);

        udc->gadget.speed = USB_SPEED_UNKNOWN;
        udc->ep0state = EP0_SHUTDOWN;

        udc->usbh_clk = clk_get(udc->dev, "usbh");
        if (IS_ERR(udc->usbh_clk))
                return -EIO;

        udc->usbd_clk = clk_get(udc->dev, "usbd");
        if (IS_ERR(udc->usbd_clk)) {
                clk_put(udc->usbh_clk);
                return -EIO;
        }

        set_clocks(udc, true);

        val = USBD_CONTROL_AUTO_CSRS_MASK |
              USBD_CONTROL_DONE_CSRS_MASK |
              (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
        usbd_writel(udc, val, USBD_CONTROL_REG);

        val = USBD_STRAPS_APP_SELF_PWR_MASK |
              USBD_STRAPS_APP_RAM_IF_MASK |
              USBD_STRAPS_APP_CSRPRGSUP_MASK |
              USBD_STRAPS_APP_8BITPHY_MASK |
              USBD_STRAPS_APP_RMTWKUP_MASK;

        if (udc->gadget.max_speed == USB_SPEED_HIGH)
                val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
        else
                val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
        usbd_writel(udc, val, USBD_STRAPS_REG);

        bcm63xx_set_ctrl_irqs(udc, false);

        usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

        val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
              USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
        usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

        rc = iudma_init(udc);
        set_clocks(udc, false);
        if (rc)
                bcm63xx_uninit_udc_hw(udc);

        return rc;
}
/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/
/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
        const struct usb_endpoint_descriptor *desc)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct iudma_ch *iudma = bep->iudma;
        unsigned long flags;

        if (!ep || !desc || ep->name == bcm63xx_ep0name)
                return -EINVAL;

        spin_lock_irqsave(&udc->lock, flags);
        if (iudma->enabled) {
                spin_unlock_irqrestore(&udc->lock, flags);
                return -EINVAL;
        }

        iudma->enabled = true;
        BUG_ON(!list_empty(&bep->queue));

        iudma_reset_channel(udc, iudma);

        bep->halted = 0;
        bcm63xx_set_stall(udc, bep, false);
        clear_bit(bep->ep_num, &udc->wedgemap);

        ep->desc = desc;
        ep->maxpacket = usb_endpoint_maxp(desc);

        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
}
/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct iudma_ch *iudma = bep->iudma;
        struct bcm63xx_req *breq, *n;
        unsigned long flags;

        if (!ep || !ep->desc)
                return -EINVAL;

        spin_lock_irqsave(&udc->lock, flags);
        if (!iudma->enabled) {
                spin_unlock_irqrestore(&udc->lock, flags);
                return -EINVAL;
        }
        iudma->enabled = false;

        iudma_reset_channel(udc, iudma);

        if (!list_empty(&bep->queue)) {
                list_for_each_entry_safe(breq, n, &bep->queue, queue) {
                        usb_gadget_unmap_request(&udc->gadget, &breq->req,
                                                 iudma->is_tx);
                        list_del(&breq->queue);
                        breq->req.status = -ESHUTDOWN;

                        spin_unlock_irqrestore(&udc->lock, flags);
                        usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
                        spin_lock_irqsave(&udc->lock, flags);
                }
        }

        ep->desc = NULL;
        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
}
/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
        gfp_t mem_flags)
{
        struct bcm63xx_req *breq;

        breq = kzalloc(sizeof(*breq), mem_flags);
        if (!breq)
                return NULL;
        return &breq->req;
}
/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
        struct usb_request *req)
{
        struct bcm63xx_req *breq = our_req(req);

        kfree(breq);
}
/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
        gfp_t mem_flags)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct bcm63xx_req *breq = our_req(req);
        unsigned long flags;
        int rc = 0;

        if (unlikely(!req || !req->complete || !req->buf || !ep))
                return -EINVAL;

        req->actual = 0;
        req->status = 0;
        breq->offset = 0;

        if (bep == &udc->bep[0]) {
                /* only one reply per request, please */
                if (udc->ep0_reply)
                        return -EBUSY;

                udc->ep0_reply = req;
                schedule_work(&udc->ep0_wq);
                return 0;
        }

        spin_lock_irqsave(&udc->lock, flags);
        if (!bep->iudma->enabled) {
                rc = -ESHUTDOWN;
                goto out;
        }

        rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
        if (rc == 0) {
                list_add_tail(&breq->queue, &bep->queue);
                if (list_is_singular(&bep->queue))
                        iudma_write(udc, bep->iudma, breq);
        }

out:
        spin_unlock_irqrestore(&udc->lock, flags);
        return rc;
}
/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct bcm63xx_req *breq = our_req(req), *cur;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&udc->lock, flags);
        if (list_empty(&bep->queue)) {
                rc = -EINVAL;
                goto out;
        }

        cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
        usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

        if (breq == cur) {
                iudma_reset_channel(udc, bep->iudma);
                list_del(&breq->queue);

                if (!list_empty(&bep->queue)) {
                        struct bcm63xx_req *next;

                        next = list_first_entry(&bep->queue,
                                struct bcm63xx_req, queue);
                        iudma_write(udc, bep->iudma, next);
                }
        } else {
                list_del(&breq->queue);
        }

out:
        spin_unlock_irqrestore(&udc->lock, flags);

        req->status = -ESHUTDOWN;
        req->complete(ep, req);

        return rc;
}
/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        bcm63xx_set_stall(udc, bep, !!value);
        bep->halted = value;
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}
/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        set_bit(bep->ep_num, &udc->wedgemap);
        bcm63xx_set_stall(udc, bep, true);
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
        .enable		= bcm63xx_ep_enable,
        .disable	= bcm63xx_ep_disable,

        .alloc_request	= bcm63xx_udc_alloc_request,
        .free_request	= bcm63xx_udc_free_request,

        .queue		= bcm63xx_udc_queue,
        .dequeue	= bcm63xx_udc_dequeue,

        .set_halt	= bcm63xx_udc_set_halt,
        .set_wedge	= bcm63xx_udc_set_wedge,
};
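/*
 * These ops are not called directly by gadget (function) drivers; they are
 * reached through the gadget core helpers.  A rough sketch from the function
 * driver's side (buffer/callback names are illustrative only):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	// ends up in bcm63xx_udc_queue()
 */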
/***********************************************************************
 * ep0 handling
 ***********************************************************************/
/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
        struct usb_ctrlrequest *ctrl)
{
        int rc;

        spin_unlock_irq(&udc->lock);
        rc = udc->driver->setup(&udc->gadget, ctrl);
        spin_lock_irq(&udc->lock);
        return rc;
}
/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
        struct usb_ctrlrequest ctrl;
        int rc;

        ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
        ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
        ctrl.wValue = cpu_to_le16(udc->cfg);
        ctrl.wIndex = 0;
        ctrl.wLength = 0;

        rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
        if (rc < 0) {
                dev_warn_ratelimited(udc->dev,
                        "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
                        udc->cfg);
        }
        return rc;
}
1363 * @udc: Reference to the device controller.
1365 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc
*udc
)
1367 struct usb_ctrlrequest ctrl
;
1370 ctrl
.bRequestType
= USB_DIR_OUT
| USB_RECIP_INTERFACE
;
1371 ctrl
.bRequest
= USB_REQ_SET_INTERFACE
;
1372 ctrl
.wValue
= cpu_to_le16(udc
->alt_iface
);
1373 ctrl
.wIndex
= cpu_to_le16(udc
->iface
);
1376 rc
= bcm63xx_ep0_setup_callback(udc
, &ctrl
);
1378 dev_warn_ratelimited(udc
->dev
,
1379 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1380 udc
->iface
, udc
->alt_iface
);
/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
        struct usb_request *req)
{
        struct bcm63xx_req *breq = our_req(req);
        struct iudma_ch *iudma = &udc->iudma[ch_idx];

        BUG_ON(udc->ep0_request);
        udc->ep0_request = req;

        req->actual = 0;
        breq->offset = 0;
        usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
        iudma_write(udc, iudma, breq);
}
/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
        struct usb_request *req, int status)
{
        req->status = status;
        if (status)
                req->actual = 0;
        if (req->complete) {
                spin_unlock_irq(&udc->lock);
                req->complete(&udc->bep[0].ep, req);
                spin_lock_irq(&udc->lock);
        }
}
/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
        struct usb_request *req = udc->ep0_reply;

        udc->ep0_reply = NULL;
        usb_gadget_unmap_request(&udc->gadget, req, is_tx);
        if (udc->ep0_request == req) {
                udc->ep0_req_completed = 0;
                udc->ep0_request = NULL;
        }
        bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer length.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
        struct usb_request *req = udc->ep0_request;

        udc->ep0_req_completed = 0;
        udc->ep0_request = NULL;

        return req->actual;
}
/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
        int length)
{
        struct usb_request *req = &udc->ep0_ctrl_req.req;

        req->buf = udc->ep0_ctrl_buf;
        req->length = length;
        req->complete = NULL;

        bcm63xx_ep0_map_write(udc, ch_idx, req);
}
/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
        int rc;
        struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

        rc = bcm63xx_ep0_read_complete(udc);

        if (rc < 0) {
                dev_err(udc->dev, "missing SETUP packet\n");
                return EP0_IDLE;
        }

        /*
         * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
         * ALWAYS deliver these 100% of the time, so if we happen to see one,
         * just throw it away.
         */
        if (!rc)
                return EP0_REQUEUE;

        /* Drop malformed SETUP packets */
        if (rc != sizeof(*ctrl)) {
                dev_warn_ratelimited(udc->dev,
                        "malformed SETUP packet (%d bytes)\n", rc);
                return EP0_REQUEUE;
        }

        /* Process new SETUP packet arriving on ep0 */
        rc = bcm63xx_ep0_setup_callback(udc, ctrl);
        if (rc < 0) {
                bcm63xx_set_stall(udc, &udc->bep[0], true);
                return EP0_REQUEUE;
        }

        if (!ctrl->wLength)
                return EP0_REQUEUE;
        else if (ctrl->bRequestType & USB_DIR_IN)
                return EP0_IN_DATA_PHASE_SETUP;
        else
                return EP0_OUT_DATA_PHASE_SETUP;
}
/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
        if (udc->ep0_req_reset) {
                udc->ep0_req_reset = 0;
        } else if (udc->ep0_req_set_cfg) {
                udc->ep0_req_set_cfg = 0;
                if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
                        udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
        } else if (udc->ep0_req_set_iface) {
                udc->ep0_req_set_iface = 0;
                if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
                        udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
        } else if (udc->ep0_req_completed) {
                udc->ep0state = bcm63xx_ep0_do_setup(udc);
                return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
        } else if (udc->ep0_req_shutdown) {
                udc->ep0_req_shutdown = 0;
                udc->ep0_req_completed = 0;
                udc->ep0_request = NULL;
                iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
                usb_gadget_unmap_request(&udc->gadget,
                        &udc->ep0_ctrl_req.req, 0);

                /* bcm63xx_udc_pullup() is waiting for this */
                mb();
                udc->ep0state = EP0_SHUTDOWN;
        } else if (udc->ep0_reply) {
                /*
                 * This could happen if a USB RESET shows up during an ep0
                 * transaction (especially if a laggy driver like gadgetfs
                 * is in use).
                 */
                dev_warn(udc->dev, "nuking unexpected reply\n");
                bcm63xx_ep0_nuke_reply(udc, 0);
        } else {
                return -EAGAIN;
        }

        return 0;
}
/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
        enum bcm63xx_ep0_state ep0state = udc->ep0state;
        bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

        switch (udc->ep0state) {
        case EP0_REQUEUE:
                /* set up descriptor to receive SETUP packet */
                bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
                                             BCM63XX_MAX_CTRL_PKT);
                ep0state = EP0_IDLE;
                break;
        case EP0_IDLE:
                return bcm63xx_ep0_do_idle(udc);
        case EP0_IN_DATA_PHASE_SETUP:
                /*
                 * Normal case: TX request is in ep0_reply (queued by the
                 * callback), or will be queued shortly.  When it's here,
                 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
                 *
                 * Shutdown case: Stop waiting for the reply.  Just
                 * REQUEUE->IDLE.  The gadget driver is NOT expected to
                 * queue anything else now.
                 */
                if (udc->ep0_reply) {
                        bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
                                              udc->ep0_reply);
                        ep0state = EP0_IN_DATA_PHASE_COMPLETE;
                } else if (shutdown) {
                        ep0state = EP0_REQUEUE;
                }
                break;
        case EP0_IN_DATA_PHASE_COMPLETE: {
                /*
                 * Normal case: TX packet (ep0_reply) is in flight; wait for
                 * it to finish, then go back to REQUEUE->IDLE.
                 *
                 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
                 * completion to the gadget driver, then REQUEUE->IDLE.
                 */
                if (udc->ep0_req_completed) {
                        udc->ep0_reply = NULL;
                        bcm63xx_ep0_read_complete(udc);
                        /* the "ack" sometimes gets eaten (see
                         * bcm63xx_ep0_do_idle)
                         */
                        ep0state = EP0_REQUEUE;
                } else if (shutdown) {
                        iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
                        bcm63xx_ep0_nuke_reply(udc, 1);
                        ep0state = EP0_REQUEUE;
                }
                break;
        }
        case EP0_OUT_DATA_PHASE_SETUP:
                /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
                if (udc->ep0_reply) {
                        bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
                                              udc->ep0_reply);
                        ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
                } else if (shutdown) {
                        ep0state = EP0_REQUEUE;
                }
                break;
        case EP0_OUT_DATA_PHASE_COMPLETE: {
                /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
                if (udc->ep0_req_completed) {
                        udc->ep0_reply = NULL;
                        bcm63xx_ep0_read_complete(udc);

                        /* send 0-byte ack to host */
                        bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
                        ep0state = EP0_OUT_STATUS_PHASE;
                } else if (shutdown) {
                        iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
                        bcm63xx_ep0_nuke_reply(udc, 0);
                        ep0state = EP0_REQUEUE;
                }
                break;
        }
        case EP0_OUT_STATUS_PHASE:
                /*
                 * Normal case: 0-byte OUT ack packet is in flight; wait
                 * for it to finish, then go back to REQUEUE->IDLE.
                 *
                 * Shutdown case: just cancel the transmission.  Don't bother
                 * calling the completion, because it originated from this
                 * function anyway.  Then go back to REQUEUE->IDLE.
                 */
                if (udc->ep0_req_completed) {
                        bcm63xx_ep0_read_complete(udc);
                        ep0state = EP0_REQUEUE;
                } else if (shutdown) {
                        iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
                        udc->ep0_request = NULL;
                        ep0state = EP0_REQUEUE;
                }
                break;
        case EP0_IN_FAKE_STATUS_PHASE: {
                /*
                 * Normal case: we spoofed a SETUP packet and are now
                 * waiting for the gadget driver to send a 0-byte reply.
                 * This doesn't actually get sent to the HW because the
                 * HW has already sent its own reply.  Once we get the
                 * response, return to IDLE.
                 *
                 * Shutdown case: return to IDLE immediately.
                 *
                 * Note that the ep0 RX descriptor has remained queued
                 * (and possibly unfilled) during this entire transaction.
                 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
                 * or SET_INTERFACE transactions.
                 */
                struct usb_request *r = udc->ep0_reply;

                if (!r) {
                        if (shutdown)
                                ep0state = EP0_IDLE;
                        break;
                }

                bcm63xx_ep0_complete(udc, r, 0);
                udc->ep0_reply = NULL;
                ep0state = EP0_IDLE;
                break;
        }
        case EP0_SHUTDOWN:
                break;
        }

        if (udc->ep0state == ep0state)
                return -EAGAIN;

        udc->ep0state = ep0state;
        return 0;
}
/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
        struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
        spin_lock_irq(&udc->lock);
        while (bcm63xx_ep0_one_round(udc) == 0)
                ;
        spin_unlock_irq(&udc->lock);
}
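/*
 * Locking note: the ep0 worker runs the state machine with udc->lock held,
 * but the lock is dropped around the gadget driver's ->setup and ->complete
 * callbacks (see bcm63xx_ep0_setup_callback() and bcm63xx_ep0_complete()),
 * so the gadget driver is free to call back into bcm63xx_udc_queue() from
 * those callbacks.
 */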
/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/
/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
        struct bcm63xx_udc *udc = gadget_to_udc(gadget);

        return (usbd_readl(udc, USBD_STATUS_REG) &
                USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}
/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
        struct bcm63xx_udc *udc = gadget_to_udc(gadget);
        unsigned long flags;
        int i, rc = -EINVAL;

        spin_lock_irqsave(&udc->lock, flags);
        if (is_on && udc->ep0state == EP0_SHUTDOWN) {
                udc->gadget.speed = USB_SPEED_UNKNOWN;
                udc->ep0state = EP0_REQUEUE;
                bcm63xx_fifo_setup(udc);
                bcm63xx_fifo_reset(udc);
                bcm63xx_ep_setup(udc);

                bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
                for (i = 0; i < BCM63XX_NUM_EP; i++)
                        bcm63xx_set_stall(udc, &udc->bep[i], false);

                bcm63xx_set_ctrl_irqs(udc, true);
                bcm63xx_select_pullup(gadget_to_udc(gadget), true);
                rc = 0;
        } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
                bcm63xx_select_pullup(gadget_to_udc(gadget), false);

                udc->ep0_req_shutdown = 1;
                spin_unlock_irqrestore(&udc->lock, flags);

                while (1) {
                        schedule_work(&udc->ep0_wq);
                        if (udc->ep0state == EP0_SHUTDOWN)
                                break;
                        msleep(50);
                }
                bcm63xx_set_ctrl_irqs(udc, false);
                cancel_work_sync(&udc->ep0_wq);
                return 0;
        }

        spin_unlock_irqrestore(&udc->lock, flags);
        return rc;
}
/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
{
        struct bcm63xx_udc *udc = gadget_to_udc(gadget);
        unsigned long flags;

        if (!driver || driver->max_speed < USB_SPEED_HIGH ||
            !driver->setup)
                return -EINVAL;
        if (!udc)
                return -ENODEV;
        if (udc->driver)
                return -EBUSY;

        spin_lock_irqsave(&udc->lock, flags);

        set_clocks(udc, true);
        bcm63xx_fifo_setup(udc);
        bcm63xx_ep_init(udc);
        bcm63xx_ep_setup(udc);
        bcm63xx_fifo_reset(udc);
        bcm63xx_select_phy_mode(udc, true);

        udc->driver = driver;
        driver->driver.bus = NULL;
        udc->gadget.dev.of_node = udc->dev->of_node;

        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}
/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
        struct bcm63xx_udc *udc = gadget_to_udc(gadget);
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);

        udc->driver = NULL;

        /*
         * If we switch the PHY too abruptly after dropping D+, the host
         * will often complain:
         *
         *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
         */
        bcm63xx_select_phy_mode(udc, false);
        set_clocks(udc, false);

        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}
= {
1882 .get_frame
= bcm63xx_udc_get_frame
,
1883 .pullup
= bcm63xx_udc_pullup
,
1884 .udc_start
= bcm63xx_udc_start
,
1885 .udc_stop
= bcm63xx_udc_stop
,
/***********************************************************************
 * ISRs
 ***********************************************************************/
/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
        u32 reg = usbd_readl(udc, USBD_STATUS_REG);

        udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
        udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
        udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
                         USBD_STATUS_ALTINTF_SHIFT;
        bcm63xx_ep_setup(udc);
}
/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
        u32 reg = usbd_readl(udc, USBD_STATUS_REG);
        enum usb_device_speed oldspeed = udc->gadget.speed;

        switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
        case BCM63XX_SPD_HIGH:
                udc->gadget.speed = USB_SPEED_HIGH;
                break;
        case BCM63XX_SPD_FULL:
                udc->gadget.speed = USB_SPEED_FULL;
                break;
        default:
                /* this should never happen */
                udc->gadget.speed = USB_SPEED_UNKNOWN;
                dev_err(udc->dev,
                        "received SETUP packet with invalid link speed\n");
                return 0;
        }

        if (udc->gadget.speed != oldspeed) {
                dev_info(udc->dev, "link up, %s-speed mode\n",
                         udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
                return 1;
        } else {
                return 0;
        }
}
/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we do want to restore all wedged endpoints.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
        int i;

        for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
                bcm63xx_set_stall(udc, &udc->bep[i], new_status);
                if (!new_status)
                        clear_bit(i, &udc->wedgemap);
        }
}
/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
        struct bcm63xx_udc *udc = dev_id;
        u32 stat;
        bool disconnected = false, bus_reset = false;

        stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
               usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

        usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

        spin_lock(&udc->lock);
        if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
                /* VBUS toggled */

                if (!(usbd_readl(udc, USBD_EVENTS_REG) &
                      USBD_EVENTS_USB_LINK_MASK) &&
                    udc->gadget.speed != USB_SPEED_UNKNOWN)
                        dev_info(udc->dev, "link down\n");

                udc->gadget.speed = USB_SPEED_UNKNOWN;
                disconnected = true;
        }
        if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
                bcm63xx_fifo_setup(udc);
                bcm63xx_fifo_reset(udc);
                bcm63xx_ep_setup(udc);

                bcm63xx_update_wedge(udc, false);

                udc->ep0_req_reset = 1;
                schedule_work(&udc->ep0_wq);
                bus_reset = true;
        }
        if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
                if (bcm63xx_update_link_speed(udc)) {
                        bcm63xx_fifo_setup(udc);
                        bcm63xx_ep_setup(udc);
                }
                bcm63xx_update_wedge(udc, true);
        }
        if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
                bcm63xx_update_cfg_iface(udc);
                udc->ep0_req_set_cfg = 1;
                schedule_work(&udc->ep0_wq);
        }
        if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
                bcm63xx_update_cfg_iface(udc);
                udc->ep0_req_set_iface = 1;
                schedule_work(&udc->ep0_wq);
        }
        spin_unlock(&udc->lock);

        if (disconnected && udc->driver)
                udc->driver->disconnect(&udc->gadget);
        else if (bus_reset && udc->driver)
                usb_gadget_udc_reset(&udc->gadget, udc->driver);

        return IRQ_HANDLED;
}
2040 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2041 * @irq: IRQ number (unused).
2042 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2044 * For the two ep0 channels, we have special handling that triggers the
2045 * ep0 worker thread. For normal bulk/intr channels, either queue up
2046 * the next buffer descriptor for the transaction (incomplete transaction),
2047 * or invoke the completion callback (complete transactions).
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

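	/* clear this channel's "buffer done" interrupt before reaping BDs */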
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
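		/*
		 * rc is the byte count reaped from the completed BDs; reaping
		 * fewer bytes than were queued (breq->bd_bytes) indicates a
		 * short packet, which ends the transfer early.
		 */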
		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

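	/*
	 * If the transfer finished, unmap its DMA buffer and invoke the
	 * completion handler with udc->lock already released.
	 */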
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
/***********************************************************************
 * Debug filesystem
 ***********************************************************************/
/**
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_printf(s, "regs:\n");
	seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, " events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}
/**
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_printf(s, "control");
			break;
		case BCMEP_BULK:
			seq_printf(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_printf(s, "interrupt");
			break;
		}
		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_printf(s, "\n");
		}

		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   i * sizeof(*d), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_printf(s, " <<RD");
			if (d == iudma->write_bd)
				seq_printf(s, " <<WR");
			seq_printf(s, "\n");
		}

		seq_printf(s, "\n");
	}

	return 0;
}
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}

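/*
 * single_open() stores inode->i_private (the udc pointer handed to
 * debugfs_create_file() below) as the seq_file's ->private, which is how
 * the *_dbg_show() callbacks above locate their controller.
 */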
static const struct file_operations usbd_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_usbd_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};
static const struct file_operations iudma_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_iudma_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};
/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

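	/*
	 * Everything below is best-effort: on failure we just log an error
	 * and run without debugfs; the probe path does not depend on it.
	 */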
	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
			&usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
			&iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}
/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
}
/***********************************************************************
 * Driver init/exit
 ***********************************************************************/
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

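	/*
	 * From here on, error paths jump to out_uninit to undo
	 * bcm63xx_init_udc_hw(); the devm_-managed allocations, mappings and
	 * IRQs are released automatically if probe fails.
	 */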
	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);