// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>
/* XUSB_DEV registers */
#define SPARAM 0x000
#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define DB_STREAMID_MASK GENMASK(31, 16)
#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define EREPLO_ECS BIT(0)
#define EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define CTRL_RUN BIT(0)
#define CTRL_LSE BIT(1)
#define CTRL_IE BIT(4)
#define CTRL_SMI_EVT BIT(5)
#define CTRL_SMI_DSE BIT(6)
#define CTRL_EWE BIT(7)
#define CTRL_DEVADDR_MASK GENMASK(30, 24)
#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define CTRL_ENABLE BIT(31)
#define ST 0x034
#define ST_RC BIT(0)
#define ST_IP BIT(4)
#define RT_IMOD 0x038
#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define PORTSC_CCS BIT(0)
#define PORTSC_PED BIT(1)
#define PORTSC_PR BIT(4)
#define PORTSC_PLS_SHIFT 5
#define PORTSC_PLS_MASK GENMASK(8, 5)
#define PORTSC_PLS_U0 0x0
#define PORTSC_PLS_U2 0x2
#define PORTSC_PLS_U3 0x3
#define PORTSC_PLS_DISABLED 0x4
#define PORTSC_PLS_RXDETECT 0x5
#define PORTSC_PLS_INACTIVE 0x6
#define PORTSC_PLS_RESUME 0xf
#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define PORTSC_PS_SHIFT 10
#define PORTSC_PS_MASK GENMASK(13, 10)
#define PORTSC_PS_UNDEFINED 0x0
#define PORTSC_PS_FS 0x1
#define PORTSC_PS_LS 0x2
#define PORTSC_PS_HS 0x3
#define PORTSC_PS_SS 0x4
#define PORTSC_LWS BIT(16)
#define PORTSC_CSC BIT(17)
#define PORTSC_WRC BIT(19)
#define PORTSC_PRC BIT(21)
#define PORTSC_PLC BIT(22)
#define PORTSC_CEC BIT(23)
#define PORTSC_WPR BIT(30)
#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
			    PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define MFINDEX_FRAME_SHIFT 3
#define MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define PORTPM_L1S_MASK GENMASK(1, 0)
#define PORTPM_L1S_DROP 0x0
#define PORTPM_L1S_ACCEPT 0x1
#define PORTPM_L1S_NYET 0x2
#define PORTPM_L1S_STALL 0x3
#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define PORTPM_RWE BIT(3)
#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define PORTPM_FLA BIT(24)
#define PORTPM_VBA BIT(25)
#define PORTPM_WOC BIT(26)
#define PORTPM_WOD BIT(27)
#define PORTPM_U1E BIT(28)
#define PORTPM_U2E BIT(29)
#define PORTPM_FRWE BIT(30)
#define PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define DEVNOTIF_LO_TRIG BIT(0)
#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define PORTHALT_HALT_LTSSM BIT(0)
#define PORTHALT_HALT_REJECT BIT(1)
#define PORTHALT_STCHG_REQ BIT(20)
#define PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
				HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)

#define SSPX_CORE_CNT0 0x610
#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
				SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
				SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
				SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define BLCG_DFPCI BIT(0)
#define BLCG_UFPCI BIT(1)
#define BLCG_FE BIT(2)
#define BLCG_COREPLL_PWRDN BIT(8)
#define BLCG_IOPLL_0_PWRDN BIT(9)
#define BLCG_IOPLL_1_PWRDN BIT(10)
#define BLCG_IOPLL_2_PWRDN BIT(11)
#define BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
				CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
struct tegra_xudc_ep_context {
	__le32 info0;
	__le32 info1;
	__le32 deq_lo;
	__le32 deq_hi;
	__le32 tx_info;
	__le32 rsvd[11];
};

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISOCH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7
#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{ \
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
} \
static inline void \
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
{ \
	u32 tmp; \
 \
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	ctx->member = cpu_to_le32(tmp); \
}
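/*
 * For illustration, BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) below
 * generates a pair of accessors equivalent to:
 *
 *	static inline u32 ep_ctx_read_state(struct tegra_xudc_ep_context *ctx)
 *	{
 *		return le32_to_cpu(ctx->info0) & 0x7;
 *	}
 *
 *	static inline void
 *	ep_ctx_write_state(struct tegra_xudc_ep_context *ctx, u32 val)
 *	{
 *		u32 tmp;
 *
 *		tmp = le32_to_cpu(ctx->info0) & ~0x7;
 *		tmp |= val & 0x7;
 *		ctx->info0 = cpu_to_le32(tmp);
 *	}
 *
 * The le32 conversions keep the in-memory context little-endian, as the
 * hardware expects, regardless of CPU endianness.
 */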
BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
		(ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}
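/*
 * Worked example: the dequeue pointer is 16-byte aligned, so deq_lo only
 * stores address bits [31:4] (bit 0 of the same word is the DCS flag).
 * For addr = 0x80001240:
 *
 *	ep_ctx_write_deq_ptr(ctx, 0x80001240);	// deq_lo field <- 0x08000124
 *	ep_ctx_read_deq_ptr(ctx);		// yields 0x80001240 again
 */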
struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
#define BUILD_TRB_RW(name, member, shift, mask) \
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
{ \
	return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
} \
static inline void \
trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
{ \
	u32 tmp; \
 \
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	trb->member = cpu_to_le32(tmp); \
}
BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)
static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	return ((u64)trb_read_data_hi(trb) << 32) |
		trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_lo(trb, lower_32_bits(addr));
	trb_write_data_hi(trb, upper_32_bits(addr));
}
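/*
 * For illustration, a 64-bit DMA address is simply split across the two
 * 32-bit TRB words, e.g. trb_write_data_ptr(trb, 0x123456780ULL) stores
 * data_lo = 0x23456780 and data_hi = 0x00000001 (both little-endian).
 */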
struct tegra_xudc_request {
	struct usb_request usb_req;

	size_t buf_queued;
	unsigned int trbs_queued;
	unsigned int trbs_needed;
	bool need_zlp;

	struct tegra_xudc_trb *first_trb;
	struct tegra_xudc_trb *last_trb;

	struct list_head list;
};
struct tegra_xudc_ep {
	struct tegra_xudc *xudc;
	struct usb_ep usb_ep;
	unsigned int index;
	char name[8];

	struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;
	dma_addr_t transfer_ring_phys;

	unsigned int enq_ptr;
	unsigned int deq_ptr;
	bool pcs;
	bool ring_full;
	bool stream_rejected;

	struct list_head queue;
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};
struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};

enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,
	DATA_STAGE_XFER,
	DATA_STAGE_RECV,
	STATUS_STAGE_XFER,
	STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};
struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};

struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;
	bool ccs;

#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	u16 status_buf;
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	bool device_mode;
	struct work_struct usb_role_sw_work;

	struct phy **usb3_phy;
	struct phy *curr_usb3_phy;
	struct phy **utmi_phy;
	struct phy *curr_utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct usb_phy **usbphy;
	struct notifier_block vbus_nb;

	struct completion disconnect_complete;

	bool selfpowered;

#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;
	bool wait_csc;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0
static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};
struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	unsigned int num_phys;
	bool u1_enable;
	bool u2_enable;
	bool lpm_enable;
	bool invalid_seq_num;
	bool pls_quirk;
	bool port_reset_quirk;
	bool has_ipfs;
};
static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->base + offset);
}

static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	u32 regval;

	return readl_poll_timeout_atomic(xudc->base + offset, regval,
					 (regval & mask) == val, 1, 100);
}
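/*
 * Typical usage: poll a register until the selected bits reach a value,
 * e.g. ep_reload() below waits for the hardware to clear the reload bit:
 *
 *	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
 *
 * which spins in 1 us steps for at most 100 us.
 */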
static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}
static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}
static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	err = phy_power_on(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi power on failed %d\n", err);

	err = phy_power_on(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
			 USB_ROLE_DEVICE);
}
static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	err = phy_power_off(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);

	err = phy_power_off(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);

	pm_runtime_put(xudc->dev);
}
static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (xudc->device_mode)
		tegra_xudc_device_mode_on(xudc);
	else
		tegra_xudc_device_mode_off(xudc);
}
static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
				    struct usb_phy *usbphy)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
			return i;
	}

	dev_info(xudc->dev, "phy index could not be found for shared USB PHY");

	return -1;
}
static int tegra_xudc_vbus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
					       vbus_nb);
	struct usb_phy *usbphy = (struct usb_phy *)data;
	int phy_index;

	dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);

	if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
	    (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
		dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
			xudc->device_mode);
		return NOTIFY_OK;
	}

	xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS) ? true :
			    false;

	phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
	dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
		phy_index);

	if (!xudc->suspended && phy_index != -1) {
		xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
		xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
		schedule_work(&xudc->usb_role_sw_work);
	}

	return NOTIFY_OK;
}
static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_NONE);
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_DEVICE);

			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}
static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->device_mode && xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC doesn't complete in 100ms, toggle the vbus */
			ret = tegra_phy_xusb_utmi_port_reset(
				xudc->curr_utmi_phy);
			if (ret == 1)
				xudc->wait_for_sec_prc = 0;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}
static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned int index;

	index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned int index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}
static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}

static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);

	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}
static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		tegra_xudc_req_done(ep, req, status);
	}
}
static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	if (ep->deq_ptr > ep->enq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}
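/*
 * Worked example: with the 64-entry ring, enq_ptr = 10 and deq_ptr = 5
 * give 64 - (10 - 5) - 2 = 57 available TRBs; one slot is taken by the
 * link TRB at the end of the ring and one is kept empty so that a full
 * ring can be distinguished from an empty one.
 */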
static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}
static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}

	/*
	 * To generate a zero-length packet on the USB bus, SW needs to
	 * schedule a standalone zero-length TD. According to HW's behavior,
	 * SW needs to schedule TDs in different ways for different endpoint
	 * types.
	 *
	 * For control endpoints:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait for the transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */
	if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
		wait_td = true;

	if (!req->first_trb)
		req->first_trb = &ep->transfer_ring[ep->enq_ptr];

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = false;

		if ((i == count - 1) || (wait_td && i == count - 2))
			ioc = true;

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}

		if (ioc)
			break;
	}

	return count;
}
static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;
	u32 val;

	if (list_empty(&ep->queue))
		return;

	val = DB_TARGET(ep->index);
	if (usb_endpoint_xfer_control(ep->desc)) {
		val |= DB_STREAMID(xudc->setup_seq_num);
	} else if (usb_ss_max_streams(ep->comp_desc) > 0) {
		struct tegra_xudc_request *req;

		/* Don't ring doorbell if the stream has been rejected. */
		if (ep->stream_rejected)
			return;

		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		val |= DB_STREAMID(req->usb_req.stream_id);
	}

	dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
	xudc_writel(xudc, val, DB);
}
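/*
 * For illustration: a bulk endpoint with index 3 and no streams rings the
 * doorbell with just DB_TARGET(3); the control endpoint additionally
 * encodes the current setup sequence number in the stream ID field, e.g.
 * DB_TARGET(0) | DB_STREAMID(2) for sequence number 2.
 */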
static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc_request *req;
	bool trbs_queued = false;

	list_for_each_entry(req, &ep->queue, list) {
		/* Ring is full; wait for completions before queueing more. */
		if (ep->ring_full)
			break;

		if (tegra_xudc_queue_trbs(ep, req) > 0)
			trbs_queued = true;
	}

	if (trbs_queued)
		tegra_xudc_ep_ring_doorbell(ep);
}
static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	int err;

	if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "control EP has pending transfers\n");
		return -EINVAL;
	}

	if (usb_endpoint_xfer_control(ep->desc)) {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     (xudc->setup_state ==
					      DATA_STAGE_XFER));
	} else {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     usb_endpoint_dir_in(ep->desc));
	}

	if (err < 0) {
		dev_err(xudc->dev, "failed to map request: %d\n", err);
		return err;
	}

	req->first_trb = NULL;
	req->last_trb = NULL;
	req->buf_queued = 0;
	req->trbs_queued = 0;
	req->need_zlp = false;
	req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
					XUDC_TRB_MAX_BUFFER_SIZE);
	if (req->usb_req.length == 0)
		req->trbs_needed++;

	if (!usb_endpoint_xfer_isoc(ep->desc) &&
	    req->usb_req.zero && req->usb_req.length &&
	    ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
		req->trbs_needed++;
		req->need_zlp = true;
	}

	req->usb_req.status = -EINPROGRESS;
	req->usb_req.actual = 0;

	list_add_tail(&req->list, &ep->queue);

	tegra_xudc_ep_kick_queue(ep);

	return 0;
}
static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_queue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs that are part of or after the cancelled
	 * request, and restore each TRB's cycle bit to its un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first trb of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;

		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}
/*
 * Determine if the given TRB is in the range [first trb, last trb] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);

	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}
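/*
 * Worked example: both helpers must cope with requests that wrap around
 * the ring. With first_trb at index 60 and last_trb wrapped to index 2
 * (so last_trb < first_trb), TRBs at indices 61 and 1 are "in" the
 * request, while a TRB at index 10 is not.
 */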
static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(r, &ep->queue, list) {
		if (r == req)
			break;
	}

	if (r != req)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}
static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isoc EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}
static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}
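/*
 * Note on the type arithmetic above: usb_endpoint_type() yields 0-3
 * (control/isoc/bulk/int). OUT endpoints map directly onto
 * EP_TYPE_ISOCH_OUT..EP_TYPE_INTERRUPT_OUT, while control and IN
 * endpoints are offset by EP_TYPE_CONTROL (4); e.g. a bulk IN endpoint is
 * USB_ENDPOINT_XFER_BULK (2) + 4 = EP_TYPE_BULK_IN (6).
 */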
static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);
}
static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_err(xudc->dev, "endpoint %u already disabled\n",
			ep->index);
		return -EINVAL;
	}

	ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

	ep_reload(xudc, ep->index);

	tegra_xudc_ep_nuke(ep, -ESHUTDOWN);

	xudc->nr_enabled_eps--;
	if (usb_endpoint_xfer_isoc(ep->desc))
		xudc->nr_isoch_eps--;

	ep->desc = NULL;
	ep->comp_desc = NULL;

	memset(ep->context, 0, sizeof(*ep->context));

	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);
	if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
		xudc_writel(xudc, BIT(ep->index), EP_STOPPED);

	/*
	 * If this is the last endpoint disabled in a de-configure request,
	 * switch back to address state.
	 */
	if ((xudc->device_state == USB_STATE_CONFIGURED) &&
	    (xudc->nr_enabled_eps == 1)) {
		u32 val;

		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);

		val = xudc_readl(xudc, CTRL);
		val &= ~CTRL_RUN;
		xudc_writel(xudc, val, CTRL);
	}

	dev_info(xudc->dev, "ep %u disabled\n", ep->index);

	return 0;
}
static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_disable(ep);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc *xudc = ep->xudc;
	unsigned int i;
	u32 val;

	if (xudc->gadget.speed == USB_SPEED_SUPER &&
	    !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
		return -EINVAL;

	/* Disable the EP if it is not disabled */
	if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
		__tegra_xudc_ep_disable(ep);

	ep->desc = desc;
	ep->comp_desc = ep->usb_ep.comp_desc;

	if (usb_endpoint_xfer_isoc(desc)) {
		if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
			dev_err(xudc->dev, "too many isoch endpoints\n");
			return -EBUSY;
		}
		xudc->nr_isoch_eps++;
	}

	memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
	       sizeof(*ep->transfer_ring));
	setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);

	ep->enq_ptr = 0;
	ep->deq_ptr = 0;
	ep->pcs = true;
	ep->ring_full = false;
	xudc->nr_enabled_eps++;

	tegra_xudc_ep_context_setup(ep);

	/*
	 * No need to reload and un-halt EP0. This will be done automatically
	 * once a valid SETUP packet is received.
	 */
	if (usb_endpoint_xfer_control(desc))
		goto out;

	/*
	 * Transition to configured state once the first non-control
	 * endpoint is enabled.
	 */
	if (xudc->device_state == USB_STATE_ADDRESS) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_RUN;
		xudc_writel(xudc, val, CTRL);

		xudc->device_state = USB_STATE_CONFIGURED;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * Pause all bulk endpoints when enabling an isoch endpoint
		 * to ensure the isoch endpoint is allocated enough bandwidth.
		 */
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_pause(xudc, i);
		}
	}

	ep_reload(xudc, ep->index);
	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);

	if (usb_endpoint_xfer_isoc(desc)) {
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_unpause(xudc, i);
		}
	}

out:
	dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
		 usb_ep_type_string(usb_endpoint_type(ep->desc)),
		 usb_endpoint_dir_in(ep->desc) ? "in" : "out");

	return 0;
}
static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_enable(ep, desc);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static struct usb_request *
tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
{
	struct tegra_xudc_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->list);

	return &req->usb_req;
}

static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
				       struct usb_request *usb_req)
{
	struct tegra_xudc_request *req = to_xudc_req(usb_req);

	kfree(req);
}
static struct usb_ep_ops tegra_xudc_ep_ops = {
	.enable = tegra_xudc_ep_enable,
	.disable = tegra_xudc_ep_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	return -EBUSY;
}

static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
{
	return -EBUSY;
}

static struct usb_ep_ops tegra_xudc_ep0_ops = {
	.enable = tegra_xudc_ep0_enable,
	.disable = tegra_xudc_ep0_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};
static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
		MFINDEX_FRAME_SHIFT;
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	ep_unpause_all(xudc);

	/* Direct link to U0. */
	val = xudc_readl(xudc, PORTSC);
	if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	if (xudc->device_state == USB_STATE_SUSPENDED) {
		xudc->device_state = xudc->resume_state;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
		xudc->resume_state = 0;
	}

	/*
	 * Doorbells may be dropped if they are sent too soon (< ~200ns)
	 * after unpausing the endpoint. Wait for 500ns just to be safe.
	 */
	ndelay(500);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
}
static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	val = xudc_readl(xudc, PORTPM);
	dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
		val, gadget->speed);

	if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
	     (val & PORTPM_RWE)) ||
	    ((xudc->gadget.speed == USB_SPEED_SUPER) &&
	     (val & PORTPM_FRWE))) {
		tegra_xudc_resume_device_state(xudc);

		/* Send Device Notification packet. */
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
					       | DEVNOTIF_LO_TRIG;
			xudc_writel(xudc, 0, DEVNOTIF_HI);
			xudc_writel(xudc, val, DEVNOTIF_LO);
		}
	}

unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (is_on != xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		if (is_on)
			val |= CTRL_ENABLE;
		else
			val &= ~CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}
static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;
	unsigned int i;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, gadget);

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}
static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, NULL);

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}
static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
	xudc->selfpowered = !!is_on;

	return 0;
}

static struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};
static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}

static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
			    void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = NULL;
	xudc->ep0_req->usb_req.dma = 0;
	xudc->ep0_req->usb_req.length = 0;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = buf;
	xudc->ep0_req->usb_req.length = len;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}
static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	switch (xudc->setup_state) {
	case DATA_STAGE_XFER:
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	case DATA_STAGE_RECV:
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	default:
		xudc->setup_state = WAIT_FOR_SETUP;
		break;
	}
}
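/*
 * Control transfer state flow, in brief: completing an IN data stage
 * (DATA_STAGE_XFER) queues an OUT status stage (STATUS_STAGE_RECV), and
 * completing an OUT data stage (DATA_STAGE_RECV) queues an IN status
 * stage (STATUS_STAGE_XFER); once a status stage completes, the state
 * machine falls back to WAIT_FOR_SETUP.
 */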
static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}
static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if (xudc->test_mode_pattern) {
		xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
		xudc->test_mode_pattern = 0;
	}
}
static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	u32 feature = le16_to_cpu(ctrl->wValue);
	u32 index = le16_to_cpu(ctrl->wIndex);
	u32 val, ep;
	int ret;

	if (le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (feature) {
		case USB_DEVICE_REMOTE_WAKEUP:
			if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
			    (xudc->device_state == USB_STATE_DEFAULT))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if (set)
				val |= PORTPM_RWE;
			else
				val &= ~PORTPM_RWE;

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
			if ((xudc->device_state != USB_STATE_CONFIGURED) ||
			    (xudc->gadget.speed != USB_SPEED_SUPER))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if ((feature == USB_DEVICE_U1_ENABLE) &&
			    xudc->soc->u1_enable) {
				if (set)
					val |= PORTPM_U1E;
				else
					val &= ~PORTPM_U1E;
			}

			if ((feature == USB_DEVICE_U2_ENABLE) &&
			    xudc->soc->u2_enable) {
				if (set)
					val |= PORTPM_U2E;
				else
					val &= ~PORTPM_U2E;
			}

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_TEST_MODE:
			if (xudc->gadget.speed != USB_SPEED_HIGH)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			xudc->test_mode_pattern = index >> 8;
			break;
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_INTERFACE:
		if (xudc->device_state != USB_STATE_CONFIGURED)
			return -EINVAL;

		switch (feature) {
		case USB_INTRF_FUNC_SUSPEND:
			if (set) {
				val = xudc_readl(xudc, PORTPM);

				if (index & USB_INTRF_FUNC_SUSPEND_RW)
					val |= PORTPM_FRWE;
				else
					val &= ~PORTPM_FRWE;

				xudc_writel(xudc, val, PORTPM);
			}

			return tegra_xudc_ep0_delegate_req(xudc, ctrl);
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);

		if ((xudc->device_state == USB_STATE_DEFAULT) ||
		    ((xudc->device_state == USB_STATE_ADDRESS) &&
		     (ep != 0)))
			return -EINVAL;

		ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
}
static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
				     struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep_context *ep_ctx;
	u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
	u16 status = 0;

	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 2))
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		val = xudc_readl(xudc, PORTPM);

		if (xudc->selfpowered)
			status |= BIT(USB_DEVICE_SELF_POWERED);

		if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		    (val & PORTPM_RWE))
			status |= BIT(USB_DEVICE_REMOTE_WAKEUP);

		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			if (val & PORTPM_U1E)
				status |= BIT(USB_DEV_STAT_U1_ENABLED);
			if (val & PORTPM_U2E)
				status |= BIT(USB_DEV_STAT_U2_ENABLED);
		}
		break;
	case USB_RECIP_INTERFACE:
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			status |= USB_INTRF_STAT_FUNC_RW_CAP;
			val = xudc_readl(xudc, PORTPM);
			if (val & PORTPM_FRWE)
				status |= USB_INTRF_STAT_FUNC_RW;
		}
		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);
		ep_ctx = &xudc->ep_context[ep];

		if ((xudc->device_state != USB_STATE_CONFIGURED) &&
		    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
			return -EINVAL;

		if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
			return -EINVAL;

		if (xudc_readl(xudc, EP_HALT) & BIT(ep))
			status |= BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	xudc->status_buf = cpu_to_le16(status);
	return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
					 sizeof(xudc->status_buf),
					 no_op_complete);
}
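/*
 * For illustration: a self-powered device on a SuperSpeed link with U1
 * enabled reports status = BIT(USB_DEVICE_SELF_POWERED) |
 * BIT(USB_DEV_STAT_U1_ENABLED), returned as the two little-endian bytes
 * produced by cpu_to_le16() above.
 */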
static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with SEL values */
}

static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
				  struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if (xudc->device_state == USB_STATE_DEFAULT)
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 6))
		return -EINVAL;

	return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
					 sizeof(xudc->sel_timing),
					 set_sel_complete);
}
static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with isoch delay */
}

static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
					  struct usb_ctrlrequest *ctrl)
{
	u32 delay = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	xudc->isoch_delay = delay;

	return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
}
static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if ((xudc->device_state == USB_STATE_DEFAULT) &&
	    (xudc->dev_addr != 0)) {
		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	} else if ((xudc->device_state == USB_STATE_ADDRESS) &&
		   (xudc->dev_addr == 0)) {
		xudc->device_state = USB_STATE_DEFAULT;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}
}
2370 static int tegra_xudc_ep0_set_address(struct tegra_xudc
*xudc
,
2371 struct usb_ctrlrequest
*ctrl
)
2373 struct tegra_xudc_ep
*ep0
= &xudc
->ep
[0];
2374 u32 val
, addr
= le16_to_cpu(ctrl
->wValue
);
2376 if (ctrl
->bRequestType
!= (USB_DIR_OUT
| USB_RECIP_DEVICE
|
2380 if ((addr
> 127) || (le16_to_cpu(ctrl
->wIndex
) != 0) ||
2381 (le16_to_cpu(ctrl
->wLength
) != 0))
2384 if (xudc
->device_state
== USB_STATE_CONFIGURED
)
2387 dev_dbg(xudc
->dev
, "set address: %u\n", addr
);
2389 xudc
->dev_addr
= addr
;
2390 val
= xudc_readl(xudc
, CTRL
);
2391 val
&= ~(CTRL_DEVADDR_MASK
);
2392 val
|= CTRL_DEVADDR(addr
);
2393 xudc_writel(xudc
, val
, CTRL
);
2395 ep_ctx_write_devaddr(ep0
->context
, addr
);
2397 return tegra_xudc_ep0_queue_status(xudc
, set_address_complete
);
static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
		ret = tegra_xudc_ep0_get_status(xudc, ctrl);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = tegra_xudc_ep0_set_address(xudc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
		ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
		ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
		ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
		/*
		 * In theory we need to clear RUN bit before status stage of
		 * deconfig request sent, but this seems to be causing problems.
		 * Clear RUN once all endpoints are disabled instead.
		 */
		fallthrough;
	default:
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
		break;
	}

	return ret;
}

static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
					       struct usb_ctrlrequest *ctrl,
					       u16 seq_num)
{
	int ret;

	xudc->setup_seq_num = seq_num;

	/* Ensure EP0 is unhalted. */
	ep_unhalt(xudc, 0);

	/*
	 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
	 * are invalid.  Halt EP0 until we get a valid packet.
	 */
	if (xudc->soc->invalid_seq_num &&
	    (seq_num == 0xfffe || seq_num == 0xffff)) {
		dev_warn(xudc->dev, "invalid sequence number detected\n");
		ep_halt(xudc, 0);
		return;
	}

	if (ctrl->wLength)
		xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
			DATA_STAGE_XFER : DATA_STAGE_RECV;
	else
		xudc->setup_state = STATUS_STAGE_XFER;

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
	else
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);

	if (ret < 0) {
		dev_warn(xudc->dev, "setup request failed: %d\n", ret);
		xudc->setup_state = WAIT_FOR_SETUP;
		ep_halt(xudc, 0);
	}
}

static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
					struct tegra_xudc_trb *event)
{
	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
	u16 seq_num = trb_read_seq_num(event);

	if (xudc->setup_state != WAIT_FOR_SETUP) {
		/*
		 * The controller is in the process of handling another
		 * setup request.  Queue subsequent requests and handle
		 * the last one once the controller reports a sequence
		 * number error.
		 */
		memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
		xudc->setup_packet.seq_num = seq_num;
		xudc->queued_setup_packet = true;
	} else {
		tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
	}
}

static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_request *req;

	list_for_each_entry(req, &ep->queue, list) {
		if (!req->trbs_queued)
			break;

		if (trb_in_request(ep, req, trb))
			return req;
	}

	return NULL;
}

static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
						  struct tegra_xudc_ep *ep,
						  struct tegra_xudc_trb *event)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_trb *trb;
	bool short_packet;

	short_packet = (trb_read_cmpl_code(event) ==
			TRB_CMPL_CODE_SHORT_PACKET);

	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	req = trb_to_request(ep, trb);

	/*
	 * TDs are complete on short packet or when the completed TRB is the
	 * last TRB in the TD (the CHAIN bit is unset).
	 */
	if (req && (short_packet || (!trb_read_chain(trb) &&
		(req->trbs_needed == req->trbs_queued)))) {
		struct tegra_xudc_trb *last = req->last_trb;
		unsigned int residual;

		residual = trb_read_transfer_len(event);
		req->usb_req.actual = req->usb_req.length - residual;

		dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
			req->usb_req.actual, req->usb_req.length);

		tegra_xudc_req_done(ep, req, 0);

		if (ep->desc && usb_endpoint_xfer_control(ep->desc))
			tegra_xudc_ep0_req_done(xudc);

		/*
		 * Advance the dequeue pointer past the end of the current TD
		 * on short packet completion.
		 */
		if (short_packet) {
			ep->deq_ptr = (last - ep->transfer_ring) + 1;
			if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
				ep->deq_ptr = 0;
		}
	} else if (!req) {
		dev_warn(xudc->dev, "transfer event on dequeued request\n");
	}

	if (ep->desc)
		tegra_xudc_ep_kick_queue(ep);
}

static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
					     struct tegra_xudc_trb *event)
{
	unsigned int ep_index = trb_read_endpoint_id(event);
	struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
	struct tegra_xudc_trb *trb;
	u16 comp_code;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
			 ep_index);
		return;
	}

	/* Update transfer ring dequeue pointer. */
	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	comp_code = trb_read_cmpl_code(event);
	if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
		ep->deq_ptr = (trb - ep->transfer_ring) + 1;

		if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
			ep->deq_ptr = 0;
		ep->ring_full = false;
	}

	switch (comp_code) {
	case TRB_CMPL_CODE_SUCCESS:
	case TRB_CMPL_CODE_SHORT_PACKET:
		tegra_xudc_handle_transfer_completion(xudc, ep, event);
		break;
	case TRB_CMPL_CODE_HOST_REJECTED:
		dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);

		ep->stream_rejected = true;
		break;
	case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
		dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);

		if (ep->stream_rejected) {
			ep->stream_rejected = false;
			/*
			 * An EP is stopped when a stream is rejected.  Wait
			 * for the EP to report that it is stopped and then
			 * un-stop it.
			 */
			ep_wait_for_stopped(xudc, ep_index);
		}
		tegra_xudc_ep_ring_doorbell(ep);
		break;
	case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
		/*
		 * Wait for the EP to be stopped so the controller stops
		 * processing doorbells.
		 */
		ep_wait_for_stopped(xudc, ep_index);
		ep->enq_ptr = ep->deq_ptr;
		tegra_xudc_ep_nuke(ep, -EIO);
		fallthrough;
	case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
	case TRB_CMPL_CODE_CTRL_DIR_ERR:
	case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
	case TRB_CMPL_CODE_RING_UNDERRUN:
	case TRB_CMPL_CODE_RING_OVERRUN:
	case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
	case TRB_CMPL_CODE_USB_TRANS_ERR:
	case TRB_CMPL_CODE_TRB_ERR:
		dev_err(xudc->dev, "completion error %#x on EP %u\n",
			comp_code, ep_index);

		ep_halt(xudc, ep_index);
		break;
	case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
		dev_info(xudc->dev, "sequence number error\n");

		/*
		 * Kill any queued control request and skip to the last
		 * setup packet we received.
		 */
		tegra_xudc_ep_nuke(ep, -EINVAL);
		xudc->setup_state = WAIT_FOR_SETUP;
		if (!xudc->queued_setup_packet)
			break;

		tegra_xudc_handle_ep0_setup_packet(xudc,
						   &xudc->setup_packet.ctrl_req,
						   xudc->setup_packet.seq_num);
		xudc->queued_setup_packet = false;
		break;
	case TRB_CMPL_CODE_STOPPED:
		dev_dbg(xudc->dev, "stop completion code on EP %u\n",
			ep_index);

		/* Disconnected. */
		tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
		break;
	default:
		dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
			comp_code, ep_index);
		break;
	}
}

static void tegra_xudc_reset(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	dma_addr_t deq_ptr;
	unsigned int i;

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ep_unpause_all(xudc);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);

	/*
	 * Reset sequence number and dequeue pointer to flush the transfer
	 * ring.
	 */
	ep0->deq_ptr = ep0->enq_ptr;
	ep0->ring_full = false;

	xudc->setup_seq_num = 0;
	xudc->queued_setup_packet = false;

	ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);

	deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);

	if (!dma_mapping_error(xudc->dev, deq_ptr)) {
		ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
		ep_ctx_write_dcs(ep0->context, ep0->pcs);
	}

	ep_unhalt_all(xudc);
	ep_reload(xudc, 0);
	ep_unpause(xudc, 0);
}

static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u16 maxpacket;
	u32 val;

	val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
	switch (val) {
	case PORTSC_PS_LS:
		xudc->gadget.speed = USB_SPEED_LOW;
		break;
	case PORTSC_PS_FS:
		xudc->gadget.speed = USB_SPEED_FULL;
		break;
	case PORTSC_PS_HS:
		xudc->gadget.speed = USB_SPEED_HIGH;
		break;
	case PORTSC_PS_SS:
		xudc->gadget.speed = USB_SPEED_SUPER;
		break;
	default:
		xudc->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}

	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	xudc->setup_state = WAIT_FOR_SETUP;

	if (xudc->gadget.speed == USB_SPEED_SUPER)
		maxpacket = 512;
	else
		maxpacket = 64;

	ep_ctx_write_max_packet_size(ep0->context, maxpacket);
	tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
	usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);

	if (!xudc->soc->u1_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U1TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (!xudc->soc->u2_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U2TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (xudc->gadget.speed <= USB_SPEED_HIGH) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_L1S_MASK);
		if (xudc->soc->lpm_enable)
			val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
		else
			val |= PORTPM_L1S(PORTPM_L1S_NYET);
		xudc_writel(xudc, val, PORTPM);
	}

	val = xudc_readl(xudc, ST);
	if (val & ST_RC)
		xudc_writel(xudc, ST_RC, ST);
}

static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver && xudc->driver->disconnect) {
		spin_unlock(&xudc->lock);
		xudc->driver->disconnect(&xudc->gadget);
		spin_lock(&xudc->lock);
	}

	xudc->device_state = USB_STATE_NOTATTACHED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	complete(&xudc->disconnect_complete);
}

static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver) {
		spin_unlock(&xudc->lock);
		usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
		spin_lock(&xudc->lock);
	}

	tegra_xudc_port_connect(xudc);
}

static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port suspend\n");

	xudc->resume_state = xudc->device_state;
	xudc->device_state = USB_STATE_SUSPENDED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	if (xudc->driver->suspend) {
		spin_unlock(&xudc->lock);
		xudc->driver->suspend(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port resume\n");

	tegra_xudc_resume_device_state(xudc);

	if (xudc->driver->resume) {
		spin_unlock(&xudc->lock);
		xudc->driver->resume(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
{
	u32 val;

	val = xudc_readl(xudc, PORTSC);
	val &= ~PORTSC_CHANGE_MASK;
	val |= flag;
	xudc_writel(xudc, val, PORTSC);
}

static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	u32 portsc, porthalt;

	porthalt = xudc_readl(xudc, PORTHALT);
	if ((porthalt & PORTHALT_STCHG_REQ) &&
	    (porthalt & PORTHALT_HALT_LTSSM)) {
		dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
		porthalt &= ~PORTHALT_HALT_LTSSM;
		xudc_writel(xudc, porthalt, PORTHALT);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
#define TOGGLE_VBUS_WAIT_MS 100
		if (xudc->soc->port_reset_quirk) {
			schedule_delayed_work(&xudc->port_reset_war_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_for_sec_prc = 1;
		}
	}

	if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
		tegra_xudc_port_reset(xudc);
		cancel_delayed_work(&xudc->port_reset_war_work);
		xudc->wait_for_sec_prc = 0;
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_WRC) {
		dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
		if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
			tegra_xudc_port_reset(xudc);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_CSC) {
		dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CSC);

		if (portsc & PORTSC_CCS)
			tegra_xudc_port_connect(xudc);
		else
			tegra_xudc_port_disconnect(xudc);

		if (xudc->wait_csc) {
			cancel_delayed_work(&xudc->plc_reset_work);
			xudc->wait_csc = false;
		}
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_PLC) {
		u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;

		dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PLC);
		switch (pls) {
		case PORTSC_PLS_U3:
			tegra_xudc_port_suspend(xudc);
			break;
		case PORTSC_PLS_U0:
			if (xudc->gadget.speed < USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_RESUME:
			if (xudc->gadget.speed == USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_INACTIVE:
			schedule_delayed_work(&xudc->plc_reset_work,
					msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_csc = true;
			break;
		default:
			break;
		}
	}

	if (portsc & PORTSC_CEC) {
		dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CEC);
	}

	dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
}

static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
	       (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
		__tegra_xudc_handle_port_status(xudc);
}

static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
				    struct tegra_xudc_trb *event)
{
	u32 type = trb_read_type(event);

	dump_trb(xudc, "EVENT", event);

	switch (type) {
	case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
		tegra_xudc_handle_port_status(xudc);
		break;
	case TRB_TYPE_TRANSFER_EVENT:
		tegra_xudc_handle_transfer_event(xudc, event);
		break;
	case TRB_TYPE_SETUP_PACKET_EVENT:
		tegra_xudc_handle_ep0_event(xudc, event);
		break;
	default:
		dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
		break;
	}
}

static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
{
	struct tegra_xudc_trb *event;
	dma_addr_t erdp;

	while (true) {
		event = xudc->event_ring[xudc->event_ring_index] +
			xudc->event_ring_deq_ptr;

		if (trb_read_cycle(event) != xudc->ccs)
			break;

		tegra_xudc_handle_event(xudc, event);

		xudc->event_ring_deq_ptr++;
		if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
			xudc->event_ring_deq_ptr = 0;
			xudc->event_ring_index++;
		}

		if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
			xudc->event_ring_index = 0;
			xudc->ccs = !xudc->ccs;
		}
	}

	erdp = xudc->event_ring_phys[xudc->event_ring_index] +
		xudc->event_ring_deq_ptr * sizeof(*event);

	xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
	xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
}

static irqreturn_t tegra_xudc_irq(int irq, void *data)
{
	struct tegra_xudc *xudc = data;
	unsigned long flags;
	u32 val;

	val = xudc_readl(xudc, ST);
	if (!(val & ST_IP))
		return IRQ_NONE;
	xudc_writel(xudc, ST_IP, ST);

	spin_lock_irqsave(&xudc->lock, flags);
	tegra_xudc_process_event_ring(xudc);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return IRQ_HANDLED;
}

static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	ep->xudc = xudc;
	ep->index = index;
	ep->context = &xudc->ep_context[index];
	INIT_LIST_HEAD(&ep->queue);

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return 0;

	ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
					   GFP_KERNEL,
					   &ep->transfer_ring_phys);
	if (!ep->transfer_ring)
		return -ENOMEM;

	if (index) {
		snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
			 (index % 2 == 0) ? "out" : "in");
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.max_streams = 16;
		ep->usb_ep.ops = &tegra_xudc_ep_ops;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		if (index & 1)
			ep->usb_ep.caps.dir_in = true;
		else
			ep->usb_ep.caps.dir_out = true;
		list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
	} else {
		strscpy(ep->name, "ep0", sizeof(ep->name));
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
		ep->usb_ep.ops = &tegra_xudc_ep0_ops;
		ep->usb_ep.caps.type_control = true;
		ep->usb_ep.caps.dir_in = true;
		ep->usb_ep.caps.dir_out = true;
	}

	return 0;
}

static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return;

	dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
		      ep->transfer_ring_phys);
}

static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
{
	struct usb_request *req;
	unsigned int i;
	int err;

	xudc->ep_context =
		dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
				   sizeof(*xudc->ep_context),
				   &xudc->ep_context_phys, GFP_KERNEL);
	if (!xudc->ep_context)
		return -ENOMEM;

	xudc->transfer_ring_pool =
		dmam_pool_create(dev_name(xudc->dev), xudc->dev,
				 XUDC_TRANSFER_RING_SIZE *
				 sizeof(struct tegra_xudc_trb),
				 sizeof(struct tegra_xudc_trb), 0);
	if (!xudc->transfer_ring_pool) {
		err = -ENOMEM;
		goto free_ep_context;
	}

	INIT_LIST_HEAD(&xudc->gadget.ep_list);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
		err = tegra_xudc_alloc_ep(xudc, i);
		if (err < 0)
			goto free_eps;
	}

	req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_eps;
	}
	xudc->ep0_req = to_xudc_req(req);

	return 0;

free_eps:
	for (; i > 0; i--)
		tegra_xudc_free_ep(xudc, i - 1);
free_ep_context:
	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
	return err;
}

static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
{
	xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
	xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
}

static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
{
	unsigned int i;

	tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
				   &xudc->ep0_req->usb_req);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_free_ep(xudc, i);

	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
}

static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		xudc->event_ring[i] =
			dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
					   sizeof(*xudc->event_ring[i]),
					   &xudc->event_ring_phys[i],
					   GFP_KERNEL);
		if (!xudc->event_ring[i])
			goto free_dma;
	}

	return 0;

free_dma:
	for (; i > 0; i--) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i - 1]),
				  xudc->event_ring[i - 1],
				  xudc->event_ring_phys[i - 1]);
	}
	return -ENOMEM;
}

static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	val = xudc_readl(xudc, SPARAM);
	val &= ~(SPARAM_ERSTMAX_MASK);
	val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
	xudc_writel(xudc, val, SPARAM);

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
		       sizeof(*xudc->event_ring[i]));

		val = xudc_readl(xudc, ERSTSZ);
		val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
		val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
		xudc_writel(xudc, val, ERSTSZ);

		xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBALO(i));
		xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBAHI(i));
	}

	val = lower_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPLO);
	val |= EREPLO_ECS;
	xudc_writel(xudc, val, EREPLO);

	val = upper_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPHI);
	xudc_writel(xudc, val, EREPHI);

	xudc->ccs = true;
	xudc->event_ring_index = 0;
	xudc->event_ring_deq_ptr = 0;
}

static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i]),
				  xudc->event_ring[i],
				  xudc->event_ring_phys[i]);
	}
}

static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
{
	u32 val;

	if (xudc->soc->has_ipfs) {
		val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
		val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
		ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
		usleep_range(10, 15);
	}

	/* Enable bus master */
	val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
	      XUSB_DEV_CFG_1_BUS_MASTER_EN;
	fpci_writel(xudc, val, XUSB_DEV_CFG_1);

	/* Program BAR0 space */
	val = fpci_readl(xudc, XUSB_DEV_CFG_4);
	val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
	val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);

	fpci_writel(xudc, val, XUSB_DEV_CFG_4);
	fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);

	usleep_range(100, 200);

	if (xudc->soc->has_ipfs) {
		/* Enable interrupt assertion */
		val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
		val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
		ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
	}
}

static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
{
	u32 val, imod;

	if (xudc->soc->has_ipfs) {
		val = xudc_readl(xudc, BLCG);
		val |= BLCG_ALL;
		val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
				BLCG_COREPLL_PWRDN);
		val |= BLCG_IOPLL_0_PWRDN;
		val |= BLCG_IOPLL_1_PWRDN;
		val |= BLCG_IOPLL_2_PWRDN;

		xudc_writel(xudc, val, BLCG);
	}

	/* Set a reasonable U3 exit timer value. */
	val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
	val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
	val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
	xudc_writel(xudc, val, SSPX_CORE_PADCTL4);

	/* Default ping LFPS tBurst is too large. */
	val = xudc_readl(xudc, SSPX_CORE_CNT0);
	val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
	val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
	xudc_writel(xudc, val, SSPX_CORE_CNT0);

	/* Default tPortConfiguration timeout is too small. */
	val = xudc_readl(xudc, SSPX_CORE_CNT30);
	val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
	val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
	xudc_writel(xudc, val, SSPX_CORE_CNT30);

	if (xudc->soc->lpm_enable) {
		/* Set L1 resume duration to 95 us. */
		val = xudc_readl(xudc, HSFSPI_COUNT13);
		val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
		val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
		xudc_writel(xudc, val, HSFSPI_COUNT13);
	}

	/*
	 * Compliance suite appears to be violating polling LFPS tBurst max
	 * of 1.4us.  Send 1.45us instead.
	 */
	val = xudc_readl(xudc, SSPX_CORE_CNT32);
	val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
	val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
	xudc_writel(xudc, val, SSPX_CORE_CNT32);

	/* Direct HS/FS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Direct SS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Restore port instance. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	xudc_writel(xudc, val, CFG_DEV_FE);

	/*
	 * Enable INFINITE_SS_RETRY to prevent device from entering
	 * Disabled.Error when attached to buggy SuperSpeed hubs.
	 */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val |= CFG_DEV_FE_INFINITE_SS_RETRY;
	xudc_writel(xudc, val, CFG_DEV_FE);

	/* Set interrupt moderation. */
	imod = XUDC_INTERRUPT_MODERATION_US * 4;
	val = xudc_readl(xudc, RT_IMOD);
	val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
	val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
	xudc_writel(xudc, val, RT_IMOD);

	/* increase SSPI transaction timeout from 32us to 512us */
	val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
	val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
	val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
	xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}

static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
	int err = 0, usb3;
	unsigned int i;

	xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->utmi_phy), GFP_KERNEL);
	if (!xudc->utmi_phy)
		return -ENOMEM;

	xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->usb3_phy), GFP_KERNEL);
	if (!xudc->usb3_phy)
		return -ENOMEM;

	xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				    sizeof(*xudc->usbphy), GFP_KERNEL);
	if (!xudc->usbphy)
		return -ENOMEM;

	xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		char phy_name[] = "usb.-.";

		/* Get USB2 phy */
		snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
		xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->utmi_phy[i])) {
			err = PTR_ERR(xudc->utmi_phy[i]);
			if (err != -EPROBE_DEFER)
				dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n",
					i, err);

			goto clean_up;
		} else if (xudc->utmi_phy[i]) {
			/* Get usb-phy, if utmi phy is available */
			xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
						xudc->utmi_phy[i]->dev.of_node,
						&xudc->vbus_nb);
			if (IS_ERR(xudc->usbphy[i])) {
				err = PTR_ERR(xudc->usbphy[i]);
				dev_err(xudc->dev, "failed to get usbphy-%d: %d\n",
					i, err);
				goto clean_up;
			}
		} else if (!xudc->utmi_phy[i]) {
			/* if utmi phy is not available, ignore USB3 phy get */
			continue;
		}

		/* Get USB3 phy */
		usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
		if (usb3 < 0)
			continue;

		snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
		xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->usb3_phy[i])) {
			err = PTR_ERR(xudc->usb3_phy[i]);
			if (err != -EPROBE_DEFER)
				dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n",
					usb3, err);

			goto clean_up;
		} else if (xudc->usb3_phy[i])
			dev_dbg(xudc->dev, "usb3_phy-%d registered", usb3);
	}

	return err;

clean_up:
	for (i = 0; i < xudc->soc->num_phys; i++) {
		xudc->usb3_phy[i] = NULL;
		xudc->utmi_phy[i] = NULL;
		xudc->usbphy[i] = NULL;
	}

	return err;
}

static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_exit(xudc->usb3_phy[i]);
		phy_exit(xudc->utmi_phy[i]);
	}
}

static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
{
	int err;
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		err = phy_init(xudc->utmi_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
			goto exit_phy;
		}

		err = phy_init(xudc->usb3_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
			goto exit_phy;
		}
	}

	return 0;

exit_phy:
	tegra_xudc_phy_exit(xudc);
	return err;
}

static const char * const tegra210_xudc_supply_names[] = {
	"hvdd-usb",
	"avddio-usb",
};

static const char * const tegra210_xudc_clock_names[] = {
	"dev",
	"ss",
	"ss_src",
	"hs_src",
	"fs_src",
};

static const char * const tegra186_xudc_clock_names[] = {
	"dev",
	"ss",
	"ss_src",
	"fs_src",
};

static struct tegra_xudc_soc tegra210_xudc_soc_data = {
	.supply_names = tegra210_xudc_supply_names,
	.num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
	.clock_names = tegra210_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = false,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = true,
	.pls_quirk = true,
	.port_reset_quirk = true,
	.has_ipfs = true,
};

static struct tegra_xudc_soc tegra186_xudc_soc_data = {
	.clock_names = tegra186_xudc_clock_names,
	.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
	.num_phys = 4,
	.u1_enable = true,
	.u2_enable = true,
	.lpm_enable = false,
	.invalid_seq_num = false,
	.pls_quirk = false,
	.port_reset_quirk = false,
	.has_ipfs = false,
};

static const struct of_device_id tegra_xudc_of_match[] = {
	{
		.compatible = "nvidia,tegra210-xudc",
		.data = &tegra210_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra186-xudc",
		.data = &tegra186_xudc_soc_data
	},
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);

static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
{
	if (xudc->genpd_dl_ss)
		device_link_del(xudc->genpd_dl_ss);
	if (xudc->genpd_dl_device)
		device_link_del(xudc->genpd_dl_device);
	if (xudc->genpd_dev_ss)
		dev_pm_domain_detach(xudc->genpd_dev_ss, true);
	if (xudc->genpd_dev_device)
		dev_pm_domain_detach(xudc->genpd_dev_device, true);
}

static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
{
	struct device *dev = xudc->dev;
	int err;

	xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
							      "dev");
	if (IS_ERR(xudc->genpd_dev_device)) {
		err = PTR_ERR(xudc->genpd_dev_device);
		dev_err(dev, "failed to get dev pm-domain: %d\n", err);
		return err;
	}

	xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
	if (IS_ERR(xudc->genpd_dev_ss)) {
		err = PTR_ERR(xudc->genpd_dev_ss);
		dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
		return err;
	}

	xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
						DL_FLAG_PM_RUNTIME |
						DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_device) {
		dev_err(dev, "adding usb device device link failed!\n");
		return -ENODEV;
	}

	xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
					    DL_FLAG_PM_RUNTIME |
					    DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_ss) {
		dev_err(dev, "adding superspeed device link failed!\n");
		return -ENODEV;
	}

	return 0;
}

static int tegra_xudc_probe(struct platform_device *pdev)
{
	struct tegra_xudc *xudc;
	struct resource *res;
	unsigned int i;
	int err;

	xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
	if (!xudc)
		return -ENOMEM;

	xudc->dev = &pdev->dev;
	platform_set_drvdata(pdev, xudc);

	xudc->soc = of_device_get_match_data(&pdev->dev);
	if (!xudc->soc)
		return -ENODEV;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	xudc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xudc->base))
		return PTR_ERR(xudc->base);
	xudc->phys_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
	xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xudc->fpci))
		return PTR_ERR(xudc->fpci);

	if (xudc->soc->has_ipfs) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "ipfs");
		xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(xudc->ipfs))
			return PTR_ERR(xudc->ipfs);
	}

	xudc->irq = platform_get_irq(pdev, 0);
	if (xudc->irq < 0)
		return xudc->irq;

	err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
			       dev_name(&pdev->dev), xudc);
	if (err < 0) {
		dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
			err);
		return err;
	}

	xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
				  sizeof(*xudc->clks), GFP_KERNEL);
	if (!xudc->clks)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_clks; i++)
		xudc->clks[i].id = xudc->soc->clock_names[i];

	err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
				xudc->clks);
	if (err) {
		dev_err(xudc->dev, "failed to request clks %d\n", err);
		return err;
	}

	xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
				      sizeof(*xudc->supplies), GFP_KERNEL);
	if (!xudc->supplies)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_supplies; i++)
		xudc->supplies[i].supply = xudc->soc->supply_names[i];

	err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
				      xudc->supplies);
	if (err) {
		dev_err(xudc->dev, "failed to request regulators %d\n", err);
		return err;
	}

	xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
	if (IS_ERR(xudc->padctl))
		return PTR_ERR(xudc->padctl);

	err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
	if (err) {
		dev_err(xudc->dev, "failed to enable regulators %d\n", err);
		goto put_padctl;
	}

	err = tegra_xudc_phy_get(xudc);
	if (err)
		goto disable_regulator;

	err = tegra_xudc_powerdomain_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_phy_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_alloc_event_ring(xudc);
	if (err)
		goto disable_phy;

	err = tegra_xudc_alloc_eps(xudc);
	if (err)
		goto free_event_ring;

	spin_lock_init(&xudc->lock);

	init_completion(&xudc->disconnect_complete);

	INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);

	INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);

	INIT_DELAYED_WORK(&xudc->port_reset_war_work,
			  tegra_xudc_port_reset_war_work);

	pm_runtime_enable(&pdev->dev);

	xudc->gadget.ops = &tegra_xudc_gadget_ops;
	xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
	xudc->gadget.name = "tegra-xudc";
	xudc->gadget.max_speed = USB_SPEED_SUPER;

	err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
	if (err) {
		dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
		goto free_eps;
	}

	return 0;

free_eps:
	tegra_xudc_free_eps(xudc);
free_event_ring:
	tegra_xudc_free_event_ring(xudc);
disable_phy:
	tegra_xudc_phy_exit(xudc);
put_powerdomains:
	tegra_xudc_powerdomain_remove(xudc);
disable_regulator:
	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
put_padctl:
	tegra_xusb_padctl_put(xudc->padctl);

	return err;
}

static int tegra_xudc_remove(struct platform_device *pdev)
{
	struct tegra_xudc *xudc = platform_get_drvdata(pdev);
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	cancel_delayed_work(&xudc->plc_reset_work);
	cancel_work_sync(&xudc->usb_role_sw_work);

	usb_del_gadget_udc(&xudc->gadget);

	tegra_xudc_free_eps(xudc);
	tegra_xudc_free_event_ring(xudc);

	tegra_xudc_powerdomain_remove(xudc);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_power_off(xudc->utmi_phy[i]);
		phy_power_off(xudc->usb3_phy[i]);
	}

	tegra_xudc_phy_exit(xudc);

	pm_runtime_disable(xudc->dev);
	pm_runtime_put(xudc->dev);

	tegra_xusb_padctl_put(xudc->padctl);

	return 0;
}

static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
{
	unsigned long flags;

	dev_dbg(xudc->dev, "entering ELPG\n");

	spin_lock_irqsave(&xudc->lock, flags);

	xudc->powergated = true;
	xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
	xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
	xudc_writel(xudc, 0, CTRL);

	spin_unlock_irqrestore(&xudc->lock, flags);

	clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	dev_dbg(xudc->dev, "entering ELPG done\n");

	return 0;
}

static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
{
	unsigned long flags;
	int err;

	dev_dbg(xudc->dev, "exiting ELPG\n");

	err = regulator_bulk_enable(xudc->soc->num_supplies,
				    xudc->supplies);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
	if (err < 0)
		return err;

	tegra_xudc_fpci_ipfs_init(xudc);

	tegra_xudc_device_params_init(xudc);

	tegra_xudc_init_event_ring(xudc);

	tegra_xudc_init_eps(xudc);

	xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
	xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->powergated = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	dev_dbg(xudc->dev, "exiting ELPG done\n");

	return 0;
}

static int __maybe_unused tegra_xudc_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = true;
	spin_unlock_irqrestore(&xudc->lock, flags);

	flush_work(&xudc->usb_role_sw_work);

	/* Forcibly disconnect before powergating. */
	tegra_xudc_device_mode_off(xudc);

	if (!pm_runtime_status_suspended(dev))
		tegra_xudc_powergate(xudc);

	pm_runtime_disable(dev);

	return 0;
}

static int __maybe_unused tegra_xudc_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	err = tegra_xudc_unpowergate(xudc);
	if (err < 0)
		return err;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	schedule_work(&xudc->usb_role_sw_work);

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_powergate(xudc);
}

static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_unpowergate(xudc);
}

static const struct dev_pm_ops tegra_xudc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
	SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
			   tegra_xudc_runtime_resume, NULL)
};

static struct platform_driver tegra_xudc_driver = {
	.probe = tegra_xudc_probe,
	.remove = tegra_xudc_remove,
	.driver = {
		.name = "tegra-xudc",
		.pm = &tegra_xudc_pm_ops,
		.of_match_table = tegra_xudc_of_match,
	},
};
module_platform_driver(tegra_xudc_driver);

MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
MODULE_LICENSE("GPL v2");