// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"
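/*
 * Send a reply to a SETUP packet using the endpoint's internal EP0
 * request. "ptr" may be NULL when the data has already been placed in
 * the EP0 buffer (see __ast_vhub_simple_reply below). Returns a
 * std_req_* code indicating whether to proceed with the data phase or
 * stall.
 */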
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}
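/*
 * Variadic helper used for short replies: the bytes are copied directly
 * into the EP0 buffer, then sent with ast_vhub_reply(ep, NULL, len).
 *
 * Illustrative (hypothetical) call, e.g. for a 2-byte status reply:
 *
 *	__ast_vhub_simple_reply(ep, 2, 0x00, 0x00);
 *
 * Callers normally reach this through the ast_vhub_simple_reply()
 * wrapper macro declared alongside this helper in vhub.h.
 */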
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
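/*
 * Handle a SETUP packet on EP0: decode the control request, dispatch it
 * either to the built-in hub request handlers (when ep->dev is NULL,
 * i.e. this is the vHub itself) or to the gadget driver, then move the
 * EP0 state machine to the data or status phase accordingly.
 */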
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/* Check our state, cancel pending requests if needed */
	if (ep->ep0.state != ep0_state_token) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);

		/*
		 * Accept the packet regardless, this seems to happen
		 * when stalling a SETUP packet that has an OUT data
		 * phase.
		 */
		ast_vhub_nuke(ep, 0);
		goto stall;
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else
		EPDBG(ep, "no gadget for request !\n");
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
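/*
 * Push the next chunk of an IN transfer into the EP0 buffer and trigger
 * transmission, completing the request and arming the OUT status phase
 * once the last packet (including an optional zero-length packet) has
 * been sent.
 */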
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP0 buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
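/*
 * Copy "len" bytes received in the EP0 buffer into the current OUT
 * request, completing it (and arming the IN status phase) when the
 * transfer is finished, or re-priming the endpoint for the next packet
 * otherwise.
 */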
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? Complete the request and move on to the status phase */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}
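/*
 * Interrupt-time handling of EP0 packet acknowledgements: drive the EP0
 * state machine through the data and status phases, stalling the
 * endpoint on any direction or state mismatch.
 */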
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			dev_warn(dev, "irq state mismatch");
			stall = true;
			break;
		}

		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	}

	/* Reset to token state */
	ep->ep0.state = ep0_state_token;

	if (stall)
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
}
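/*
 * usb_ep_ops.queue implementation for EP0: only a single request may be
 * outstanding at a time, and the direction of the transfer is dictated
 * by the SETUP packet currently being processed.
 */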
static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req)
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
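/*
 * usb_ep_ops.dequeue implementation for EP0: since at most one request
 * can be queued, simply complete it with -ECONNRESET and stall the
 * endpoint to bring the hardware back to a known state.
 */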
static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};
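/*
 * One-time initialization of an EP0 structure, either for the vHub
 * itself (dev == NULL) or for one of the downstream virtual devices,
 * which each get their own control/status register, SETUP area and
 * slice of the shared EP0 buffer.
 */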
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}