inet: frag: enforce memory limits earlier
[linux/fpc-iii.git] / drivers / usb / gadget / udc / r8a66597-udc.c
blobf2c8862093a26b7b362905dc72034be0914b321d
1 /*
2 * R8A66597 UDC (USB gadget)
4 * Copyright (C) 2006-2009 Renesas Solutions Corp.
6 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/platform_device.h>
18 #include <linux/clk.h>
19 #include <linux/err.h>
20 #include <linux/slab.h>
21 #include <linux/dma-mapping.h>
23 #include <linux/usb/ch9.h>
24 #include <linux/usb/gadget.h>
26 #include "r8a66597-udc.h"
28 #define DRIVER_VERSION "2011-09-26"
30 static const char udc_name[] = "r8a66597_udc";
31 static const char *r8a66597_ep_name[] = {
32 "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
33 "ep8", "ep9",
36 static void init_controller(struct r8a66597 *r8a66597);
37 static void disable_controller(struct r8a66597 *r8a66597);
38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
39 static void irq_packet_write(struct r8a66597_ep *ep,
40 struct r8a66597_request *req);
41 static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
42 gfp_t gfp_flags);
44 static void transfer_complete(struct r8a66597_ep *ep,
45 struct r8a66597_request *req, int status);
47 /*-------------------------------------------------------------------------*/
48 static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
50 return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
/*
 * Set the per-pipe interrupt-enable bit for @pipenum in @reg.
 *
 * BEMPE/NRDYE/BRDYE are masked in INTENB0 while @reg is updated so a
 * half-configured enable cannot fire, then the saved INTENB0 value is
 * written back.
 */
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}
/*
 * Clear the per-pipe interrupt-enable bit for @pipenum in @reg,
 * with INTENB0 masked during the update (mirror of enable_pipe_irq()).
 */
static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}
/*
 * Enable control/buffer interrupts, then pull up D+ (DPRPU) so the
 * host sees the device attach.
 */
static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}
/*
 * Detach from the bus and notify the gadget driver.
 *
 * Called with r8a66597->lock held; the lock is dropped only around the
 * gadget driver's ->disconnect() callback, which may sleep or requeue.
 * The controller is then fully reset (disable + init) so a following
 * attach starts from a clean state.
 */
static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);	/* drop the D+ pull-up */

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}
104 static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
106 u16 pid = 0;
107 unsigned long offset;
109 if (pipenum == 0) {
110 pid = r8a66597_read(r8a66597, DCPCTR) & PID;
111 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
112 offset = get_pipectr_addr(pipenum);
113 pid = r8a66597_read(r8a66597, offset) & PID;
114 } else {
115 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
116 pipenum);
119 return pid;
122 static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
123 u16 pid)
125 unsigned long offset;
127 if (pipenum == 0) {
128 r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
129 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
130 offset = get_pipectr_addr(pipenum);
131 r8a66597_mdfy(r8a66597, pid, PID, offset);
132 } else {
133 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
134 pipenum);
/* Set PID to BUF: the pipe responds normally and transfers run. */
static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}
/* Set PID to NAK: the pipe NAKs all tokens (transfers paused). */
static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}
/* Set PID to STALL: the pipe returns STALL handshakes to the host. */
static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}
153 static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
155 u16 ret = 0;
156 unsigned long offset;
158 if (pipenum == 0) {
159 ret = r8a66597_read(r8a66597, DCPCTR);
160 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
161 offset = get_pipectr_addr(pipenum);
162 ret = r8a66597_read(r8a66597, offset);
163 } else {
164 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
165 pipenum);
168 return ret;
/*
 * Clear the data toggle (SQCLR -> DATA0) of @pipenum.
 * The pipe is NAKed first because the toggle must not be changed while
 * the pipe is active.
 */
static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
}
/*
 * Set the data toggle (SQSET -> DATA1) of @pipenum, after NAKing the
 * pipe (mirror of control_reg_sqclr()).
 */
static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQSET, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQSET, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}
}
/*
 * Read the current data-toggle state (SQMON bit) of @pipenum.
 * Returns 0 for an out-of-range pipe number.
 */
static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	if (pipenum == 0) {
		return r8a66597_read(r8a66597, DCPCTR) & SQMON;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		return r8a66597_read(r8a66597, offset) & SQMON;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}

	return 0;
}
/* Snapshot the pipe's data toggle so it can be restored later. */
static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
{
	return control_reg_sqmon(r8a66597, pipenum);
}
227 static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
228 u16 toggle)
230 if (toggle)
231 control_reg_sqset(r8a66597, pipenum);
232 else
233 control_reg_sqclr(r8a66597, pipenum);
/*
 * Return the FIFO buffer size in bytes for @pipenum.
 *
 * In continuous mode (CNTMD) the buffer size comes from the pipe's
 * buffer allocation (fixed 256 for the DCP, PIPEBUF BUFSIZE field
 * otherwise); in non-continuous mode it equals max packet size.
 * NOTE: selects @pipenum in PIPESEL as a side effect for pipes != 0.
 */
static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			/* BUFSIZE field is in 64-byte units, minus one */
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}
264 static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
266 if (r8a66597->pdata->on_chip)
267 return MBW_32;
268 else
269 return MBW_16;
/*
 * Point a FIFO port register (@fifosel) at @pipenum and busy-wait until
 * the hardware confirms the switch (CURPIPE readback). For pipe 0 the
 * ISEL direction bit is part of the comparison. Bails out with an error
 * message after ~1ms worth of polls so a wedged controller cannot hang
 * the CPU forever.
 */
static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
		u16 isel, u16 fifosel)
{
	u16 tmp, mask, loop;
	int i = 0;

	if (!pipenum) {
		mask = ISEL | CURPIPE;
		loop = isel;
	} else {
		mask = CURPIPE;
		loop = pipenum;
	}
	r8a66597_mdfy(r8a66597, loop, mask, fifosel);

	do {
		tmp = r8a66597_read(r8a66597, fifosel);
		if (i++ > 1000000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"r8a66597: register%x, loop %x "
				"is timeout\n", fifosel, loop);
			break;
		}
		ndelay(1);
	} while ((tmp & mask) != loop);
}
/*
 * Select @pipenum on the endpoint's FIFO port and reprogram the access
 * width. DREQE is dropped before and re-raised after the switch when
 * the endpoint uses DMA, so the DMAC never sees a half-switched port.
 * The 450ns delay is the settling time required after a CURPIPE write.
 */
static void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		r8a66597_bclr(r8a66597, DREQE, ep->fifosel);

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	/* SUDMAC transfers want the MBW bits cleared, PIO wants them set */
	if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
		r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
	else
		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);

	if (ep->use_dma)
		r8a66597_bset(r8a66597, DREQE, ep->fifosel);
}
/*
 * Program PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI for the pipe described by
 * @info. Buffer RAM is laid out per pipe class: interrupt pipes get
 * fixed single blocks starting at block 4, bulk/isoc pipes get 16-block
 * (buf_bsize 7, i.e. 8*64-byte double-buffered) regions above
 * R8A66597_BASE_BUFNUM.
 *
 * Returns 0 on success, -EINVAL for pipe 0 (the DCP is not configured
 * here), -ENOMEM if the computed region would overrun the buffer RAM.
 */
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;	/* NAK after a short packet */
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	/* PIPEPERI holds interval - 1 */
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}
/*
 * Return a pipe to the free pool by decrementing the matching usage
 * counter. An isochronous pipe that was lent out as a bulk pipe (see
 * alloc_pipe_config()) decrements both the isoc and bulk counters.
 */
static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe)) {
		r8a66597->bulk--;
	} else if (is_interrupt_pipe(info->pipe)) {
		r8a66597->interrupt--;
	} else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"ep_release: unexpect pipenum (%d)\n", info->pipe);
	}
}
/*
 * Reset a pipe to a clean state: deselect it from the FIFO port, pulse
 * ACLRM to flush the FIFO buffer, clear the data toggle, and (for DMA
 * endpoints) re-select the pipe and restore the FIFO access width.
 */
static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);	/* CURPIPE settling time */

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}
/*
 * Bind hardware pipe @pipenum to endpoint @ep: record the register
 * addresses the endpoint will use (CFIFO by default; switched to D0FIFO
 * later if DMA is allocated), the transaction-counter registers for
 * bulk/isoc pipes, and the pipenum/epaddr lookup tables.
 * NOTE: @dma is currently unused here; DMA channels are attached per
 * request in sudmac_alloc_channel().
 */
static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;

	ep->pipectr = get_pipectr_addr(pipenum);
	if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
		ep->pipetre = get_pipetre_addr(pipenum);
		ep->pipetrn = get_pipetrn_addr(pipenum);
	} else {
		/* interrupt pipes have no transaction counter */
		ep->pipetre = 0;
		ep->pipetrn = 0;
	}
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[usb_endpoint_num(desc)]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}
/*
 * Detach endpoint state from its hardware pipe. Releases the DMA-user
 * slot if one was held; pipe counter bookkeeping is done separately in
 * pipe_buffer_release().
 */
static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}
/*
 * Pick and configure a hardware pipe for @ep based on @desc.
 *
 * Pipes are allocated from fixed per-class pools (bulk, interrupt,
 * isochronous); when the bulk pool is exhausted, a free isochronous
 * pipe is pressed into service as a bulk pipe, in which case BOTH the
 * isochronous and bulk counters are bumped so pipe_buffer_release()
 * can undo it symmetrically.
 *
 * Returns 0 on success, -ENODEV when the pool is exhausted, -EINVAL for
 * an unknown transfer type, or the pipe_buffer_setting() error.
 */
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->ep.desc = desc;

	if (ep->pipenum)	/* already allocated pipe  */
		return 0;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				dev_err(r8a66597_to_dev(r8a66597),
					"bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				/* borrow an isochronous pipe for bulk use */
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			dev_err(r8a66597_to_dev(r8a66597),
				"interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			dev_err(r8a66597_to_dev(r8a66597),
				"isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = usb_endpoint_num(desc);
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		dev_err(r8a66597_to_dev(r8a66597),
			"pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	/* borrowed isoc pipe also counts as a bulk pipe in use */
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}
537 static int free_pipe_config(struct r8a66597_ep *ep)
539 struct r8a66597 *r8a66597 = ep->r8a66597;
540 struct r8a66597_pipe_info info;
542 info.pipe = ep->pipenum;
543 info.type = ep->type;
544 pipe_buffer_release(r8a66597, &info);
545 r8a66597_ep_release(ep);
547 return 0;
550 /*-------------------------------------------------------------------------*/
/* Enable both buffer-ready (BRDY) and not-ready (NRDY) irqs for a pipe. */
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}
/* Disable both buffer-ready (BRDY) and not-ready (NRDY) irqs for a pipe. */
static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}
563 /* if complete is true, gadget driver complete function is not call */
/*
 * Finish the status stage of a control transfer (set CCPL with PID_BUF).
 * @ccpl is remembered in internal_ccpl so transfer_complete() knows not
 * to call the gadget driver's completion for this internally generated
 * status-stage completion.
 */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}
/*
 * Start the IN data stage of a control transfer on pipe 0.
 * A zero-length request completes immediately (BVAL marks the empty
 * packet valid); otherwise the BEMP0 status is cleared and the first
 * chunk is pushed by irq_ep0_write().
 */
static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}
589 static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
590 u16 fifosel)
592 u16 tmp;
594 tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
595 if (tmp == pipenum)
596 r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
/*
 * Switch the pipe's BFRE (BRDY-on-buffer-read-end) mode, needed when
 * moving between PIO and SUDMAC operation. The pipe must be quiesced
 * and detached from all FIFO ports before PIPECFG may be rewritten;
 * the ACLRM pulse resets the internal BFRE state but also clears the
 * data toggle, so the toggle is saved and restored around it.
 */
static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
				int enable)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
	u16 tmp, toggle;

	/* check current BFRE bit */
	r8a66597_write(r8a66597, pipenum, PIPESEL);
	tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
	if ((enable && tmp) || (!enable && !tmp))
		return;	/* already in the requested mode */

	/* change BFRE bit */
	pipe_stop(r8a66597, pipenum);
	disable_fifosel(r8a66597, pipenum, CFIFOSEL);
	disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
	disable_fifosel(r8a66597, pipenum, D1FIFOSEL);

	toggle = save_usb_toggle(r8a66597, pipenum);

	r8a66597_write(r8a66597, pipenum, PIPESEL);
	if (enable)
		r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
	else
		r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);

	/* initialize for internal BFRE flag */
	r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
	r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);

	restore_usb_toggle(r8a66597, pipenum, toggle);
}
/*
 * Try to claim the single SUDMAC channel for @req on @ep.
 *
 * Only bulk pipes qualify, and only one transfer may use the channel at
 * a time. On success the endpoint is switched to the D0FIFO port and
 * the request's buffer is DMA-mapped.
 *
 * Returns 0 on success; -ENODEV (no SUDMAC), -EIO (not a bulk pipe) or
 * -EBUSY (channel in use) tell the caller to fall back to PIO.
 */
static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597_dma *dma;

	if (!r8a66597_is_sudmac(r8a66597))
		return -ENODEV;

	/* Check transfer type */
	if (!is_bulk_pipe(ep->pipenum))
		return -EIO;

	if (r8a66597->dma.used)
		return -EBUSY;

	/* set SUDMAC parameters */
	dma = &r8a66597->dma;
	dma->used = 1;
	if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) {
		dma->dir = 1;
	} else {
		dma->dir = 0;
		/* OUT DMA needs BFRE so BRDY fires on buffer-read end */
		change_bfre_mode(r8a66597, ep->pipenum, 1);
	}

	/* set r8a66597_ep paramters */
	ep->use_dma = 1;
	ep->dma = dma;
	ep->fifoaddr = D0FIFO;
	ep->fifosel = D0FIFOSEL;
	ep->fifoctr = D0FIFOCTR;

	/* dma mapping */
	return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
}
/*
 * Release the SUDMAC channel claimed by sudmac_alloc_channel(): unmap
 * the request, detach the D0FIFO port, and return the endpoint to the
 * CFIFO (PIO) registers.
 */
static void sudmac_free_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	if (!r8a66597_is_sudmac(r8a66597))
		return;

	usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);

	r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
	r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);

	ep->dma->used = 0;
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
}
/*
 * Kick off a SUDMAC transfer for @req: program base address, byte
 * count and end-interrupt enable on channel 0, then set DEN to start.
 * Zero-length requests never reach here (completed in the callers).
 */
static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
			 struct r8a66597_request *req)
{
	BUG_ON(req->req.length == 0);

	r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
	r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);

	r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
}
/*
 * Begin an IN (device-to-host) transfer on a non-control pipe.
 * Zero-length requests complete immediately. Otherwise SUDMAC is tried
 * first; if no channel is available the transfer falls back to PIO,
 * writing the first chunk now if the FIFO is ready (FRDY) or arming the
 * pipe irq to do it later.
 */
static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	if (req->req.length == 0) {
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_empty(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			tmp = r8a66597_read(r8a66597, ep->fifoctr);
			if (unlikely((tmp & FRDY) == 0))
				pipe_irq_enable(r8a66597, ep->pipenum);
			else
				irq_packet_write(ep, req);
		} else {
			/* DMA mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_nrdy(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			enable_irq_nrdy(r8a66597, ep->pipenum);
			sudmac_start(r8a66597, ep, req);
		}
	}
}
/*
 * Begin an OUT (host-to-device) transfer.
 * Pipe 0 just selects the CFIFO for reading and arms the pipe irq.
 * For other pipes the transaction counter (when present) is programmed
 * with the expected packet count so the hardware NAKs after the last
 * packet; then SUDMAC is tried, falling back to irq-driven PIO.
 */
static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		pipe_stop(r8a66597, pipenum);
		if (ep->pipetre) {
			enable_irq_nrdy(r8a66597, pipenum);
			r8a66597_write(r8a66597, TRCLR, ep->pipetre);
			r8a66597_write(r8a66597,
				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
				ep->pipetrn);
			r8a66597_bset(r8a66597, TRENB, ep->pipetre);
		}

		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			change_bfre_mode(r8a66597, ep->pipenum, 0);
			pipe_start(r8a66597, pipenum);	/* trigger once */
			pipe_irq_enable(r8a66597, pipenum);
		} else {
			pipe_change(r8a66597, pipenum);
			sudmac_start(r8a66597, ep, req);
			pipe_start(r8a66597, pipenum);	/* trigger once */
		}
	}
}
771 static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
773 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
774 start_packet_write(ep, req);
775 else
776 start_packet_read(ep, req);
/*
 * Start the appropriate stage of a control transfer on ep0, decided by
 * the control-transfer sequencer state (CTSQ in INTSTS0): read data
 * stage -> write to host, write data stage -> read from host, write
 * no-data -> go straight to status stage.
 */
static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(ep->r8a66597),
			"start_ep0: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
803 static void init_controller(struct r8a66597 *r8a66597)
805 u16 vif = r8a66597->pdata->vif ? LDRV : 0;
806 u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
807 u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
809 if (r8a66597->pdata->on_chip) {
810 if (r8a66597->pdata->buswait)
811 r8a66597_write(r8a66597, r8a66597->pdata->buswait,
812 SYSCFG1);
813 else
814 r8a66597_write(r8a66597, 0x0f, SYSCFG1);
815 r8a66597_bset(r8a66597, HSE, SYSCFG0);
817 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
818 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
819 r8a66597_bset(r8a66597, USBE, SYSCFG0);
821 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
823 r8a66597_bset(r8a66597, irq_sense, INTENB1);
824 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
825 DMA0CFG);
826 } else {
827 r8a66597_bset(r8a66597, vif | endian, PINCFG);
828 r8a66597_bset(r8a66597, HSE, SYSCFG0); /* High spd */
829 r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
830 XTAL, SYSCFG0);
832 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
833 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
834 r8a66597_bset(r8a66597, USBE, SYSCFG0);
836 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
838 msleep(3);
840 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
842 msleep(1);
844 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
846 r8a66597_bset(r8a66597, irq_sense, INTENB1);
847 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
848 DMA0CFG);
/*
 * Quiesce the controller: mask and clear all interrupt sources, then
 * gate its clocks. On-chip variants keep SCKE up long enough to write
 * the registers; external chips wind down SCKE -> PLLC -> XCKE with
 * short settling delays.
 */
static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bclr(r8a66597, UTST, TESTMODE);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}
884 static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
886 u16 tmp;
888 if (!r8a66597->pdata->on_chip) {
889 tmp = r8a66597_read(r8a66597, SYSCFG0);
890 if (!(tmp & XCKE))
891 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
/* Return the head of the endpoint's request queue (callers ensure
 * the queue is non-empty before dereferencing the result). */
static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}
900 /*-------------------------------------------------------------------------*/
/*
 * Finish @req on @ep with @status and hand it back to the gadget
 * driver. Internally generated ep0 status-stage completions
 * (internal_ccpl set by control_end()) are swallowed. The spinlock is
 * dropped only around usb_gadget_giveback_request(), which may call
 * back into the driver. If more requests are queued, the next one is
 * started afterwards.
 */
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	if (ep->use_dma)
		sudmac_free_channel(ep->r8a66597, ep, req);

	spin_unlock(&ep->r8a66597->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->ep.desc)
			start_packet(ep, req);
	}
}
/*
 * Push the next chunk of an ep0 IN transfer into the CFIFO.
 *
 * Busy-waits (bounded) for FRDY, writes up to one buffer's worth of
 * data, and marks a short/zero packet valid with BVAL. When the last
 * packet has been written the ready/empty irqs are disabled; otherwise
 * the empty irq is armed to send the next chunk.
 */
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}
/*
 * Push the next chunk of a non-control IN transfer into the FIFO.
 *
 * If the FIFO is unexpectedly not ready (FRDY clear) the pipe is
 * stopped and its irqs disabled. A short or zero-length chunk is
 * marked valid with BVAL. On the final chunk the empty irq is armed so
 * transfer_complete() runs once the host has drained the buffer;
 * otherwise the ready irq is re-armed for the next chunk.
 */
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597),
			"write fifo not ready. pipnum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}
/*
 * Drain one received buffer of an OUT transfer from the FIFO.
 *
 * If the FIFO is not ready the request is failed with -EPIPE. The
 * number of bytes taken is capped by the received length (DTLN), the
 * buffer size, and the space remaining in the request. A short or
 * zero-length packet, or a filled request, ends the transfer; for
 * size == 0 the buffer is flushed with BCLR instead of being read.
 * Completion is reported here for non-control pipes only (pipe 0 is
 * completed by the ep0 state machine).
 */
static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);

	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}
/*
 * BRDY interrupt dispatcher. Pipe 0 (BRDY0) is always an OUT read;
 * other pipes are scanned against the pending/enabled masks and
 * serviced as reads or writes according to the endpoint direction.
 * Each handled status bit is acknowledged by writing its complement
 * to BRDYSTS.
 */
static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}
/*
 * BEMP (buffer empty) interrupt dispatcher. BEMP0 continues an ep0 IN
 * transfer. For other pipes, INBUFM clear means the host has drained
 * every buffered packet, so the pipe is quiesced and the head request
 * completed (the queue can be empty if the request was dequeued in the
 * meantime). Each handled status bit is acknowledged in BEMPSTS.
 */
static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}
/*
 * Handle a GET_STATUS control request. Builds the 16-bit status word
 * for the device/interface/endpoint recipient (endpoint halt state is
 * derived from the pipe PID) and queues it on ep0 via the driver's
 * internal ep0_req. Unknown recipients stall ep0.
 *
 * Called with r8a66597->lock held; the lock is dropped around the
 * r8a66597_queue() call. The shared ep0_req/ep0_data pair is reused per
 * request (see the AV note below on re-entrancy).
 */
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = r8a66597->device_status;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}
/*
 * Handle a CLEAR_FEATURE control request. Device/interface recipients
 * just complete the status stage. For an endpoint, the halt is cleared
 * (pipe NAKed, data toggle reset, usb_ep_clear_halt() called with the
 * lock dropped) unless the endpoint is wedged, then any transfer that
 * was pending behind the halt is restarted. Unknown recipients stall
 * ep0.
 */
static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}
1242 static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1244 u16 tmp;
1245 int timeout = 3000;
1247 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1248 case USB_RECIP_DEVICE:
1249 switch (le16_to_cpu(ctrl->wValue)) {
1250 case USB_DEVICE_TEST_MODE:
1251 control_end(r8a66597, 1);
1252 /* Wait for the completion of status stage */
1253 do {
1254 tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1255 udelay(1);
1256 } while (tmp != CS_IDST || timeout-- > 0);
1258 if (tmp == CS_IDST)
1259 r8a66597_bset(r8a66597,
1260 le16_to_cpu(ctrl->wIndex >> 8),
1261 TESTMODE);
1262 break;
1263 default:
1264 pipe_stall(r8a66597, 0);
1265 break;
1267 break;
1268 case USB_RECIP_INTERFACE:
1269 control_end(r8a66597, 1);
1270 break;
1271 case USB_RECIP_ENDPOINT: {
1272 struct r8a66597_ep *ep;
1273 u16 w_index = le16_to_cpu(ctrl->wIndex);
1275 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1276 pipe_stall(r8a66597, ep->pipenum);
1278 control_end(r8a66597, 1);
1280 break;
1281 default:
1282 pipe_stall(r8a66597, 0);
1283 break;
1287 /* if return value is true, call class driver's setup() */
1288 static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1290 u16 *p = (u16 *)ctrl;
1291 unsigned long offset = USBREQ;
1292 int i, ret = 0;
1294 /* read fifo */
1295 r8a66597_write(r8a66597, ~VALID, INTSTS0);
1297 for (i = 0; i < 4; i++)
1298 p[i] = r8a66597_read(r8a66597, offset + i*2);
1300 /* check request */
1301 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1302 switch (ctrl->bRequest) {
1303 case USB_REQ_GET_STATUS:
1304 get_status(r8a66597, ctrl);
1305 break;
1306 case USB_REQ_CLEAR_FEATURE:
1307 clear_feature(r8a66597, ctrl);
1308 break;
1309 case USB_REQ_SET_FEATURE:
1310 set_feature(r8a66597, ctrl);
1311 break;
1312 default:
1313 ret = 1;
1314 break;
1316 } else
1317 ret = 1;
1318 return ret;
1321 static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
1323 u16 speed = get_usb_speed(r8a66597);
1325 switch (speed) {
1326 case HSMODE:
1327 r8a66597->gadget.speed = USB_SPEED_HIGH;
1328 break;
1329 case FSMODE:
1330 r8a66597->gadget.speed = USB_SPEED_FULL;
1331 break;
1332 default:
1333 r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
1334 dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
/*
 * DVSQ (device state transition) interrupt handler: read the new device
 * state, acknowledge DVST, notify the gadget core on bus reset and
 * refresh the cached bus speed on the transitions where the hardware
 * has (re)negotiated it.  Called with r8a66597->lock held.
 */
1338 static void irq_device_state(struct r8a66597 *r8a66597)
1340 u16 dvsq;
1342 dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
/* acknowledge the state-change interrupt before handling it */
1343 r8a66597_write(r8a66597, ~DVST, INTSTS0);
1345 if (dvsq == DS_DFLT) {
1346 /* bus reset */
/* drop the lock: usb_gadget_udc_reset() calls back into the gadget driver */
1347 spin_unlock(&r8a66597->lock);
1348 usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
1349 spin_lock(&r8a66597->lock);
1350 r8a66597_update_usb_speed(r8a66597);
/* leaving the CONFIGURED state: re-read the possibly changed speed */
1352 if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
1353 r8a66597_update_usb_speed(r8a66597);
/* speed still unknown while ADDRESSED/CONFIGURED: try again */
1354 if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
1355 && r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1356 r8a66597_update_usb_speed(r8a66597);
/* remember the state so the next interrupt can detect transitions */
1358 r8a66597->old_dvsq = dvsq;
/*
 * CTRT (control transfer stage transition) interrupt handler.
 * Dispatches on the CTSQ stage: completes the ep0 request at idle,
 * reads and dispatches the SETUP packet at the start of a data/status
 * stage, and ends the control transfer at the status stages.
 *
 * Called with r8a66597->lock held; dropped around driver->setup().
 */
1361 static void irq_control_stage(struct r8a66597 *r8a66597)
1362 __releases(r8a66597->lock)
1363 __acquires(r8a66597->lock)
1365 struct usb_ctrlrequest ctrl;
1366 u16 ctsq;
1368 ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
/* acknowledge the stage-transition interrupt */
1369 r8a66597_write(r8a66597, ~CTRT, INTSTS0);
1371 switch (ctsq) {
1372 case CS_IDST: {
1373 struct r8a66597_ep *ep;
1374 struct r8a66597_request *req;
/* back to idle: the current ep0 request is done */
1375 ep = &r8a66597->ep[0];
1376 req = get_request_from_ep(ep);
1377 transfer_complete(ep, req, 0);
1379 break;
1381 case CS_RDDS:
1382 case CS_WRDS:
1383 case CS_WRND:
/* setup_packet() returns nonzero when the class driver must handle it */
1384 if (setup_packet(r8a66597, &ctrl)) {
1385 spin_unlock(&r8a66597->lock);
1386 if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
1387 < 0)
1388 pipe_stall(r8a66597, 0);
1389 spin_lock(&r8a66597->lock);
1391 break;
1392 case CS_RDSS:
1393 case CS_WRSS:
/* status stage reached: finish the control transfer (no STALL) */
1394 control_end(r8a66597, 0);
1395 break;
1396 default:
1397 dev_err(r8a66597_to_dev(r8a66597),
1398 "ctrl_stage: unexpect ctsq(%x)\n", ctsq);
1399 break;
/*
 * Finish a SUDMAC transfer on @ep: wait for the FIFO to become ready,
 * account the DMA byte count into the request, clear the DMA status
 * and, if the request is complete (full length reached, or a short
 * packet was seen), either switch to empty-irq mode (IN) or complete
 * the request (OUT).
 */
1403 static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
1405 u16 pipenum;
1406 struct r8a66597_request *req;
1407 u32 len;
1408 int i = 0;
1410 pipenum = ep->pipenum;
1411 pipe_change(r8a66597, pipenum);
/* busy-wait (bounded) for the FIFO ready flag */
1413 while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
1414 udelay(1);
1415 if (unlikely(i++ >= 10000)) { /* timeout = 10 msec */
1416 dev_err(r8a66597_to_dev(r8a66597),
1417 "%s: FRDY was not set (%d)\n",
1418 __func__, pipenum);
1419 return;
/* drop whatever is left in the FIFO buffer */
1423 r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
1424 req = get_request_from_ep(ep);
1426 /* prepare parameters */
/* CH0CBC holds the number of bytes the DMA channel transferred */
1427 len = r8a66597_sudmac_read(r8a66597, CH0CBC);
1428 req->req.actual += len;
1430 /* clear */
1431 r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
1433 /* check transfer finish */
/* done when the full length arrived (and no ZLP wanted) or on short packet */
1434 if ((!req->req.zero && (req->req.actual == req->req.length))
1435 || (len % ep->ep.maxpacket)) {
1436 if (ep->dma->dir) {
/* IN: wait for the FIFO to drain before completing */
1437 disable_irq_ready(r8a66597, pipenum);
1438 enable_irq_empty(r8a66597, pipenum);
1439 } else {
1440 /* Clear the interrupt flag for next transfer */
1441 r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
1442 transfer_complete(ep, req, 0);
1447 static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
1449 u32 irqsts;
1450 struct r8a66597_ep *ep;
1451 u16 pipenum;
1453 irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
1454 if (irqsts & CH0ENDS) {
1455 r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
1456 pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
1457 ep = r8a66597->pipenum2ep[pipenum];
1458 sudmac_finish(r8a66597, ep);
/*
 * Top-level interrupt handler.  Saves/restores the CFIFO pipe
 * selection around the handlers and dispatches, in order: SUDMAC
 * events, VBUS change (starts vbus sampling via the timer), device
 * state changes, pipe ready (BRDY), pipe empty (BEMP) and control
 * transfer stages.
 */
1462 static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
1464 struct r8a66597 *r8a66597 = _r8a66597;
1465 u16 intsts0;
1466 u16 intenb0;
1467 u16 savepipe;
1468 u16 mask0;
1470 spin_lock(&r8a66597->lock);
1472 if (r8a66597_is_sudmac(r8a66597))
1473 r8a66597_sudmac_irq(r8a66597);
1475 intsts0 = r8a66597_read(r8a66597, INTSTS0);
1476 intenb0 = r8a66597_read(r8a66597, INTENB0);
/* remember the current pipe selection; handlers change CFIFOSEL */
1478 savepipe = r8a66597_read(r8a66597, CFIFOSEL);
/* only service interrupt sources that are both pending and enabled */
1480 mask0 = intsts0 & intenb0;
1481 if (mask0) {
1482 u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
1483 u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
1484 u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
1485 u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
1487 if (mask0 & VBINT) {
/* ack VBINT only; keep the other status bits pending */
1488 r8a66597_write(r8a66597, 0xffff & ~VBINT,
1489 INTSTS0);
1490 r8a66597_start_xclock(r8a66597);
1492 /* start vbus sampling */
1493 r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
1494 & VBSTS;
1495 r8a66597->scount = R8A66597_MAX_SAMPLING;
/* debounce: r8a66597_timer() re-samples VBSTS every 50 ms */
1497 mod_timer(&r8a66597->timer,
1498 jiffies + msecs_to_jiffies(50));
1500 if (intsts0 & DVSQ)
1501 irq_device_state(r8a66597);
1503 if ((intsts0 & BRDY) && (intenb0 & BRDYE)
1504 && (brdysts & brdyenb))
1505 irq_pipe_ready(r8a66597, brdysts, brdyenb);
1506 if ((intsts0 & BEMP) && (intenb0 & BEMPE)
1507 && (bempsts & bempenb))
1508 irq_pipe_empty(r8a66597, bempsts, bempenb);
1510 if (intsts0 & CTRT)
1511 irq_control_stage(r8a66597);
/* restore the pipe selection the interrupted code was using */
1514 r8a66597_write(r8a66597, savepipe, CFIFOSEL);
1516 spin_unlock(&r8a66597->lock);
1517 return IRQ_HANDLED;
/*
 * VBUS debounce timer.  Re-samples VBSTS every 50 ms; after
 * R8A66597_MAX_SAMPLING identical consecutive samples it reports a
 * stable connect or disconnect.  A sample that differs from the last
 * one restarts the count.
 */
1520 static void r8a66597_timer(unsigned long _r8a66597)
1522 struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
1523 unsigned long flags;
1524 u16 tmp;
1526 spin_lock_irqsave(&r8a66597->lock, flags);
/* NOTE(review): this SYSCFG0 read is immediately overwritten below —
 * presumably leftover code; confirm before removing. */
1527 tmp = r8a66597_read(r8a66597, SYSCFG0);
1528 if (r8a66597->scount > 0) {
1529 tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
1530 if (tmp == r8a66597->old_vbus) {
1531 r8a66597->scount--;
/* enough stable samples: act on the debounced vbus level */
1532 if (r8a66597->scount == 0) {
1533 if (tmp == VBSTS)
1534 r8a66597_usb_connect(r8a66597);
1535 else
1536 r8a66597_usb_disconnect(r8a66597);
1537 } else {
1538 mod_timer(&r8a66597->timer,
1539 jiffies + msecs_to_jiffies(50));
1541 } else {
/* level changed: restart the sampling window */
1542 r8a66597->scount = R8A66597_MAX_SAMPLING;
1543 r8a66597->old_vbus = tmp;
1544 mod_timer(&r8a66597->timer,
1545 jiffies + msecs_to_jiffies(50));
1548 spin_unlock_irqrestore(&r8a66597->lock, flags);
1551 /*-------------------------------------------------------------------------*/
1552 static int r8a66597_enable(struct usb_ep *_ep,
1553 const struct usb_endpoint_descriptor *desc)
1555 struct r8a66597_ep *ep;
1557 ep = container_of(_ep, struct r8a66597_ep, ep);
1558 return alloc_pipe_config(ep, desc);
/*
 * usb_ep_ops.disable: abort every queued request with -ECONNRESET,
 * mask the pipe's interrupts and release its hardware configuration.
 * The lock is taken per request so completion callbacks can requeue.
 */
1561 static int r8a66597_disable(struct usb_ep *_ep)
1563 struct r8a66597_ep *ep;
1564 struct r8a66597_request *req;
1565 unsigned long flags;
1567 ep = container_of(_ep, struct r8a66597_ep, ep);
1568 BUG_ON(!ep);
1570 while (!list_empty(&ep->queue)) {
1571 req = get_request_from_ep(ep);
1572 spin_lock_irqsave(&ep->r8a66597->lock, flags);
/* transfer_complete() invokes the request's completion callback */
1573 transfer_complete(ep, req, -ECONNRESET);
1574 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1577 pipe_irq_disable(ep->r8a66597, ep->pipenum);
1578 return free_pipe_config(ep);
1581 static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
1582 gfp_t gfp_flags)
1584 struct r8a66597_request *req;
1586 req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
1587 if (!req)
1588 return NULL;
1590 INIT_LIST_HEAD(&req->queue);
1592 return &req->req;
1595 static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
1597 struct r8a66597_request *req;
1599 req = container_of(_req, struct r8a66597_request, req);
1600 kfree(req);
/*
 * usb_ep_ops.queue: append a request to the endpoint's queue and start
 * it immediately when the endpoint was idle.  ep0 requests always go
 * through start_ep0(); other endpoints start only when the queue was
 * empty and the endpoint is not halted (ep->busy).
 * Returns -ESHUTDOWN when the bus speed is still unknown.
 */
1603 static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
1604 gfp_t gfp_flags)
1606 struct r8a66597_ep *ep;
1607 struct r8a66597_request *req;
1608 unsigned long flags;
1609 int request = 0;
1611 ep = container_of(_ep, struct r8a66597_ep, ep);
1612 req = container_of(_req, struct r8a66597_request, req);
1614 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1615 return -ESHUTDOWN;
1617 spin_lock_irqsave(&ep->r8a66597->lock, flags);
/* remember whether the queue was empty before we add ours */
1619 if (list_empty(&ep->queue))
1620 request = 1;
1622 list_add_tail(&req->queue, &ep->queue);
1623 req->req.actual = 0;
1624 req->req.status = -EINPROGRESS;
1626 if (ep->ep.desc == NULL) /* control */
1627 start_ep0(ep, req);
1628 else {
/* only kick the hardware if nothing was already in flight */
1629 if (request && !ep->busy)
1630 start_packet(ep, req);
1633 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1635 return 0;
1638 static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1640 struct r8a66597_ep *ep;
1641 struct r8a66597_request *req;
1642 unsigned long flags;
1644 ep = container_of(_ep, struct r8a66597_ep, ep);
1645 req = container_of(_req, struct r8a66597_request, req);
1647 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1648 if (!list_empty(&ep->queue))
1649 transfer_complete(ep, req, -ECONNRESET);
1650 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1652 return 0;
1655 static int r8a66597_set_halt(struct usb_ep *_ep, int value)
1657 struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
1658 unsigned long flags;
1659 int ret = 0;
1661 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1662 if (!list_empty(&ep->queue)) {
1663 ret = -EAGAIN;
1664 } else if (value) {
1665 ep->busy = 1;
1666 pipe_stall(ep->r8a66597, ep->pipenum);
1667 } else {
1668 ep->busy = 0;
1669 ep->wedge = 0;
1670 pipe_stop(ep->r8a66597, ep->pipenum);
1672 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1673 return ret;
1676 static int r8a66597_set_wedge(struct usb_ep *_ep)
1678 struct r8a66597_ep *ep;
1679 unsigned long flags;
1681 ep = container_of(_ep, struct r8a66597_ep, ep);
1683 if (!ep || !ep->ep.desc)
1684 return -EINVAL;
1686 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1687 ep->wedge = 1;
1688 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1690 return usb_ep_set_halt(_ep);
1693 static void r8a66597_fifo_flush(struct usb_ep *_ep)
1695 struct r8a66597_ep *ep;
1696 unsigned long flags;
1698 ep = container_of(_ep, struct r8a66597_ep, ep);
1699 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1700 if (list_empty(&ep->queue) && !ep->busy) {
1701 pipe_stop(ep->r8a66597, ep->pipenum);
1702 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
1703 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1704 r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1706 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1709 static struct usb_ep_ops r8a66597_ep_ops = {
1710 .enable = r8a66597_enable,
1711 .disable = r8a66597_disable,
1713 .alloc_request = r8a66597_alloc_request,
1714 .free_request = r8a66597_free_request,
1716 .queue = r8a66597_queue,
1717 .dequeue = r8a66597_dequeue,
1719 .set_halt = r8a66597_set_halt,
1720 .set_wedge = r8a66597_set_wedge,
1721 .fifo_flush = r8a66597_fifo_flush,
1724 /*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops.udc_start: bind a gadget driver, bring up the
 * controller, enable VBUS-change interrupts and, if VBUS is already
 * present, start the debounced vbus sampling via the timer.
 * Requires a high-speed capable driver with a setup() callback.
 */
1725 static int r8a66597_start(struct usb_gadget *gadget,
1726 struct usb_gadget_driver *driver)
1728 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1730 if (!driver
1731 || driver->max_speed < USB_SPEED_HIGH
1732 || !driver->setup)
1733 return -EINVAL;
1734 if (!r8a66597)
1735 return -ENODEV;
1737 /* hook up the driver */
1738 r8a66597->driver = driver;
1740 init_controller(r8a66597);
1741 r8a66597_bset(r8a66597, VBSE, INTENB0);
/* VBUS already high: begin sampling now instead of waiting for VBINT */
1742 if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
1743 r8a66597_start_xclock(r8a66597);
1744 /* start vbus sampling */
1745 r8a66597->old_vbus = r8a66597_read(r8a66597,
1746 INTSTS0) & VBSTS;
1747 r8a66597->scount = R8A66597_MAX_SAMPLING;
1748 mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
1751 return 0;
1754 static int r8a66597_stop(struct usb_gadget *gadget)
1756 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1757 unsigned long flags;
1759 spin_lock_irqsave(&r8a66597->lock, flags);
1760 r8a66597_bclr(r8a66597, VBSE, INTENB0);
1761 disable_controller(r8a66597);
1762 spin_unlock_irqrestore(&r8a66597->lock, flags);
1764 r8a66597->driver = NULL;
1765 return 0;
1768 /*-------------------------------------------------------------------------*/
1769 static int r8a66597_get_frame(struct usb_gadget *_gadget)
1771 struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
1772 return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
1775 static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
1777 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1778 unsigned long flags;
1780 spin_lock_irqsave(&r8a66597->lock, flags);
1781 if (is_on)
1782 r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
1783 else
1784 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
1785 spin_unlock_irqrestore(&r8a66597->lock, flags);
1787 return 0;
1790 static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
1792 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1794 gadget->is_selfpowered = (is_self != 0);
1795 if (is_self)
1796 r8a66597->device_status |= 1 << USB_DEVICE_SELF_POWERED;
1797 else
1798 r8a66597->device_status &= ~(1 << USB_DEVICE_SELF_POWERED);
1800 return 0;
/* Gadget-level operations registered with the UDC core. */
1803 static const struct usb_gadget_ops r8a66597_gadget_ops = {
1804 .get_frame = r8a66597_get_frame,
1805 .udc_start = r8a66597_start,
1806 .udc_stop = r8a66597_stop,
1807 .pullup = r8a66597_pullup,
1808 .set_selfpowered = r8a66597_set_selfpowered,
/*
 * Platform driver remove: unregister the UDC, stop the vbus-sampling
 * timer, free the internal ep0 request and release the clock that
 * probe enabled for on-chip controllers.
 */
1811 static int r8a66597_remove(struct platform_device *pdev)
1813 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
1815 usb_del_gadget_udc(&r8a66597->gadget);
1816 del_timer_sync(&r8a66597->timer);
1817 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1819 if (r8a66597->pdata->on_chip) {
1820 clk_disable_unprepare(r8a66597->clk);
1823 return 0;
/* Completion callback for the internal ep0_req: intentionally does nothing. */
1826 static void nop_completion(struct usb_ep *ep, struct usb_request *r)
1830 static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
1831 struct platform_device *pdev)
1833 struct resource *res;
1835 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
1836 r8a66597->sudmac_reg = devm_ioremap_resource(&pdev->dev, res);
1837 return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
1840 static int r8a66597_probe(struct platform_device *pdev)
1842 struct device *dev = &pdev->dev;
1843 char clk_name[8];
1844 struct resource *res, *ires;
1845 int irq;
1846 void __iomem *reg = NULL;
1847 struct r8a66597 *r8a66597 = NULL;
1848 int ret = 0;
1849 int i;
1850 unsigned long irq_trigger;
1852 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1853 reg = devm_ioremap_resource(&pdev->dev, res);
1854 if (IS_ERR(reg))
1855 return PTR_ERR(reg);
1857 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1858 irq = ires->start;
1859 irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
1861 if (irq < 0) {
1862 dev_err(dev, "platform_get_irq error.\n");
1863 return -ENODEV;
1866 /* initialize ucd */
1867 r8a66597 = devm_kzalloc(dev, sizeof(struct r8a66597), GFP_KERNEL);
1868 if (r8a66597 == NULL)
1869 return -ENOMEM;
1871 spin_lock_init(&r8a66597->lock);
1872 platform_set_drvdata(pdev, r8a66597);
1873 r8a66597->pdata = dev_get_platdata(dev);
1874 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1876 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1877 r8a66597->gadget.max_speed = USB_SPEED_HIGH;
1878 r8a66597->gadget.name = udc_name;
1880 init_timer(&r8a66597->timer);
1881 r8a66597->timer.function = r8a66597_timer;
1882 r8a66597->timer.data = (unsigned long)r8a66597;
1883 r8a66597->reg = reg;
1885 if (r8a66597->pdata->on_chip) {
1886 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
1887 r8a66597->clk = devm_clk_get(dev, clk_name);
1888 if (IS_ERR(r8a66597->clk)) {
1889 dev_err(dev, "cannot get clock \"%s\"\n", clk_name);
1890 return PTR_ERR(r8a66597->clk);
1892 clk_prepare_enable(r8a66597->clk);
1895 if (r8a66597->pdata->sudmac) {
1896 ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
1897 if (ret < 0)
1898 goto clean_up2;
1901 disable_controller(r8a66597); /* make sure controller is disabled */
1903 ret = devm_request_irq(dev, irq, r8a66597_irq, IRQF_SHARED,
1904 udc_name, r8a66597);
1905 if (ret < 0) {
1906 dev_err(dev, "request_irq error (%d)\n", ret);
1907 goto clean_up2;
1910 INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
1911 r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
1912 INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
1913 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
1914 struct r8a66597_ep *ep = &r8a66597->ep[i];
1916 if (i != 0) {
1917 INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
1918 list_add_tail(&r8a66597->ep[i].ep.ep_list,
1919 &r8a66597->gadget.ep_list);
1921 ep->r8a66597 = r8a66597;
1922 INIT_LIST_HEAD(&ep->queue);
1923 ep->ep.name = r8a66597_ep_name[i];
1924 ep->ep.ops = &r8a66597_ep_ops;
1925 usb_ep_set_maxpacket_limit(&ep->ep, 512);
1927 if (i == 0) {
1928 ep->ep.caps.type_control = true;
1929 } else {
1930 ep->ep.caps.type_iso = true;
1931 ep->ep.caps.type_bulk = true;
1932 ep->ep.caps.type_int = true;
1934 ep->ep.caps.dir_in = true;
1935 ep->ep.caps.dir_out = true;
1937 usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
1938 r8a66597->ep[0].pipenum = 0;
1939 r8a66597->ep[0].fifoaddr = CFIFO;
1940 r8a66597->ep[0].fifosel = CFIFOSEL;
1941 r8a66597->ep[0].fifoctr = CFIFOCTR;
1942 r8a66597->ep[0].pipectr = get_pipectr_addr(0);
1943 r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
1944 r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
1946 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
1947 GFP_KERNEL);
1948 if (r8a66597->ep0_req == NULL) {
1949 ret = -ENOMEM;
1950 goto clean_up2;
1952 r8a66597->ep0_req->complete = nop_completion;
1954 ret = usb_add_gadget_udc(dev, &r8a66597->gadget);
1955 if (ret)
1956 goto err_add_udc;
1958 dev_info(dev, "version %s\n", DRIVER_VERSION);
1959 return 0;
1961 err_add_udc:
1962 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1963 clean_up2:
1964 if (r8a66597->pdata->on_chip)
1965 clk_disable_unprepare(r8a66597->clk);
1967 if (r8a66597->ep0_req)
1968 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1970 return ret;
1973 /*-------------------------------------------------------------------------*/
/* Platform driver glue; the probe hook is supplied separately via
 * module_platform_driver_probe() below. */
1974 static struct platform_driver r8a66597_driver = {
1975 .remove = r8a66597_remove,
1976 .driver = {
1977 .name = (char *) udc_name,
/* Register the driver with a one-shot probe and emit module metadata. */
1981 module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
1983 MODULE_DESCRIPTION("R8A66597 USB gadget driver");
1984 MODULE_LICENSE("GPL");
1985 MODULE_AUTHOR("Yoshihiro Shimoda");
1986 MODULE_ALIAS("platform:r8a66597_udc");