/* drivers/usb/gadget/udc/r8a66597-udc.c */
/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/platform_device.h>
18 #include <linux/clk.h>
19 #include <linux/err.h>
20 #include <linux/slab.h>
21 #include <linux/dma-mapping.h>
23 #include <linux/usb/ch9.h>
24 #include <linux/usb/gadget.h>
26 #include "r8a66597-udc.h"
28 #define DRIVER_VERSION "2011-09-26"
30 static const char udc_name[] = "r8a66597_udc";
31 static const char *r8a66597_ep_name[] = {
32 "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
33 "ep8", "ep9",
36 static void init_controller(struct r8a66597 *r8a66597);
37 static void disable_controller(struct r8a66597 *r8a66597);
38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
39 static void irq_packet_write(struct r8a66597_ep *ep,
40 struct r8a66597_request *req);
41 static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
42 gfp_t gfp_flags);
44 static void transfer_complete(struct r8a66597_ep *ep,
45 struct r8a66597_request *req, int status);
47 /*-------------------------------------------------------------------------*/
48 static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
50 return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
53 static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
54 unsigned long reg)
56 u16 tmp;
58 tmp = r8a66597_read(r8a66597, INTENB0);
59 r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
60 INTENB0);
61 r8a66597_bset(r8a66597, (1 << pipenum), reg);
62 r8a66597_write(r8a66597, tmp, INTENB0);
65 static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
66 unsigned long reg)
68 u16 tmp;
70 tmp = r8a66597_read(r8a66597, INTENB0);
71 r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
72 INTENB0);
73 r8a66597_bclr(r8a66597, (1 << pipenum), reg);
74 r8a66597_write(r8a66597, tmp, INTENB0);
77 static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
79 r8a66597_bset(r8a66597, CTRE, INTENB0);
80 r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
82 r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
85 static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
86 __releases(r8a66597->lock)
87 __acquires(r8a66597->lock)
89 r8a66597_bclr(r8a66597, CTRE, INTENB0);
90 r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
91 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
93 r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
94 spin_unlock(&r8a66597->lock);
95 r8a66597->driver->disconnect(&r8a66597->gadget);
96 spin_lock(&r8a66597->lock);
98 disable_controller(r8a66597);
99 init_controller(r8a66597);
100 r8a66597_bset(r8a66597, VBSE, INTENB0);
101 INIT_LIST_HEAD(&r8a66597->ep[0].queue);
104 static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
106 u16 pid = 0;
107 unsigned long offset;
109 if (pipenum == 0) {
110 pid = r8a66597_read(r8a66597, DCPCTR) & PID;
111 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
112 offset = get_pipectr_addr(pipenum);
113 pid = r8a66597_read(r8a66597, offset) & PID;
114 } else {
115 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
116 pipenum);
119 return pid;
122 static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
123 u16 pid)
125 unsigned long offset;
127 if (pipenum == 0) {
128 r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
129 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
130 offset = get_pipectr_addr(pipenum);
131 r8a66597_mdfy(r8a66597, pid, PID, offset);
132 } else {
133 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
134 pipenum);
138 static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
140 control_reg_set_pid(r8a66597, pipenum, PID_BUF);
143 static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
145 control_reg_set_pid(r8a66597, pipenum, PID_NAK);
148 static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
150 control_reg_set_pid(r8a66597, pipenum, PID_STALL);
153 static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
155 u16 ret = 0;
156 unsigned long offset;
158 if (pipenum == 0) {
159 ret = r8a66597_read(r8a66597, DCPCTR);
160 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
161 offset = get_pipectr_addr(pipenum);
162 ret = r8a66597_read(r8a66597, offset);
163 } else {
164 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
165 pipenum);
168 return ret;
171 static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
173 unsigned long offset;
175 pipe_stop(r8a66597, pipenum);
177 if (pipenum == 0) {
178 r8a66597_bset(r8a66597, SQCLR, DCPCTR);
179 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
180 offset = get_pipectr_addr(pipenum);
181 r8a66597_bset(r8a66597, SQCLR, offset);
182 } else {
183 dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
184 pipenum);
188 static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
190 unsigned long offset;
192 pipe_stop(r8a66597, pipenum);
194 if (pipenum == 0) {
195 r8a66597_bset(r8a66597, SQSET, DCPCTR);
196 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
197 offset = get_pipectr_addr(pipenum);
198 r8a66597_bset(r8a66597, SQSET, offset);
199 } else {
200 dev_err(r8a66597_to_dev(r8a66597),
201 "unexpect pipe num(%d)\n", pipenum);
205 static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
207 unsigned long offset;
209 if (pipenum == 0) {
210 return r8a66597_read(r8a66597, DCPCTR) & SQMON;
211 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
212 offset = get_pipectr_addr(pipenum);
213 return r8a66597_read(r8a66597, offset) & SQMON;
214 } else {
215 dev_err(r8a66597_to_dev(r8a66597),
216 "unexpect pipe num(%d)\n", pipenum);
219 return 0;
222 static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
224 return control_reg_sqmon(r8a66597, pipenum);
227 static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
228 u16 toggle)
230 if (toggle)
231 control_reg_sqset(r8a66597, pipenum);
232 else
233 control_reg_sqclr(r8a66597, pipenum);
236 static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
238 u16 tmp;
239 int size;
241 if (pipenum == 0) {
242 tmp = r8a66597_read(r8a66597, DCPCFG);
243 if ((tmp & R8A66597_CNTMD) != 0)
244 size = 256;
245 else {
246 tmp = r8a66597_read(r8a66597, DCPMAXP);
247 size = tmp & MAXP;
249 } else {
250 r8a66597_write(r8a66597, pipenum, PIPESEL);
251 tmp = r8a66597_read(r8a66597, PIPECFG);
252 if ((tmp & R8A66597_CNTMD) != 0) {
253 tmp = r8a66597_read(r8a66597, PIPEBUF);
254 size = ((tmp >> 10) + 1) * 64;
255 } else {
256 tmp = r8a66597_read(r8a66597, PIPEMAXP);
257 size = tmp & MXPS;
261 return size;
264 static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
266 if (r8a66597->pdata->on_chip)
267 return MBW_32;
268 else
269 return MBW_16;
272 static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
273 u16 isel, u16 fifosel)
275 u16 tmp, mask, loop;
276 int i = 0;
278 if (!pipenum) {
279 mask = ISEL | CURPIPE;
280 loop = isel;
281 } else {
282 mask = CURPIPE;
283 loop = pipenum;
285 r8a66597_mdfy(r8a66597, loop, mask, fifosel);
287 do {
288 tmp = r8a66597_read(r8a66597, fifosel);
289 if (i++ > 1000000) {
290 dev_err(r8a66597_to_dev(r8a66597),
291 "r8a66597: register%x, loop %x "
292 "is timeout\n", fifosel, loop);
293 break;
295 ndelay(1);
296 } while ((tmp & mask) != loop);
299 static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
301 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
303 if (ep->use_dma)
304 r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
306 r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
308 ndelay(450);
310 if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
311 r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
312 else
313 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
315 if (ep->use_dma)
316 r8a66597_bset(r8a66597, DREQE, ep->fifosel);
319 static int pipe_buffer_setting(struct r8a66597 *r8a66597,
320 struct r8a66597_pipe_info *info)
322 u16 bufnum = 0, buf_bsize = 0;
323 u16 pipecfg = 0;
325 if (info->pipe == 0)
326 return -EINVAL;
328 r8a66597_write(r8a66597, info->pipe, PIPESEL);
330 if (info->dir_in)
331 pipecfg |= R8A66597_DIR;
332 pipecfg |= info->type;
333 pipecfg |= info->epnum;
334 switch (info->type) {
335 case R8A66597_INT:
336 bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
337 buf_bsize = 0;
338 break;
339 case R8A66597_BULK:
340 /* isochronous pipes may be used as bulk pipes */
341 if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
342 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
343 else
344 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
346 bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
347 buf_bsize = 7;
348 pipecfg |= R8A66597_DBLB;
349 if (!info->dir_in)
350 pipecfg |= R8A66597_SHTNAK;
351 break;
352 case R8A66597_ISO:
353 bufnum = R8A66597_BASE_BUFNUM +
354 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
355 buf_bsize = 7;
356 break;
359 if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
360 pr_err("r8a66597 pipe memory is insufficient\n");
361 return -ENOMEM;
364 r8a66597_write(r8a66597, pipecfg, PIPECFG);
365 r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
366 r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
367 if (info->interval)
368 info->interval--;
369 r8a66597_write(r8a66597, info->interval, PIPEPERI);
371 return 0;
374 static void pipe_buffer_release(struct r8a66597 *r8a66597,
375 struct r8a66597_pipe_info *info)
377 if (info->pipe == 0)
378 return;
380 if (is_bulk_pipe(info->pipe)) {
381 r8a66597->bulk--;
382 } else if (is_interrupt_pipe(info->pipe)) {
383 r8a66597->interrupt--;
384 } else if (is_isoc_pipe(info->pipe)) {
385 r8a66597->isochronous--;
386 if (info->type == R8A66597_BULK)
387 r8a66597->bulk--;
388 } else {
389 dev_err(r8a66597_to_dev(r8a66597),
390 "ep_release: unexpect pipenum (%d)\n", info->pipe);
394 static void pipe_initialize(struct r8a66597_ep *ep)
396 struct r8a66597 *r8a66597 = ep->r8a66597;
398 r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
400 r8a66597_write(r8a66597, ACLRM, ep->pipectr);
401 r8a66597_write(r8a66597, 0, ep->pipectr);
402 r8a66597_write(r8a66597, SQCLR, ep->pipectr);
403 if (ep->use_dma) {
404 r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
406 ndelay(450);
408 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
412 static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
413 struct r8a66597_ep *ep,
414 const struct usb_endpoint_descriptor *desc,
415 u16 pipenum, int dma)
417 ep->use_dma = 0;
418 ep->fifoaddr = CFIFO;
419 ep->fifosel = CFIFOSEL;
420 ep->fifoctr = CFIFOCTR;
422 ep->pipectr = get_pipectr_addr(pipenum);
423 if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
424 ep->pipetre = get_pipetre_addr(pipenum);
425 ep->pipetrn = get_pipetrn_addr(pipenum);
426 } else {
427 ep->pipetre = 0;
428 ep->pipetrn = 0;
430 ep->pipenum = pipenum;
431 ep->ep.maxpacket = usb_endpoint_maxp(desc);
432 r8a66597->pipenum2ep[pipenum] = ep;
433 r8a66597->epaddr2ep[usb_endpoint_num(desc)]
434 = ep;
435 INIT_LIST_HEAD(&ep->queue);
438 static void r8a66597_ep_release(struct r8a66597_ep *ep)
440 struct r8a66597 *r8a66597 = ep->r8a66597;
441 u16 pipenum = ep->pipenum;
443 if (pipenum == 0)
444 return;
446 if (ep->use_dma)
447 r8a66597->num_dma--;
448 ep->pipenum = 0;
449 ep->busy = 0;
450 ep->use_dma = 0;
453 static int alloc_pipe_config(struct r8a66597_ep *ep,
454 const struct usb_endpoint_descriptor *desc)
456 struct r8a66597 *r8a66597 = ep->r8a66597;
457 struct r8a66597_pipe_info info;
458 int dma = 0;
459 unsigned char *counter;
460 int ret;
462 ep->ep.desc = desc;
464 if (ep->pipenum) /* already allocated pipe */
465 return 0;
467 switch (usb_endpoint_type(desc)) {
468 case USB_ENDPOINT_XFER_BULK:
469 if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
470 if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
471 dev_err(r8a66597_to_dev(r8a66597),
472 "bulk pipe is insufficient\n");
473 return -ENODEV;
474 } else {
475 info.pipe = R8A66597_BASE_PIPENUM_ISOC
476 + r8a66597->isochronous;
477 counter = &r8a66597->isochronous;
479 } else {
480 info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
481 counter = &r8a66597->bulk;
483 info.type = R8A66597_BULK;
484 dma = 1;
485 break;
486 case USB_ENDPOINT_XFER_INT:
487 if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
488 dev_err(r8a66597_to_dev(r8a66597),
489 "interrupt pipe is insufficient\n");
490 return -ENODEV;
492 info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
493 info.type = R8A66597_INT;
494 counter = &r8a66597->interrupt;
495 break;
496 case USB_ENDPOINT_XFER_ISOC:
497 if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
498 dev_err(r8a66597_to_dev(r8a66597),
499 "isochronous pipe is insufficient\n");
500 return -ENODEV;
502 info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
503 info.type = R8A66597_ISO;
504 counter = &r8a66597->isochronous;
505 break;
506 default:
507 dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
508 return -EINVAL;
510 ep->type = info.type;
512 info.epnum = usb_endpoint_num(desc);
513 info.maxpacket = usb_endpoint_maxp(desc);
514 info.interval = desc->bInterval;
515 if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
516 info.dir_in = 1;
517 else
518 info.dir_in = 0;
520 ret = pipe_buffer_setting(r8a66597, &info);
521 if (ret < 0) {
522 dev_err(r8a66597_to_dev(r8a66597),
523 "pipe_buffer_setting fail\n");
524 return ret;
527 (*counter)++;
528 if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
529 r8a66597->bulk++;
531 r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
532 pipe_initialize(ep);
534 return 0;
537 static int free_pipe_config(struct r8a66597_ep *ep)
539 struct r8a66597 *r8a66597 = ep->r8a66597;
540 struct r8a66597_pipe_info info;
542 info.pipe = ep->pipenum;
543 info.type = ep->type;
544 pipe_buffer_release(r8a66597, &info);
545 r8a66597_ep_release(ep);
547 return 0;
550 /*-------------------------------------------------------------------------*/
551 static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
553 enable_irq_ready(r8a66597, pipenum);
554 enable_irq_nrdy(r8a66597, pipenum);
557 static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
559 disable_irq_ready(r8a66597, pipenum);
560 disable_irq_nrdy(r8a66597, pipenum);
563 /* if complete is true, gadget driver complete function is not call */
564 static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
566 r8a66597->ep[0].internal_ccpl = ccpl;
567 pipe_start(r8a66597, 0);
568 r8a66597_bset(r8a66597, CCPL, DCPCTR);
571 static void start_ep0_write(struct r8a66597_ep *ep,
572 struct r8a66597_request *req)
574 struct r8a66597 *r8a66597 = ep->r8a66597;
576 pipe_change(r8a66597, ep->pipenum);
577 r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
578 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
579 if (req->req.length == 0) {
580 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
581 pipe_start(r8a66597, 0);
582 transfer_complete(ep, req, 0);
583 } else {
584 r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
585 irq_ep0_write(ep, req);
589 static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
590 u16 fifosel)
592 u16 tmp;
594 tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
595 if (tmp == pipenum)
596 r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
599 static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
600 int enable)
602 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
603 u16 tmp, toggle;
605 /* check current BFRE bit */
606 r8a66597_write(r8a66597, pipenum, PIPESEL);
607 tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
608 if ((enable && tmp) || (!enable && !tmp))
609 return;
611 /* change BFRE bit */
612 pipe_stop(r8a66597, pipenum);
613 disable_fifosel(r8a66597, pipenum, CFIFOSEL);
614 disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
615 disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
617 toggle = save_usb_toggle(r8a66597, pipenum);
619 r8a66597_write(r8a66597, pipenum, PIPESEL);
620 if (enable)
621 r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
622 else
623 r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
625 /* initialize for internal BFRE flag */
626 r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
627 r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
629 restore_usb_toggle(r8a66597, pipenum, toggle);
632 static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
633 struct r8a66597_ep *ep,
634 struct r8a66597_request *req)
636 struct r8a66597_dma *dma;
638 if (!r8a66597_is_sudmac(r8a66597))
639 return -ENODEV;
641 /* Check transfer type */
642 if (!is_bulk_pipe(ep->pipenum))
643 return -EIO;
645 if (r8a66597->dma.used)
646 return -EBUSY;
648 /* set SUDMAC parameters */
649 dma = &r8a66597->dma;
650 dma->used = 1;
651 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) {
652 dma->dir = 1;
653 } else {
654 dma->dir = 0;
655 change_bfre_mode(r8a66597, ep->pipenum, 1);
658 /* set r8a66597_ep paramters */
659 ep->use_dma = 1;
660 ep->dma = dma;
661 ep->fifoaddr = D0FIFO;
662 ep->fifosel = D0FIFOSEL;
663 ep->fifoctr = D0FIFOCTR;
665 /* dma mapping */
666 return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
669 static void sudmac_free_channel(struct r8a66597 *r8a66597,
670 struct r8a66597_ep *ep,
671 struct r8a66597_request *req)
673 if (!r8a66597_is_sudmac(r8a66597))
674 return;
676 usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
678 r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
679 r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
681 ep->dma->used = 0;
682 ep->use_dma = 0;
683 ep->fifoaddr = CFIFO;
684 ep->fifosel = CFIFOSEL;
685 ep->fifoctr = CFIFOCTR;
688 static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
689 struct r8a66597_request *req)
691 BUG_ON(req->req.length == 0);
693 r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
694 r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
695 r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
696 r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
698 r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
701 static void start_packet_write(struct r8a66597_ep *ep,
702 struct r8a66597_request *req)
704 struct r8a66597 *r8a66597 = ep->r8a66597;
705 u16 tmp;
707 pipe_change(r8a66597, ep->pipenum);
708 disable_irq_empty(r8a66597, ep->pipenum);
709 pipe_start(r8a66597, ep->pipenum);
711 if (req->req.length == 0) {
712 transfer_complete(ep, req, 0);
713 } else {
714 r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
715 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
716 /* PIO mode */
717 pipe_change(r8a66597, ep->pipenum);
718 disable_irq_empty(r8a66597, ep->pipenum);
719 pipe_start(r8a66597, ep->pipenum);
720 tmp = r8a66597_read(r8a66597, ep->fifoctr);
721 if (unlikely((tmp & FRDY) == 0))
722 pipe_irq_enable(r8a66597, ep->pipenum);
723 else
724 irq_packet_write(ep, req);
725 } else {
726 /* DMA mode */
727 pipe_change(r8a66597, ep->pipenum);
728 disable_irq_nrdy(r8a66597, ep->pipenum);
729 pipe_start(r8a66597, ep->pipenum);
730 enable_irq_nrdy(r8a66597, ep->pipenum);
731 sudmac_start(r8a66597, ep, req);
736 static void start_packet_read(struct r8a66597_ep *ep,
737 struct r8a66597_request *req)
739 struct r8a66597 *r8a66597 = ep->r8a66597;
740 u16 pipenum = ep->pipenum;
742 if (ep->pipenum == 0) {
743 r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
744 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
745 pipe_start(r8a66597, pipenum);
746 pipe_irq_enable(r8a66597, pipenum);
747 } else {
748 pipe_stop(r8a66597, pipenum);
749 if (ep->pipetre) {
750 enable_irq_nrdy(r8a66597, pipenum);
751 r8a66597_write(r8a66597, TRCLR, ep->pipetre);
752 r8a66597_write(r8a66597,
753 DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
754 ep->pipetrn);
755 r8a66597_bset(r8a66597, TRENB, ep->pipetre);
758 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
759 /* PIO mode */
760 change_bfre_mode(r8a66597, ep->pipenum, 0);
761 pipe_start(r8a66597, pipenum); /* trigger once */
762 pipe_irq_enable(r8a66597, pipenum);
763 } else {
764 pipe_change(r8a66597, pipenum);
765 sudmac_start(r8a66597, ep, req);
766 pipe_start(r8a66597, pipenum); /* trigger once */
771 static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
773 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
774 start_packet_write(ep, req);
775 else
776 start_packet_read(ep, req);
779 static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
781 u16 ctsq;
783 ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
785 switch (ctsq) {
786 case CS_RDDS:
787 start_ep0_write(ep, req);
788 break;
789 case CS_WRDS:
790 start_packet_read(ep, req);
791 break;
793 case CS_WRND:
794 control_end(ep->r8a66597, 0);
795 break;
796 default:
797 dev_err(r8a66597_to_dev(ep->r8a66597),
798 "start_ep0: unexpect ctsq(%x)\n", ctsq);
799 break;
803 static void init_controller(struct r8a66597 *r8a66597)
805 u16 vif = r8a66597->pdata->vif ? LDRV : 0;
806 u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
807 u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
809 if (r8a66597->pdata->on_chip) {
810 if (r8a66597->pdata->buswait)
811 r8a66597_write(r8a66597, r8a66597->pdata->buswait,
812 SYSCFG1);
813 else
814 r8a66597_write(r8a66597, 0x0f, SYSCFG1);
815 r8a66597_bset(r8a66597, HSE, SYSCFG0);
817 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
818 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
819 r8a66597_bset(r8a66597, USBE, SYSCFG0);
821 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
823 r8a66597_bset(r8a66597, irq_sense, INTENB1);
824 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
825 DMA0CFG);
826 } else {
827 r8a66597_bset(r8a66597, vif | endian, PINCFG);
828 r8a66597_bset(r8a66597, HSE, SYSCFG0); /* High spd */
829 r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
830 XTAL, SYSCFG0);
832 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
833 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
834 r8a66597_bset(r8a66597, USBE, SYSCFG0);
836 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
838 mdelay(3);
840 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
842 mdelay(1);
844 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
846 r8a66597_bset(r8a66597, irq_sense, INTENB1);
847 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
848 DMA0CFG);
852 static void disable_controller(struct r8a66597 *r8a66597)
854 if (r8a66597->pdata->on_chip) {
855 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
856 r8a66597_bclr(r8a66597, UTST, TESTMODE);
858 /* disable interrupts */
859 r8a66597_write(r8a66597, 0, INTENB0);
860 r8a66597_write(r8a66597, 0, INTENB1);
861 r8a66597_write(r8a66597, 0, BRDYENB);
862 r8a66597_write(r8a66597, 0, BEMPENB);
863 r8a66597_write(r8a66597, 0, NRDYENB);
865 /* clear status */
866 r8a66597_write(r8a66597, 0, BRDYSTS);
867 r8a66597_write(r8a66597, 0, NRDYSTS);
868 r8a66597_write(r8a66597, 0, BEMPSTS);
870 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
871 r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
873 } else {
874 r8a66597_bclr(r8a66597, UTST, TESTMODE);
875 r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
876 udelay(1);
877 r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
878 udelay(1);
879 udelay(1);
880 r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
884 static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
886 u16 tmp;
888 if (!r8a66597->pdata->on_chip) {
889 tmp = r8a66597_read(r8a66597, SYSCFG0);
890 if (!(tmp & XCKE))
891 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
895 static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
897 return list_entry(ep->queue.next, struct r8a66597_request, queue);
900 /*-------------------------------------------------------------------------*/
901 static void transfer_complete(struct r8a66597_ep *ep,
902 struct r8a66597_request *req, int status)
903 __releases(r8a66597->lock)
904 __acquires(r8a66597->lock)
906 int restart = 0;
908 if (unlikely(ep->pipenum == 0)) {
909 if (ep->internal_ccpl) {
910 ep->internal_ccpl = 0;
911 return;
915 list_del_init(&req->queue);
916 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
917 req->req.status = -ESHUTDOWN;
918 else
919 req->req.status = status;
921 if (!list_empty(&ep->queue))
922 restart = 1;
924 if (ep->use_dma)
925 sudmac_free_channel(ep->r8a66597, ep, req);
927 spin_unlock(&ep->r8a66597->lock);
928 usb_gadget_giveback_request(&ep->ep, &req->req);
929 spin_lock(&ep->r8a66597->lock);
931 if (restart) {
932 req = get_request_from_ep(ep);
933 if (ep->ep.desc)
934 start_packet(ep, req);
938 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
940 int i;
941 u16 tmp;
942 unsigned bufsize;
943 size_t size;
944 void *buf;
945 u16 pipenum = ep->pipenum;
946 struct r8a66597 *r8a66597 = ep->r8a66597;
948 pipe_change(r8a66597, pipenum);
949 r8a66597_bset(r8a66597, ISEL, ep->fifosel);
951 i = 0;
952 do {
953 tmp = r8a66597_read(r8a66597, ep->fifoctr);
954 if (i++ > 100000) {
955 dev_err(r8a66597_to_dev(r8a66597),
956 "pipe0 is busy. maybe cpu i/o bus "
957 "conflict. please power off this controller.");
958 return;
960 ndelay(1);
961 } while ((tmp & FRDY) == 0);
963 /* prepare parameters */
964 bufsize = get_buffer_size(r8a66597, pipenum);
965 buf = req->req.buf + req->req.actual;
966 size = min(bufsize, req->req.length - req->req.actual);
968 /* write fifo */
969 if (req->req.buf) {
970 if (size > 0)
971 r8a66597_write_fifo(r8a66597, ep, buf, size);
972 if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
973 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
976 /* update parameters */
977 req->req.actual += size;
979 /* check transfer finish */
980 if ((!req->req.zero && (req->req.actual == req->req.length))
981 || (size % ep->ep.maxpacket)
982 || (size == 0)) {
983 disable_irq_ready(r8a66597, pipenum);
984 disable_irq_empty(r8a66597, pipenum);
985 } else {
986 disable_irq_ready(r8a66597, pipenum);
987 enable_irq_empty(r8a66597, pipenum);
989 pipe_start(r8a66597, pipenum);
992 static void irq_packet_write(struct r8a66597_ep *ep,
993 struct r8a66597_request *req)
995 u16 tmp;
996 unsigned bufsize;
997 size_t size;
998 void *buf;
999 u16 pipenum = ep->pipenum;
1000 struct r8a66597 *r8a66597 = ep->r8a66597;
1002 pipe_change(r8a66597, pipenum);
1003 tmp = r8a66597_read(r8a66597, ep->fifoctr);
1004 if (unlikely((tmp & FRDY) == 0)) {
1005 pipe_stop(r8a66597, pipenum);
1006 pipe_irq_disable(r8a66597, pipenum);
1007 dev_err(r8a66597_to_dev(r8a66597),
1008 "write fifo not ready. pipnum=%d\n", pipenum);
1009 return;
1012 /* prepare parameters */
1013 bufsize = get_buffer_size(r8a66597, pipenum);
1014 buf = req->req.buf + req->req.actual;
1015 size = min(bufsize, req->req.length - req->req.actual);
1017 /* write fifo */
1018 if (req->req.buf) {
1019 r8a66597_write_fifo(r8a66597, ep, buf, size);
1020 if ((size == 0)
1021 || ((size % ep->ep.maxpacket) != 0)
1022 || ((bufsize != ep->ep.maxpacket)
1023 && (bufsize > size)))
1024 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
1027 /* update parameters */
1028 req->req.actual += size;
1029 /* check transfer finish */
1030 if ((!req->req.zero && (req->req.actual == req->req.length))
1031 || (size % ep->ep.maxpacket)
1032 || (size == 0)) {
1033 disable_irq_ready(r8a66597, pipenum);
1034 enable_irq_empty(r8a66597, pipenum);
1035 } else {
1036 disable_irq_empty(r8a66597, pipenum);
1037 pipe_irq_enable(r8a66597, pipenum);
1041 static void irq_packet_read(struct r8a66597_ep *ep,
1042 struct r8a66597_request *req)
1044 u16 tmp;
1045 int rcv_len, bufsize, req_len;
1046 int size;
1047 void *buf;
1048 u16 pipenum = ep->pipenum;
1049 struct r8a66597 *r8a66597 = ep->r8a66597;
1050 int finish = 0;
1052 pipe_change(r8a66597, pipenum);
1053 tmp = r8a66597_read(r8a66597, ep->fifoctr);
1054 if (unlikely((tmp & FRDY) == 0)) {
1055 req->req.status = -EPIPE;
1056 pipe_stop(r8a66597, pipenum);
1057 pipe_irq_disable(r8a66597, pipenum);
1058 dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
1059 return;
1062 /* prepare parameters */
1063 rcv_len = tmp & DTLN;
1064 bufsize = get_buffer_size(r8a66597, pipenum);
1066 buf = req->req.buf + req->req.actual;
1067 req_len = req->req.length - req->req.actual;
1068 if (rcv_len < bufsize)
1069 size = min(rcv_len, req_len);
1070 else
1071 size = min(bufsize, req_len);
1073 /* update parameters */
1074 req->req.actual += size;
1076 /* check transfer finish */
1077 if ((!req->req.zero && (req->req.actual == req->req.length))
1078 || (size % ep->ep.maxpacket)
1079 || (size == 0)) {
1080 pipe_stop(r8a66597, pipenum);
1081 pipe_irq_disable(r8a66597, pipenum);
1082 finish = 1;
1085 /* read fifo */
1086 if (req->req.buf) {
1087 if (size == 0)
1088 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
1089 else
1090 r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
1094 if ((ep->pipenum != 0) && finish)
1095 transfer_complete(ep, req, 0);
1098 static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
1100 u16 check;
1101 u16 pipenum;
1102 struct r8a66597_ep *ep;
1103 struct r8a66597_request *req;
1105 if ((status & BRDY0) && (enb & BRDY0)) {
1106 r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
1107 r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
1109 ep = &r8a66597->ep[0];
1110 req = get_request_from_ep(ep);
1111 irq_packet_read(ep, req);
1112 } else {
1113 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1114 check = 1 << pipenum;
1115 if ((status & check) && (enb & check)) {
1116 r8a66597_write(r8a66597, ~check, BRDYSTS);
1117 ep = r8a66597->pipenum2ep[pipenum];
1118 req = get_request_from_ep(ep);
1119 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
1120 irq_packet_write(ep, req);
1121 else
1122 irq_packet_read(ep, req);
1128 static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
1130 u16 tmp;
1131 u16 check;
1132 u16 pipenum;
1133 struct r8a66597_ep *ep;
1134 struct r8a66597_request *req;
1136 if ((status & BEMP0) && (enb & BEMP0)) {
1137 r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
1139 ep = &r8a66597->ep[0];
1140 req = get_request_from_ep(ep);
1141 irq_ep0_write(ep, req);
1142 } else {
1143 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1144 check = 1 << pipenum;
1145 if ((status & check) && (enb & check)) {
1146 r8a66597_write(r8a66597, ~check, BEMPSTS);
1147 tmp = control_reg_get(r8a66597, pipenum);
1148 if ((tmp & INBUFM) == 0) {
1149 disable_irq_empty(r8a66597, pipenum);
1150 pipe_irq_disable(r8a66597, pipenum);
1151 pipe_stop(r8a66597, pipenum);
1152 ep = r8a66597->pipenum2ep[pipenum];
1153 req = get_request_from_ep(ep);
1154 if (!list_empty(&ep->queue))
1155 transfer_complete(ep, req, 0);
1162 static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1163 __releases(r8a66597->lock)
1164 __acquires(r8a66597->lock)
1166 struct r8a66597_ep *ep;
1167 u16 pid;
1168 u16 status = 0;
1169 u16 w_index = le16_to_cpu(ctrl->wIndex);
1171 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1172 case USB_RECIP_DEVICE:
1173 status = r8a66597->device_status;
1174 break;
1175 case USB_RECIP_INTERFACE:
1176 status = 0;
1177 break;
1178 case USB_RECIP_ENDPOINT:
1179 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1180 pid = control_reg_get_pid(r8a66597, ep->pipenum);
1181 if (pid == PID_STALL)
1182 status = 1 << USB_ENDPOINT_HALT;
1183 else
1184 status = 0;
1185 break;
1186 default:
1187 pipe_stall(r8a66597, 0);
1188 return; /* exit */
1191 r8a66597->ep0_data = cpu_to_le16(status);
1192 r8a66597->ep0_req->buf = &r8a66597->ep0_data;
1193 r8a66597->ep0_req->length = 2;
1194 /* AV: what happens if we get called again before that gets through? */
1195 spin_unlock(&r8a66597->lock);
1196 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
1197 spin_lock(&r8a66597->lock);
/*
 * Handle a CLEAR_FEATURE control request.  Device/interface requests
 * are simply acknowledged; ENDPOINT_HALT un-stalls the addressed pipe
 * (unless it was wedged) and restarts any transfer left on its queue.
 * Unknown recipients stall pipe 0.
 */
static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		/* a wedged endpoint stays halted until it is re-enabled */
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			/* reset the data toggle as required by the spec */
			control_reg_sqclr(r8a66597, ep->pipenum);
			/* usb_ep_clear_halt() re-takes the lock internally */
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		control_end(r8a66597, 1);
		/* restart whatever was pending on this endpoint */
		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
1242 static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1244 u16 tmp;
1245 int timeout = 3000;
1247 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1248 case USB_RECIP_DEVICE:
1249 switch (le16_to_cpu(ctrl->wValue)) {
1250 case USB_DEVICE_TEST_MODE:
1251 control_end(r8a66597, 1);
1252 /* Wait for the completion of status stage */
1253 do {
1254 tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1255 udelay(1);
1256 } while (tmp != CS_IDST || timeout-- > 0);
1258 if (tmp == CS_IDST)
1259 r8a66597_bset(r8a66597,
1260 le16_to_cpu(ctrl->wIndex >> 8),
1261 TESTMODE);
1262 break;
1263 default:
1264 pipe_stall(r8a66597, 0);
1265 break;
1267 break;
1268 case USB_RECIP_INTERFACE:
1269 control_end(r8a66597, 1);
1270 break;
1271 case USB_RECIP_ENDPOINT: {
1272 struct r8a66597_ep *ep;
1273 u16 w_index = le16_to_cpu(ctrl->wIndex);
1275 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1276 pipe_stall(r8a66597, ep->pipenum);
1278 control_end(r8a66597, 1);
1280 break;
1281 default:
1282 pipe_stall(r8a66597, 0);
1283 break;
/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
	/* fill the 8-byte setup packet via four 16-bit register reads */
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;
	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);
	/* USBREQ/USBVAL/USBINDX/USBLENG lie at consecutive 16-bit offsets */
	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);
	/* check request */
	/* standard GET_STATUS/CLEAR_FEATURE/SET_FEATURE are handled here;
	 * everything else (ret = 1) is passed to the gadget driver */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
	} else
		ret = 1;
	return ret;
1321 static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
1323 u16 speed = get_usb_speed(r8a66597);
1325 switch (speed) {
1326 case HSMODE:
1327 r8a66597->gadget.speed = USB_SPEED_HIGH;
1328 break;
1329 case FSMODE:
1330 r8a66597->gadget.speed = USB_SPEED_FULL;
1331 break;
1332 default:
1333 r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
1334 dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
/*
 * DVST (device-state transition) interrupt.  On bus reset, notify the
 * gadget core and re-read the bus speed; the speed is also refreshed
 * when leaving the configured state or while it is still unknown in
 * the address/configured states.
 */
static void irq_device_state(struct r8a66597 *r8a66597)
	u16 dvsq;
	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);
	if (dvsq == DS_DFLT) {
		/* bus reset */
		/* the reset callback runs without the controller lock held */
		spin_unlock(&r8a66597->lock);
		usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);
	/* remember the state so the next transition can be classified */
	r8a66597->old_dvsq = dvsq;
/*
 * CTRT (control transfer stage transition) interrupt dispatcher:
 * completes the ep0 request at idle, parses freshly latched SETUP
 * packets in the data stages (forwarding non-standard requests to the
 * gadget driver's setup() with the lock dropped), and finishes the
 * status stages.
 */
static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
	struct usb_ctrlrequest ctrl;
	u16 ctsq;
	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);
	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
	break;
	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		/* setup_packet() returns nonzero for requests the
		 * hardware/driver does not handle itself */
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
				< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597),
			"ctrl_stage: unexpect ctsq(%x)\n", ctsq);
		break;
/*
 * Finish a SUDMAC transfer on @ep: wait for the FIFO to become ready
 * (FRDY, up to ~10 ms), add the DMA byte count (CH0CBC) to the
 * request, clear the DMA status, and if the transfer is over (exact
 * length with no ZLP wanted, or a short packet) either arm the
 * buffer-empty interrupt (IN direction) or complete the request.
 */
static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
	u16 pipenum;
	struct r8a66597_request *req;
	u32 len;
	int i = 0;
	pipenum = ep->pipenum;
	pipe_change(r8a66597, pipenum);
	while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
		udelay(1);
		if (unlikely(i++ >= 10000)) { /* timeout = 10 msec */
			dev_err(r8a66597_to_dev(r8a66597),
				"%s: FRDY was not set (%d)\n",
				__func__, pipenum);
			return;
	r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
	req = get_request_from_ep(ep);
	/* prepare parameters */
	len = r8a66597_sudmac_read(r8a66597, CH0CBC);
	req->req.actual += len;
	/* clear */
	r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (len % ep->ep.maxpacket)) {
		if (ep->dma->dir) {
			disable_irq_ready(r8a66597, pipenum);
			enable_irq_empty(r8a66597, pipenum);
		} else {
			/* Clear the interrupt flag for next transfer */
			r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
			transfer_complete(ep, req, 0);
/*
 * Service a pending SUDMAC channel-0 end-of-transfer interrupt: clear
 * it and finish the transfer on whichever pipe D0FIFO is pointed at.
 */
static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
	u32 irqsts;
	struct r8a66597_ep *ep;
	u16 pipenum;
	irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
	if (irqsts & CH0ENDS) {
		r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
		pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
		ep = r8a66597->pipenum2ep[pipenum];
		sudmac_finish(r8a66597, ep);
/*
 * Top-level interrupt handler.  Saves and restores CFIFOSEL around
 * the work (the sub-handlers retarget the common FIFO), then
 * dispatches: SUDMAC completion, VBUS changes, device-state
 * transitions, BRDY/BEMP pipe FIFO events and control-stage
 * transitions.  All of it runs under the controller spinlock.
 */
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;
	spin_lock(&r8a66597->lock);
	if (r8a66597_is_sudmac(r8a66597))
		r8a66597_sudmac_irq(r8a66597);
	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);
	savepipe = r8a66597_read(r8a66597, CFIFOSEL);
	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);
		if (mask0 & VBINT) {
			/* ack VBINT (write-one-to-keep for the other bits) */
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);
			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);
		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);
		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	r8a66597_write(r8a66597, savepipe, CFIFOSEL);
	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
/*
 * VBUS sampling timer: VBSTS must read the same value
 * R8A66597_MAX_SAMPLING times in a row, 50 ms apart, before a
 * connect/disconnect is acted upon (debounce).  A changed reading
 * restarts the sampling sequence.
 */
static void r8a66597_timer(unsigned long _r8a66597)
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;
	spin_lock_irqsave(&r8a66597->lock, flags);
	/* NOTE(review): this SYSCFG0 read result is immediately overwritten
	 * below; presumably kept for a read side effect -- confirm before
	 * removing. */
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
		} else {
			/* level changed: restart the debounce sequence */
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
	spin_unlock_irqrestore(&r8a66597->lock, flags);
1555 /*-------------------------------------------------------------------------*/
1556 static int r8a66597_enable(struct usb_ep *_ep,
1557 const struct usb_endpoint_descriptor *desc)
1559 struct r8a66597_ep *ep;
1561 ep = container_of(_ep, struct r8a66597_ep, ep);
1562 return alloc_pipe_config(ep, desc);
/*
 * usb_ep_ops.disable: complete every queued request with -ECONNRESET,
 * mask the pipe's interrupts and release the hardware pipe.
 */
static int r8a66597_disable(struct usb_ep *_ep)
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	/* NOTE(review): container_of() of a non-NULL _ep cannot yield NULL,
	 * so this BUG_ON is effectively a no-op. */
	BUG_ON(!ep);
	/* transfer_complete() may drop the lock to call the completion,
	 * so take it fresh for each request */
	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
1585 static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
1586 gfp_t gfp_flags)
1588 struct r8a66597_request *req;
1590 req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
1591 if (!req)
1592 return NULL;
1594 INIT_LIST_HEAD(&req->queue);
1596 return &req->req;
1599 static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
1601 struct r8a66597_request *req;
1603 req = container_of(_req, struct r8a66597_request, req);
1604 kfree(req);
/*
 * usb_ep_ops.queue: submit a request.  Control requests start on ep0
 * immediately; other endpoints only kick the packet engine when their
 * queue was empty and the endpoint is not busy (otherwise completion
 * of the preceding request chains the next one).  Returns -ESHUTDOWN
 * while the bus speed is unknown (not enumerated).
 */
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	/* remember whether this request is at the head of the queue */
	if (list_empty(&ep->queue))
		request = 1;
	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;
	if (ep->ep.desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return 0;
/*
 * usb_ep_ops.dequeue: cancel a submitted request, completing it with
 * -ECONNRESET.
 *
 * NOTE(review): only checks that the endpoint queue is non-empty, not
 * that @_req is actually on it -- presumably callers only dequeue
 * requests they queued here; confirm against the gadget core usage.
 */
static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return 0;
/*
 * usb_ep_ops.set_halt: stall (@value != 0) or resume the endpoint.
 * Refused with -EAGAIN while requests are still queued; clearing the
 * halt also clears the wedge flag.
 */
static int r8a66597_set_halt(struct usb_ep *_ep, int value)
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
1688 static int r8a66597_set_wedge(struct usb_ep *_ep)
1690 struct r8a66597_ep *ep;
1691 unsigned long flags;
1693 ep = container_of(_ep, struct r8a66597_ep, ep);
1695 if (!ep || !ep->ep.desc)
1696 return -EINVAL;
1698 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1699 ep->wedge = 1;
1700 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1702 return usb_ep_set_halt(_ep);
/*
 * usb_ep_ops.fifo_flush: drop buffered FIFO contents for an idle
 * endpoint (nothing queued and not mid-transfer).
 */
static void r8a66597_fifo_flush(struct usb_ep *_ep)
	struct r8a66597_ep *ep;
	unsigned long flags;
	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		/* NOTE(review): bclr *clears* the BCLR bit here, while other
		 * call sites (e.g. sudmac_finish) *set* BCLR to flush the
		 * FIFO -- confirm this is intentional. */
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
		/* pulse ACLRM to clear the pipe's buffer memory */
		r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
		r8a66597_write(ep->r8a66597, 0, ep->pipectr);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1721 static struct usb_ep_ops r8a66597_ep_ops = {
1722 .enable = r8a66597_enable,
1723 .disable = r8a66597_disable,
1725 .alloc_request = r8a66597_alloc_request,
1726 .free_request = r8a66597_free_request,
1728 .queue = r8a66597_queue,
1729 .dequeue = r8a66597_dequeue,
1731 .set_halt = r8a66597_set_halt,
1732 .set_wedge = r8a66597_set_wedge,
1733 .fifo_flush = r8a66597_fifo_flush,
1736 /*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops.udc_start: bind a gadget driver.  Requires a
 * high-speed-capable driver with a setup() callback.  Brings the
 * controller up and, if VBUS is already present, starts the
 * VBUS-sampling timer to debounce the connection.
 */
static int r8a66597_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	if (!driver
			|| driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	/* hook up the driver */
	r8a66597->driver = driver;
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					 INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	return 0;
/*
 * usb_gadget_ops.udc_stop: unbind the gadget driver.  Masks the VBUS
 * interrupt and powers the controller down under the lock, then drops
 * the driver reference.
 */
static int r8a66597_stop(struct usb_gadget *gadget)
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;
	spin_lock_irqsave(&r8a66597->lock, flags);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);
	r8a66597->driver = NULL;
	return 0;
1780 /*-------------------------------------------------------------------------*/
1781 static int r8a66597_get_frame(struct usb_gadget *_gadget)
1783 struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
1784 return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
1787 static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
1789 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1790 unsigned long flags;
1792 spin_lock_irqsave(&r8a66597->lock, flags);
1793 if (is_on)
1794 r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
1795 else
1796 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
1797 spin_unlock_irqrestore(&r8a66597->lock, flags);
1799 return 0;
1802 static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
1804 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1806 gadget->is_selfpowered = (is_self != 0);
1807 if (is_self)
1808 r8a66597->device_status |= 1 << USB_DEVICE_SELF_POWERED;
1809 else
1810 r8a66597->device_status &= ~(1 << USB_DEVICE_SELF_POWERED);
1812 return 0;
/* Gadget-level operations registered with the UDC core in probe. */
static const struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame		= r8a66597_get_frame,
	.udc_start		= r8a66597_start,
	.udc_stop		= r8a66597_stop,
	.pullup			= r8a66597_pullup,
	.set_selfpowered	= r8a66597_set_selfpowered,
/*
 * Platform driver removal: unregister the UDC, stop the VBUS timer,
 * release the internal ep0 request, and gate the clock for on-chip
 * controllers.  Register/IRQ/memory resources are devres-managed.
 */
static int r8a66597_remove(struct platform_device *pdev)
	struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
	if (r8a66597->pdata->on_chip) {
		clk_disable_unprepare(r8a66597->clk);
	return 0;
/* No-op completion callback for the internally queued ep0 request. */
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
1842 static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
1843 struct platform_device *pdev)
1845 struct resource *res;
1847 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
1848 r8a66597->sudmac_reg = devm_ioremap_resource(&pdev->dev, res);
1849 return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
1852 static int r8a66597_probe(struct platform_device *pdev)
1854 struct device *dev = &pdev->dev;
1855 char clk_name[8];
1856 struct resource *res, *ires;
1857 int irq;
1858 void __iomem *reg = NULL;
1859 struct r8a66597 *r8a66597 = NULL;
1860 int ret = 0;
1861 int i;
1862 unsigned long irq_trigger;
1864 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1865 reg = devm_ioremap_resource(&pdev->dev, res);
1866 if (IS_ERR(reg))
1867 return PTR_ERR(reg);
1869 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1870 irq = ires->start;
1871 irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
1873 if (irq < 0) {
1874 dev_err(dev, "platform_get_irq error.\n");
1875 return -ENODEV;
1878 /* initialize ucd */
1879 r8a66597 = devm_kzalloc(dev, sizeof(struct r8a66597), GFP_KERNEL);
1880 if (r8a66597 == NULL)
1881 return -ENOMEM;
1883 spin_lock_init(&r8a66597->lock);
1884 platform_set_drvdata(pdev, r8a66597);
1885 r8a66597->pdata = dev_get_platdata(dev);
1886 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1888 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1889 r8a66597->gadget.max_speed = USB_SPEED_HIGH;
1890 r8a66597->gadget.name = udc_name;
1892 init_timer(&r8a66597->timer);
1893 r8a66597->timer.function = r8a66597_timer;
1894 r8a66597->timer.data = (unsigned long)r8a66597;
1895 r8a66597->reg = reg;
1897 if (r8a66597->pdata->on_chip) {
1898 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
1899 r8a66597->clk = devm_clk_get(dev, clk_name);
1900 if (IS_ERR(r8a66597->clk)) {
1901 dev_err(dev, "cannot get clock \"%s\"\n", clk_name);
1902 return PTR_ERR(r8a66597->clk);
1904 clk_prepare_enable(r8a66597->clk);
1907 if (r8a66597->pdata->sudmac) {
1908 ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
1909 if (ret < 0)
1910 goto clean_up2;
1913 disable_controller(r8a66597); /* make sure controller is disabled */
1915 ret = devm_request_irq(dev, irq, r8a66597_irq, IRQF_SHARED,
1916 udc_name, r8a66597);
1917 if (ret < 0) {
1918 dev_err(dev, "request_irq error (%d)\n", ret);
1919 goto clean_up2;
1922 INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
1923 r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
1924 INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
1925 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
1926 struct r8a66597_ep *ep = &r8a66597->ep[i];
1928 if (i != 0) {
1929 INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
1930 list_add_tail(&r8a66597->ep[i].ep.ep_list,
1931 &r8a66597->gadget.ep_list);
1933 ep->r8a66597 = r8a66597;
1934 INIT_LIST_HEAD(&ep->queue);
1935 ep->ep.name = r8a66597_ep_name[i];
1936 ep->ep.ops = &r8a66597_ep_ops;
1937 usb_ep_set_maxpacket_limit(&ep->ep, 512);
1939 if (i == 0) {
1940 ep->ep.caps.type_control = true;
1941 } else {
1942 ep->ep.caps.type_iso = true;
1943 ep->ep.caps.type_bulk = true;
1944 ep->ep.caps.type_int = true;
1946 ep->ep.caps.dir_in = true;
1947 ep->ep.caps.dir_out = true;
1949 usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
1950 r8a66597->ep[0].pipenum = 0;
1951 r8a66597->ep[0].fifoaddr = CFIFO;
1952 r8a66597->ep[0].fifosel = CFIFOSEL;
1953 r8a66597->ep[0].fifoctr = CFIFOCTR;
1954 r8a66597->ep[0].pipectr = get_pipectr_addr(0);
1955 r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
1956 r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
1958 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
1959 GFP_KERNEL);
1960 if (r8a66597->ep0_req == NULL) {
1961 ret = -ENOMEM;
1962 goto clean_up2;
1964 r8a66597->ep0_req->complete = nop_completion;
1966 ret = usb_add_gadget_udc(dev, &r8a66597->gadget);
1967 if (ret)
1968 goto err_add_udc;
1970 dev_info(dev, "version %s\n", DRIVER_VERSION);
1971 return 0;
1973 err_add_udc:
1974 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1975 clean_up2:
1976 if (r8a66597->pdata->on_chip)
1977 clk_disable_unprepare(r8a66597->clk);
1979 if (r8a66597->ep0_req)
1980 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1982 return ret;
1985 /*-------------------------------------------------------------------------*/
/*
 * Platform driver glue.  The probe routine is supplied separately via
 * module_platform_driver_probe(), so no .probe member is set here.
 */
static struct platform_driver r8a66597_driver = {
	.remove =	r8a66597_remove,
	.driver		= {
		.name =	(char *) udc_name,
module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
1995 MODULE_DESCRIPTION("R8A66597 USB gadget driver");
1996 MODULE_LICENSE("GPL");
1997 MODULE_AUTHOR("Yoshihiro Shimoda");
1998 MODULE_ALIAS("platform:r8a66597_udc");