drivers/usb/gadget/r8a66597-udc.c
/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2009-08-18"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}
static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0)
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0)
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0)
		ret = r8a66597_read(r8a66597, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0)
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}
static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}
static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		return;

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
}
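/*
 * FIFO buffer assignment: interrupt pipes each get a single 64-byte block
 * in the low buffer-number area (bufnum 4 onwards, buf_bsize = 0), while
 * bulk and isochronous pipes each get an 8-block (512 bytes per bank)
 * region starting at R8A66597_BASE_BUFNUM, spaced 16 buffer numbers apart;
 * bulk pipes additionally enable double buffering (DBLB) and, for OUT
 * direction, SHTNAK.
 */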
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}
static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe))
		r8a66597->bulk--;
	else if (is_interrupt_pipe(info->pipe))
		r8a66597->interrupt--;
	else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else
		printk(KERN_ERR "ep_release: unexpected pipenum (%d)\n",
				info->pipe);
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}
static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->fifotrn = 0;

	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}
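/*
 * Pipe allocation policy: each transfer type draws from its own fixed pool
 * of hardware pipes.  When the bulk pool is exhausted, a spare isochronous
 * pipe is reused as a bulk pipe (tracked through the isochronous counter),
 * which is why pipe_buffer_release() decrements both counters in that case.
 */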
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				printk(KERN_ERR "bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			printk(KERN_ERR "interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			printk(KERN_ERR "isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		printk(KERN_ERR "unexpected xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		printk(KERN_ERR "pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}
/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if complete is true, gadget driver complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}
static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0))
		pipe_irq_enable(r8a66597, ep->pipenum);
	else
		irq_packet_write(ep, req);
}
static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		if (ep->use_dma) {
			r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
			pipe_change(r8a66597, pipenum);
			r8a66597_bset(r8a66597, TRENB, ep->fifosel);
			r8a66597_write(r8a66597,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(r8a66597, pipenum);	/* trigger once */
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}
static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "start_ep0: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}
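/*
 * Controller bring-up differs between the on-chip variant (SYSCFG1 quirk,
 * bus clock already available, SCKE enabled directly) and the external
 * chip (pin configuration, crystal selection, then XCKE -> PLLC -> SCKE
 * with settling delays before the clock is treated as stable).
 */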
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, 0x04, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}
static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
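/*
 * transfer_complete() is always entered with the controller spinlock held;
 * it temporarily drops the lock around the gadget driver's ->complete()
 * callback (hence the __releases/__acquires annotations) and then restarts
 * the next queued request, if any, before returning.
 */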
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "write fifo not ready. pipenum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}
static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "read fifo not ready\n");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}
static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}
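/*
 * GET_STATUS replies are sent from the driver's private ep0_req: the two
 * status bytes are staged in r8a66597->ep0_data and queued on ep0 with the
 * lock temporarily dropped, exactly as a gadget driver would queue its own
 * control response.
 */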
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}
static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}
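/*
 * Standard GET_STATUS, CLEAR_FEATURE and SET_FEATURE requests are answered
 * directly by the handlers above; setup_packet() below returns nonzero for
 * every other request so that irq_control_stage() forwards it to the gadget
 * driver's ->setup() callback.
 */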
/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}
static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		printk(KERN_ERR "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		r8a66597->driver->disconnect(&r8a66597->gadget);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}
static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
		}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "ctrl_stage: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}
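/*
 * Top-level interrupt handler: the current CFIFO pipe selection is saved
 * and restored around the handler, a VBUS change kicks off the sampling
 * timer, and device-state (DVSQ), buffer-ready (BRDY), buffer-empty (BEMP)
 * and control-stage (CTRT) events are dispatched in that order.
 */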
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}
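/*
 * VBUS debounce: the timer re-reads VBSTS every 50 ms and only reports a
 * connect or disconnect after R8A66597_MAX_SAMPLING consecutive identical
 * readings; any change in between restarts the sampling sequence.
 */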
static void r8a66597_timer(unsigned long _r8a66597)
{
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&r8a66597->lock, flags);
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}
/*-------------------------------------------------------------------------*/
static int r8a66597_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	return alloc_pipe_config(ep, desc);
}

static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}

	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}

static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req;

	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_request *req;

	req = container_of(_req, struct r8a66597_request, req);
	kfree(req);
}

static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}

out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}

static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return usb_ep_set_halt(_ep);
}

static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,

	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,

	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,

	.set_halt	= r8a66597_set_halt,
	.set_wedge	= r8a66597_set_wedge,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
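/*
 * Binding model: this UDC supports a single controller instance, kept in
 * the_controller.  usb_gadget_register_driver() requires a high-speed
 * capable gadget driver with bind() and setup() callbacks; once bound,
 * VBUS interrupts are enabled and, if VBUS is already present, the
 * connect sampling timer is started immediately.
 */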
static struct r8a66597 *the_controller;

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	int retval;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	if (r8a66597->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	r8a66597->driver = driver;
	r8a66597->gadget.dev.driver = &driver->driver;

	retval = device_add(&r8a66597->gadget.dev);
	if (retval) {
		printk(KERN_ERR "device_add error (%d)\n", retval);
		goto error;
	}

	retval = driver->bind(&r8a66597->gadget);
	if (retval) {
		printk(KERN_ERR "bind to driver error (%d)\n", retval);
		device_del(&r8a66597->gadget.dev);
		goto error;
	}

	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;

error:
	r8a66597->driver = NULL;
	r8a66597->gadget.dev.driver = NULL;

	return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	unsigned long flags;

	if (driver != r8a66597->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
		r8a66597_usb_disconnect(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	r8a66597_bclr(r8a66597, VBSE, INTENB0);

	driver->unbind(&r8a66597->gadget);

	init_controller(r8a66597);
	disable_controller(r8a66597);

	device_del(&r8a66597->gadget.dev);
	r8a66597->driver = NULL;
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame		= r8a66597_get_frame,
};

static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597		*r8a66597 = dev_get_drvdata(&pdev->dev);

	del_timer_sync(&r8a66597->timer);
	iounmap((void *)r8a66597->reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
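/*
 * Illustrative only: probe expects board code to register a platform
 * device named "r8a66597_udc" with one memory resource, one IRQ resource
 * (whose trigger flags select the interrupt sense) and platform data.
 * The sketch below assumes the r8a66597_platdata type and xtal constant
 * from <linux/usb/r8a66597.h>; the base address and IRQ number are
 * placeholders, and field names may differ between kernel versions.
 *
 *	static struct r8a66597_platdata udc_pdata = {
 *		.on_chip	= 0,
 *		.xtal		= R8A66597_PLATDATA_XTAL_12MHZ,
 *		.vif		= 1,
 *	};
 *
 *	static struct resource udc_resources[] = {
 *		[0] = {
 *			.start	= 0xa4d80000,
 *			.end	= 0xa4d800ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start	= 65,
 *			.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
 *		},
 *	};
 *
 *	static struct platform_device udc_device = {
 *		.name		= "r8a66597_udc",
 *		.dev		= { .platform_data = &udc_pdata },
 *		.resource	= udc_resources,
 *		.num_resources	= ARRAY_SIZE(udc_resources),
 *	};
 */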
static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "ioremap error.\n");
		goto clean_up;
	}

	/* initialize udc */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	device_initialize(&r8a66597->gadget.dev);
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.is_dualspeed = 1;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = (unsigned long)reg;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up;
		}
		clk_enable(r8a66597->clk);
	}
#endif

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_DISABLED | IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		printk(KERN_ERR "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].fifotrn = 0;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	the_controller = r8a66597;

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL)
		goto clean_up3;
	r8a66597->ep0_req->complete = nop_completion;

	init_controller(r8a66597);

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
clean_up:
	if (r8a66597) {
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver	= {
		.name =	(char *) udc_name,
	},
};

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");