/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2009-08-18"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};

static void init_controller(struct r8a66597 *r8a66597);
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}

static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}

static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0)
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0)
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0)
		ret = r8a66597_read(r8a66597, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0)
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}

static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		return;

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
}

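/*
 * Select the pipe in PIPESEL, reserve a region of the controller's FIFO
 * buffer memory for it and program PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI.
 * Returns -ENOMEM when the requested buffer area would exceed
 * R8A66597_MAX_BUFNUM.
 */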
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}

static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe))
		r8a66597->bulk--;
	else if (is_interrupt_pipe(info->pipe))
		r8a66597->interrupt--;
	else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else
		printk(KERN_ERR "ep_release: unexpected pipenum (%d)\n",
				info->pipe);
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}

static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->fifotrn = 0;

	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}

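/*
 * Pick a hardware pipe that matches the endpoint descriptor's transfer
 * type (bulk endpoints may fall back to a free isochronous pipe),
 * configure its buffer and bind it to the endpoint.
 */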
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				printk(KERN_ERR "bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			printk(KERN_ERR "interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			printk(KERN_ERR "isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		printk(KERN_ERR "unexpected xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		printk(KERN_ERR "pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}

/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if ccpl is true, the gadget driver's complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}

static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0))
		pipe_irq_enable(r8a66597, ep->pipenum);
	else
		irq_packet_write(ep, req);
}

static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		if (ep->use_dma) {
			r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
			pipe_change(r8a66597, pipenum);
			r8a66597_bset(r8a66597, TRENB, ep->fifosel);
			r8a66597_write(r8a66597,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(r8a66597, pipenum);	/* trigger once */
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}

static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "start_ep0: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

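/*
 * Enable the controller.  The on-chip variant only needs the bus wait
 * value, HSE and the clock enables; the external chip additionally
 * requires pin configuration, crystal selection and PLL start-up before
 * SCKE can be set.
 */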
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		if (r8a66597->pdata->buswait)
			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
					SYSCFG1);
		else
			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}

static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bclr(r8a66597, UTST, TESTMODE);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}

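/*
 * Write the next chunk of an ep0 IN data stage into the CFIFO.  Called
 * when the transfer is started and again from the BEMP0 interrupt once
 * the previous chunk has been sent.
 */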
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.\n");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}

static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "write fifo not ready. pipenum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "read fifo not ready\n");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}

static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}

static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}

static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 tmp;
	int timeout = 3000;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_DEVICE_TEST_MODE:
			control_end(r8a66597, 1);
			/* Wait for the completion of status stage */
			do {
				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
				udelay(1);
			} while (tmp != CS_IDST && timeout-- > 0);

			if (tmp == CS_IDST)
				r8a66597_bset(r8a66597,
						le16_to_cpu(ctrl->wIndex) >> 8,
						TESTMODE);
			break;
		default:
			pipe_stall(r8a66597, 0);
			break;
		}
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}

static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		printk(KERN_ERR "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		spin_unlock(&r8a66597->lock);
		r8a66597->driver->disconnect(&r8a66597->gadget);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}

static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
		}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "ctrl_stage: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

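/*
 * Top-level interrupt handler: snapshot the status/enable registers,
 * then dispatch VBUS change, device state change, buffer ready, buffer
 * empty and control stage events, restoring the saved CFIFOSEL value on
 * exit.
 */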
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}

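/*
 * VBUS sampling timer: VBSTS is re-read every 50 ms and a connect or
 * disconnect is only acted upon after R8A66597_MAX_SAMPLING consecutive
 * identical readings.
 */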
static void r8a66597_timer(unsigned long _r8a66597)
{
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&r8a66597->lock, flags);
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}

/*-------------------------------------------------------------------------*/
static int r8a66597_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	return alloc_pipe_config(ep, desc);
}

static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}

	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}

static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req;

	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_request *req;

	req = container_of(_req, struct r8a66597_request, req);
	kfree(req);
}

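/*
 * Queue a request.  For non-control endpoints the transfer is started
 * immediately only when the endpoint queue was empty and the endpoint
 * is not halted; otherwise it is picked up later from
 * transfer_complete().
 */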
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}

out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}

static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return usb_ep_set_halt(_ep);
}

static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,

	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,

	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,

	.set_halt	= r8a66597_set_halt,
	.set_wedge	= r8a66597_set_wedge,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
static struct r8a66597 *the_controller;

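/*
 * Called when a gadget driver registers itself: bind the driver, enable
 * the controller and, if VBUS is already present, start the sampling
 * timer so that a connect is reported.
 */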
static int r8a66597_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct r8a66597 *r8a66597 = the_controller;
	int retval;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !bind
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	if (r8a66597->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	r8a66597->driver = driver;
	r8a66597->gadget.dev.driver = &driver->driver;

	retval = device_add(&r8a66597->gadget.dev);
	if (retval) {
		printk(KERN_ERR "device_add error (%d)\n", retval);
		goto error;
	}

	retval = bind(&r8a66597->gadget);
	if (retval) {
		printk(KERN_ERR "bind to driver error (%d)\n", retval);
		device_del(&r8a66597->gadget.dev);
		goto error;
	}

	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					 INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;

error:
	r8a66597->driver = NULL;
	r8a66597->gadget.dev.driver = NULL;

	return retval;
}

static int r8a66597_stop(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	unsigned long flags;

	if (driver != r8a66597->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
		r8a66597_usb_disconnect(r8a66597);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	driver->unbind(&r8a66597->gadget);

	device_del(&r8a66597->gadget.dev);
	r8a66597->driver = NULL;
	return 0;
}

/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}

static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (is_on)
		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
	else
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	return 0;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame		= r8a66597_get_frame,
	.start			= r8a66597_start,
	.stop			= r8a66597_stop,
	.pullup			= r8a66597_pullup,
};

static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597		*r8a66597 = dev_get_drvdata(&pdev->dev);

	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	iounmap(r8a66597->reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}

1568 #ifdef CONFIG_HAVE_CLK
1569 char clk_name[8];
1570 #endif
1571 struct resource *res, *ires;
1572 int irq;
1573 void __iomem *reg = NULL;
1574 struct r8a66597 *r8a66597 = NULL;
1575 int ret = 0;
1576 int i;
1577 unsigned long irq_trigger;
1579 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1580 if (!res) {
1581 ret = -ENODEV;
1582 printk(KERN_ERR "platform_get_resource error.\n");
1583 goto clean_up;
1586 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1587 irq = ires->start;
1588 irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
1590 if (irq < 0) {
1591 ret = -ENODEV;
1592 printk(KERN_ERR "platform_get_irq error.\n");
1593 goto clean_up;
1596 reg = ioremap(res->start, resource_size(res));
1597 if (reg == NULL) {
1598 ret = -ENOMEM;
1599 printk(KERN_ERR "ioremap error.\n");
1600 goto clean_up;
1603 /* initialize ucd */
1604 r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
1605 if (r8a66597 == NULL) {
1606 ret = -ENOMEM;
1607 printk(KERN_ERR "kzalloc error\n");
1608 goto clean_up;
1611 spin_lock_init(&r8a66597->lock);
1612 dev_set_drvdata(&pdev->dev, r8a66597);
1613 r8a66597->pdata = pdev->dev.platform_data;
1614 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1616 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1617 device_initialize(&r8a66597->gadget.dev);
1618 dev_set_name(&r8a66597->gadget.dev, "gadget");
1619 r8a66597->gadget.is_dualspeed = 1;
1620 r8a66597->gadget.dev.parent = &pdev->dev;
1621 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
1622 r8a66597->gadget.dev.release = pdev->dev.release;
1623 r8a66597->gadget.name = udc_name;
1625 init_timer(&r8a66597->timer);
1626 r8a66597->timer.function = r8a66597_timer;
1627 r8a66597->timer.data = (unsigned long)r8a66597;
1628 r8a66597->reg = reg;
1630 #ifdef CONFIG_HAVE_CLK
1631 if (r8a66597->pdata->on_chip) {
1632 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
1633 r8a66597->clk = clk_get(&pdev->dev, clk_name);
1634 if (IS_ERR(r8a66597->clk)) {
1635 dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
1636 clk_name);
1637 ret = PTR_ERR(r8a66597->clk);
1638 goto clean_up;
1640 clk_enable(r8a66597->clk);
1642 #endif
1644 disable_controller(r8a66597); /* make sure controller is disabled */
1646 ret = request_irq(irq, r8a66597_irq, IRQF_DISABLED | IRQF_SHARED,
1647 udc_name, r8a66597);
1648 if (ret < 0) {
1649 printk(KERN_ERR "request_irq error (%d)\n", ret);
1650 goto clean_up2;
1653 INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
1654 r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
1655 INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
1656 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
1657 struct r8a66597_ep *ep = &r8a66597->ep[i];
1659 if (i != 0) {
1660 INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
1661 list_add_tail(&r8a66597->ep[i].ep.ep_list,
1662 &r8a66597->gadget.ep_list);
1664 ep->r8a66597 = r8a66597;
1665 INIT_LIST_HEAD(&ep->queue);
1666 ep->ep.name = r8a66597_ep_name[i];
1667 ep->ep.ops = &r8a66597_ep_ops;
1668 ep->ep.maxpacket = 512;
1670 r8a66597->ep[0].ep.maxpacket = 64;
1671 r8a66597->ep[0].pipenum = 0;
1672 r8a66597->ep[0].fifoaddr = CFIFO;
1673 r8a66597->ep[0].fifosel = CFIFOSEL;
1674 r8a66597->ep[0].fifoctr = CFIFOCTR;
1675 r8a66597->ep[0].fifotrn = 0;
1676 r8a66597->ep[0].pipectr = get_pipectr_addr(0);
1677 r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
1678 r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
1680 the_controller = r8a66597;
1682 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
1683 GFP_KERNEL);
1684 if (r8a66597->ep0_req == NULL)
1685 goto clean_up3;
1686 r8a66597->ep0_req->complete = nop_completion;
1688 ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
1689 if (ret)
1690 goto err_add_udc;
1692 dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
1693 return 0;
1695 err_add_udc:
1696 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1697 clean_up3:
1698 free_irq(irq, r8a66597);
1699 clean_up2:
1700 #ifdef CONFIG_HAVE_CLK
1701 if (r8a66597->pdata->on_chip) {
1702 clk_disable(r8a66597->clk);
1703 clk_put(r8a66597->clk);
1705 #endif
1706 clean_up:
1707 if (r8a66597) {
1708 if (r8a66597->ep0_req)
1709 r8a66597_free_request(&r8a66597->ep[0].ep,
1710 r8a66597->ep0_req);
1711 kfree(r8a66597);
1713 if (reg)
1714 iounmap(reg);
1716 return ret;
/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver		= {
		.name =	(char *) udc_name,
	},
};
MODULE_ALIAS("platform:r8a66597_udc");

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");