/* drivers/usb/renesas_usbhs/fifo.c */
/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
#define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
#define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
/*
 *		packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	INIT_LIST_HEAD(&pkt->node);
}
/*
 *		packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
	struct device *dev = usbhs_priv_to_dev(priv);

	dev_err(dev, "null handler\n");

	return -EINVAL;
}

static struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * each pkt must hold its own handler,
	 * because the handler may change with the situation
	 * (dma handler -> pio handler).
	 */
	pkt->pipe	= pipe;
	pkt->buf	= buf;
	pkt->handler	= pipe->handler;
	pkt->length	= len;
	pkt->zero	= zero;
	pkt->actual	= 0;
	pkt->done	= done;
	pkt->sequence	= sequence;

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/
}
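/*
 * Illustrative usage (sketch only, names are placeholders, not part of
 * this driver): a mod driver typically queues a transfer from its own
 * request path and then kicks the queue, e.g.
 *
 *	usbhs_pkt_push(pipe, pkt, my_done_cb, buf, len, zero, -1);
 *	usbhs_pkt_start(pipe);
 *
 * where my_done_cb(priv, pkt) is the caller's completion callback and
 * sequence = -1 leaves the current data toggle untouched (see the
 * "-1 sequence will be ignored" handling below).
 */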
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}

static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
	if (list_empty(&pipe->list))
		return NULL;

	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo);
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					     struct usbhs_pkt *pkt);
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	unsigned long flags;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	usbhs_pipe_disable(pipe);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt) {
		struct dma_chan *chan = NULL;

		if (fifo)
			chan = usbhsf_dma_chan_get(fifo, pkt);
		if (chan) {
			dmaengine_terminate_all(chan);
			usbhsf_fifo_clear(pipe, fifo);
			usbhsf_dma_unmap(pkt);
		}

		__usbhsf_pkt_del(pkt);
	}

	if (fifo)
		usbhsf_fifo_unselect(pipe, fifo);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return pkt;
}
enum {
	USBHSF_PKT_PREPARE,
	USBHSF_PKT_TRY_RUN,
	USBHSF_PKT_DMA_DONE,
};
static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt)
		goto __usbhs_pkt_handler_end;

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	if (is_done) {
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
/*
 *		irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})
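/*
 * Note on the macro above: usbhsf_irq_callback_ctrl() is a GCC statement
 * expression.  Its "status" argument is the name of a struct usbhs_mod
 * member (irq_bempsts or irq_brdysts) and is substituted textually, so
 * "mod->status" selects that member.  The "return" inside the macro
 * returns from the *calling* function (usbhsf_tx/rx_irq_ctrl) when no
 * mod is currently selected.
 */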
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * A DCP pipe can NOT use the "ready" interrupt for "send";
	 * it should use the "empty" interrupt.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * On the other hand, a normal pipe can use the "ready" interrupt
	 * for "send" even though it is single/double buffered.
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}
/*
 *		FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}
static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	int timeout = 1024;

	do {
		/* The FIFO port is accessible */
		if (usbhs_read(priv, fifo->ctr) & FRDY)
			return 0;

		udelay(10);
	} while (timeout--);

	return -EBUSY;
}
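/*
 * Note: with timeout = 1024 and udelay(10), usbhsf_fifo_barrier() above
 * polls FRDY for roughly 1024 * 10us ~= 10ms before giving up with -EBUSY.
 */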
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	if (!usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_barrier(priv, fifo);

	usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
		usbhs_write(priv, fifo->sel, base);
	else
		usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}
/*
 *		DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}
static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}
static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
		usbhsf_tx_irq_ctrl(pipe, 0);
	else
		usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->actual = pkt->length;
	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};
/*
 *		DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/*
	 * change handler to PIO push
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};
/*
 *		DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};
/*
 *		PIO push handler
 */
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, maxp);
	total_len	= len;
	is_short	= total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++)
		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_running(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_running(pipe, 1);

	return ret;
}
static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	if (usbhs_pipe_is_running(pkt->pipe))
		return 0;

	return usbhsf_pio_try_push(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_prepare_push,
	.try_run = usbhsf_pio_try_push,
};
/*
 *		PIO pop handler
 */
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_running(pipe))
		return 0;

	/*
	 * pipe enable to prepare packet receive
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
	usbhs_pipe_enable(pipe);
	usbhs_pipe_running(pipe, 1);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}
static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, rcv_len);
	total_len	= len;

	/*
	 * update the actual length first here, since it decides whether
	 * to disable the pipe.
	 * if this pipe keeps BUF status and all data were popped,
	 * then the next interrupt/token will be issued again
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_running(pipe, 0);
		usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}

struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};
/*
 *		DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};
/*
 *		DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					     struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;

	/* DMA :: D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	/* DMA :: D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	return NULL;
}
#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);

	return info->dma_map_ctrl(pkt, map);
}
static void usbhsf_dma_complete(void *arg);
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;

	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					   pkt->trans, dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	desc->callback		= usbhsf_dma_complete;
	desc->callback_param	= pipe;

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, "  %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	usbhs_pipe_enable(pipe);
	usbhsf_dma_start(pipe, fifo);
	dma_async_issue_pending(chan);
}
/*
 *		DMA push handler
 */
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_push;

	if (len & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_push;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push_unmap;

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_push_unmap:
	usbhsf_dma_unmap(pkt);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare	= usbhsf_dma_prepare_push,
	.dma_done	= usbhsf_dma_push_done,
};
/*
 *		DMA pop handler
 */
static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled the irq to get here,
	 * but it is no longer needed for DMA; disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}
static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare	= usbhsf_prepare_pop,
	.try_run	= usbhsf_dma_try_pop,
	.dma_done	= usbhsf_dma_pop_done,
};
/*
 *		DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as valid DMA
	 */
	if (0 == slave->shdma_slave.slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}
static void usbhsf_dma_init(struct usbhs_priv *priv,
			    struct usbhs_fifo *fifo)
{
	struct device *dev = usbhs_priv_to_dev(priv);
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			fifo->name,
			fifo->tx_chan ? "[TX]" : " ",
			fifo->rx_chan ? "[RX]" : " ");
}
/*
 *		irq functions
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}
static void usbhsf_dma_complete(void *arg)
{
	struct usbhs_pipe *pipe = arg;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}

/*
 *		fifo init
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);

	mod->irq_empty		= usbhsf_irq_empty;
	mod->irq_ready		= usbhsf_irq_ready;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	cfifo->pipe	= NULL;
	d0fifo->pipe	= NULL;
	d1fifo->pipe	= NULL;
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty		= NULL;
	mod->irq_ready		= NULL;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;
}
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	fifo->name	= "D0FIFO";
	fifo->port	= D0FIFO;
	fifo->sel	= D0FIFOSEL;
	fifo->ctr	= D0FIFOCTR;
	fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
	fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
	usbhsf_dma_init(priv, fifo);

	/* D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	fifo->name	= "D1FIFO";
	fifo->port	= D1FIFO;
	fifo->sel	= D1FIFOSEL;
	fifo->ctr	= D1FIFOCTR;
	fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
	fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
	usbhsf_dma_init(priv, fifo);

	return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}