/*
 * Blackfin On-Chip CAN Driver
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>

#include <asm/bfin_can.h>
#include <asm/portmux.h>

#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
#define TX_ECHO_SKB_MAX  1

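/*
 * Note: the mailbox numbers RECEIVE_STD_CHL, RECEIVE_EXT_CHL and
 * TRANSMIT_CHL used below, together with struct bfin_can_regs and the
 * register bit definitions, are provided by <asm/bfin_can.h> above.
 */
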
/*
 * bfin can private data
 */
struct bfin_can_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	void __iomem *membase;
	int rx_irq;
	int tx_irq;
	int err_irq;
	unsigned short *pin_list;
};

/*
 * bfin can timing parameters
 */
static struct can_bittiming_const bfin_can_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	/*
	 * Although the BRP field can be set to any value, it is recommended
	 * that the value be greater than or equal to 4, as restrictions
	 * apply to the bit timing configuration when BRP is less than 4.
	 */
	.brp_min = 4,
	.brp_max = 1024,
	.brp_inc = 1,
};

static int bfin_can_set_bittiming(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_bittiming *bt = &priv->can.bittiming;
	u16 clk, timing;

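	/*
	 * Register layout as used below: CLOCK holds BRP - 1, while TIMING
	 * packs TSEG1 - 1 (prop_seg + phase_seg1) in its low nibble,
	 * TSEG2 - 1 shifted left by 4, SJW - 1 shifted left by 8, and SAM
	 * to select triple sampling.
	 */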
	clk = bt->brp - 1;
	timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
		((bt->phase_seg2 - 1) << 4);

	/*
	 * If the SAM bit is set, the input signal is oversampled three times
	 * at the SCLK rate.
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		timing |= SAM;

	bfin_write16(&reg->clock, clk);
	bfin_write16(&reg->timing, timing);

	dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
			clk, timing);

	return 0;
}

static void bfin_can_set_reset_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;
	int i;

	/* disable interrupts */
	bfin_write16(&reg->mbim1, 0);
	bfin_write16(&reg->mbim2, 0);
	bfin_write16(&reg->gim, 0);

	/* reset can and enter configuration mode */
	bfin_write16(&reg->control, SRS | CCR);
	SSYNC();
	bfin_write16(&reg->control, CCR);
	SSYNC();
	while (!(bfin_read16(&reg->control) & CCA)) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to enter configuration mode\n");
			BUG();
		}
	}

	/*
	 * All mailbox configurations are marked as inactive
	 * by writing to CAN Mailbox Configuration Registers 1 and 2
	 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
	 */
	bfin_write16(&reg->mc1, 0);
	bfin_write16(&reg->mc2, 0);

	/* Set Mailbox Direction */
	bfin_write16(&reg->md1, 0xFFFF);	/* mailbox 1-16 are RX */
	bfin_write16(&reg->md2, 0);		/* mailbox 17-32 are TX */

	/* RECEIVE_STD_CHL */
	for (i = 0; i < 2; i++) {
		bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
		bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
		bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
		bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
		bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
	}

	/* RECEIVE_EXT_CHL */
	for (i = 0; i < 2; i++) {
		bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
		bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
		bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
		bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
		bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
	}

	bfin_write16(&reg->mc2, BIT(TRANSMIT_CHL - 16));
	bfin_write16(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	SSYNC();

	priv->can.state = CAN_STATE_STOPPED;
}

static void bfin_can_set_normal_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	/*
	 * leave configuration mode
	 */
	bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);

	while (bfin_read16(&reg->status) & CCA) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to leave configuration mode\n");
			BUG();
		}
	}

	/*
	 * clear _All_ tx and rx interrupts
	 */
	bfin_write16(&reg->mbtif1, 0xFFFF);
	bfin_write16(&reg->mbtif2, 0xFFFF);
	bfin_write16(&reg->mbrif1, 0xFFFF);
	bfin_write16(&reg->mbrif2, 0xFFFF);

	/*
	 * clear global interrupt status register
	 */
	bfin_write16(&reg->gis, 0x7FF); /* overwrites with '1' */

	/*
	 * Initialize Interrupts
	 * - set bits in the mailbox interrupt mask register
	 * - global interrupt mask
	 */
	bfin_write16(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	bfin_write16(&reg->mbim2, BIT(TRANSMIT_CHL - 16));

	bfin_write16(&reg->gim, EPIM | BOIM | RMLIM);
	SSYNC();
}

static void bfin_can_start(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	/* enter reset mode */
	if (priv->can.state != CAN_STATE_STOPPED)
		bfin_can_set_reset_mode(dev);

	/* leave reset mode */
	bfin_can_set_normal_mode(dev);
}

static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		bfin_can_start(dev);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

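/*
 * Transmit path: the frame is written into the single TRANSMIT_CHL
 * mailbox (ID, payload packed two bytes per 16-bit data word, then the
 * DLC), the echo skb is queued, and the mailbox transmit request bit is
 * set in TRS2.  Since only one TX mailbox is used (TX_ECHO_SKB_MAX is 1),
 * the queue is stopped here and woken again from the TX interrupt.
 */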
static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 dlc = cf->can_dlc;
	canid_t id = cf->can_id;
	u8 *data = cf->data;
	u16 val;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);

	/* fill id */
	if (id & CAN_EFF_FLAG) {
		bfin_write16(&reg->chl[TRANSMIT_CHL].id0, id);
		if (id & CAN_RTR_FLAG)
			writew(((id & 0x1FFF0000) >> 16) | IDE | AME | RTR,
					&reg->chl[TRANSMIT_CHL].id1);
		else
			writew(((id & 0x1FFF0000) >> 16) | IDE | AME,
					&reg->chl[TRANSMIT_CHL].id1);

	} else {
		if (id & CAN_RTR_FLAG)
			writew((id << 2) | AME | RTR,
					&reg->chl[TRANSMIT_CHL].id1);
		else
			bfin_write16(&reg->chl[TRANSMIT_CHL].id1,
					(id << 2) | AME);
	}

	/* fill payload */
	for (i = 0; i < 8; i += 2) {
		val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
			((6 - i) < dlc ? (data[6 - i] << 8) : 0);
		bfin_write16(&reg->chl[TRANSMIT_CHL].data[i], val);
	}

	/* fill data length code */
	bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);

	can_put_echo_skb(skb, dev, 0);

	/* set transmit request */
	bfin_write16(&reg->trs2, BIT(TRANSMIT_CHL - 16));

	return 0;
}

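/*
 * Receive path: the pending mailbox (extended or standard, selected by
 * the bit set in isrc) is copied into a freshly allocated CAN skb: ID,
 * RTR flag, DLC and the payload, unpacked two bytes per 16-bit word.
 */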
static void bfin_can_rx(struct net_device *dev, u16 isrc)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf;
	struct sk_buff *skb;
	int obj;
	int i;
	u16 val;

	skb = alloc_can_skb(dev, &cf);
	if (skb == NULL)
		return;

	/* get id */
	if (isrc & BIT(RECEIVE_EXT_CHL)) {
		/* extended frame format (EFF) */
		cf->can_id = ((bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id1)
			     & 0x1FFF) << 16)
			     + bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id0);
		cf->can_id |= CAN_EFF_FLAG;
		obj = RECEIVE_EXT_CHL;
	} else {
		/* standard frame format (SFF) */
		cf->can_id = (bfin_read16(&reg->chl[RECEIVE_STD_CHL].id1)
			     & 0x1ffc) >> 2;
		obj = RECEIVE_STD_CHL;
	}
	if (bfin_read16(&reg->chl[obj].id1) & RTR)
		cf->can_id |= CAN_RTR_FLAG;

	/* get data length code */
	cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);

	/* get payload */
	for (i = 0; i < 8; i += 2) {
		val = bfin_read16(&reg->chl[obj].data[i]);
		cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
	}

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

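/*
 * Error handling: map the controller's interrupt sources (RMLIS data
 * overrun, BOIS bus-off, EPIS error passive, EWTIS/EWRIS error warning)
 * and the error status bits to socketcan error frames, reading the
 * TX/RX error counters from the CEC register to report whether the
 * transmit or receive side crossed the threshold.
 */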
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(dev, &cf);
	if (skb == NULL)
		return -ENOMEM;

	if (isrc & RMLIS) {
		/* data overrun interrupt */
		dev_dbg(dev->dev.parent, "data overrun interrupt\n");
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	}

	if (isrc & BOIS) {
		dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
	}

	if (isrc & EPIS) {
		/* error passive interrupt */
		dev_dbg(dev->dev.parent, "error passive interrupt\n");
		state = CAN_STATE_ERROR_PASSIVE;
	}

	if ((isrc & EWTIS) || (isrc & EWRIS)) {
		dev_dbg(dev->dev.parent,
				"Error Warning Transmit/Receive Interrupt\n");
		state = CAN_STATE_ERROR_WARNING;
	}

	if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
				state == CAN_STATE_ERROR_PASSIVE)) {
		u16 cec = bfin_read16(&reg->cec);
		u8 rxerr = cec;
		u8 txerr = cec >> 8;

		cf->can_id |= CAN_ERR_CRTL;
		if (state == CAN_STATE_ERROR_WARNING) {
			priv->can.can_stats.error_warning++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		} else {
			priv->can.can_stats.error_passive++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
	}

	if (status) {
		priv->can.can_stats.bus_error++;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		if (status & BEF)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		else if (status & FER)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		else if (status & SER)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		else
			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
	}

	priv->can.state = state;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 0;
}

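/*
 * A single handler services the RX, TX and error interrupt lines; the
 * source is identified by the IRQ number together with the pending
 * mailbox/global status flags, which are then cleared by writing all
 * ones back.
 */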
irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	u16 status, isrc;

	if ((irq == priv->tx_irq) && bfin_read16(&reg->mbtif2)) {
		/* transmission complete interrupt */
		bfin_write16(&reg->mbtif2, 0xFFFF);
		stats->tx_packets++;
		stats->tx_bytes += bfin_read16(&reg->chl[TRANSMIT_CHL].dlc);
		can_get_echo_skb(dev, 0);
		netif_wake_queue(dev);
	} else if ((irq == priv->rx_irq) && bfin_read16(&reg->mbrif1)) {
		/* receive interrupt */
		isrc = bfin_read16(&reg->mbrif1);
		bfin_write16(&reg->mbrif1, 0xFFFF);
		bfin_can_rx(dev, isrc);
	} else if ((irq == priv->err_irq) && bfin_read16(&reg->gis)) {
		/* error interrupt */
		isrc = bfin_read16(&reg->gis);
		status = bfin_read16(&reg->esr);
		bfin_write16(&reg->gis, 0x7FF);
		bfin_can_err(dev, isrc, status);
	} else {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int bfin_can_open(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	int err;

	/* set chip into reset mode */
	bfin_can_set_reset_mode(dev);

	/* common open */
	err = open_candev(dev);
	if (err)
		goto exit_open;

	/* register interrupt handler */
	err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
			"bfin-can-rx", dev);
	if (err)
		goto exit_rx_irq;
	err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
			"bfin-can-tx", dev);
	if (err)
		goto exit_tx_irq;
	err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
			"bfin-can-err", dev);
	if (err)
		goto exit_err_irq;

	bfin_can_start(dev);

	netif_start_queue(dev);

	return 0;

exit_err_irq:
	free_irq(priv->tx_irq, dev);
exit_tx_irq:
	free_irq(priv->rx_irq, dev);
exit_rx_irq:
	close_candev(dev);
exit_open:
	return err;
}

static int bfin_can_close(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	bfin_can_set_reset_mode(dev);

	close_candev(dev);

	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);
	free_irq(priv->err_irq, dev);

	return 0;
}

struct net_device *alloc_bfin_candev(void)
{
	struct net_device *dev;
	struct bfin_can_priv *priv;

	dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->can.bittiming_const = &bfin_can_bittiming_const;
	priv->can.do_set_bittiming = bfin_can_set_bittiming;
	priv->can.do_set_mode = bfin_can_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	return dev;
}

static const struct net_device_ops bfin_can_netdev_ops = {
	.ndo_open       = bfin_can_open,
	.ndo_stop       = bfin_can_close,
	.ndo_start_xmit = bfin_can_start_xmit,
};

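/*
 * Probe: fetch the pin list from platform data and the MMIO/IRQ
 * resources, claim the memory region and peripheral pins, allocate the
 * candev, wire up the netdev ops and register the CAN device.
 */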
static int __devinit bfin_can_probe(struct platform_device *pdev)
{
	int err;
	struct net_device *dev;
	struct bfin_can_priv *priv;
	struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
	unsigned short *pdata;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data provided!\n");
		err = -EINVAL;
		goto exit;
	}

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
		err = -EINVAL;
		goto exit;
	}

	if (!request_mem_region(res_mem->start, resource_size(res_mem),
				dev_name(&pdev->dev))) {
		err = -EBUSY;
		goto exit;
	}

	/* request peripheral pins */
	err = peripheral_request_list(pdata, dev_name(&pdev->dev));
	if (err)
		goto exit_mem_release;

	dev = alloc_bfin_candev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_peri_pin_free;
	}

	priv = netdev_priv(dev);
	priv->membase = (void __iomem *)res_mem->start;
	priv->rx_irq = rx_irq->start;
	priv->tx_irq = tx_irq->start;
	priv->err_irq = err_irq->start;
	priv->pin_list = pdata;
	priv->can.clock.freq = get_sclk();

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &bfin_can_netdev_ops;

	bfin_can_set_reset_mode(dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
		goto exit_candev_free;
	}

	dev_info(&pdev->dev,
		"%s device registered "
		"(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
		DRV_NAME, (void *)priv->membase, priv->rx_irq,
		priv->tx_irq, priv->err_irq, priv->can.clock.freq);
	return 0;

exit_candev_free:
	free_candev(dev);
exit_peri_pin_free:
	peripheral_free_list(pdata);
exit_mem_release:
	release_mem_region(res_mem->start, resource_size(res_mem));
exit:
	return err;
}

static int __devexit bfin_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct resource *res;

	bfin_can_set_reset_mode(dev);

	unregister_candev(dev);

	dev_set_drvdata(&pdev->dev, NULL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	peripheral_free_list(priv->pin_list);

	free_candev(dev);
	return 0;
}

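/*
 * Power management: while the interface is running, suspend puts the
 * controller into sleep mode via the SMR bit and waits for the SMACK
 * acknowledgement; resume clears the interrupt register to wake the
 * controller again.
 */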
#ifdef CONFIG_PM
static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	if (netif_running(dev)) {
		/* enter sleep mode */
		bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
		SSYNC();
		while (!(bfin_read16(&reg->intr) & SMACK)) {
			udelay(10);
			if (--timeout == 0) {
				dev_err(dev->dev.parent,
						"fail to enter sleep mode\n");
				BUG();
			}
		}
	}

	return 0;
}

static int bfin_can_resume(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;

	if (netif_running(dev)) {
		/* leave sleep mode */
		bfin_write16(&reg->intr, 0);
		SSYNC();
	}

	return 0;
}
#else
#define bfin_can_suspend NULL
#define bfin_can_resume NULL
#endif	/* CONFIG_PM */

static struct platform_driver bfin_can_driver = {
	.probe = bfin_can_probe,
	.remove = __devexit_p(bfin_can_remove),
	.suspend = bfin_can_suspend,
	.resume = bfin_can_resume,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init bfin_can_init(void)
{
	return platform_driver_register(&bfin_can_driver);
}
module_init(bfin_can_init);

static void __exit bfin_can_exit(void)
{
	platform_driver_unregister(&bfin_can_driver);
}
module_exit(bfin_can_exit);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");