drivers/net/can/bfin_can.c
/*
 * Blackfin On-Chip CAN Driver
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>

#include <asm/bfin_can.h>
#include <asm/portmux.h>

#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
#define TX_ECHO_SKB_MAX  1

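/*
 * This driver uses a single transmit mailbox (TRANSMIT_CHL), so at most
 * one echoed tx skb is ever outstanding: TX_ECHO_SKB_MAX is 1, the queue
 * is stopped in start_xmit() and only woken again from the TX-complete
 * interrupt.
 */
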
/*
 * bfin can private data
 */
struct bfin_can_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	void __iomem *membase;
	int rx_irq;
	int tx_irq;
	int err_irq;
	unsigned short *pin_list;
};

/*
 * bfin can timing parameters
 */
static struct can_bittiming_const bfin_can_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	/*
	 * Although the BRP field can be set to any value, it is recommended
	 * that the value be greater than or equal to 4, as restrictions
	 * apply to the bit timing configuration when BRP is less than 4.
	 */
	.brp_min = 4,
	.brp_max = 1024,
	.brp_inc = 1,
};

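/*
 * Bit timing is programmed through two 16-bit registers (see
 * bfin_can_set_bittiming() below): CLOCK holds the prescaler value
 * BRP - 1, while TIMING packs TSEG1 (prop_seg + phase_seg1 - 1) into its
 * low bits, TSEG2 (phase_seg2 - 1) starting at bit 4 and SJW (sjw - 1)
 * starting at bit 8; SAM selects triple sampling.  Exact field widths are
 * defined by the Blackfin hardware reference manual.
 */
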
static int bfin_can_set_bittiming(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_bittiming *bt = &priv->can.bittiming;
	u16 clk, timing;

	clk = bt->brp - 1;
	timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
		((bt->phase_seg2 - 1) << 4);

	/*
	 * If the SAM bit is set, the input signal is oversampled three times
	 * at the SCLK rate.
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		timing |= SAM;

	bfin_write(&reg->clock, clk);
	bfin_write(&reg->timing, timing);

	dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
			clk, timing);

	return 0;
}

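/*
 * Mailbox layout as set up in bfin_can_set_reset_mode(): the direction
 * registers make mailboxes 0-15 receive and 16-31 transmit, but only
 * three mailboxes end up enabled - RECEIVE_STD_CHL for standard frames,
 * RECEIVE_EXT_CHL for extended frames (both with all-ones acceptance
 * masks) and TRANSMIT_CHL for outgoing frames.  Registers ending in ...1
 * cover mailboxes 0-15 and those ending in ...2 cover 16-31, hence the
 * TRANSMIT_CHL - 16 bit index used with MC2/TRS2/MBIM2.
 */
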
static void bfin_can_set_reset_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;
	int i;

	/* disable interrupts */
	bfin_write(&reg->mbim1, 0);
	bfin_write(&reg->mbim2, 0);
	bfin_write(&reg->gim, 0);

	/* reset can and enter configuration mode */
	bfin_write(&reg->control, SRS | CCR);
	SSYNC();
	bfin_write(&reg->control, CCR);
	SSYNC();
	while (!(bfin_read(&reg->control) & CCA)) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
				"fail to enter configuration mode\n");
			BUG();
		}
	}

	/*
	 * All mailbox configurations are marked as inactive
	 * by writing to CAN Mailbox Configuration Registers 1 and 2
	 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
	 */
	bfin_write(&reg->mc1, 0);
	bfin_write(&reg->mc2, 0);

	/* Set Mailbox Direction */
	bfin_write(&reg->md1, 0xFFFF);	/* mailboxes 0-15 are RX */
	bfin_write(&reg->md2, 0);	/* mailboxes 16-31 are TX */

	/* RECEIVE_STD_CHL */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
	}

	/* RECEIVE_EXT_CHL */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
	}

	bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
	bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	SSYNC();

	priv->can.state = CAN_STATE_STOPPED;
}

static void bfin_can_set_normal_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	/*
	 * leave configuration mode
	 */
	bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);

	while (bfin_read(&reg->status) & CCA) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
				"fail to leave configuration mode\n");
			BUG();
		}
	}

	/*
	 * clear _All_ tx and rx interrupts
	 */
	bfin_write(&reg->mbtif1, 0xFFFF);
	bfin_write(&reg->mbtif2, 0xFFFF);
	bfin_write(&reg->mbrif1, 0xFFFF);
	bfin_write(&reg->mbrif2, 0xFFFF);

	/*
	 * clear global interrupt status register
	 */
	bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */

	/*
	 * Initialize Interrupts
	 * - set bits in the mailbox interrupt mask register
	 * - global interrupt mask
	 */
	bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));

	bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
	SSYNC();
}

static void bfin_can_start(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	/* enter reset mode */
	if (priv->can.state != CAN_STATE_STOPPED)
		bfin_can_set_reset_mode(dev);

	/* leave reset mode */
	bfin_can_set_normal_mode(dev);
}

static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		bfin_can_start(dev);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

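/*
 * Transmit path: one frame at a time is loaded into TRANSMIT_CHL; the
 * queue stays stopped until the TX-complete interrupt wakes it, so the
 * single mailbox is never overwritten while busy.  CAN data bytes are
 * packed two per 16-bit data word in reverse order (word 0 carries bytes
 * 6/7, the last word carries bytes 0/1), mirroring the unpacking done in
 * bfin_can_rx().
 */
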
static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 dlc = cf->can_dlc;
	canid_t id = cf->can_id;
	u8 *data = cf->data;
	u16 val;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);

	/* fill id */
	if (id & CAN_EFF_FLAG) {
		bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
		val = ((id & 0x1FFF0000) >> 16) | IDE;
	} else
		val = (id << 2);
	if (id & CAN_RTR_FLAG)
		val |= RTR;
	bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);

	/* fill payload */
	for (i = 0; i < 8; i += 2) {
		val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
			((6 - i) < dlc ? (data[6 - i] << 8) : 0);
		bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
	}

	/* fill data length code */
	bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);

	can_put_echo_skb(skb, dev, 0);

	/* set transmit request */
	bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));

	return NETDEV_TX_OK;
}

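/*
 * Receive path: called from the RX interrupt with the mailbox interrupt
 * flags in 'isrc'.  The extended-frame mailbox is checked first, otherwise
 * the standard-frame mailbox is read; identifier, RTR flag, DLC and
 * payload are copied into a freshly allocated CAN skb and handed to the
 * stack via netif_rx().
 */
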
static void bfin_can_rx(struct net_device *dev, u16 isrc)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf;
	struct sk_buff *skb;
	int obj;
	int i;
	u16 val;

	skb = alloc_can_skb(dev, &cf);
	if (skb == NULL)
		return;

	/* get id */
	if (isrc & BIT(RECEIVE_EXT_CHL)) {
		/* extended frame format (EFF) */
		cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
			     & 0x1FFF) << 16)
			     + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
		cf->can_id |= CAN_EFF_FLAG;
		obj = RECEIVE_EXT_CHL;
	} else {
		/* standard frame format (SFF) */
		cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
			     & 0x1ffc) >> 2;
		obj = RECEIVE_STD_CHL;
	}
	if (bfin_read(&reg->chl[obj].id1) & RTR)
		cf->can_id |= CAN_RTR_FLAG;

	/* get data length code */
	cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);

	/* get payload */
	for (i = 0; i < 8; i += 2) {
		val = bfin_read(&reg->chl[obj].data[i]);
		cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
	}

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

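/*
 * Error handling: 'isrc' is the global interrupt status (GIS) and
 * 'status' the error status register (ESR).  Receive overflow, bus-off,
 * error-passive and error-warning conditions are reported to the stack as
 * a CAN error frame; on a state change the RX/TX error counters read from
 * CEC decide whether the warning/passive condition is attributed to the
 * transmitter or the receiver, and a non-zero ESR is reported as a bus
 * error with the matching protocol error type.
 */
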
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(dev, &cf);
	if (skb == NULL)
		return -ENOMEM;

	if (isrc & RMLIS) {
		/* data overrun interrupt */
		dev_dbg(dev->dev.parent, "data overrun interrupt\n");
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	}

	if (isrc & BOIS) {
		dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
	}

	if (isrc & EPIS) {
		/* error passive interrupt */
		dev_dbg(dev->dev.parent, "error passive interrupt\n");
		state = CAN_STATE_ERROR_PASSIVE;
	}

	if ((isrc & EWTIS) || (isrc & EWRIS)) {
		dev_dbg(dev->dev.parent,
			"Error Warning Transmit/Receive Interrupt\n");
		state = CAN_STATE_ERROR_WARNING;
	}

	if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
				state == CAN_STATE_ERROR_PASSIVE)) {
		u16 cec = bfin_read(&reg->cec);
		u8 rxerr = cec;
		u8 txerr = cec >> 8;

		cf->can_id |= CAN_ERR_CRTL;
		if (state == CAN_STATE_ERROR_WARNING) {
			priv->can.can_stats.error_warning++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		} else {
			priv->can.can_stats.error_passive++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
	}

	if (status) {
		priv->can.can_stats.bus_error++;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		if (status & BEF)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		else if (status & FER)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		else if (status & SER)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		else
			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
	}

	priv->can.state = state;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 0;
}

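/*
 * All three interrupt lines (RX, TX, error) are wired to this single
 * handler; the 'irq' argument is compared against the numbers stored in
 * priv to decide which status register to inspect and acknowledge.
 */
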
irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	u16 status, isrc;

	if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
		/* transmission complete interrupt */
		bfin_write(&reg->mbtif2, 0xFFFF);
		stats->tx_packets++;
		stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
		can_get_echo_skb(dev, 0);
		netif_wake_queue(dev);
	} else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
		/* receive interrupt */
		isrc = bfin_read(&reg->mbrif1);
		bfin_write(&reg->mbrif1, 0xFFFF);
		bfin_can_rx(dev, isrc);
	} else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
		/* error interrupt */
		isrc = bfin_read(&reg->gis);
		status = bfin_read(&reg->esr);
		bfin_write(&reg->gis, 0x7FF);
		bfin_can_err(dev, isrc, status);
	} else {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int bfin_can_open(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	int err;

	/* set chip into reset mode */
	bfin_can_set_reset_mode(dev);

	/* common open */
	err = open_candev(dev);
	if (err)
		goto exit_open;

	/* register interrupt handler */
	err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
			"bfin-can-rx", dev);
	if (err)
		goto exit_rx_irq;
	err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
			"bfin-can-tx", dev);
	if (err)
		goto exit_tx_irq;
	err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
			"bfin-can-err", dev);
	if (err)
		goto exit_err_irq;

	bfin_can_start(dev);

	netif_start_queue(dev);

	return 0;

exit_err_irq:
	free_irq(priv->tx_irq, dev);
exit_tx_irq:
	free_irq(priv->rx_irq, dev);
exit_rx_irq:
	close_candev(dev);
exit_open:
	return err;
}

static int bfin_can_close(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	bfin_can_set_reset_mode(dev);

	close_candev(dev);

	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);
	free_irq(priv->err_irq, dev);

	return 0;
}

struct net_device *alloc_bfin_candev(void)
{
	struct net_device *dev;
	struct bfin_can_priv *priv;

	dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->can.bittiming_const = &bfin_can_bittiming_const;
	priv->can.do_set_bittiming = bfin_can_set_bittiming;
	priv->can.do_set_mode = bfin_can_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	return dev;
}

static const struct net_device_ops bfin_can_netdev_ops = {
	.ndo_open	= bfin_can_open,
	.ndo_stop	= bfin_can_close,
	.ndo_start_xmit	= bfin_can_start_xmit,
};

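/*
 * The probe routine below expects the board file to describe the
 * controller via platform resources: one memory region for the CAN MMRs,
 * three IRQs in the order RX, TX, error, and a zero-terminated peripheral
 * pin list passed as platform_data.  A board entry might look roughly like
 * the sketch below; this is an illustration only - the MMR window, IRQ
 * names and P_CAN0_* pin macros are SoC/board-specific assumptions, not
 * part of this driver.
 *
 *	static unsigned short bfin_can_peripherals[] = {
 *		P_CAN0_RX, P_CAN0_TX, 0
 *	};
 *
 *	static struct resource bfin_can_resources[] = {
 *		{
 *			.start	= ...,		(SoC-specific CAN MMR window)
 *			.end	= ...,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		{ .start = IRQ_CAN_RX,    .flags = IORESOURCE_IRQ },
 *		{ .start = IRQ_CAN_TX,    .flags = IORESOURCE_IRQ },
 *		{ .start = IRQ_CAN_ERROR, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct platform_device bfin_can_device = {
 *		.name		= "bfin_can",
 *		.num_resources	= ARRAY_SIZE(bfin_can_resources),
 *		.resource	= bfin_can_resources,
 *		.dev		= {
 *			.platform_data = bfin_can_peripherals,
 *		},
 *	};
 */
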
static int __devinit bfin_can_probe(struct platform_device *pdev)
{
	int err;
	struct net_device *dev;
	struct bfin_can_priv *priv;
	struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
	unsigned short *pdata;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data provided!\n");
		err = -EINVAL;
		goto exit;
	}

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
		err = -EINVAL;
		goto exit;
	}

	if (!request_mem_region(res_mem->start, resource_size(res_mem),
				dev_name(&pdev->dev))) {
		err = -EBUSY;
		goto exit;
	}

	/* request peripheral pins */
	err = peripheral_request_list(pdata, dev_name(&pdev->dev));
	if (err)
		goto exit_mem_release;

	dev = alloc_bfin_candev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_peri_pin_free;
	}

	priv = netdev_priv(dev);
	priv->membase = (void __iomem *)res_mem->start;
	priv->rx_irq = rx_irq->start;
	priv->tx_irq = tx_irq->start;
	priv->err_irq = err_irq->start;
	priv->pin_list = pdata;
	priv->can.clock.freq = get_sclk();

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &bfin_can_netdev_ops;

	bfin_can_set_reset_mode(dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
		goto exit_candev_free;
	}

	dev_info(&pdev->dev,
		"%s device registered "
		"(reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
		DRV_NAME, (void *)priv->membase, priv->rx_irq,
		priv->tx_irq, priv->err_irq, priv->can.clock.freq);
	return 0;

exit_candev_free:
	free_candev(dev);
exit_peri_pin_free:
	peripheral_free_list(pdata);
exit_mem_release:
	release_mem_region(res_mem->start, resource_size(res_mem));
exit:
	return err;
}

static int __devexit bfin_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct resource *res;

	bfin_can_set_reset_mode(dev);

	unregister_candev(dev);

	dev_set_drvdata(&pdev->dev, NULL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	peripheral_free_list(priv->pin_list);

	free_candev(dev);
	return 0;
}

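/*
 * Power management: on suspend the controller is asked to enter sleep
 * mode by setting SMR and the code then polls for the SMACK
 * acknowledgement; on resume the interrupt register is written to 0,
 * which (per the in-line comment) takes the controller back out of sleep
 * mode.  Both steps are only performed while the interface is up.
 */
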
#ifdef CONFIG_PM
static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	if (netif_running(dev)) {
		/* enter sleep mode */
		bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
		SSYNC();
		while (!(bfin_read(&reg->intr) & SMACK)) {
			udelay(10);
			if (--timeout == 0) {
				dev_err(dev->dev.parent,
					"fail to enter sleep mode\n");
				BUG();
			}
		}
	}

	return 0;
}

static int bfin_can_resume(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;

	if (netif_running(dev)) {
		/* leave sleep mode */
		bfin_write(&reg->intr, 0);
		SSYNC();
	}

	return 0;
}

#else
#define bfin_can_suspend NULL
#define bfin_can_resume NULL
#endif	/* CONFIG_PM */

static struct platform_driver bfin_can_driver = {
	.probe = bfin_can_probe,
	.remove = __devexit_p(bfin_can_remove),
	.suspend = bfin_can_suspend,
	.resume = bfin_can_resume,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(bfin_can_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");