Merge tag 'for-linus-20190706' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / net / can / pch_can.c
blobdb41dddd57716924194f8267a12ab7015ac3f52c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 1999 - 2010 Intel Corporation.
4 * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
5 */
7 #include <linux/interrupt.h>
8 #include <linux/delay.h>
9 #include <linux/io.h>
10 #include <linux/module.h>
11 #include <linux/sched.h>
12 #include <linux/pci.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/skbuff.h>
18 #include <linux/can.h>
19 #include <linux/can/dev.h>
20 #include <linux/can/error.h>
22 #define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
23 #define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
24 #define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
25 #define PCH_CTRL_CCE BIT(6)
26 #define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
27 #define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
28 #define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
30 #define PCH_CMASK_RX_TX_SET 0x00f3
31 #define PCH_CMASK_RX_TX_GET 0x0073
32 #define PCH_CMASK_ALL 0xff
33 #define PCH_CMASK_NEWDAT BIT(2)
34 #define PCH_CMASK_CLRINTPND BIT(3)
35 #define PCH_CMASK_CTRL BIT(4)
36 #define PCH_CMASK_ARB BIT(5)
37 #define PCH_CMASK_MASK BIT(6)
38 #define PCH_CMASK_RDWR BIT(7)
39 #define PCH_IF_MCONT_NEWDAT BIT(15)
40 #define PCH_IF_MCONT_MSGLOST BIT(14)
41 #define PCH_IF_MCONT_INTPND BIT(13)
42 #define PCH_IF_MCONT_UMASK BIT(12)
43 #define PCH_IF_MCONT_TXIE BIT(11)
44 #define PCH_IF_MCONT_RXIE BIT(10)
45 #define PCH_IF_MCONT_RMTEN BIT(9)
46 #define PCH_IF_MCONT_TXRQXT BIT(8)
47 #define PCH_IF_MCONT_EOB BIT(7)
48 #define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
49 #define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
50 #define PCH_ID2_DIR BIT(13)
51 #define PCH_ID2_XTD BIT(14)
52 #define PCH_ID_MSGVAL BIT(15)
53 #define PCH_IF_CREQ_BUSY BIT(15)
55 #define PCH_STATUS_INT 0x8000
56 #define PCH_RP 0x00008000
57 #define PCH_REC 0x00007f00
58 #define PCH_TEC 0x000000ff
60 #define PCH_TX_OK BIT(3)
61 #define PCH_RX_OK BIT(4)
62 #define PCH_EPASSIV BIT(5)
63 #define PCH_EWARN BIT(6)
64 #define PCH_BUS_OFF BIT(7)
66 /* bit position of certain controller bits. */
67 #define PCH_BIT_BRP_SHIFT 0
68 #define PCH_BIT_SJW_SHIFT 6
69 #define PCH_BIT_TSEG1_SHIFT 8
70 #define PCH_BIT_TSEG2_SHIFT 12
71 #define PCH_BIT_BRPE_BRPE_SHIFT 6
73 #define PCH_MSK_BITT_BRP 0x3f
74 #define PCH_MSK_BRPE_BRPE 0x3c0
75 #define PCH_MSK_CTRL_IE_SIE_EIE 0x07
76 #define PCH_COUNTER_LIMIT 10
78 #define PCH_CAN_CLK 50000000 /* 50MHz */
81 * Define the number of message object.
82 * PCH CAN communications are done via Message RAM.
83 * The Message RAM consists of 32 message objects.
85 #define PCH_RX_OBJ_NUM 26
86 #define PCH_TX_OBJ_NUM 6
87 #define PCH_RX_OBJ_START 1
88 #define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
89 #define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
90 #define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
92 #define PCH_FIFO_THRESH 16
94 /* TxRqst2 show status of MsgObjNo.17~32 */
95 #define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
96 (PCH_RX_OBJ_END - 16))
98 enum pch_ifreg {
99 PCH_RX_IFREG,
100 PCH_TX_IFREG,
103 enum pch_can_err {
104 PCH_STUF_ERR = 1,
105 PCH_FORM_ERR,
106 PCH_ACK_ERR,
107 PCH_BIT1_ERR,
108 PCH_BIT0_ERR,
109 PCH_CRC_ERR,
110 PCH_LEC_ALL,
113 enum pch_can_mode {
114 PCH_CAN_ENABLE,
115 PCH_CAN_DISABLE,
116 PCH_CAN_ALL,
117 PCH_CAN_NONE,
118 PCH_CAN_STOP,
119 PCH_CAN_RUN,
122 struct pch_can_if_regs {
123 u32 creq;
124 u32 cmask;
125 u32 mask1;
126 u32 mask2;
127 u32 id1;
128 u32 id2;
129 u32 mcont;
130 u32 data[4];
131 u32 rsv[13];
134 struct pch_can_regs {
135 u32 cont;
136 u32 stat;
137 u32 errc;
138 u32 bitt;
139 u32 intr;
140 u32 opt;
141 u32 brpe;
142 u32 reserve;
143 struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
144 u32 reserve1[8];
145 u32 treq1;
146 u32 treq2;
147 u32 reserve2[6];
148 u32 data1;
149 u32 data2;
150 u32 reserve3[6];
151 u32 canipend1;
152 u32 canipend2;
153 u32 reserve4[6];
154 u32 canmval1;
155 u32 canmval2;
156 u32 reserve5[37];
157 u32 srst;
/* Per-device private state, embedded in the netdev private area. */
struct pch_can_priv {
	struct can_priv can;		/* generic CAN controller state */
	struct pci_dev *dev;
	u32 tx_enable[PCH_TX_OBJ_END];	/* TX IE state saved across suspend */
	u32 rx_enable[PCH_TX_OBJ_END];	/* RX IE state saved across suspend */
	u32 rx_link[PCH_TX_OBJ_END];	/* RX FIFO link (EOB) saved across suspend */
	u32 int_enables;		/* IE/SIE/EIE bits saved across suspend */
	struct net_device *ndev;
	struct pch_can_regs __iomem *regs;	/* mapped BAR1 registers */
	struct napi_struct napi;
	int tx_obj; /* Point next Tx Obj index */
	int use_msi;			/* nonzero when MSI was enabled in probe */
};
/* Bit-timing limits advertised to the CAN core for this controller. */
static const struct can_bittiming_const pch_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024, /* 6bit + extended 4bit */
	.brp_inc = 1,
};
/* PCI match table: Intel PCH CAN function, device id 0x8818. */
static const struct pci_device_id pch_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
192 static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
194 iowrite32(ioread32(addr) | mask, addr);
197 static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
199 iowrite32(ioread32(addr) & ~mask, addr);
202 static void pch_can_set_run_mode(struct pch_can_priv *priv,
203 enum pch_can_mode mode)
205 switch (mode) {
206 case PCH_CAN_RUN:
207 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
208 break;
210 case PCH_CAN_STOP:
211 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
212 break;
214 default:
215 netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
216 break;
220 static void pch_can_set_optmode(struct pch_can_priv *priv)
222 u32 reg_val = ioread32(&priv->regs->opt);
224 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
225 reg_val |= PCH_OPT_SILENT;
227 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
228 reg_val |= PCH_OPT_LBACK;
230 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
231 iowrite32(reg_val, &priv->regs->opt);
234 static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
236 int counter = PCH_COUNTER_LIMIT;
237 u32 ifx_creq;
239 iowrite32(num, creq_addr);
240 while (counter) {
241 ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
242 if (!ifx_creq)
243 break;
244 counter--;
245 udelay(1);
247 if (!counter)
248 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
251 static void pch_can_set_int_enables(struct pch_can_priv *priv,
252 enum pch_can_mode interrupt_no)
254 switch (interrupt_no) {
255 case PCH_CAN_DISABLE:
256 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
257 break;
259 case PCH_CAN_ALL:
260 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
261 break;
263 case PCH_CAN_NONE:
264 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
265 break;
267 default:
268 netdev_err(priv->ndev, "Invalid interrupt number.\n");
269 break;
273 static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
274 int set, enum pch_ifreg dir)
276 u32 ie;
278 if (dir)
279 ie = PCH_IF_MCONT_TXIE;
280 else
281 ie = PCH_IF_MCONT_RXIE;
283 /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
284 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
285 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
287 /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
288 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
289 &priv->regs->ifregs[dir].cmask);
291 if (set) {
292 /* Setting the MsgVal and RxIE/TxIE bits */
293 pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
294 pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
295 } else {
296 /* Clearing the MsgVal and RxIE/TxIE bits */
297 pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
298 pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
301 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
304 static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
306 int i;
308 /* Traversing to obtain the object configured as receivers. */
309 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
310 pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
313 static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
315 int i;
317 /* Traversing to obtain the object configured as transmit object. */
318 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
319 pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
322 static u32 pch_can_int_pending(struct pch_can_priv *priv)
324 return ioread32(&priv->regs->intr) & 0xffff;
327 static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
329 int i; /* Msg Obj ID (1~32) */
331 for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
332 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
333 iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
334 iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
335 iowrite32(0x0, &priv->regs->ifregs[0].id1);
336 iowrite32(0x0, &priv->regs->ifregs[0].id2);
337 iowrite32(0x0, &priv->regs->ifregs[0].mcont);
338 iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
339 iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
340 iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
341 iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
342 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
343 PCH_CMASK_ARB | PCH_CMASK_CTRL,
344 &priv->regs->ifregs[0].cmask);
345 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
/*
 * Configure objects 1..PCH_RX_OBJ_END as one receive FIFO (only the
 * last object carries EOB) and objects PCH_TX_OBJ_START..PCH_TX_OBJ_END
 * as transmit objects.  RX objects use an accept-everything mask.
 */
static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
	int i;

	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Load the object into the IF1 register set. */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);

		iowrite32(0x0, &priv->regs->ifregs[0].id1);
		iowrite32(0x0, &priv->regs->ifregs[0].id2);

		/* Use the acceptance mask for this object. */
		pch_can_bit_set(&priv->regs->ifregs[0].mcont,
				PCH_IF_MCONT_UMASK);

		/* In case FIFO mode, Last EoB of Rx Obj must be 1 */
		if (i == PCH_RX_OBJ_END)
			pch_can_bit_set(&priv->regs->ifregs[0].mcont,
					PCH_IF_MCONT_EOB);
		else
			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
					  PCH_IF_MCONT_EOB);

		/* Clear the mask: accept every ID/direction/frame type. */
		iowrite32(0, &priv->regs->ifregs[0].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
				  0x1fff | PCH_MASK2_MDIR_MXTD);

		/* Setting CMASK for writing */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
	}

	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);

		/* Set the DIR bit: this object transmits. */
		iowrite32(0x0, &priv->regs->ifregs[1].id1);
		iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);

		/* Setting EOB bit for transmitter */
		iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
			  &priv->regs->ifregs[1].mcont);

		iowrite32(0, &priv->regs->ifregs[1].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);

		/* Setting CMASK for writing */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
	}
}
405 static void pch_can_init(struct pch_can_priv *priv)
407 /* Stopping the Can device. */
408 pch_can_set_run_mode(priv, PCH_CAN_STOP);
410 /* Clearing all the message object buffers. */
411 pch_can_clear_if_buffers(priv);
413 /* Configuring the respective message object as either rx/tx object. */
414 pch_can_config_rx_tx_buffers(priv);
416 /* Enabling the interrupts. */
417 pch_can_set_int_enables(priv, PCH_CAN_ALL);
420 static void pch_can_release(struct pch_can_priv *priv)
422 /* Stooping the CAN device. */
423 pch_can_set_run_mode(priv, PCH_CAN_STOP);
425 /* Disabling the interrupts. */
426 pch_can_set_int_enables(priv, PCH_CAN_NONE);
428 /* Disabling all the receive object. */
429 pch_can_set_rx_all(priv, 0);
431 /* Disabling all the transmit object. */
432 pch_can_set_tx_all(priv, 0);
/* This function clears interrupt(s) from the CAN device. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
	/* Clear interrupt for a receive object (via IF1). */
	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
		/* Setting CMASK for clearing the reception interrupts. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing NewDat & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
		/*
		 * Setting CMASK for clearing interrupts for frame transmission.
		 */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[1].cmask);

		/* Resetting the ID registers. */
		pch_can_bit_set(&priv->regs->ifregs[1].id2,
				PCH_ID2_DIR | (0x7ff << 2));
		iowrite32(0x0, &priv->regs->ifregs[1].id1);

		/* Clearing NewDat, TxRqst & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
				  PCH_IF_MCONT_TXRQXT);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
	}
}
472 static void pch_can_reset(struct pch_can_priv *priv)
474 /* write to sw reset register */
475 iowrite32(1, &priv->regs->srst);
476 iowrite32(0, &priv->regs->srst);
479 static void pch_can_error(struct net_device *ndev, u32 status)
481 struct sk_buff *skb;
482 struct pch_can_priv *priv = netdev_priv(ndev);
483 struct can_frame *cf;
484 u32 errc, lec;
485 struct net_device_stats *stats = &(priv->ndev->stats);
486 enum can_state state = priv->can.state;
488 skb = alloc_can_err_skb(ndev, &cf);
489 if (!skb)
490 return;
492 if (status & PCH_BUS_OFF) {
493 pch_can_set_tx_all(priv, 0);
494 pch_can_set_rx_all(priv, 0);
495 state = CAN_STATE_BUS_OFF;
496 cf->can_id |= CAN_ERR_BUSOFF;
497 priv->can.can_stats.bus_off++;
498 can_bus_off(ndev);
501 errc = ioread32(&priv->regs->errc);
502 /* Warning interrupt. */
503 if (status & PCH_EWARN) {
504 state = CAN_STATE_ERROR_WARNING;
505 priv->can.can_stats.error_warning++;
506 cf->can_id |= CAN_ERR_CRTL;
507 if (((errc & PCH_REC) >> 8) > 96)
508 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
509 if ((errc & PCH_TEC) > 96)
510 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
511 netdev_dbg(ndev,
512 "%s -> Error Counter is more than 96.\n", __func__);
514 /* Error passive interrupt. */
515 if (status & PCH_EPASSIV) {
516 priv->can.can_stats.error_passive++;
517 state = CAN_STATE_ERROR_PASSIVE;
518 cf->can_id |= CAN_ERR_CRTL;
519 if (errc & PCH_RP)
520 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
521 if ((errc & PCH_TEC) > 127)
522 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
523 netdev_dbg(ndev,
524 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
527 lec = status & PCH_LEC_ALL;
528 switch (lec) {
529 case PCH_STUF_ERR:
530 cf->data[2] |= CAN_ERR_PROT_STUFF;
531 priv->can.can_stats.bus_error++;
532 stats->rx_errors++;
533 break;
534 case PCH_FORM_ERR:
535 cf->data[2] |= CAN_ERR_PROT_FORM;
536 priv->can.can_stats.bus_error++;
537 stats->rx_errors++;
538 break;
539 case PCH_ACK_ERR:
540 cf->can_id |= CAN_ERR_ACK;
541 priv->can.can_stats.bus_error++;
542 stats->rx_errors++;
543 break;
544 case PCH_BIT1_ERR:
545 case PCH_BIT0_ERR:
546 cf->data[2] |= CAN_ERR_PROT_BIT;
547 priv->can.can_stats.bus_error++;
548 stats->rx_errors++;
549 break;
550 case PCH_CRC_ERR:
551 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
552 priv->can.can_stats.bus_error++;
553 stats->rx_errors++;
554 break;
555 case PCH_LEC_ALL: /* Written by CPU. No error status */
556 break;
559 cf->data[6] = errc & PCH_TEC;
560 cf->data[7] = (errc & PCH_REC) >> 8;
562 priv->can.state = state;
563 netif_receive_skb(skb);
565 stats->rx_packets++;
566 stats->rx_bytes += cf->can_dlc;
569 static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
571 struct net_device *ndev = (struct net_device *)dev_id;
572 struct pch_can_priv *priv = netdev_priv(ndev);
574 if (!pch_can_int_pending(priv))
575 return IRQ_NONE;
577 pch_can_set_int_enables(priv, PCH_CAN_NONE);
578 napi_schedule(&priv->napi);
579 return IRQ_HANDLED;
/*
 * Acknowledge a received RX object relative to the FIFO threshold:
 * below it, clear IntPnd directly; above it, go through
 * pch_can_int_clr(); exactly at it, batch-clear the first half.
 */
static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
{
	if (obj_id < PCH_FIFO_THRESH) {
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
			  PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_INTPND);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
	} else if (obj_id > PCH_FIFO_THRESH) {
		pch_can_int_clr(priv, obj_id);
	} else if (obj_id == PCH_FIFO_THRESH) {
		int cnt;

		/* Threshold reached: clear objects 1..PCH_FIFO_THRESH. */
		for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
			pch_can_int_clr(priv, cnt + 1);
	}
}
604 static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
606 struct pch_can_priv *priv = netdev_priv(ndev);
607 struct net_device_stats *stats = &(priv->ndev->stats);
608 struct sk_buff *skb;
609 struct can_frame *cf;
611 netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
612 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
613 PCH_IF_MCONT_MSGLOST);
614 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
615 &priv->regs->ifregs[0].cmask);
616 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
618 skb = alloc_can_err_skb(ndev, &cf);
619 if (!skb)
620 return;
622 cf->can_id |= CAN_ERR_CRTL;
623 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
624 stats->rx_over_errors++;
625 stats->rx_errors++;
627 netif_receive_skb(skb);
630 static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
632 u32 reg;
633 canid_t id;
634 int rcv_pkts = 0;
635 struct sk_buff *skb;
636 struct can_frame *cf;
637 struct pch_can_priv *priv = netdev_priv(ndev);
638 struct net_device_stats *stats = &(priv->ndev->stats);
639 int i;
640 u32 id2;
641 u16 data_reg;
643 do {
644 /* Reading the message object from the Message RAM */
645 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
646 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
648 /* Reading the MCONT register. */
649 reg = ioread32(&priv->regs->ifregs[0].mcont);
651 if (reg & PCH_IF_MCONT_EOB)
652 break;
654 /* If MsgLost bit set. */
655 if (reg & PCH_IF_MCONT_MSGLOST) {
656 pch_can_rx_msg_lost(ndev, obj_num);
657 rcv_pkts++;
658 quota--;
659 obj_num++;
660 continue;
661 } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
662 obj_num++;
663 continue;
666 skb = alloc_can_skb(priv->ndev, &cf);
667 if (!skb) {
668 netdev_err(ndev, "alloc_can_skb Failed\n");
669 return rcv_pkts;
672 /* Get Received data */
673 id2 = ioread32(&priv->regs->ifregs[0].id2);
674 if (id2 & PCH_ID2_XTD) {
675 id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
676 id |= (((id2) & 0x1fff) << 16);
677 cf->can_id = id | CAN_EFF_FLAG;
678 } else {
679 id = (id2 >> 2) & CAN_SFF_MASK;
680 cf->can_id = id;
683 if (id2 & PCH_ID2_DIR)
684 cf->can_id |= CAN_RTR_FLAG;
686 cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
687 ifregs[0].mcont)) & 0xF);
689 for (i = 0; i < cf->can_dlc; i += 2) {
690 data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
691 cf->data[i] = data_reg;
692 cf->data[i + 1] = data_reg >> 8;
695 netif_receive_skb(skb);
696 rcv_pkts++;
697 stats->rx_packets++;
698 quota--;
699 stats->rx_bytes += cf->can_dlc;
701 pch_fifo_thresh(priv, obj_num);
702 obj_num++;
703 } while (quota > 0);
705 return rcv_pkts;
/*
 * Complete one transmitted object @int_stat: loop the echo skb back to
 * the stack, clear the object's IntPnd via IF2 and account TX stats.
 * Wakes the queue once the last TX object has finished.
 */
static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	u32 dlc;

	/* Echo index is the 0-based offset into the TX object range. */
	can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
	iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
		  &priv->regs->ifregs[1].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
	dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
			  PCH_IF_MCONT_DLC);
	stats->tx_bytes += dlc;
	stats->tx_packets++;
	if (int_stat == PCH_TX_OBJ_END)
		netif_wake_queue(ndev);
}
/*
 * NAPI poll: dispatch the pending interrupt source — a status
 * interrupt (errors / TX-OK / RX-OK) or a message-object number in the
 * RX or TX range.  Interrupts were masked by pch_can_interrupt() and
 * are unconditionally re-enabled here.  Returns packets processed.
 */
static int pch_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct pch_can_priv *priv = netdev_priv(ndev);
	u32 int_stat;
	u32 reg_stat;
	int quota_save = quota;

	int_stat = pch_can_int_pending(priv);
	if (!int_stat)
		goto end;

	if (int_stat == PCH_STATUS_INT) {
		reg_stat = ioread32(&priv->regs->stat);

		/* LEC == PCH_LEC_ALL is the CPU-written "no error" value. */
		if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
		    ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
			pch_can_error(ndev, reg_stat);
			quota--;
		}

		/* Acknowledge successful TX/RX status bits. */
		if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
			pch_can_bit_clear(&priv->regs->stat,
					  reg_stat & (PCH_TX_OK | PCH_RX_OK));

		int_stat = pch_can_int_pending(priv);
	}

	if (quota == 0)
		goto end;

	if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
		quota -= pch_can_rx_normal(ndev, int_stat, quota);
	} else if ((int_stat >= PCH_TX_OBJ_START) &&
		   (int_stat <= PCH_TX_OBJ_END)) {
		/* Handle transmission interrupt */
		pch_can_tx_complete(ndev, int_stat);
	}

end:
	napi_complete(napi);
	pch_can_set_int_enables(priv, PCH_CAN_ALL);

	return quota_save - quota;
}
772 static int pch_set_bittiming(struct net_device *ndev)
774 struct pch_can_priv *priv = netdev_priv(ndev);
775 const struct can_bittiming *bt = &priv->can.bittiming;
776 u32 canbit;
777 u32 bepe;
779 /* Setting the CCE bit for accessing the Can Timing register. */
780 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
782 canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
783 canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
784 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
785 canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
786 bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
787 iowrite32(canbit, &priv->regs->bitt);
788 iowrite32(bepe, &priv->regs->brpe);
789 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
791 return 0;
794 static void pch_can_start(struct net_device *ndev)
796 struct pch_can_priv *priv = netdev_priv(ndev);
798 if (priv->can.state != CAN_STATE_STOPPED)
799 pch_can_reset(priv);
801 pch_set_bittiming(ndev);
802 pch_can_set_optmode(priv);
804 pch_can_set_tx_all(priv, 1);
805 pch_can_set_rx_all(priv, 1);
807 /* Setting the CAN to run mode. */
808 pch_can_set_run_mode(priv, PCH_CAN_RUN);
810 priv->can.state = CAN_STATE_ERROR_ACTIVE;
812 return;
815 static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
817 int ret = 0;
819 switch (mode) {
820 case CAN_MODE_START:
821 pch_can_start(ndev);
822 netif_wake_queue(ndev);
823 break;
824 default:
825 ret = -EOPNOTSUPP;
826 break;
829 return ret;
832 static int pch_can_open(struct net_device *ndev)
834 struct pch_can_priv *priv = netdev_priv(ndev);
835 int retval;
837 /* Regstering the interrupt. */
838 retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
839 ndev->name, ndev);
840 if (retval) {
841 netdev_err(ndev, "request_irq failed.\n");
842 goto req_irq_err;
845 /* Open common can device */
846 retval = open_candev(ndev);
847 if (retval) {
848 netdev_err(ndev, "open_candev() failed %d\n", retval);
849 goto err_open_candev;
852 pch_can_init(priv);
853 pch_can_start(ndev);
854 napi_enable(&priv->napi);
855 netif_start_queue(ndev);
857 return 0;
859 err_open_candev:
860 free_irq(priv->dev->irq, ndev);
861 req_irq_err:
862 pch_can_release(priv);
864 return retval;
/* ndo_stop: tear down in reverse order of pch_can_open(). */
static int pch_close(struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	pch_can_release(priv);
	free_irq(priv->dev->irq, ndev);
	close_candev(ndev);
	priv->can.state = CAN_STATE_STOPPED;
	return 0;
}
/*
 * ndo_start_xmit: copy the frame into the next free TX message object
 * via the IF2 register set and request transmission.  The queue is
 * stopped when the last TX object is taken while transmit requests are
 * still pending in TREQ2.
 */
static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	int tx_obj_no;
	int i;
	u32 id2;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	tx_obj_no = priv->tx_obj;
	if (priv->tx_obj == PCH_TX_OBJ_END) {
		/* Wrapping around: stall if earlier requests still pend. */
		if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
			netif_stop_queue(ndev);

		priv->tx_obj = PCH_TX_OBJ_START;
	} else {
		priv->tx_obj++;
	}

	/* Setting the CMASK register. */
	pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);

	/* If ID extended is set. */
	if (cf->can_id & CAN_EFF_FLAG) {
		iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
		id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
	} else {
		iowrite32(0, &priv->regs->ifregs[1].id1);
		id2 = (cf->can_id & CAN_SFF_MASK) << 2;
	}

	id2 |= PCH_ID_MSGVAL;

	/* DIR set means transmit; leave it clear for remote frames. */
	if (!(cf->can_id & CAN_RTR_FLAG))
		id2 |= PCH_ID2_DIR;

	iowrite32(id2, &priv->regs->ifregs[1].id2);

	/* Copy data to register (payload is 16 bits per data word). */
	for (i = 0; i < cf->can_dlc; i += 2)
		iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
			  &priv->regs->ifregs[1].data[i / 2]);

	/* Queue the echo skb under the object's 0-based index. */
	can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);

	/* Set the size of the data. Update if2_mcont */
	iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
		  PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);

	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);

	return NETDEV_TX_OK;
}
/* Standard CAN net_device callbacks. */
static const struct net_device_ops pch_can_netdev_ops = {
	.ndo_open = pch_can_open,
	.ndo_stop = pch_close,
	.ndo_start_xmit = pch_xmit,
	.ndo_change_mtu = can_change_mtu,
};
/* PCI remove: unregister the netdev, quiesce the core, free resources. */
static void pch_can_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(ndev);

	unregister_candev(priv->ndev);
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* NOTE(review): pch_can_reset() writes a BAR register after
	 * pci_disable_device() and before pci_iounmap() — confirm this
	 * ordering is intentional. */
	pch_can_reset(priv);
	pci_iounmap(pdev, priv->regs);
	free_candev(priv->ndev);
}
960 #ifdef CONFIG_PM
961 static void pch_can_set_int_custom(struct pch_can_priv *priv)
963 /* Clearing the IE, SIE and EIE bits of Can control register. */
964 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
966 /* Appropriately setting them. */
967 pch_can_bit_set(&priv->regs->cont,
968 ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
971 /* This function retrieves interrupt enabled for the CAN device. */
972 static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
974 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
975 return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
978 static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
979 enum pch_ifreg dir)
981 u32 ie, enable;
983 if (dir)
984 ie = PCH_IF_MCONT_RXIE;
985 else
986 ie = PCH_IF_MCONT_TXIE;
988 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
989 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
991 if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
992 ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
993 enable = 1;
994 else
995 enable = 0;
997 return enable;
1000 static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
1001 u32 buffer_num, int set)
1003 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
1004 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1005 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
1006 &priv->regs->ifregs[0].cmask);
1007 if (set)
1008 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
1009 PCH_IF_MCONT_EOB);
1010 else
1011 pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
1013 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1016 static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
1018 u32 link;
1020 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
1021 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1023 if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
1024 link = 0;
1025 else
1026 link = 1;
1027 return link;
1030 static int pch_can_get_buffer_status(struct pch_can_priv *priv)
1032 return (ioread32(&priv->regs->treq1) & 0xffff) |
1033 (ioread32(&priv->regs->treq2) << 16);
/*
 * Legacy PM suspend: stop the controller, wait for in-flight
 * transmissions, snapshot interrupt and per-object enable state into
 * priv, then put the PCI device to sleep.
 */
static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	int retval;
	u32 buf_stat;	/* Variable for reading the transmit buffer status. */
	int counter = PCH_COUNTER_LIMIT;

	struct net_device *dev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(dev);

	/* Stop the CAN controller */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Indicate that we are about to / in suspend */
	priv->can.state = CAN_STATE_STOPPED;

	/* Waiting for all transmission to complete. */
	while (counter) {
		buf_stat = pch_can_get_buffer_status(priv);
		if (!buf_stat)
			break;
		counter--;
		udelay(1);
	}
	if (!counter)
		dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);

	/* Save interrupt configuration and then disable them */
	priv->int_enables = pch_can_get_int_enables(priv);
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Save Tx buffer enable state */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_TX_IFREG);

	/* Disable all Transmit buffers */
	pch_can_set_tx_all(priv, 0);

	/* Save Rx buffer enable state */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
							     PCH_RX_IFREG);
		priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
	}

	/* Disable all Receive buffers */
	pch_can_set_rx_all(priv, 0);
	retval = pci_save_state(pdev);
	if (retval) {
		dev_err(&pdev->dev, "pci_save_state failed.\n");
	} else {
		/* Only power down when the PCI state was saved cleanly. */
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	return retval;
}
/*
 * Legacy PM resume: re-enable the PCI device and rebuild the full
 * controller state saved by pch_can_suspend().
 */
static int pch_can_resume(struct pci_dev *pdev)
{
	int i;
	int retval;
	struct net_device *dev = pci_get_drvdata(pdev);
	struct pch_can_priv *priv = netdev_priv(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "pci_enable_device failed.\n");
		return retval;
	}

	pci_enable_wake(pdev, PCI_D3hot, 0);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Disabling all interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);

	/* Setting the CAN device in Stop Mode. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Configuring the transmit and receive buffers. */
	pch_can_config_rx_tx_buffers(priv);

	/* Restore the CAN state */
	pch_set_bittiming(dev);

	/* Listen/Active */
	pch_can_set_optmode(priv);

	/* Enabling the transmit buffer. */
	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
		pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);

	/* Configuring the receive buffer and enabling them. */
	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
		/* Restore buffer link */
		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);

		/* Restore buffer enables */
		pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
	}

	/* Enable CAN Interrupts */
	pch_can_set_int_custom(priv);

	/* Restore Run Mode */
	pch_can_set_run_mode(priv, PCH_CAN_RUN);

	return retval;
}
1151 #else
1152 #define pch_can_suspend NULL
1153 #define pch_can_resume NULL
1154 #endif
/* Report the TX/RX error counters from CANERRC to the CAN core. */
static int pch_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct pch_can_priv *priv = netdev_priv(dev);
	u32 errc = ioread32(&priv->regs->errc);

	bec->txerr = errc & PCH_TEC;		/* TEC: bits 7:0 */
	bec->rxerr = (errc & PCH_REC) >> 8;	/* REC: bits 14:8 */

	return 0;
}
/*
 * PCI probe: map BAR1, allocate the candev with one echo slot per TX
 * object, wire up netdev/NAPI callbacks, try MSI (optional) and
 * register with the CAN core.
 */
static int pch_can_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct net_device *ndev;
	struct pch_can_priv *priv;
	int rc;
	void __iomem *addr;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
		goto probe_exit_endev;
	}

	rc = pci_request_regions(pdev, KBUILD_MODNAME);
	if (rc) {
		dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
		goto probe_exit_pcireq;
	}

	/* The controller registers live in BAR 1. */
	addr = pci_iomap(pdev, 1, 0);
	if (!addr) {
		rc = -EIO;
		dev_err(&pdev->dev, "Failed pci_iomap\n");
		goto probe_exit_ipmap;
	}

	/* Echo-skb count == number of TX message objects. */
	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
	if (!ndev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "Failed alloc_candev\n");
		goto probe_exit_alloc_candev;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->regs = addr;
	priv->dev = pdev;
	priv->can.bittiming_const = &pch_can_bittiming_const;
	priv->can.do_set_mode = pch_can_do_set_mode;
	priv->can.do_get_berr_counter = pch_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_LOOPBACK;
	priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */

	ndev->irq = pdev->irq;
	ndev->flags |= IFF_ECHO;	/* local echo of sent frames */

	pci_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &pch_can_netdev_ops;
	priv->can.clock.freq = PCH_CAN_CLK; /* Hz */

	netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);

	/* MSI is optional; fall back to legacy interrupts on failure.
	 * NOTE(review): netdev_err is used for these informational
	 * messages — netdev_info would be the conventional level. */
	rc = pci_enable_msi(priv->dev);
	if (rc) {
		netdev_err(ndev, "PCH CAN opened without MSI\n");
		priv->use_msi = 0;
	} else {
		netdev_err(ndev, "PCH CAN opened with MSI\n");
		pci_set_master(pdev);
		priv->use_msi = 1;
	}

	rc = register_candev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
		goto probe_exit_reg_candev;
	}

	return 0;

probe_exit_reg_candev:
	if (priv->use_msi)
		pci_disable_msi(priv->dev);
	free_candev(ndev);
probe_exit_alloc_candev:
	pci_iounmap(pdev, addr);
probe_exit_ipmap:
	pci_release_regions(pdev);
probe_exit_pcireq:
	pci_disable_device(pdev);
probe_exit_endev:
	return rc;
}
/* PCI driver glue; the legacy suspend/resume hooks compile to NULL
 * when CONFIG_PM is disabled. */
static struct pci_driver pch_can_pci_driver = {
	.name = "pch_can",
	.id_table = pch_pci_tbl,
	.probe = pch_can_probe,
	.remove = pch_can_remove,
	.suspend = pch_can_suspend,
	.resume = pch_can_resume,
};
1264 module_pci_driver(pch_can_pci_driver);
1266 MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
1267 MODULE_LICENSE("GPL v2");
1268 MODULE_VERSION("0.94");