// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.25)
 *  - PEAK linux canfd driver
 *  - Altera Avalon EPCS flash controller driver
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
#define KVASER_PCIEFD_DMA_COUNT 2

#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4)

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)

#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16

#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
	KVASER_PCIEFD_KCAN_STAT_IRM)

/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)

#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26

#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16

/* Kvaser KCAN packet types */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0
#define KVASER_PCIEFD_PACK_TYPE_ACK 1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9

/* Kvaser KCAN packet common definitions */
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28

/* Kvaser KCAN TDATA and RDATA first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)

/* Kvaser KCAN APACKET */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11)

/* Kvaser KCAN SPACK first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
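
/* Note on the common packet layout, restated from the definitions above:
 * every packet on the receive path carries its sequence number in bits 0-7,
 * its channel id from bit 25 and its packet type from bit 28 of the second
 * header word; the packet handlers further down decode them accordingly.
 */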
struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};
struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};
struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};
struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
static struct pci_device_id kvaser_pciefd_id_table[] = {
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
	{ 0,},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
/* Onboard flash memory functions */
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 res;
	int ret;

	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				 res, res & msk, 0, 10);

	return ret;
}
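
/* The EPCS SPI core is full duplex: every byte shifted out clocks one byte
 * in. kvaser_pciefd_spi_cmd() below therefore discards the Rx bytes
 * produced while it transmits the command, and then transmits dummy zero
 * bytes to generate clock pulses while it receives the reply.
 */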
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res;
	u32 crc;
	u8 *crc_buff;
	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}
static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;

	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}
static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* Read electronic signature */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, the WIP should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd;

	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}
static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}
static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 cmd;
	u32 mode;
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u32 pwm_ctrl;
	u32 top;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
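
/* The PWM period is derived from the bus clock: top = bus_freq / (2 * 500
 * kHz) - 1 yields roughly a 500 kHz output, and the trigger value computed
 * below approximates a 95% duty cycle. As an illustrative (not
 * board-specific) example, a 62 MHz bus clock would give top = 61.
 */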
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	u32 pwm_ctrl;
	u32 top;
	u32 trigger;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 KHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}
static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	close_candev(netdev);

	return ret;
}
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}
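
/* Tx FIFO write protocol, as used below: the two header words and all but
 * the last data word are written to KCAN_FIFO_REG; writing the final word
 * (or a dummy zero for frames without data) to KCAN_FIFO_LAST_REG is what
 * commits the packet to the FIFO.
 */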
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}
static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}
static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;
	return 0;
}
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}
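
/* Each Rx DMA buffer has an 8-byte map entry at KVASER_PCIEFD_DMA_MAP_BASE:
 * the low word holds the lower address bits, the high word the upper ones,
 * and bit 0 of the low word (KVASER_PCIEFD_64BIT_DMA_BIT) flags a 64-bit
 * address.
 */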
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, data, cf->len);

	shhwtstamps = skb_hwtstamps(skb);

	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	return netif_rx(skb);
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
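
/* The new state is derived either from the explicit flags in the status
 * packet (bus off, error passive, error warning) or from the classic CAN
 * error counter thresholds (96, 128, 255). tx_state/rx_state report which
 * counter dominates and are consumed by can_change_state().
 */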
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	netif_rx(skb);
	return 0;
}
static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);

	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		stats->rx_bytes += cf->can_dlc;
		stats->rx_packets++;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}
static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}
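
/* DMA buffer packet layout, as parsed below: one 32-bit word giving the
 * packet's size in words, two 32-bit header words, a 64-bit timestamp, and
 * then (for data packets) the payload words. A size word of zero marks the
 * end of the used part of the buffer.
 */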
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size, ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}
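
/* Top-level IRQ fan-out: in the PCIe IRQ register, bit 4
 * (KVASER_PCIEFD_IRQ_SRB) signals the shared receive buffer, while bits
 * 0-3 map to the individual KCAN controllers; all of them are covered by
 * KVASER_PCIEFD_IRQ_ALL_MSK (0x1f).
 */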
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;
	struct kvaser_pciefd_can *can;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)