// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.42)
 *  - PEAK linux canfd driver
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)

#define KVASER_PCIEFD_VENDOR 0x1a07

/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011

/* SmartFusion2 based devices */
#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016

/* Xilinx based devices */
#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019

/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)

/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)

/* Xilinx SerDes LSB address translation mask */
#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
/* Shared receive buffer FIFO registers */
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_CMD_REG 0x0
#define KVASER_PCIEFD_SRB_IEN_REG 0x04
#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
#define KVASER_PCIEFD_SRB_STAT_REG 0x10
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
#define KVASER_PCIEFD_SRB_CTRL_REG 0x18

/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)
/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)

/* Got DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)

/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)

/* DMA enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* KCAN CTRL packet types */
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5

/* Command sequence number */
#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)

/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)

/* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)

/* Current status packet sequence number */
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
	(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
	 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)

/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Non-ISO CAN FD mode enable */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)

/* PWM Control fields */
#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)

/* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9

/* Common KCAN packet definitions, second word */
#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)

/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)

/* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_NACK BIT(11)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_FLU BIT(8)

/* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)

/* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
/* Macros for calculating addresses of registers */
#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
	((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
#define KVASER_PCIEFD_SRB_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
	(KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
	(KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))
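
/* The register blocks sit at different BAR0 offsets depending on the
 * FPGA family (Altera, SmartFusion2 or Xilinx), so each supported
 * device carries a struct kvaser_pciefd_address_offset in its
 * driver_data and the macros above resolve block addresses through it.
 * The per-channel KCAN blocks are equidistant, which is why CHX_ADDR()
 * can scale the CH0..CH1 span by the channel index.
 */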
struct kvaser_pciefd;

static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index);
struct kvaser_pciefd_address_offset {
	u32 serdes;
	u32 pci_ien;
	u32 pci_irq;
	u32 sysid;
	u32 loopback;
	u32 kcan_srb_fifo;
	u32 kcan_srb;
	u32 kcan_ch0;
	u32 kcan_ch1;
};

struct kvaser_pciefd_dev_ops {
	void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
};

struct kvaser_pciefd_irq_mask {
	u32 kcan_rx0;
	u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	u32 all;
};

struct kvaser_pciefd_driver_data {
	const struct kvaser_pciefd_address_offset *address_offset;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	const struct kvaser_pciefd_dev_ops *ops;
};
static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
	.kcan_srb_fifo = 0x1f200,
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
	.loopback = 0x103000,
	.kcan_srb_fifo = 0x120000,
	.kcan_srb = 0x121000,
	.kcan_ch0 = 0x140000,
	.kcan_ch1 = 0x142000,
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
	.loopback = 0x103000,
	.kcan_srb_fifo = 0x120000,
	.kcan_srb = 0x121000,
	.kcan_ch0 = 0x140000,
	.kcan_ch1 = 0x142000,
};
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
	.all = GENMASK(4, 0),
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
	.all = GENMASK(19, 16) | BIT(4),
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
	.all = GENMASK(23, 16) | BIT(4),
};
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
	.address_offset = &kvaser_pciefd_altera_address_offset,
	.irq_mask = &kvaser_pciefd_altera_irq_mask,
	.ops = &kvaser_pciefd_altera_dev_ops,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
	.address_offset = &kvaser_pciefd_sf2_address_offset,
	.irq_mask = &kvaser_pciefd_sf2_irq_mask,
	.ops = &kvaser_pciefd_sf2_dev_ops,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
	.address_offset = &kvaser_pciefd_xilinx_address_offset,
	.irq_mask = &kvaser_pciefd_xilinx_irq_mask,
	.ops = &kvaser_pciefd_xilinx_dev_ops,
};
struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	const struct kvaser_pciefd_driver_data *driver_data;
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

/* Limits follow the BTRN/BTRD register field widths (value - 1 is stored) */
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};
static struct pci_device_id kvaser_pciefd_id_table[] = {
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
	},
	{
		0,
	},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
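
/* Issue a command to the KCAN controller. Each command carries an
 * incrementing sequence number that the hardware echoes back in
 * KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, letting
 * kvaser_pciefd_handle_status_packet() match a status packet to the
 * command that produced it.
 */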
static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
	iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
		  FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
		  can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}

static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}
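
/* Hardware timestamps arrive in CAN controller clock ticks.
 * kvaser_pciefd_setup_board() sets freq_to_ticks_div = freq / 1 MHz,
 * so ticks * 1000 / freq_to_ticks_div converts a tick count to
 * nanoseconds before it is stored in the skb.
 */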
static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
						   struct sk_buff *skb, u64 timestamp)
{
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}
static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
	else
		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		kvaser_pciefd_abort_flush_reset(can);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);
	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
	/* Set duty cycle to zero */
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
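
/* PWM rate/duty derivation: the PWM counter runs at bus_freq, so
 * top = bus_freq / (2 * 500000) - 1 yields the 500 kHz rate noted
 * below, and trigger = (100 * top - 95 * (top + 1) + 50) / 100 picks
 * the compare value closest to a 95 percent duty cycle (the +50 is
 * round-to-nearest for the integer division).
 */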
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);
	/* Set frequency to 500 KHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_open(struct net_device *netdev)
{
	int ret;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	ret = open_candev(netdev);
	if (ret)
		return ret;

	ret = kvaser_pciefd_bus_on(can);
	if (ret) {
		close_candev(netdev);
		return ret;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);

	return ret;
}
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
					   can_fd_len2dlc(cf->len));
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	} else {
		p->header[1] |=
			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
	}

	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}
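
/* Tx path FIFO protocol: the two header words always go to
 * KVASER_PCIEFD_KCAN_FIFO_REG; for frames with payload, every data
 * word except the last is streamed to the same offset and the final
 * word is written to KVASER_PCIEFD_KCAN_FIFO_LAST_REG, which marks
 * end-of-packet. Zero-length frames write a dummy 0 to the LAST
 * register for the same reason.
 */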
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nr_words;
	u8 count;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nr_words) {
		u32 data_last = ((u32 *)packet.data)[nr_words - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
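
/* Bus parameters may only be changed while the controller is in reset
 * mode; the KVASER_PCIEFD_KCAN_IRQ_BPP interrupt fires if this rule is
 * violated. Nominal timing goes to BTRN, CAN FD data phase timing to
 * BTRD, with all fields stored as value - 1.
 */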
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq_flags);

	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}
static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;

	return 0;
}
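
/* Once KVASER_PCIEFD_MAX_ERR_REP error packets have been seen, error
 * packet generation is turned off to throttle the interrupt load; this
 * timer, re-armed at KVASER_PCIEFD_BEC_POLL_FREQ (every 200 ms), turns
 * it back on and requests a status packet so the error counters keep
 * getting refreshed.
 */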
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
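
/* One candev is allocated per channel. The echo skb count is capped at
 * min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1), i.e.
 * always at least one packet below the hardware Tx FIFO depth reported
 * by the TX_NR_PACKETS register.
 */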
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_nr_packets_max;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		tx_nr_packets_max =
			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);

		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO |
					      CAN_CTRLMODE_CC_LEN8_DLC;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;
		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int ret = register_candev(pcie->can[i]->can.dev);

		if (ret) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return ret;
		}
	}

	return 0;
}
static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 word1, word2;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
		word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
		word2 = upper_32_bits(addr);
	} else {
		word1 = addr;
		word2 = 0;
	}
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(word1, serdes_base);
	iowrite32(word2, serdes_base + 0x4);
}

static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
	u32 msb = 0x0;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		msb = upper_32_bits(addr);

	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
	iowrite32(lsb, serdes_base);
	iowrite32(msb, serdes_base + 0x4);
}

static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
	u32 msb = 0x0;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		msb = upper_32_bits(addr);

	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(msb, serdes_base);
	iowrite32(lsb, serdes_base + 0x4);
}
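
/* Rx uses KVASER_PCIEFD_DMA_COUNT (2) coherent buffers of
 * KVASER_PCIEFD_DMA_SIZE bytes in a ping-pong scheme: the hardware
 * raises DPD0/DPD1 when a buffer holds packets, the driver drains it
 * in kvaser_pciefd_read_buffer(), and hands it back with the
 * RDB0/RDB1 commands from the interrupt handler.
 */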
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));

	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);
		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}
		pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	/* Empty Rx FIFO */
	srb_packet_count =
		FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
			  ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
				   KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 version, srb_status, build;

	version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
	pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
				FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));

	build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;
	/* Turn off all loopback functionality */
	iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));

	return 0;
}
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
	u8 dlc;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		cf->len = can_fd_dlc2len(dlc);
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
	}

	cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);
		priv->dev->stats.rx_bytes += cf->len;
	}
	priv->dev->stats.rx_packets++;
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

	return netif_rx(skb);
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
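
/* Map a status packet onto a CAN state. The counter thresholds follow
 * the CAN specification: error warning at 96, error passive at 128 and
 * bus off at 255; the EWLR/EPLR flags reported by the hardware are
 * honoured as well. tx_state/rx_state tell can_change_state() which
 * direction holds the worse counter.
 */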
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		ndev->stats.tx_errors++;
	else
		ndev->stats.rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);

	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		kvaser_pciefd_abort_flush_reset(can);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in FIFO */
		u8 count;

		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
		if (!count)
			iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
					     KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);
	can->can.dev->stats.tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		netif_rx(skb);
	} else {
		can->can.dev->stats.rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}
static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}
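
/* On-buffer packet layout, as consumed below: one little-endian size
 * word (0 terminates the buffer), two header words, a 64-bit
 * timestamp, then for data packets the payload rounded up to whole
 * 32-bit words. *start_pos is advanced past the packet and
 * cross-checked against the size word to catch corruption.
 */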
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		return -EIO;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if (unlikely((*start_pos + size) != pos))
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}
static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
		kvaser_pciefd_read_buffer(pcie, 0);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
		kvaser_pciefd_read_buffer(pcie, 1);

	if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	return irq;
}

static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}
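
/* Top level interrupt dispatch: one PCI interrupt is shared by the
 * receive path and all channels. The per-device irq_mask tells which
 * bits belong to the shared receive buffer (kcan_rx0) and to each
 * channel's Tx handling (kcan_tx[i]); Rx DMA buffers that signalled
 * "packet done" are released back to the hardware on the way out.
 */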
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	u32 srb_irq = 0;
	u32 srb_release = 0;
	int i;

	if (!(pci_irq & irq_mask->all))
		return IRQ_NONE;

	if (pci_irq & irq_mask->kcan_rx0)
		srb_irq = kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (pci_irq & irq_mask->kcan_tx[i])
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;

	if (srb_release)
		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	return IRQ_HANDLED;
}
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int ret;
	struct kvaser_pciefd *pcie;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	void __iomem *irq_en_base;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;
	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
	irq_mask = pcie->driver_data->irq_mask;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (ret)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		ret = -ENOMEM;
		goto err_release_regions;
	}

	ret = kvaser_pciefd_setup_board(pcie);
	if (ret)
		goto err_pci_iounmap;

	ret = kvaser_pciefd_setup_dma(pcie);
	if (ret)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	ret = kvaser_pciefd_setup_can_ctrls(pcie);
	if (ret)
		goto err_teardown_can_ctrls;

	ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
		goto err_teardown_can_ctrls;
	}

	ret = pci_irq_vector(pcie->pci, 0);
	if (ret < 0)
		goto err_pci_free_irq_vectors;

	pcie->pci->irq = ret;

	ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (ret) {
		dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
		goto err_pci_free_irq_vectors;
	}
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);

	/* Enable PCI interrupts */
	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
	iowrite32(irq_mask->all, irq_en_base);
	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	ret = kvaser_pciefd_reg_candev(pcie);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, irq_en_base);
	free_irq(pcie->pci->irq, pcie);

err_pci_free_irq_vectors:
	pci_free_irq_vectors(pcie->pci);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return ret;
}
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable interrupts */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

	free_irq(pcie->pci->irq, pcie);
	pci_free_irq_vectors(pcie->pci);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)