/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nic"
#define DRV_VERSION	"1.0"
struct nicpf {
	struct pci_dev		*pdev;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;	/* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;	/* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)		(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8			vf_lmac_map[MAX_LMAC];
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
	u8			link[MAX_LMAC];
	u8			duplex[MAX_LMAC];
	u32			speed[MAX_LMAC];
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rss_ind_tbl_size;
	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	bool			msix_enabled;
	u8			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
};
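/* The VF to (BGX, LMAC) mapping above packs both indices into one byte:
 * e.g. NIC_SET_VF_LMAC_MAP(1, 2) == 0x12, from which
 * NIC_GET_BGX_FROM_VF_LMAC_MAP() recovers 1 and
 * NIC_GET_LMAC_FROM_VF_LMAC_MAP() recovers 2.
 */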
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses.  So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant and only add overhead.
 */
/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}
/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
}

static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}
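/* Each VF owns its own window of mailbox registers at a fixed stride
 * (NIC_VF_NUM_SHIFT) from the base address; nic_handle_mbx_intr() below
 * reads NIC_PF_VF_MAILBOX_SIZE 64-bit words starting at this address.
 */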
/* Send a mailbox message to VF
 * @vf: VF to which this message is to be sent
 * @mbx: Message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions for the reason writeq_relaxed() is used here
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}
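/* In both cases the mailbox word that raises the VF interrupt is written
 * last, so the complete two-word message is in place before the VF starts
 * reading it.
 */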
/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const u8 *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}
/* ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
/* NACKs VF's mailbox message when the PF is not able to
 * complete the requested action
 * @vf: VF to which NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
/* Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}
/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}
/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}

	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	u64 lmac_cfg;

	/* There is an issue in HW wherein, while sending GSO sized
	 * pkts as part of TSO, if pkt len falls below this size
	 * NIC will zero PAD packet and also updates IP total length.
	 * Hence set this value to less than min pkt size of MAC+IP+TCP
	 * headers, BGX will do the padding to transmit 64 byte pkt.
	 */
	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
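/* The pad threshold is programmed in 4-byte units into the 4-bit field at
 * bits <5:2> of the LMAC config register: a size of 60 bytes, for example,
 * stores 60 / 4 = 15.
 */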
/* Check the number of LMACs present and set up the VF::LMAC mapping.
 * The mapping is used later while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);
	}
}
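/* Credit example: with a 4-LMAC BGX each LMAC gets a 12KB share of the
 * 48KB Tx buffer, so (12288 - NIC_HW_MAX_FRS) / 16 credit units of
 * 16 bytes each are programmed into the credit field starting at bit 12.
 */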
static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 cqm_cfg;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* Disable TNS mode on both interfaces */
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}
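/* The RX_ETYPE entry programmed above matches the 802.1Q ethertype
 * (ETH_P_8021Q, 0x8100) and selects the VLAN-strip algorithm for packets
 * carrying that tag.
 */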
/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits of DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
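/* cpi_base and rssi_base for this VF are cached so that a later RSS
 * configuration request (see nic_config_rss() below) can locate the same
 * CPI/RSSI table entries when it updates the indirection table and the
 * hash-bit count.
 */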
/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
/* Receive side scaling configuration:
 * - RSS index
 * - indirection table, i.e. hash::RQ mapping
 * - number of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	if (!sq->sqs_mode) {
		tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	} else {
		for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
			if (nic->vf_sqs[pqs_vnic][svf] == vnic)
				break;
		}
		tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
		tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
		tl4 += (svf * NIC_TL4_PER_LMAC);
		tl4 += (bgx * NIC_TL4_PER_BGX);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
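/* rr_quantum note: the DWRR quantum programmed into the TL4/TL3/TL2 levels
 * is the largest frame (NIC_HW_MAX_FRS plus 24 bytes of FCS, IPG and
 * preamble) expressed in 4-byte units, so one quantum covers a full-sized
 * packet.
 */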
/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}
/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}
/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (!nic->sqs_used[sqs])
			nic->sqs_used[sqs] = true;
		else
			continue;
		return sqs + nic->num_vf_en;
	}
	return -1;
}
/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= MAX_LMAC)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if it's a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en)
			break;
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = false;
}
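/* mbx_lock[vf] is held for the duration of the handler so that
 * nic_poll_for_link() does not overwrite the VF's mailbox with a link
 * status update while a request/response exchange is still in progress.
 */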
static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	u64 intr;
	u8  vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));

			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}
static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 0);

	return IRQ_HANDLED;
}

static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 1);

	return IRQ_HANDLED;
}
static int nic_enable_msix(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = NIC_PF_MSIX_VECTORS;

	for (i = 0; i < nic->num_vec; i++)
		nic->msix_entries[i].entry = i;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed\n",
			nic->num_vec);
		return ret;
	}

	nic->msix_enabled = 1;
	return 0;
}
static void nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
	}
}
static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}
}
static int nic_register_interrupts(struct nicpf *nic)
{
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret)
		return ret;

	/* Register mailbox interrupt handlers */
	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
			  nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;

	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
			  nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	return ret;
}
static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Check if it's a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}
/* Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change.  Valid only if an internal L2 switch
 * is not present; otherwise the VF link is always treated as up.
 */
static void nic_poll_for_link(struct work_struct *work)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	u8 vf, bgx, lmac;

	nic = container_of(work, struct nicpf, dwork.work);

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_enabled[vf])
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver nic_driver = {
	.name		= DRV_NAME,
	.id_table	= nic_id_table,
	.probe		= nic_probe,
	.remove		= nic_remove,
};
static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);