/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

enum qed_rss_caps {
	QED_RSS_IPV4		= 0x1,
	QED_RSS_IPV6		= 0x2,
	QED_RSS_IPV4_TCP	= 0x4,
	QED_RSS_IPV6_TCP	= 0x8,
	QED_RSS_IPV4_UDP	= 0x10,
	QED_RSS_IPV6_UDP	= 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */

struct qed_rss_params {
	u8	update_rss_config;
	u8	rss_enable;
	u8	rss_eng_id;
	u8	update_rss_capabilities;
	u8	update_rss_ind_table;
	u8	update_rss_key;
	u8	rss_caps;
	u8	rss_table_size_log;
	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32	rss_key[QED_RSS_KEY_SIZE];
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode		opcode;
	enum qed_filter_ucast_type	type;
	u8	is_rx_filter;
	u8	is_tx_filter;
	u8	vport_to_add_to;
	u8	vport_to_remove_from;
	unsigned char	mac[ETH_ALEN];
	u8	assert_on_error;
	u16	vlan;
	u32	vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode	opcode;
	u8	vport_to_add_to;
	u8	vport_to_remove_from;
	u8	num_mc_addrs;
#define QED_MAX_MC_ADDRS	64
	unsigned char	mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

struct qed_filter_accept_flags {
	u8	update_rx_mode_config;
	u8	update_tx_mode_config;
	u8	rx_accept_filter;
	u8	tx_accept_filter;
#define QED_ACCEPT_NONE			0x01
#define QED_ACCEPT_UCAST_MATCHED	0x02
#define QED_ACCEPT_UCAST_UNMATCHED	0x04
#define QED_ACCEPT_MCAST_MATCHED	0x08
#define QED_ACCEPT_MCAST_UNMATCHED	0x10
#define QED_ACCEPT_BCAST		0x20
};

struct qed_sp_vport_update_params {
	u16			opaque_fid;
	u8			vport_id;
	u8			update_vport_active_rx_flg;
	u8			vport_active_rx_flg;
	u8			update_vport_active_tx_flg;
	u8			vport_active_tx_flg;
	u8			update_approx_mcast_flg;
	u8			update_accept_any_vlan_flg;
	u8			accept_any_vlan;
	unsigned long		bins[8];
	struct qed_rss_params	*rss_params;
	struct qed_filter_accept_flags accept_flags;
};

struct qed_sp_vport_start_params {
	enum qed_tpa_mode	tpa_mode;
	bool			remove_inner_vlan;
	bool			drop_ttl0;
	u8			max_buffers_per_cqe;
	u32			concrete_fid;
	u16			opaque_fid;
	u8			vport_id;
	u16			mtu;
};

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

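/* Slowpath configuration below follows a common "ramrod" pattern: translate
 * the caller's relative IDs into absolute FW IDs (qed_fw_vport() /
 * qed_fw_l2_queue()), acquire a slowpath-queue entry with
 * qed_sp_init_request(), fill that entry's ramrod union with the request
 * parameters and post it to the device via qed_spq_post().
 */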
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod		= &p_ent->ramrod.vport_start;
	p_ramrod->vport_id	= abs_vport_id;

	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0,
	       sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

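/* RSS configuration: the 128-entry indirection table maps an RSS hash
 * (constrained by the capability bits programmed below) to an absolute L2
 * queue. A caller would fill struct qed_rss_params roughly as follows
 * (illustrative sketch only; 'num_queues' is a hypothetical count of the
 * caller's Rx queues):
 *
 *	struct qed_rss_params rss = { 0 };
 *	int i;
 *
 *	rss.update_rss_config = 1;
 *	rss.rss_enable = 1;
 *	rss.rss_table_size_log = 7;	// 2^7 == QED_RSS_IND_TABLE_SIZE
 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = i % num_queues;
 */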
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}

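/* Translate the QED_ACCEPT_* flags into the FW's rx/tx mode state bits.
 * Note the drop/accept duality: UCAST_DROP_ALL is set only when neither
 * matched nor unmatched unicast is accepted, while MCAST_ACCEPT_ALL
 * requires both matched and unmatched multicast to be accepted.
 */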
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

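/* Copy the caller's approximate-multicast bin vector into the ramrod,
 * one 32-bit register's worth of bins at a time, converting each to
 * little-endian. 'bins' is a bitmap with one bit per multicast hash bin
 * (see qed_mcast_bin_from_mac() further below).
 */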
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}

static int
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
		    struct qed_sp_vport_update_params *p_params,
		    enum spq_mode comp_mode,
		    struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	p_cmn->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;
	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
			     u16 opaque_fid,
			     u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

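/* Apply a single accept-flags configuration to every hw-function of the
 * device; on multi-engine (CMT) parts each engine holds its own instance
 * of the vport, so the vport-update ramrod is posted once per hwfn.
 */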
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc != 0) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

static int qed_sp_release_queue_cid(
	struct qed_hwfn *p_hwfn,
	struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}

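/* Rx queue start: the ramrod hands the FW the host addresses it needs for
 * the queue - the Rx buffer-descriptor chain base and the PBL of the
 * completion-queue-element ring - along with the status block and
 * statistics counter to use. The CID data cached in p_hwfn->p_rx_cids is
 * what lets the later stop flow address the same queue.
 */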
static int
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *params,
			    u8 stats_id,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid		= &p_hwfn->p_rx_cids[params->queue_id];
	p_rx_cid->cid		= cid;
	p_rx_cid->opaque_fid	= opaque_fid;
	p_rx_cid->vport_id	= params->vport_id;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
	if (rc != 0)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, params->queue_id, params->vport_id,
		   params->sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id			= cpu_to_le16(params->sb);
	p_ramrod->sb_index		= params->sb_idx;
	p_ramrod->vport_id		= abs_vport_id;
	p_ramrod->stats_counter_id	= stats_id;
	p_ramrod->rx_queue_id		= cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg	= 0;
	p_ramrod->complete_event_flg	= 1;

	p_ramrod->bd_max_bytes	= cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages	= cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
				 GTT_BAR0_MAP_REG_MSDM_RAM +
				 MSTORM_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size);

	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 rx_queue_id,
				    bool eq_completion_only,
				    bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}

static int
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *p_params,
			    u8 stats_id,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_tx_cid;
	u8 abs_vport_id;
	int rc = -EINVAL;
	u16 pq_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid		= cid;
	p_tx_cid->opaque_fid	= opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod		= &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id	= abs_vport_id;

	p_ramrod->sb_id			= cpu_to_le16(p_params->sb);
	p_ramrod->sb_index		= p_params->sb_idx;
	p_ramrod->stats_counter_id	= stats_id;

	p_ramrod->pbl_size		= cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn,
			      PROTOCOLID_ETH,
			      p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				     qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}

static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

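/* The FW keeps a MAC address as three 16-bit words with the two bytes of
 * each word swapped relative to canonical order. For example, the address
 * aa:bb:cc:dd:ee:ff is stored so the byte pairs become {bb,aa}, {dd,cc}
 * and {ff,ee} in fw_msb, fw_mid and fw_lsb respectively.
 */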
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter	= &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type		= p_first_filter->type;
		p_second_filter->mac_msb	= p_first_filter->mac_msb;
		p_second_filter->mac_mid	= p_first_filter->mac_mid;
		p_second_filter->mac_lsb	= p_first_filter->mac_lsb;
		p_second_filter->vlan_id	= p_first_filter->vlan_id;
		p_second_filter->vni		= p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action	= ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
				   u16 opaque_fid,
				   struct qed_filter_ucast *p_filter_cmd,
				   enum spq_mode comp_mode,
				   struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc != 0) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "Unicast filter ADD command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length,
			   u32 crc32_seed,
			   u8 complement)
{
	u32 byte = 0;
	u32 bit = 0;
	u8 msb = 0;
	u8 current_byte = 0;
	u32 crc32_result = crc32_seed;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
				u8 *mac,
				u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

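/* Hash a multicast MAC into one of 256 approximate-filter bins: CRC32C
 * over the six address bytes (zero-padded to eight, since qed_calc_crc32c()
 * requires a length that is a multiple of 8), keeping the low 8 bits.
 */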
static u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

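/* Approximate multicast filtering: an ADD is a full set-operation that
 * rebuilds the bin bitmap from the supplied address list, replacing any
 * previous configuration, while a REMOVE programs an empty bitmap. Since
 * several addresses can hash into the same bin, false positives can pass
 * the hardware filter.
 */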
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
		if (rc)
			return rc;
	} else {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
		if (rc)
			return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianity */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)bins;
			struct vport_update_ramrod_mcast *approx_mcast;

			approx_mcast = &p_ramrod->approx_mcast;
			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_filter_mcast_cmd(struct qed_dev *cdev,
		     struct qed_filter_mcast *p_filter_cmd,
		     enum spq_mode comp_mode,
		     struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_PSDM_RAM +
		  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_pstorm_per_queue_stat);
}

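/* Each storm (P/T/U/M) exports its counters at a fixed offset inside its
 * RAM; the *_addrlen() helpers compute the BAR0 address and length of that
 * region, and the readers below copy it out with qed_memcpy_from() and
 * accumulate into struct qed_eth_stats (note the '+=' - the callers sum
 * the contributions of all hw-functions).
 */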
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats,
			pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes +=
		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts +=
		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts +=
		HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len)
{
	*p_addr = BAR0_MAP_REG_TSDM_RAM +
		  TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
	*p_len = sizeof(struct tstorm_per_port_stat);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	u32 tstats_addr = 0, tstats_len = 0;
	struct tstorm_per_port_stat tstats;

	__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats,
			tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_USDM_RAM +
		  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_ustorm_per_queue_stat);
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats,
			ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_MSDM_RAM +
		  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_mstorm_per_queue_stat);
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats,
			mstats_addr, mstats_len);

	p_stats->no_buff_discards +=
		HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard +=
		HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num +=
		HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets		+= port_stats.pmm.r64;
	p_stats->rx_127_byte_packets		+= port_stats.pmm.r127;
	p_stats->rx_255_byte_packets		+= port_stats.pmm.r255;
	p_stats->rx_511_byte_packets		+= port_stats.pmm.r511;
	p_stats->rx_1023_byte_packets		+= port_stats.pmm.r1023;
	p_stats->rx_1518_byte_packets		+= port_stats.pmm.r1518;
	p_stats->rx_1522_byte_packets		+= port_stats.pmm.r1522;
	p_stats->rx_2047_byte_packets		+= port_stats.pmm.r2047;
	p_stats->rx_4095_byte_packets		+= port_stats.pmm.r4095;
	p_stats->rx_9216_byte_packets		+= port_stats.pmm.r9216;
	p_stats->rx_16383_byte_packets		+= port_stats.pmm.r16383;
	p_stats->rx_crc_errors			+= port_stats.pmm.rfcs;
	p_stats->rx_mac_crtl_frames		+= port_stats.pmm.rxcf;
	p_stats->rx_pause_frames		+= port_stats.pmm.rxpf;
	p_stats->rx_pfc_frames			+= port_stats.pmm.rxpp;
	p_stats->rx_align_errors		+= port_stats.pmm.raln;
	p_stats->rx_carrier_errors		+= port_stats.pmm.rfcr;
	p_stats->rx_oversize_packets		+= port_stats.pmm.rovr;
	p_stats->rx_jabbers			+= port_stats.pmm.rjbr;
	p_stats->rx_undersize_packets		+= port_stats.pmm.rund;
	p_stats->rx_fragments			+= port_stats.pmm.rfrg;
	p_stats->tx_64_byte_packets		+= port_stats.pmm.t64;
	p_stats->tx_65_to_127_byte_packets	+= port_stats.pmm.t127;
	p_stats->tx_128_to_255_byte_packets	+= port_stats.pmm.t255;
	p_stats->tx_256_to_511_byte_packets	+= port_stats.pmm.t511;
	p_stats->tx_512_to_1023_byte_packets	+= port_stats.pmm.t1023;
	p_stats->tx_1024_to_1518_byte_packets	+= port_stats.pmm.t1518;
	p_stats->tx_1519_to_2047_byte_packets	+= port_stats.pmm.t2047;
	p_stats->tx_2048_to_4095_byte_packets	+= port_stats.pmm.t4095;
	p_stats->tx_4096_to_9216_byte_packets	+= port_stats.pmm.t9216;
	p_stats->tx_9217_to_16383_byte_packets	+= port_stats.pmm.t16383;
	p_stats->tx_pause_frames		+= port_stats.pmm.txpf;
	p_stats->tx_pfc_frames			+= port_stats.pmm.txpp;
	p_stats->tx_lpi_entry_count		+= port_stats.pmm.tlpiec;
	p_stats->tx_total_collisions		+= port_stats.pmm.tncl;
	p_stats->rx_mac_bytes			+= port_stats.pmm.rbyte;
	p_stats->rx_mac_uc_packets		+= port_stats.pmm.rxuca;
	p_stats->rx_mac_mc_packets		+= port_stats.pmm.rxmca;
	p_stats->rx_mac_bc_packets		+= port_stats.pmm.rxbca;
	p_stats->rx_mac_frames_ok		+= port_stats.pmm.rxpok;
	p_stats->tx_mac_bytes			+= port_stats.pmm.tbyte;
	p_stats->tx_mac_uc_packets		+= port_stats.pmm.txuca;
	p_stats->tx_mac_mc_packets		+= port_stats.pmm.txmca;
	p_stats->tx_mac_bc_packets		+= port_stats.pmm.txbca;
	p_stats->tx_mac_ctrl_frames		+= port_stats.pmm.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates	+= port_stats.brb.brb_truncate[j];
		p_stats->brb_discards	+= port_stats.brb.brb_discard[j];
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		/* The main vport index is relative first */
		if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
			DP_ERR(p_hwfn, "No vport available!\n");
			continue;
		}

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);

		qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev,
			 struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
		u32 addr = 0, len = 0;

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i)
			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
						     QED_PF_L2_QUE);
		if (cdev->int_params.fp_msix_cnt)
			info->num_queues = min_t(u8, info->num_queues,
						 cdev->int_params.fp_msix_cnt);
	} else {
		info->num_queues = cdev->num_hwfns;
	}

	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
	ether_addr_copy(info->port_mac,
			cdev->hwfns[0].hw_info.hw_mac_addr);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops,
				 void *cookie)
{
	cdev->protocol_ops.eth	= ops;
	cdev->ops_cookie	= cookie;
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
							QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
			  u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss =
			&params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
				max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now, update the RSS configuration for actual configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = QED_RSS_IPV4 |
					 QED_RSS_IPV6 |
					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
	}
	sp_params.rss_params = &sp_rss_params;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 void __iomem **pp_prod)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index	= params->rss_id % cdev->num_hwfns;
	p_hwfn		= &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only,
				      false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index	= p_params->rss_id % cdev->num_hwfns;
	p_hwfn		= &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index	= params->rss_id % cdev->num_hwfns;
	p_hwfn		= &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

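/* Rx-mode shorthand used by the protocol driver: the default accepts only
 * matched unicast/multicast plus broadcast, MULTI_PROMISC additionally
 * accepts unmatched multicast, and PROMISC accepts unmatched unicast and
 * multicast as well.
 */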
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config	= 1;
	accept_flags.update_tx_mode_config	= 1;
	accept_flags.rx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
						  QED_ACCEPT_MCAST_MATCHED |
						  QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
						  QED_ACCEPT_MCAST_MATCHED |
						  QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast,
				    QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}

*dev
,
2023 struct eth_slow_path_rx_cqe
*cqe
)
2025 return qed_eth_cqe_completion(&dev
->hwfns
[rss_id
% dev
->num_hwfns
],
static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
};

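/* Entry point for protocol drivers. A consumer (e.g. the qede Ethernet
 * driver) would do roughly the following (illustrative sketch only):
 *
 *	const struct qed_eth_ops *ops;
 *
 *	ops = qed_get_eth_ops(QED_ETH_INTERFACE_VERSION);
 *	if (!ops)
 *		return -EINVAL;
 *
 * and then drive the device through ops->vport_start(), ops->q_rx_start()
 * and friends.
 */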
const struct qed_eth_ops *qed_get_eth_ops(u32 version)
{
	if (version != QED_ETH_INTERFACE_VERSION) {
		pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
			  version, QED_ETH_INTERFACE_VERSION);
		return NULL;
	}

	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);